/*
 * linux/drivers/thermal/cpu_cooling.c
 *
 * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
 * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
 *
 * Copyright (C) 2014 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/module.h>
#include <linux/thermal.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>

#include <trace/events/thermal.h>

/*
 * Cooling state <-> CPUFreq frequency
 *
 * Cooling states are translated to frequencies throughout this driver and this
 * is the relation between them.
 *
 * Highest cooling state corresponds to lowest possible frequency.
 *
 * i.e.
 *	level 0 --> 1st Max Freq
 *	level 1 --> 2nd Max Freq
 *	...
 */

/**
 * struct power_table - frequency to power conversion
 * @frequency:	frequency in KHz
 * @power:	power in mW
 *
 * This structure is built when the cooling device registers and helps
 * in translating frequency to power and vice versa.
 */
struct power_table {
	u32 frequency;
	u32 power;
};

/**
 * struct cpufreq_cooling_device - data for cooling device with cpufreq
 * @id: unique integer value corresponding to each cpufreq_cooling_device
 *	registered.
 * @cool_dev: thermal_cooling_device pointer to keep track of the
 *	registered cooling device.
 * @cpufreq_state: integer value representing the current state of cpufreq
 *	cooling	devices.
 * @clipped_freq: integer value representing the absolute value of the clipped
 *	frequency.
 * @max_level: maximum cooling level. One less than total number of valid
 *	cpufreq frequencies.
 * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
 * @node: list_head to link all cpufreq_cooling_device together.
 * @last_load: load measured by the latest call to cpufreq_get_requested_power()
 * @time_in_idle: previous reading of the absolute time that this cpu was idle
 * @time_in_idle_timestamp: wall time of the last invocation of
 *	get_cpu_idle_time_us()
 * @dyn_power_table: array of struct power_table for frequency to power
 *	conversion, sorted in ascending order.
 * @dyn_power_table_entries: number of entries in the @dyn_power_table array
 * @cpu_dev: the first cpu_device from @allowed_cpus that has OPPs registered
 * @plat_get_static_power: callback to calculate the static power
 *
 * This structure is required for keeping information of each registered
 * cpufreq_cooling_device.
 */
struct cpufreq_cooling_device {
	int id;
	struct thermal_cooling_device *cool_dev;
	unsigned int cpufreq_state;
	unsigned int clipped_freq;
	unsigned int max_level;
	unsigned int *freq_table;	/* In descending order */
	struct cpumask allowed_cpus;
	struct list_head node;
	u32 last_load;
	u64 *time_in_idle;
	u64 *time_in_idle_timestamp;
	struct power_table *dyn_power_table;
	int dyn_power_table_entries;
	struct device *cpu_dev;
	get_static_t plat_get_static_power;
};
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);

static unsigned int cpufreq_dev_count;

static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_dev_list);

/**
 * get_idr - function to get a unique id.
 * @idr: struct idr * handle used to create an id.
 * @id: int * value generated by this function.
 *
 * This function will populate @id with a unique
 * id, using the idr API.
 *
 * Return: 0 on success, an error code on failure.
 */
static int get_idr(struct idr *idr, int *id)
{
	int ret;

	mutex_lock(&cooling_cpufreq_lock);
	ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
	mutex_unlock(&cooling_cpufreq_lock);
	if (unlikely(ret < 0))
		return ret;
	*id = ret;

	return 0;
}

/**
 * release_idr - function to free the unique id.
 * @idr: struct idr * handle used for creating the id.
 * @id: int value representing the unique id.
 */
static void release_idr(struct idr *idr, int id)
{
	mutex_lock(&cooling_cpufreq_lock);
	idr_remove(idr, id);
	mutex_unlock(&cooling_cpufreq_lock);
}

/* Below code defines functions to be used for cpufreq as cooling device */

/**
 * get_level: Find the level for a particular frequency
 * @cpufreq_dev: cpufreq_dev for which the property is required
 * @freq: Frequency
 *
 * Return: level on success, THERMAL_CSTATE_INVALID on error.
 */
static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_dev,
			       unsigned int freq)
{
	unsigned long level;

	for (level = 0; level <= cpufreq_dev->max_level; level++) {
		if (freq == cpufreq_dev->freq_table[level])
			return level;

		if (freq > cpufreq_dev->freq_table[level])
			break;
	}

	return THERMAL_CSTATE_INVALID;
}

/**
 * cpufreq_cooling_get_level - for a given cpu, return the cooling level.
 * @cpu: cpu for which the level is required
 * @freq: the frequency of interest
 *
 * This function will match the cooling level corresponding to the
 * requested @freq and return it.
 *
 * Return: The matched cooling level on success or THERMAL_CSTATE_INVALID
 * otherwise.
 */
unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
{
	struct cpufreq_cooling_device *cpufreq_dev;

	mutex_lock(&cooling_list_lock);
	list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
		if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
			unsigned long level = get_level(cpufreq_dev, freq);

			mutex_unlock(&cooling_list_lock);
			return level;
		}
	}
	mutex_unlock(&cooling_list_lock);

	pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
	return THERMAL_CSTATE_INVALID;
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);

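/*
 * Illustrative note (hypothetical numbers, not taken from any real platform):
 * with a descending freq_table of {1800000, 1400000, 1000000} KHz,
 * cpufreq_cooling_get_level(cpu, 1400000) returns level 1, while a frequency
 * that is not in the table yields THERMAL_CSTATE_INVALID.
 */
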
/**
 * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
 * @nb:	struct notifier_block * with callback info.
 * @event: value showing cpufreq event for which this function invoked.
 * @data: callback-specific data
 *
 * Callback to hijack the notification on cpufreq policy transition.
 * Every time there is a change in policy, we will intercept and
 * update the cpufreq policy with thermal constraints.
 *
 * Return: 0 (success)
 */
static int cpufreq_thermal_notifier(struct notifier_block *nb,
				    unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned long clipped_freq;
	struct cpufreq_cooling_device *cpufreq_dev;

	if (event != CPUFREQ_ADJUST)
		return NOTIFY_DONE;

	mutex_lock(&cooling_list_lock);
	list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
		if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
			continue;

		/*
		 * policy->max is the maximum allowed frequency defined by user
		 * and clipped_freq is the maximum that thermal constraints
		 * allow.
		 *
		 * If clipped_freq is lower than policy->max, then we need to
		 * readjust policy->max.
		 *
		 * But, if clipped_freq is greater than policy->max, we don't
		 * need to do anything.
		 */
		clipped_freq = cpufreq_dev->clipped_freq;

		if (policy->max > clipped_freq)
			cpufreq_verify_within_limits(policy, 0, clipped_freq);
		break;
	}
	mutex_unlock(&cooling_list_lock);

	return NOTIFY_OK;
}

/**
 * build_dyn_power_table() - create a dynamic power to frequency table
 * @cpufreq_device:	the cpufreq cooling device in which to store the table
 * @capacitance: dynamic power coefficient for these cpus
 *
 * Build a dynamic power to frequency table for this cpu and store it
 * in @cpufreq_device. This table will be used in cpu_power_to_freq() and
 * cpu_freq_to_power() to convert between power and frequency
 * efficiently. Power is stored in mW, frequency in KHz. The
 * resulting table is in ascending order.
 *
 * Return: 0 on success, -EINVAL if there are no OPPs for any CPUs,
 * -ENOMEM if we run out of memory or -EAGAIN if an OPP was
 * added/enabled while the function was executing.
 */
static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
				 u32 capacitance)
{
	struct power_table *power_table;
	struct dev_pm_opp *opp;
	struct device *dev = NULL;
	int num_opps = 0, cpu, i, ret = 0;
	unsigned long freq;

	for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
		dev = get_cpu_device(cpu);
		if (!dev) {
			dev_warn(&cpufreq_device->cool_dev->device,
				 "No cpu device for cpu %d\n", cpu);
			continue;
		}

		num_opps = dev_pm_opp_get_opp_count(dev);
		if (num_opps > 0)
			break;
		else if (num_opps < 0)
			return num_opps;
	}

	if (num_opps == 0)
		return -EINVAL;

	power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL);
	if (!power_table)
		return -ENOMEM;

	rcu_read_lock();

	for (freq = 0, i = 0;
	     opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp);
	     freq++, i++) {
		u32 freq_mhz, voltage_mv;
		u64 power;

		if (i >= num_opps) {
			rcu_read_unlock();
			ret = -EAGAIN;
			goto free_power_table;
		}

		freq_mhz = freq / 1000000;
		voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;

		/*
		 * Do the multiplication with MHz and millivolt so as
		 * to not overflow.
		 */
		power = (u64)capacitance * freq_mhz * voltage_mv * voltage_mv;
		do_div(power, 1000000000);

		/* frequency is stored in power_table in KHz */
		power_table[i].frequency = freq / 1000;

		/* power is stored in mW */
		power_table[i].power = power;
	}

	rcu_read_unlock();

	if (i != num_opps) {
		ret = PTR_ERR(opp);
		goto free_power_table;
	}

	cpufreq_device->cpu_dev = dev;
	cpufreq_device->dyn_power_table = power_table;
	cpufreq_device->dyn_power_table_entries = i;

	return 0;

free_power_table:
	kfree(power_table);

	return ret;
}

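/*
 * Worked example for the table entries built above (hypothetical numbers):
 * with capacitance = 100 and an OPP at 1000 MHz / 900 mV,
 * power = 100 * 1000 * 900 * 900 / 10^9 = 81 mW.
 */
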
static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_device,
			     u32 freq)
{
	int i;
	struct power_table *pt = cpufreq_device->dyn_power_table;

	for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++)
		if (freq < pt[i].frequency)
			break;

	return pt[i - 1].power;
}

static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device,
			     u32 power)
{
	int i;
	struct power_table *pt = cpufreq_device->dyn_power_table;

	for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++)
		if (power < pt[i].power)
			break;

	return pt[i - 1].frequency;
}

/**
 * get_load() - get load for a cpu since last updated
 * @cpufreq_device:	&struct cpufreq_cooling_device for this cpu
 * @cpu:	cpu number
 * @cpu_idx:	index of the cpu in cpufreq_device->allowed_cpus
 *
 * Return: The average load of cpu @cpu in percentage since this
 * function was last called.
 */
static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu,
		    int cpu_idx)
{
	u32 load;
	u64 now, now_idle, delta_time, delta_idle;

	now_idle = get_cpu_idle_time(cpu, &now, 0);
	delta_idle = now_idle - cpufreq_device->time_in_idle[cpu_idx];
	delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu_idx];

	if (delta_time <= delta_idle)
		load = 0;
	else
		load = div64_u64(100 * (delta_time - delta_idle), delta_time);

	cpufreq_device->time_in_idle[cpu_idx] = now_idle;
	cpufreq_device->time_in_idle_timestamp[cpu_idx] = now;

	return load;
}

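/*
 * Illustrative arithmetic for the load computed above (hypothetical numbers):
 * if 100 ms of wall time and 25 ms of idle time have elapsed since the
 * previous call, load = 100 * (100 - 25) / 100 = 75 (percent).
 */
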
/**
 * get_static_power() - calculate the static power consumed by the cpus
 * @cpufreq_device:	struct &cpufreq_cooling_device for this cpu cdev
 * @tz:		thermal zone device in which we're operating
 * @freq:	frequency in KHz
 * @power:	pointer in which to store the calculated static power
 *
 * Calculate the static power consumed by the cpus described by
 * @cpufreq_device running at frequency @freq. This function relies on a
 * platform specific function that should have been provided when the
 * actor was registered. If it wasn't, the static power is assumed to
 * be negligible. The calculated static power is stored in @power.
 *
 * Return: 0 on success, -E* on failure.
 */
static int get_static_power(struct cpufreq_cooling_device *cpufreq_device,
			    struct thermal_zone_device *tz, unsigned long freq,
			    u32 *power)
{
	struct dev_pm_opp *opp;
	unsigned long voltage;
	struct cpumask *cpumask = &cpufreq_device->allowed_cpus;
	unsigned long freq_hz = freq * 1000;

	if (!cpufreq_device->plat_get_static_power ||
	    !cpufreq_device->cpu_dev) {
		*power = 0;
		return 0;
	}

	rcu_read_lock();

	opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz,
					 true);
	voltage = dev_pm_opp_get_voltage(opp);

	rcu_read_unlock();

	if (voltage == 0) {
		dev_warn_ratelimited(cpufreq_device->cpu_dev,
				     "Failed to get voltage for frequency %lu: %ld\n",
				     freq_hz, IS_ERR(opp) ? PTR_ERR(opp) : 0);
		return -EINVAL;
	}

	return cpufreq_device->plat_get_static_power(cpumask, tz->passive_delay,
						     voltage, power);
}

/**
 * get_dynamic_power() - calculate the dynamic power
 * @cpufreq_device:	&cpufreq_cooling_device for this cdev
 * @freq:	current frequency
 *
 * Return: the dynamic power consumed by the cpus described by
 * @cpufreq_device.
 */
static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_device,
			     unsigned long freq)
{
	u32 raw_cpu_power;

	raw_cpu_power = cpu_freq_to_power(cpufreq_device, freq);
	return (raw_cpu_power * cpufreq_device->last_load) / 100;
}

/* cpufreq cooling device callback functions are defined below */

/**
 * cpufreq_get_max_state - callback function to get the max cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the max cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * max cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;

	*state = cpufreq_device->max_level;
	return 0;
}

/**
 * cpufreq_get_cur_state - callback function to get the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the current cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * current cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;

	*state = cpufreq_device->cpufreq_state;

	return 0;
}

/**
 * cpufreq_set_cur_state - callback function to set the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: set this variable to the current cooling state.
 *
 * Callback for the thermal cooling device to change the cpufreq
 * current cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long state)
{
	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
	unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus);
	unsigned int clip_freq;

	/* Requested state should not exceed max_level */
	if (WARN_ON(state > cpufreq_device->max_level))
		return -EINVAL;

	/* Check if the old cooling action is same as new cooling action */
	if (cpufreq_device->cpufreq_state == state)
		return 0;

	clip_freq = cpufreq_device->freq_table[state];
	cpufreq_device->cpufreq_state = state;
	cpufreq_device->clipped_freq = clip_freq;

	cpufreq_update_policy(cpu);

	return 0;
}

/**
 * cpufreq_get_requested_power() - get the current power
 * @cdev:	&thermal_cooling_device pointer
 * @tz:		a valid thermal zone device pointer
 * @power:	pointer in which to store the resulting power
 *
 * Calculate the current power consumption of the cpus in milliwatts
 * and store it in @power. This function should actually calculate
 * the requested power, but it's hard to get the frequency that
 * cpufreq would have assigned if there were no thermal limits.
 * Instead, we calculate the current power on the assumption that the
 * immediate future will look like the immediate past.
 *
 * We use the current frequency and the average load since this
 * function was last called. In reality, there could have been
 * multiple opps since this function was last called and that affects
 * the load calculation. While it's not perfectly accurate, this
 * simplification is good enough and works. REVISIT this, as more
 * complex code may be needed if experiments show that it's not
 * accurate enough.
 *
 * Return: 0 on success, -E* if getting the static power failed.
 */
static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
				       struct thermal_zone_device *tz,
				       u32 *power)
{
	unsigned long freq;
	int i = 0, cpu, ret;
	u32 static_power, dynamic_power, total_load = 0;
	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
	u32 *load_cpu = NULL;

	cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask);

	/*
	 * All the CPUs are offline, thus the requested power by
	 * the cdev is 0
	 */
	if (cpu >= nr_cpu_ids) {
		*power = 0;
		return 0;
	}

	freq = cpufreq_quick_get(cpu);

	if (trace_thermal_power_cpu_get_power_enabled()) {
		u32 ncpus = cpumask_weight(&cpufreq_device->allowed_cpus);

		load_cpu = kcalloc(ncpus, sizeof(*load_cpu), GFP_KERNEL);
	}

	for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
		u32 load;

		if (cpu_online(cpu))
			load = get_load(cpufreq_device, cpu, i);
		else
			load = 0;

		total_load += load;
		if (trace_thermal_power_cpu_limit_enabled() && load_cpu)
			load_cpu[i] = load;

		i++;
	}

	cpufreq_device->last_load = total_load;

	dynamic_power = get_dynamic_power(cpufreq_device, freq);
	ret = get_static_power(cpufreq_device, tz, freq, &static_power);
	if (ret) {
		kfree(load_cpu);
		return ret;
	}

	if (load_cpu) {
		trace_thermal_power_cpu_get_power(
			&cpufreq_device->allowed_cpus,
			freq, load_cpu, i, dynamic_power, static_power);

		kfree(load_cpu);
	}

	*power = static_power + dynamic_power;
	return 0;
}

/**
 * cpufreq_state2power() - convert a cpu cdev state to power consumed
 * @cdev:	&thermal_cooling_device pointer
 * @tz:		a valid thermal zone device pointer
 * @state:	cooling device state to be converted
 * @power:	pointer in which to store the resulting power
 *
 * Convert cooling device state @state into power consumption in
 * milliwatts assuming 100% load. Store the calculated power in
 * @power.
 *
 * Return: 0 on success, -EINVAL if the cooling device state could not
 * be converted into a frequency or other -E* if there was an error
 * when calculating the static power.
 */
static int cpufreq_state2power(struct thermal_cooling_device *cdev,
			       struct thermal_zone_device *tz,
			       unsigned long state, u32 *power)
{
	unsigned int freq, num_cpus;
	cpumask_t cpumask;
	u32 static_power, dynamic_power;
	int ret;
	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;

	cpumask_and(&cpumask, &cpufreq_device->allowed_cpus, cpu_online_mask);
	num_cpus = cpumask_weight(&cpumask);

	/* None of our cpus are online, so no power */
	if (num_cpus == 0) {
		*power = 0;
		return 0;
	}

	freq = cpufreq_device->freq_table[state];
	if (!freq)
		return -EINVAL;

	dynamic_power = cpu_freq_to_power(cpufreq_device, freq) * num_cpus;
	ret = get_static_power(cpufreq_device, tz, freq, &static_power);
	if (ret)
		return ret;

	*power = static_power + dynamic_power;
	return 0;
}

/**
 * cpufreq_power2state() - convert power to a cooling device state
 * @cdev:	&thermal_cooling_device pointer
 * @tz:		a valid thermal zone device pointer
 * @power:	power in milliwatts to be converted
 * @state:	pointer in which to store the resulting state
 *
 * Calculate a cooling device state for the cpus described by @cdev
 * that would allow them to consume at most @power mW and store it in
 * @state. Note that this calculation depends on external factors
 * such as the cpu load or the current static power. Calling this
 * function with the same power as input can yield different cooling
 * device states depending on those external factors.
 *
 * Return: 0 on success, -ENODEV if no cpus are online or -EINVAL if
 * the calculated frequency could not be converted to a valid state.
 * The latter should not happen unless the frequencies available to
 * cpufreq have changed since the initialization of the cpu cooling
 * device.
 */
static int cpufreq_power2state(struct thermal_cooling_device *cdev,
			       struct thermal_zone_device *tz, u32 power,
			       unsigned long *state)
{
	unsigned int cpu, cur_freq, target_freq;
	int ret;
	s32 dyn_power;
	u32 last_load, normalised_power, static_power;
	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;

	cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask);

	/* None of our cpus are online */
	if (cpu >= nr_cpu_ids)
		return -ENODEV;

	cur_freq = cpufreq_quick_get(cpu);
	ret = get_static_power(cpufreq_device, tz, cur_freq, &static_power);
	if (ret)
		return ret;

	dyn_power = power - static_power;
	dyn_power = dyn_power > 0 ? dyn_power : 0;
	last_load = cpufreq_device->last_load ?: 1;
	normalised_power = (dyn_power * 100) / last_load;
	target_freq = cpu_power_to_freq(cpufreq_device, normalised_power);

	*state = cpufreq_cooling_get_level(cpu, target_freq);
	if (*state == THERMAL_CSTATE_INVALID) {
		dev_warn_ratelimited(&cdev->device,
				     "Failed to convert %dKHz for cpu %d into a cdev state\n",
				     target_freq, cpu);
		return -EINVAL;
	}

	trace_thermal_power_cpu_limit(&cpufreq_device->allowed_cpus,
				      target_freq, *state, power);
	return 0;
}

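/*
 * Worked example for the conversion above (hypothetical numbers): with a
 * power budget of 2000 mW, a static power of 200 mW and last_load = 50,
 * dyn_power = 1800 mW and normalised_power = 1800 * 100 / 50 = 3600 mW.
 * cpu_power_to_freq() then picks the highest table frequency whose 100%-load
 * dynamic power does not exceed 3600 mW (or the lowest entry if none does),
 * and that frequency's cooling level becomes the returned state.
 */
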
/* Bind cpufreq callbacks to thermal cooling device ops */

static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
	.get_max_state = cpufreq_get_max_state,
	.get_cur_state = cpufreq_get_cur_state,
	.set_cur_state = cpufreq_set_cur_state,
};

static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
	.get_max_state = cpufreq_get_max_state,
	.get_cur_state = cpufreq_get_cur_state,
	.set_cur_state = cpufreq_set_cur_state,
	.get_requested_power = cpufreq_get_requested_power,
	.state2power = cpufreq_state2power,
	.power2state = cpufreq_power2state,
};

/* Notifier for cpufreq policy change */
static struct notifier_block thermal_cpufreq_notifier_block = {
	.notifier_call = cpufreq_thermal_notifier,
};

static unsigned int find_next_max(struct cpufreq_frequency_table *table,
				  unsigned int prev_max)
{
	struct cpufreq_frequency_table *pos;
	unsigned int max = 0;

	cpufreq_for_each_valid_entry(pos, table) {
		if (pos->frequency > max && pos->frequency < prev_max)
			max = pos->frequency;
	}

	return max;
}

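/*
 * Illustrative note (hypothetical table): for valid entries at 500000,
 * 1000000 and 1500000 KHz, repeated calls starting with prev_max = UINT_MAX
 * return 1500000, then 1000000, then 500000, which is how the descending
 * freq_table is filled in __cpufreq_cooling_register() below.
 */
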
/**
 * __cpufreq_cooling_register - helper function to create cpufreq cooling device
 * @np: a valid struct device_node to the cooling device device tree node
 * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
 * Normally this should be the same as cpufreq policy->related_cpus.
 * @capacitance: dynamic power coefficient for these cpus
 * @plat_static_func: function to calculate the static power consumed by these
 *                    cpus (optional)
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
 * cooling devices. It also gives the opportunity to link the cooling device
 * with a device tree node, in order to bind it via the thermal DT code.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
			const struct cpumask *clip_cpus, u32 capacitance,
			get_static_t plat_static_func)
{
	struct cpufreq_policy *policy;
	struct thermal_cooling_device *cool_dev;
	struct cpufreq_cooling_device *cpufreq_dev;
	char dev_name[THERMAL_NAME_LENGTH];
	struct cpufreq_frequency_table *pos, *table;
	struct cpumask temp_mask;
	unsigned int freq, i, num_cpus;
	int ret;
	struct thermal_cooling_device_ops *cooling_ops;

	cpumask_and(&temp_mask, clip_cpus, cpu_online_mask);
	policy = cpufreq_cpu_get(cpumask_first(&temp_mask));
	if (!policy) {
		pr_debug("%s: CPUFreq policy not found\n", __func__);
		return ERR_PTR(-EPROBE_DEFER);
	}

	table = policy->freq_table;
	if (!table) {
		pr_debug("%s: CPUFreq table not found\n", __func__);
		cool_dev = ERR_PTR(-ENODEV);
		goto put_policy;
	}

	cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL);
	if (!cpufreq_dev) {
		cool_dev = ERR_PTR(-ENOMEM);
		goto put_policy;
	}

	num_cpus = cpumask_weight(clip_cpus);
	cpufreq_dev->time_in_idle = kcalloc(num_cpus,
					    sizeof(*cpufreq_dev->time_in_idle),
					    GFP_KERNEL);
	if (!cpufreq_dev->time_in_idle) {
		cool_dev = ERR_PTR(-ENOMEM);
		goto free_cdev;
	}

	cpufreq_dev->time_in_idle_timestamp =
		kcalloc(num_cpus, sizeof(*cpufreq_dev->time_in_idle_timestamp),
			GFP_KERNEL);
	if (!cpufreq_dev->time_in_idle_timestamp) {
		cool_dev = ERR_PTR(-ENOMEM);
		goto free_time_in_idle;
	}

	/* Find max levels */
	cpufreq_for_each_valid_entry(pos, table)
		cpufreq_dev->max_level++;

	cpufreq_dev->freq_table = kmalloc(sizeof(*cpufreq_dev->freq_table) *
					  cpufreq_dev->max_level, GFP_KERNEL);
	if (!cpufreq_dev->freq_table) {
		cool_dev = ERR_PTR(-ENOMEM);
		goto free_time_in_idle_timestamp;
	}

	/* max_level is an index, not a counter */
	cpufreq_dev->max_level--;

	cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);

	if (capacitance) {
		cpufreq_dev->plat_get_static_power = plat_static_func;

		ret = build_dyn_power_table(cpufreq_dev, capacitance);
		if (ret) {
			cool_dev = ERR_PTR(ret);
			goto free_table;
		}

		cooling_ops = &cpufreq_power_cooling_ops;
	} else {
		cooling_ops = &cpufreq_cooling_ops;
	}

	ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
	if (ret) {
		cool_dev = ERR_PTR(ret);
		goto free_power_table;
	}

	/* Fill freq-table in descending order of frequencies */
	for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
		freq = find_next_max(table, freq);
		cpufreq_dev->freq_table[i] = freq;

		/* Warn for duplicate entries */
		if (!freq)
			pr_warn("%s: table has duplicate entries\n", __func__);
		else
			pr_debug("%s: freq:%u KHz\n", __func__, freq);
	}

	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
		 cpufreq_dev->id);

	cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
						      cooling_ops);
	if (IS_ERR(cool_dev))
		goto remove_idr;

	cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
	cpufreq_dev->cool_dev = cool_dev;

	mutex_lock(&cooling_cpufreq_lock);

	mutex_lock(&cooling_list_lock);
	list_add(&cpufreq_dev->node, &cpufreq_dev_list);
	mutex_unlock(&cooling_list_lock);

	/* Register the notifier for first cpufreq cooling device */
	if (!cpufreq_dev_count++)
		cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
					  CPUFREQ_POLICY_NOTIFIER);
	mutex_unlock(&cooling_cpufreq_lock);

	goto put_policy;

remove_idr:
	release_idr(&cpufreq_idr, cpufreq_dev->id);
free_power_table:
	kfree(cpufreq_dev->dyn_power_table);
free_table:
	kfree(cpufreq_dev->freq_table);
free_time_in_idle_timestamp:
	kfree(cpufreq_dev->time_in_idle_timestamp);
free_time_in_idle:
	kfree(cpufreq_dev->time_in_idle);
free_cdev:
	kfree(cpufreq_dev);
put_policy:
	cpufreq_cpu_put(policy);

	return cool_dev;
}

/**
 * cpufreq_cooling_register - function to create cpufreq cooling device.
 * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
 * cooling devices.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
struct thermal_cooling_device *
cpufreq_cooling_register(const struct cpumask *clip_cpus)
{
	return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);

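/*
 * Usage sketch (illustrative only, not part of this driver): a hypothetical
 * platform or cpufreq driver holding a cpufreq policy would typically do
 *
 *	struct thermal_cooling_device *cdev;
 *
 *	cdev = cpufreq_cooling_register(policy->related_cpus);
 *	if (IS_ERR(cdev))
 *		return PTR_ERR(cdev);
 *	...
 *	cpufreq_cooling_unregister(cdev);
 */
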
/**
 * of_cpufreq_cooling_register - function to create cpufreq cooling device.
 * @np: a valid struct device_node to the cooling device device tree node
 * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
 * cooling devices. Using this API, the cpufreq cooling device will be
 * linked to the device tree node provided.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
			    const struct cpumask *clip_cpus)
{
	if (!np)
		return ERR_PTR(-EINVAL);

	return __cpufreq_cooling_register(np, clip_cpus, 0, NULL);
}
EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);

/**
 * cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
 * @clip_cpus:	cpumask of cpus where the frequency constraints will happen
 * @capacitance:	dynamic power coefficient for these cpus
 * @plat_static_func:	function to calculate the static power consumed by these
 *			cpus (optional)
 *
 * This interface function registers the cpufreq cooling device with
 * the name "thermal-cpufreq-%x". This api can support multiple
 * instances of cpufreq cooling devices. Using this function, the
 * cooling device will implement the power extensions by using a
 * simple cpu power model. The cpus must have registered their OPPs
 * using the OPP library.
 *
 * An optional @plat_static_func may be provided to calculate the
 * static power consumed by these cpus. If the platform's static
 * power consumption is unknown or negligible, make it NULL.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
struct thermal_cooling_device *
cpufreq_power_cooling_register(const struct cpumask *clip_cpus, u32 capacitance,
			       get_static_t plat_static_func)
{
	return __cpufreq_cooling_register(NULL, clip_cpus, capacitance,
				plat_static_func);
}
EXPORT_SYMBOL(cpufreq_power_cooling_register);

/**
 * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
 * @np:	a valid struct device_node to the cooling device device tree node
 * @clip_cpus:	cpumask of cpus where the frequency constraints will happen
 * @capacitance:	dynamic power coefficient for these cpus
 * @plat_static_func:	function to calculate the static power consumed by these
 *			cpus (optional)
 *
 * This interface function registers the cpufreq cooling device with
 * the name "thermal-cpufreq-%x". This api can support multiple
 * instances of cpufreq cooling devices. Using this API, the cpufreq
 * cooling device will be linked to the device tree node provided.
 * Using this function, the cooling device will implement the power
 * extensions by using a simple cpu power model. The cpus must have
 * registered their OPPs using the OPP library.
 *
 * An optional @plat_static_func may be provided to calculate the
 * static power consumed by these cpus. If the platform's static
 * power consumption is unknown or negligible, make it NULL.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
struct thermal_cooling_device *
of_cpufreq_power_cooling_register(struct device_node *np,
				  const struct cpumask *clip_cpus,
				  u32 capacitance,
				  get_static_t plat_static_func)
{
	if (!np)
		return ERR_PTR(-EINVAL);

	return __cpufreq_cooling_register(np, clip_cpus, capacitance,
				plat_static_func);
}
EXPORT_SYMBOL(of_cpufreq_power_cooling_register);

/**
 * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
 * @cdev: thermal cooling device pointer.
 *
 * This interface function unregisters the "thermal-cpufreq-%x" cooling device.
 */
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
	struct cpufreq_cooling_device *cpufreq_dev;

	if (!cdev)
		return;

	cpufreq_dev = cdev->devdata;

	/* Unregister the notifier for the last cpufreq cooling device */
	mutex_lock(&cooling_cpufreq_lock);
	if (!--cpufreq_dev_count)
		cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
					    CPUFREQ_POLICY_NOTIFIER);

	mutex_lock(&cooling_list_lock);
	list_del(&cpufreq_dev->node);
	mutex_unlock(&cooling_list_lock);

	mutex_unlock(&cooling_cpufreq_lock);

	thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
	release_idr(&cpufreq_idr, cpufreq_dev->id);
	kfree(cpufreq_dev->dyn_power_table);
	kfree(cpufreq_dev->time_in_idle_timestamp);
	kfree(cpufreq_dev->time_in_idle);
	kfree(cpufreq_dev->freq_table);
	kfree(cpufreq_dev);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);