/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
static LIST_HEAD(cpufreq_policy_list);

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)			 \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
		if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
60 * The "cpufreq driver" - the arch- or hardware-dependent low
61 * level driver of CPUFreq support, and its spinlock. This lock
62 * also protects the cpufreq_cpu_data array.
64 static struct cpufreq_driver
*cpufreq_driver
;
65 static DEFINE_PER_CPU(struct cpufreq_policy
*, cpufreq_cpu_data
);
66 static DEFINE_RWLOCK(cpufreq_driver_lock
);
68 /* Flag to suspend/resume CPUFreq governors */
69 static bool cpufreq_suspended
;
71 static inline bool has_target(void)
73 return cpufreq_driver
->target_index
|| cpufreq_driver
->target
;
76 /* internal prototypes */
77 static unsigned int __cpufreq_get(struct cpufreq_policy
*policy
);
78 static int cpufreq_init_governor(struct cpufreq_policy
*policy
);
79 static void cpufreq_exit_governor(struct cpufreq_policy
*policy
);
80 static int cpufreq_start_governor(struct cpufreq_policy
*policy
);
81 static void cpufreq_stop_governor(struct cpufreq_policy
*policy
);
82 static void cpufreq_governor_limits(struct cpufreq_policy
*policy
);
85 * Two notifier lists: the "policy" list is involved in the
86 * validation process for a new CPU frequency policy; the
87 * "transition" list for kernel code that needs to handle
88 * changes to devices when the CPU clock speed changes.
89 * The mutex locks both lists.
91 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list
);
92 static struct srcu_notifier_head cpufreq_transition_notifier_list
;
94 static bool init_cpufreq_transition_notifier_list_called
;
95 static int __init
init_cpufreq_transition_notifier_list(void)
97 srcu_init_notifier_head(&cpufreq_transition_notifier_list
);
98 init_cpufreq_transition_notifier_list_called
= true;
101 pure_initcall(init_cpufreq_transition_notifier_list
);
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;

	return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);

	return div_u64(idle_time, NSEC_PER_USEC);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
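
/*
 * Example (illustrative, not part of this file): an ondemand-style governor
 * can estimate CPU load from two get_cpu_idle_time() samples taken one
 * sampling period apart.  foo_wait_one_period() is a hypothetical helper:
 *
 *	u64 wall0, wall1, idle0, idle1, load;
 *
 *	idle0 = get_cpu_idle_time(cpu, &wall0, io_busy);
 *	foo_wait_one_period();
 *	idle1 = get_cpu_idle_time(cpu, &wall1, io_busy);
 *	load = div64_u64(100 * ((wall1 - wall0) - (idle1 - idle0)),
 *			 wall1 - wall0);
 *
 * Both values are reported in microseconds, so the deltas can be compared
 * directly.
 */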
__weak void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
		unsigned long max_freq)
{
}
EXPORT_SYMBOL_GPL(arch_set_freq_scale);
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
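
/*
 * Example (illustrative): a minimal driver ->init() built on
 * cpufreq_generic_init().  The table, the driver name and the 300 us
 * transition latency below are all hypothetical:
 *
 *	static struct cpufreq_frequency_table foo_freq_table[] = {
 *		{ .frequency = 500000 },
 *		{ .frequency = 1000000 },
 *		{ .frequency = CPUFREQ_TABLE_END },
 *	};
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		return cpufreq_generic_init(policy, foo_freq_table,
 *					    300 * NSEC_PER_USEC);
 *	}
 */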
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns policy for 'cpu', returns NULL if it doesn't exist.
 * It also increments the kobject reference count to mark it busy and so would
 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
 * If the corresponding cpufreq_cpu_put() call isn't made, the policy won't be
 * freed, as that depends on the kobj count.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
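
/*
 * Example (illustrative): typical use of the get/put pair.  Every
 * successful cpufreq_cpu_get() must be balanced by a cpufreq_cpu_put(),
 * or the policy kobject can never be released:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("CPU%u current freq: %u kHz\n", cpu, policy->cur);
 *		cpufreq_cpu_put(policy);
 *	}
 */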
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {
	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		cpufreq_stats_record_transition(policy, freqs->new);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{
	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
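
/*
 * Example (illustrative): for CPUFREQ_ASYNC_NOTIFICATION drivers the core
 * does not bracket ->target_index() itself, so such a driver wraps the
 * actual hardware programming with the begin/end pair on its own.
 * foo_write_freq() is a hypothetical hardware accessor:
 *
 *	struct cpufreq_freqs freqs = { .old = policy->cur, .new = new_freq };
 *	int ret;
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	ret = foo_write_freq(new_freq);
 *	cpufreq_freq_transition_end(policy, &freqs, ret);
 */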
/*
 * Fast frequency switching status count.  Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
	struct notifier_block *nb;

	pr_info("Registered transition notifiers:\n");

	mutex_lock(&cpufreq_transition_notifier_list.mutex);

	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
		pr_info("%pF\n", nb->notifier_call);

	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}
/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier registered
 * at this point, as fast frequency switching is quite fundamentally at odds
 * with transition notifiers.  Thus if successful, it will make registration of
 * transition notifiers fail going forward.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
	lockdep_assert_held(&policy->rwsem);

	if (!policy->fast_switch_possible)
		return;

	mutex_lock(&cpufreq_fast_switch_lock);
	if (cpufreq_fast_switch_count >= 0) {
		cpufreq_fast_switch_count++;
		policy->fast_switch_enabled = true;
	} else {
		pr_warn("CPU%u: Fast frequency switching not enabled\n",
			policy->cpu);
		cpufreq_list_transition_notifiers();
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);

/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
	mutex_lock(&cpufreq_fast_switch_lock);
	if (policy->fast_switch_enabled) {
		policy->fast_switch_enabled = false;
		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
			cpufreq_fast_switch_count--;
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
);
502 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
504 * @target_freq: target frequency to resolve.
506 * The target to driver frequency mapping is cached in the policy.
508 * Return: Lowest driver-supported frequency greater than or equal to the
509 * given target_freq, subject to policy (min/max) and driver limitations.
511 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy
*policy
,
512 unsigned int target_freq
)
514 target_freq
= clamp_val(target_freq
, policy
->min
, policy
->max
);
515 policy
->cached_target_freq
= target_freq
;
517 if (cpufreq_driver
->target_index
) {
520 idx
= cpufreq_frequency_table_target(policy
, target_freq
,
522 policy
->cached_resolved_idx
= idx
;
523 return policy
->freq_table
[idx
].frequency
;
526 if (cpufreq_driver
->resolve_freq
)
527 return cpufreq_driver
->resolve_freq(policy
, target_freq
);
531 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq
);
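
/*
 * Example (illustrative): a governor can pre-resolve a raw frequency before
 * deciding whether a switch is worthwhile; the result is cached in the
 * policy, so an immediately following target call for the same value is
 * cheap:
 *
 *	unsigned int next = cpufreq_driver_resolve_freq(policy, raw_freq);
 *
 *	if (next != policy->cur)
 *		cpufreq_driver_target(policy, next, CPUFREQ_RELATION_L);
 */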
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
{
	unsigned int latency;

	if (policy->transition_delay_us)
		return policy->transition_delay_us;

	latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
	if (latency) {
		/*
		 * For platforms that can change the frequency very fast (< 10
		 * us), the above formula gives a decent transition delay. But
		 * for platforms where transition_latency is in milliseconds, it
		 * ends up giving unrealistic values.
		 *
		 * Cap the default transition delay to 10 ms, which seems to be
		 * a reasonable amount of time after which we should reevaluate
		 * the frequency.
		 */
		return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
	}

	return LATENCY_MULTIPLIER;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
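
/*
 * Worked example: with LATENCY_MULTIPLIER == 1000, a transition_latency of
 * 500000 ns gives latency = 500 us and a proposed delay of 500 * 1000 =
 * 500000 us, which the cap above reduces to 10000 us (10 ms).  A fast
 * platform with transition_latency = 5000 ns gets 5 * 1000 = 5000 us,
 * below the cap.
 */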
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
			  struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);
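
/*
 * The attribute defined above is global rather than per-policy: when the
 * driver supports boost it appears as
 * /sys/devices/system/cpu/cpufreq/boost, and writing "1" or "0" to it
 * toggles boost via cpufreq_boost_trigger_state() above.
 */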
static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor,
				  struct cpufreq_policy *policy)
{
	if (cpufreq_driver->setpolicy) {
		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			policy->policy = CPUFREQ_POLICY_PERFORMANCE;
			return 0;
		}

		if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
			policy->policy = CPUFREQ_POLICY_POWERSAVE;
			return 0;
		}
	} else {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);
		if (!t) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);

			ret = request_module("cpufreq_%s", str_governor);
			if (ret)
				return -EINVAL;

			mutex_lock(&cpufreq_governor_mutex);

			t = find_governor(str_governor);
		}
		if (t && !try_module_get(t->owner))
			t = NULL;

		mutex_unlock(&cpufreq_governor_mutex);

		if (t) {
			policy->governor = t;
			return 0;
		}
	}

	return -EINVAL;
}
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
	return 0;
}

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;
	unsigned int freq;

	freq = arch_freq_get_on_cpu(policy->cpu);
	if (freq)
		ret = sprintf(buf, "%u\n", freq);
	else if (cpufreq_driver && cpufreq_driver->setpolicy &&
			cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	memcpy(&new_policy, policy, sizeof(*policy));			\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);			\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
				     char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);

	if (cur_freq)
		return sprintf(buf, "%u\n", cur_freq);

	return sprintf(buf, "<unknown>\n");
}
/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	if (new_policy.governor)
		module_put(new_policy.governor->owner);

	return ret ? ret : count;
}
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;

	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	down_read(&policy->rwsem);
	ret = fattr->show(policy, buf);
	up_read(&policy->rwsem);

	return ret;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (cpu_online(policy->cpu)) {
		down_write(&policy->rwsem);
		ret = fattr->store(policy, buf, count);
		up_write(&policy->rwsem);
	}

	put_online_cpus();

	return ret;
}
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (!dev)
		return;

	if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
		return;

	dev_dbg(dev, "%s: Adding symlink\n", __func__);
	if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
		dev_err(dev, "cpufreq symlink creation failed\n");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
				   struct device *dev)
{
	dev_dbg(dev, "%s: Removing symlink\n", __func__);
	sysfs_remove_link(&dev->kobj, "cpufreq");
}
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return 0;
}
__weak struct cpufreq_governor *cpufreq_default_governor(void)
{
	return NULL;
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = find_governor(policy->last_governor);
	if (gov) {
		pr_debug("Restoring governor %s for cpu %d\n",
				policy->governor->name, policy->cpu);
	} else {
		gov = cpufreq_default_governor();
		if (!gov)
			return -ENODATA;
	}

	new_policy.governor = gov;

	/* Use the default policy if there is no last_policy. */
	if (cpufreq_driver->setpolicy) {
		if (policy->last_policy)
			new_policy.policy = policy->last_policy;
		else
			cpufreq_parse_governor(gov->name, &new_policy);
	}
	/* set default policy */
	return cpufreq_set_policy(policy, &new_policy);
}
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_set_cpu(cpu, policy->cpus);

	if (has_target()) {
		ret = cpufreq_start_governor(policy);
		if (ret)
			pr_err("%s: Failed to start governor\n", __func__);
	}
	up_write(&policy->rwsem);
	return ret;
}
static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   cpufreq_global_kobject, "policy%u", cpu);
	if (ret) {
		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
		goto err_free_real_cpus;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;
	return policy;

err_free_real_cpus:
	free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	down_write(&policy->rwsem);
	cpufreq_stats_free_table(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_policy_put_kobj(policy);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
static int cpufreq_online(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	bool new_policy;
	unsigned long flags;
	unsigned int j;
	int ret;

	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		if (!policy_is_inactive(policy))
			return cpufreq_add_policy_cpu(policy, cpu);

		/* This is the only online CPU for the policy.  Start over. */
		new_policy = false;
		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
		up_write(&policy->rwsem);
	} else {
		new_policy = true;
		policy = cpufreq_policy_alloc(cpu);
		if (!policy)
			return -ENOMEM;
	}

	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto out_free_policy;
	}

	down_write(&policy->rwsem);

	if (new_policy) {
		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
	}

	/*
	 * affected cpus must always be the ones which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (new_policy) {
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;

		for_each_cpu(j, policy->related_cpus) {
			per_cpu(cpufreq_cpu_data, j) = policy;
			add_cpu_dev_symlink(policy, j);
		}
	} else {
		policy->min = policy->user_policy.min;
		policy->max = policy->user_policy.max;
	}

	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto out_exit_policy;
		}
	}

	/*
	 * Sometimes boot loaders set CPU frequency to a value outside of
	 * the frequency table present with the cpufreq core. In such cases
	 * the CPU might be unstable if it has to run on that frequency for a
	 * long duration of time, so it's better to set it to a frequency
	 * which is specified in the freq-table. This also makes cpufreq
	 * stats inconsistent, as cpufreq-stats would fail to register
	 * because the current frequency of the CPU isn't found in the
	 * freq-table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, otherwise we will end up setting freq to the lowest of the
	 * table as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	if (new_policy) {
		ret = cpufreq_add_dev_interface(policy);
		if (ret)
			goto out_exit_policy;

		cpufreq_stats_create_table(policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	ret = cpufreq_init_policy(policy);
	if (ret) {
		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
		       __func__, cpu, ret);
		/* cpufreq_policy_free() will notify based on this */
		new_policy = false;
		goto out_exit_policy;
	}

	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	pr_debug("initialization complete\n");

	return 0;

out_exit_policy:
	up_write(&policy->rwsem);

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

	for_each_cpu(j, policy->real_cpus)
		remove_cpu_dev_symlink(policy, get_cpu_device(j));

out_free_policy:
	cpufreq_policy_free(policy);
	return ret;
}
/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	struct cpufreq_policy *policy;
	unsigned cpu = dev->id;
	int ret;

	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

	if (cpu_online(cpu)) {
		ret = cpufreq_online(cpu);
		if (ret)
			return ret;
	}

	/* Create sysfs link on CPU registration */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		add_cpu_dev_symlink(policy, cpu);

	return 0;
}
static int cpufreq_offline(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return 0;
	}

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_clear_cpu(cpu, policy->cpus);

	if (policy_is_inactive(policy)) {
		if (has_target())
			strncpy(policy->last_governor, policy->governor->name,
				CPUFREQ_NAME_LEN);
		else
			policy->last_policy = policy->policy;
	} else if (cpu == policy->cpu) {
		/* Nominate new CPU */
		policy->cpu = cpumask_any(policy->cpus);
	}

	/* Start governor again for active policy */
	if (!policy_is_inactive(policy)) {
		if (has_target()) {
			ret = cpufreq_start_governor(policy);
			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}

		goto unlock;
	}

	if (cpufreq_driver->stop_cpu)
		cpufreq_driver->stop_cpu(policy);

	if (has_target())
		cpufreq_exit_governor(policy);

	/*
	 * Perform the ->exit() even during light-weight tear-down,
	 * since this is a core component, and is essential for the
	 * subsequent light-weight ->init() to succeed.
	 */
	if (cpufreq_driver->exit) {
		cpufreq_driver->exit(policy);
		policy->freq_table = NULL;
	}

unlock:
	up_write(&policy->rwsem);
	return 0;
}
/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy)
		return;

	if (cpu_online(cpu))
		cpufreq_offline(cpu);

	cpumask_clear_cpu(cpu, policy->real_cpus);
	remove_cpu_dev_symlink(policy, dev);

	if (cpumask_empty(policy->real_cpus))
		cpufreq_policy_free(policy);
}
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 *	in deep trouble.
 * @policy: policy managing CPUs
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
		ret_freq = cpufreq_driver->get(cpu);
		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return ret_freq;
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(policy->cpu);

	/*
	 * Updating inactive policies is invalid, so avoid doing that. Also
	 * if fast frequency switching is used with the given policy, the check
	 * against policy->cur is pointless, so skip it in that case too.
	 */
	if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
		return ret_freq;

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(policy, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);

		if (!policy_is_inactive(policy))
			ret_freq = __cpufreq_get(policy);

		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
{
	unsigned int new_freq;

	new_freq = cpufreq_driver->get(policy->cpu);
	if (!new_freq)
		return 0;

	if (!policy->cur) {
		pr_debug("cpufreq: Driver did not initialize current freq\n");
		policy->cur = new_freq;
	} else if (policy->cur != new_freq && has_target()) {
		cpufreq_out_of_sync(policy, new_freq);
	}

	return new_freq;
}
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
/*
 * In case platform wants some specific frequency to be configured
 * during suspend..
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_debug("%s: suspend_freq not defined\n", __func__);
		return 0;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
			policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
			CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
				__func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle: some of the devices (like i2c, regulators, etc.) they use for
 * changing frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target() && !cpufreq_driver->suspend)
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (has_target()) {
			down_write(&policy->rwsem);
			cpufreq_stop_governor(policy);
			up_write(&policy->rwsem);
		}

		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %p\n", __func__,
				policy);
	}

suspend:
	cpufreq_suspended = true;
}
/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;
	int ret;

	if (!cpufreq_driver)
		return;

	if (unlikely(!cpufreq_suspended))
		return;

	cpufreq_suspended = false;

	if (!has_target() && !cpufreq_driver->resume)
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
			pr_err("%s: Failed to resume driver: %p\n", __func__,
				policy);
		} else if (has_target()) {
			down_write(&policy->rwsem);
			ret = cpufreq_start_governor(policy);
			up_write(&policy->rwsem);

			if (ret)
				pr_err("%s: Failed to start governor for policy: %p\n",
				       __func__, policy);
		}
	}
}
/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - return current driver data
 *
 * Return the private data of the currently loaded cpufreq
 * driver, or NULL if no cpufreq driver is loaded.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		if (cpufreq_fast_switch_count > 0) {
			mutex_unlock(&cpufreq_fast_switch_lock);
			return -EBUSY;
		}
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		if (!ret)
			cpufreq_fast_switch_count--;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
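
/*
 * Example (illustrative): registering a transition notifier.  Note that a
 * successful registration makes cpufreq_enable_fast_switch() fail from then
 * on, per the counting above.  All "foo" names are hypothetical:
 *
 *	static int foo_cpufreq_notify(struct notifier_block *nb,
 *				      unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			pr_debug("CPU%u now at %u kHz\n",
 *				 freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_cpufreq_notify,
 *	};
 *
 *	cpufreq_register_notifier(&foo_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */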
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
			cpufreq_fast_switch_count++;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
 *
 * The driver's ->fast_switch() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select the minimum available frequency greater than or
 * equal to @target_freq (CPUFREQ_RELATION_L).
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be invoked
 * twice in parallel for the same policy and that it will never be called in
 * parallel with either ->target() or ->target_index() for the same policy.
 *
 * Returns the actual frequency set for the CPU.
 *
 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
 * error condition, the hardware configuration must be preserved.
 */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	return cpufreq_driver->fast_switch(policy, target_freq);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
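
/*
 * Example (illustrative, simplified from what the schedutil governor does):
 * a governor's frequency-update path picking between the fast path and the
 * regular, possibly-sleeping path:
 *
 *	if (policy->fast_switch_enabled)
 *		cpufreq_driver_fast_switch(policy, next_freq);
 *	else
 *		cpufreq_driver_target(policy, next_freq, CPUFREQ_RELATION_L);
 *
 * The real governor defers the slow path to process context, since
 * cpufreq_driver_target() may sleep.
 */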
/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}
static int __target_index(struct cpufreq_policy *policy, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	unsigned int newfreq = policy->freq_table[index].frequency;
	int retval = -EINVAL;
	bool notify;

	if (newfreq == policy->cur)
		return 0;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = newfreq;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	int index;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		return cpufreq_driver->target(policy, target_freq, relation);

	if (!cpufreq_driver->target_index)
		return -EINVAL;

	index = cpufreq_frequency_table_target(policy, target_freq, relation);

	return __target_index(policy, index);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
	return NULL;
}

static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
	int ret;

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;

	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/* Platform doesn't want dynamic frequency switching ? */
	if (policy->governor->dynamic_switching &&
	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

		if (gov) {
			pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		} else {
			return -EINVAL;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->init) {
		ret = policy->governor->init(policy);
		if (ret) {
			module_put(policy->governor->owner);
			return ret;
		}
	}

	return 0;
}
static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->exit)
		policy->governor->exit(policy);

	module_put(policy->governor->owner);
}
static int cpufreq_start_governor(struct cpufreq_policy *policy)
{
	int ret;

	if (cpufreq_suspended)
		return 0;

	if (!policy->governor)
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
		cpufreq_update_current_freq(policy);

	if (policy->governor->start) {
		ret = policy->governor->start(policy);
		if (ret)
			return ret;
	}

	if (policy->governor->limits)
		policy->governor->limits(policy);

	return 0;
}
static void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->stop)
		policy->governor->stop(policy);
}

static void cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->limits)
		policy->governor->limits(policy);
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
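
/*
 * Example (illustrative): the usual module boilerplate for a governor,
 * with a hypothetical "foo" governor whose callbacks are defined elsewhere:
 *
 *	static struct cpufreq_governor cpufreq_gov_foo = {
 *		.name	= "foo",
 *		.owner	= THIS_MODULE,
 *		.start	= foo_start,
 *		.stop	= foo_stop,
 *		.limits	= foo_limits,
 *	};
 *
 *	static int __init cpufreq_gov_foo_init(void)
 *	{
 *		return cpufreq_register_governor(&cpufreq_gov_foo);
 *	}
 *
 *	static void __exit cpufreq_gov_foo_exit(void)
 *	{
 *		cpufreq_unregister_governor(&cpufreq_gov_foo);
 *	}
 */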
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * policy : current policy.
 * new_policy: policy to be set.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/*
	 * This check works well when we store new min/max freq attributes,
	 * because new_policy is a copy of policy with one field updated.
	 */
	if (new_policy->min > new_policy->max)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	policy->cached_target_freq = UINT_MAX;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	if (new_policy->governor == policy->governor) {
		pr_debug("cpufreq: governor limits update\n");
		cpufreq_governor_limits(policy);
		return 0;
	}

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		cpufreq_stop_governor(policy);
		cpufreq_exit_governor(policy);
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	ret = cpufreq_init_governor(policy);
	if (!ret) {
		ret = cpufreq_start_governor(policy);
		if (!ret) {
			pr_debug("cpufreq: governor change\n");
			return 0;
		}
		cpufreq_exit_governor(policy);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (cpufreq_init_governor(policy))
			policy->governor = NULL;
		else
			cpufreq_start_governor(policy);
	}

	return ret;
}
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
void cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;

	if (!policy)
		return;

	down_write(&policy->rwsem);

	if (policy_is_inactive(policy))
		goto unlock;

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		if (cpufreq_suspended)
			goto unlock;

		new_policy.cur = cpufreq_update_current_freq(policy);
		if (WARN_ON(!new_policy.cur))
			goto unlock;
	}

	cpufreq_set_policy(policy, &new_policy);

unlock:
	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
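/*
 * Example (illustrative): platform code that learns firmware changed the
 * allowed limits behind the kernel's back can have every CPU's policy
 * re-evaluated; this is roughly what the ACPI processor driver does on a
 * _PPC notification:
 *
 *	unsigned int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		cpufreq_update_policy(cpu);
 */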
/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		if (!policy->freq_table)
			continue;

		ret = cpufreq_frequency_table_cpuinfo(policy,
						      policy->freq_table);
		if (ret) {
			pr_err("%s: Policy frequency update failed\n",
			       __func__);
			break;
		}

		down_write(&policy->rwsem);
		policy->user_policy.max = policy->max;
		cpufreq_governor_limits(policy);
		up_write(&policy->rwsem);
	}

	return ret;
}
int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}
static bool cpufreq_boost_supported(void)
{
	return likely(cpufreq_driver) && cpufreq_driver->set_boost;
}
static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}
int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
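/*
 * Example (illustrative): a driver whose hardware has boost frequencies but
 * no dedicated boost hook can opt into the software fallback above from its
 * ->init() path (example_has_boost_freqs() is a hypothetical helper):
 *
 *	static int example_cpu_init(struct cpufreq_policy *policy)
 *	{
 *		if (example_has_boost_freqs())
 *			return cpufreq_enable_boost_support();
 *		return 0;
 *	}
 *
 * Once set up, user space toggles the state via the global attribute:
 *	echo 1 > /sys/devices/system/cpu/cpufreq/boost
 */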
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/
static enum cpuhp_state hp_online;

static int cpuhp_cpufreq_online(unsigned int cpu)
{
	cpufreq_online(cpu);

	return 0;
}

static int cpuhp_cpufreq_offline(unsigned int cpu)
{
	cpufreq_offline(cpu);

	return 0;
}
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	cpus_read_lock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "cpufreq:online",
						   cpuhp_cpufreq_online,
						   cpuhp_cpufreq_offline);
	if (ret < 0)
		goto err_if_unreg;
	hp_online = ret;
	ret = 0;

	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	cpus_read_lock();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	cpuhp_remove_state_nocalls_cpuslocked(hp_online);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
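/*
 * Example (illustrative, hypothetical "example" driver): the minimum a
 * ->target_index style driver supplies before cpufreq_register_driver()
 * will accept it - verify + init plus exactly one of setpolicy, target or
 * target_index:
 *
 *	static struct cpufreq_driver example_driver = {
 *		.name		= "example",
 *		.init		= example_cpu_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= example_target_index,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return cpufreq_register_driver(&example_driver);
 *	}
 *
 *	static void __exit example_module_exit(void)
 *	{
 *		cpufreq_unregister_driver(&example_driver);
 *	}
 */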
/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
module_param(off, int, 0444);
core_initcall(cpufreq_core_init);