/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *           (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
/* Macros to iterate over lists */
/* Iterate over online CPUs policies */
static LIST_HEAD(cpufreq_policy_list);
#define for_each_policy(__policy)				\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);

/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;
static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}
/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections.
 */
static DECLARE_RWSEM(cpufreq_rwsem);
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);
/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);
bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
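/*
 * Illustrative sketch (not part of this file): how a governor-style
 * consumer could derive a load estimate from two successive samples of
 * get_cpu_idle_time(). All "example_" names are hypothetical.
 */
#if 0
static unsigned int example_cpu_load(unsigned int cpu,
				     u64 *prev_idle, u64 *prev_wall)
{
	u64 wall, idle = get_cpu_idle_time(cpu, &wall, 0 /* !io_busy */);
	u64 d_idle = idle - *prev_idle;
	u64 d_wall = wall - *prev_wall;

	*prev_idle = idle;
	*prev_wall = wall;

	if (!d_wall || d_idle > d_wall)
		return 0;

	/* busy percentage over the sampling window */
	return div64_u64(100 * (d_wall - d_idle), d_wall);
}
#endif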
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed
 * - set the policy's transition latency
 * - populate policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the same clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
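/*
 * Illustrative sketch: a minimal driver ->init() built on
 * cpufreq_generic_init(). The table entries and the 100000 ns
 * transition latency are made-up example values; "example_" names
 * are hypothetical.
 */
#if 0
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .frequency = 500000 },		/* 500 MHz, table is in kHz */
	{ .frequency = 1000000 },		/* 1 GHz */
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	/* validates the table and marks all possible CPUs as affected */
	return cpufreq_generic_init(policy, example_freq_table, 100000);
}
#endif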
unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
/* Only for cpufreq core internal use */
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	return per_cpu(cpufreq_cpu_data, cpu);
}
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		return NULL;

	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = per_cpu(cpufreq_cpu_data, cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
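/*
 * Illustrative usage sketch: every successful cpufreq_cpu_get() must be
 * balanced by cpufreq_cpu_put(), which drops both the kobject reference
 * and the read lock on cpufreq_rwsem taken above. "example_" name is
 * hypothetical.
 */
#if 0
static unsigned int example_read_cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int cur = 0;

	if (policy) {
		cur = policy->cur;
		cpufreq_cpu_put(policy);
	}
	return cur;
}
#endif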
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{
	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
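/*
 * Illustrative sketch: a driver that does its own notification would
 * bracket the hardware write with _begin()/_end() exactly like this;
 * the core does the same in __target_index() below. "example_" names
 * are hypothetical.
 */
#if 0
static int example_set_freq(struct cpufreq_policy *policy,
			    unsigned int new_freq_khz)
{
	struct cpufreq_freqs freqs = {
		.old = policy->cur,
		.new = new_freq_khz,
	};
	int ret;

	cpufreq_freq_transition_begin(policy, &freqs);
	ret = example_write_freq_to_hw(new_freq_khz);	/* hypothetical */
	cpufreq_freq_transition_end(policy, &freqs, ret);

	return ret;
}
#endif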
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}
static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);
static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}
/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strncasecmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
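/*
 * For reference, show_one(scaling_min_freq, min) above expands to
 * roughly the following (whitespace aside); shown only to illustrate
 * what the macro generates.
 */
#if 0
static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", policy->min);
}
#endif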
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);
/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);		\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}
store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}
/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!down_read_trylock(&cpufreq_rwsem))
		return -EINVAL;

	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	down_write(&policy->rwsem);

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	up_write(&policy->rwsem);

	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;
int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);
void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);
int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);
void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct device *cpu_dev;

		if (j == policy->cpu)
			continue;

		pr_debug("Adding link for CPU: %u\n", j);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret)
			break;
	}
	return ret;
}
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return cpufreq_add_dev_symlink(policy);
}
static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;
	int ret = 0;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
	if (gov)
		pr_debug("Restoring governor %s for cpu %d\n",
				policy->governor->name, policy->cpu);
	else
		gov = CPUFREQ_DEFAULT_GOVERNOR;

	new_policy.governor = gov;

	/* Use the default policy if it is valid. */
	if (cpufreq_driver->setpolicy)
		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

	/* set default policy */
	ret = cpufreq_set_policy(policy, &new_policy);
	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
}
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev)
{
	int ret = 0;
	unsigned long flags;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	up_write(&policy->rwsem);

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data_fallback, cpu);

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (policy)
		policy->governor = NULL;

	return policy;
}
static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
	struct cpufreq_policy *policy;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	return policy;

err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_REMOVE_POLICY, policy);

	down_read(&policy->rwsem);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_read(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
			     struct device *cpu_dev)
{
	int ret;

	if (WARN_ON(cpu == policy->cpu))
		return 0;

	/* Move kobject to the new policy->cpu */
	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
	if (ret) {
		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
		return ret;
	}

	down_write(&policy->rwsem);
	policy->cpu = cpu;
	up_write(&policy->rwsem);

	return 0;
}
static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
	bool recover_policy = cpufreq_suspended;

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get_raw(cpu);
	if (unlikely(policy))
		return 0;

	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_policy(policy) {
		if (cpumask_test_cpu(cpu, policy->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			ret = cpufreq_add_policy_cpu(policy, cpu, dev);
			up_read(&cpufreq_rwsem);
			return ret;
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/*
	 * Restore the saved policy when doing light-weight init and fall back
	 * to the full init if that fails.
	 */
	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
	if (!policy) {
		recover_policy = false;
		policy = cpufreq_policy_alloc();
		if (!policy)
			goto nomem_out;
	}

	/*
	 * In the resume path, since we restore a saved policy, the assignment
	 * to policy->cpu is like an update of the existing policy, rather than
	 * the creation of a brand new one. So we need to perform this update
	 * by invoking update_policy_cpu().
	 */
	if (recover_policy && cpu != policy->cpu)
		WARN_ON(update_policy_cpu(policy, cpu, dev));
	else
		policy->cpu = cpu;

	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	down_write(&policy->rwsem);

	/* related cpus should at least have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the ones that are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (!recover_policy) {
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;

		/* prepare interface data */
		ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
					   &dev->kobj, "cpufreq");
		if (ret) {
			pr_err("%s: failed to init policy->kobj: %d\n",
			       __func__, ret);
			goto err_init_policy_kobj;
		}
	}

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto err_get_freq;
		}
	}

	/*
	 * Sometimes boot loaders set the CPU frequency to a value outside of
	 * the frequency table present with the cpufreq core. In such cases
	 * the CPU might be unstable if it has to run on that frequency for a
	 * long duration, so it is better to set it to a frequency which is
	 * specified in the freq-table. This also makes cpufreq stats
	 * inconsistent, as cpufreq-stats would fail to register because the
	 * current frequency of the CPU isn't found in the freq-table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, otherwise we will end up setting freq to the lowest entry
	 * of the table, as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u kHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u kHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

	if (!recover_policy) {
		ret = cpufreq_add_dev_interface(policy, dev);
		if (ret)
			goto err_out_unregister;
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);
	}

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_add(&policy->policy_list, &cpufreq_policy_list);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_init_policy(policy);

	if (!recover_policy) {
		policy->user_policy.policy = policy->policy;
		policy->user_policy.governor = policy->governor;
	}
	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	up_read(&cpufreq_rwsem);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
err_get_freq:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!recover_policy) {
		kobject_put(&policy->kobj);
		wait_for_completion(&policy->kobj_unregister);
	}
err_init_policy_kobj:
	up_write(&policy->rwsem);

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);
err_set_policy_cpu:
	if (recover_policy) {
		/* Do not leave stale fallback data behind. */
		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
		cpufreq_policy_put_kobj(policy);
	}
	cpufreq_policy_free(policy);

nomem_out:
	up_read(&cpufreq_rwsem);

	return ret;
}
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	return __cpufreq_add_dev(dev, sif);
}
static int __cpufreq_remove_dev_prepare(struct device *dev,
					struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data, cpu);

	/* Save the policy somewhere when doing a light-weight tear-down */
	if (cpufreq_suspended)
		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}

		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			policy->governor->name, CPUFREQ_NAME_LEN);
	}

	down_read(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);
	up_read(&policy->rwsem);

	if (cpu != policy->cpu) {
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* Nominate new CPU */
		int new_cpu = cpumask_any_but(policy->cpus, cpu);
		struct device *cpu_dev = get_cpu_device(new_cpu);

		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = update_policy_cpu(policy, new_cpu, cpu_dev);
		if (ret) {
			if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					      "cpufreq"))
				pr_err("%s: Failed to restore kobj link to cpu:%d\n",
				       __func__, cpu_dev->id);
			return ret;
		}

		if (!cpufreq_suspended)
			pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				 __func__, new_cpu, cpu);
	} else if (cpufreq_driver->stop_cpu) {
		cpufreq_driver->stop_cpu(policy);
	}

	return 0;
}
static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	down_write(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (has_target()) {
			ret = __cpufreq_governor(policy,
					CPUFREQ_GOV_POLICY_EXIT);
			if (ret) {
				pr_err("%s: Failed to exit governor\n",
				       __func__);
				return ret;
			}
		}

		if (!cpufreq_suspended)
			cpufreq_policy_put_kobj(policy);

		/*
		 * Perform the ->exit() even during light-weight tear-down,
		 * since this is a core component, and is essential for the
		 * subsequent light-weight ->init() to succeed.
		 */
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);

		/* Remove policy from list of active policies */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_del(&policy->policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		if (!cpufreq_suspended)
			cpufreq_policy_free(policy);
	} else if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return 0;
}
/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int ret;

	if (cpu_is_offline(cpu))
		return 0;

	ret = __cpufreq_remove_dev_prepare(dev, sif);

	if (!ret)
		ret = __cpufreq_remove_dev_finish(dev, sif);

	return ret;
}
static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 *	in deep trouble.
 * @policy: policy managing CPUs
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		return cpufreq_driver->get(cpu);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(policy->cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(policy, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		ret_freq = __cpufreq_get(policy);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
/*
 * In case platform wants some specific frequency to be configured
 * during suspend..
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_err("%s: suspend_freq can't be zero\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
			policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
			CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
		       __func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle. Some of the devices (e.g. i2c, regulators) they use for changing
 * frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target())
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_policy(policy) {
		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
			pr_err("%s: Failed to stop governor for policy: %p\n",
				__func__, policy);
		else if (cpufreq_driver->suspend
		    && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %p\n", __func__,
				policy);
	}

suspend:
	cpufreq_suspended = true;
}
/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	cpufreq_suspended = false;

	if (!has_target())
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
			pr_err("%s: Failed to resume driver: %p\n", __func__,
				policy);
		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
			pr_err("%s: Failed to start governor for policy: %p\n",
				__func__, policy);

		/*
		 * Schedule a call to cpufreq_update_policy() for the boot
		 * CPU, i.e. the last policy in the list. It will verify that
		 * the current freq is in sync with what we believe it to be.
		 */
		if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
			schedule_work(&policy->update);
	}
}
/**
 *	cpufreq_get_current_driver - return current driver's name
 *
 *	Return the name string of the currently loaded cpufreq driver
 *	or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
/**
 *	cpufreq_get_driver_data - return current driver data
 *
 *	Return the private data of the currently loaded cpufreq
 *	driver, or NULL if no cpufreq driver is loaded.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/
/**
 *	cpufreq_register_notifier - register a driver with cpufreq
 *	@nb: notifier function to register
 *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *	Add a driver to one of two lists: either a list of drivers that
 *	are notified about clock rate changes (once before and once after
 *	the transition), or a list of drivers that are notified about
 *	changes in cpufreq policy.
 *
 *	This function may sleep, and has the same return conditions as
 *	blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
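/*
 * Illustrative sketch: registering a transition notifier from another
 * subsystem. The callback runs once with CPUFREQ_PRECHANGE and once
 * with CPUFREQ_POSTCHANGE per affected CPU. "example_" names are
 * hypothetical.
 */
#if 0
static int example_transition_cb(struct notifier_block *nb,
				 unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_debug("cpu%u now at %u kHz\n", freqs->cpu, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_transition_cb,
};

/* in the client's init path:
 *	cpufreq_register_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */
#endif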
/**
 *	cpufreq_unregister_notifier - unregister a driver with cpufreq
 *	@nb: notifier block to be unregistered
 *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *	Remove a driver from the CPU frequency notifier list.
 *
 *	This function may sleep, and has the same return conditions as
 *	blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/
/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}
static int __target_index(struct cpufreq_policy *policy,
			  struct cpufreq_frequency_table *freq_table, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	int retval = -EINVAL;
	bool notify;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = freq_table[index].frequency;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	int retval = -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);
	else if (cpufreq_driver->target_index) {
		struct cpufreq_frequency_table *freq_table;
		int index;

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (unlikely(!freq_table)) {
			pr_err("%s: Unable to find freq_table\n", __func__);
			goto out;
		}

		retval = cpufreq_frequency_table_target(policy, freq_table,
				target_freq, relation, &index);
		if (unlikely(retval)) {
			pr_err("%s: Unable to find matching freq\n", __func__);
			goto out;
		}

		if (freq_table[index].frequency == policy->cur) {
			retval = 0;
			goto out;
		}

		retval = __target_index(policy, freq_table, index);
	}

out:
	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
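/*
 * Illustrative usage sketch: callers outside the core take the policy
 * rwsem through cpufreq_driver_target(); the requested frequency is
 * clamped to [policy->min, policy->max] before the driver sees it.
 */
#if 0
/* ask for "at least 800 MHz"; silently clamped to the policy limits */
cpufreq_driver_target(policy, 800000, CPUFREQ_RELATION_L);
#endif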
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/*
	 * Must only be defined when the default governor is known to have
	 * latency restrictions, like e.g. conservative or ondemand.
	 * That this is the case is already ensured in Kconfig.
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;
	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		}
	}

	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
		 policy->cpu, event);

	mutex_lock(&cpufreq_governor_lock);
	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
	    || (!policy->governor_enabled
	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
		mutex_unlock(&cpufreq_governor_lock);
		return -EBUSY;
	}

	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}
*governor
)
2073 if (cpufreq_disabled())
2076 mutex_lock(&cpufreq_governor_mutex
);
2078 governor
->initialized
= 0;
2080 if (!find_governor(governor
->name
)) {
2082 list_add(&governor
->governor_list
, &cpufreq_governor_list
);
2085 mutex_unlock(&cpufreq_governor_mutex
);
2088 EXPORT_SYMBOL_GPL(cpufreq_register_governor
);
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	int cpu;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
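/*
 * Illustrative sketch: a governor registers itself once at module init
 * and unregisters on exit; the core then finds it by name via
 * find_governor()/cpufreq_parse_governor(). "example_" names are
 * hypothetical.
 */
#if 0
static int example_governor_fn(struct cpufreq_policy *policy,
			       unsigned int event)
{
	/* react to CPUFREQ_GOV_START/STOP/LIMITS/POLICY_INIT/POLICY_EXIT */
	return 0;
}

static struct cpufreq_governor example_gov = {
	.name		= "example",
	.governor	= example_governor_fn,
	.owner		= THIS_MODULE,
};

static int __init example_gov_init(void)
{
	return cpufreq_register_governor(&example_gov);
}

static void __exit example_gov_exit(void)
{
	cpufreq_unregister_governor(&example_gov);
}
#endif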
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/
/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * policy : current policy.
 * new_policy: policy to be set.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	if (new_policy->min > policy->max || new_policy->max < policy->min)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	if (new_policy->governor == policy->governor)
		goto out;

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
			goto out;

		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
	}

	return -EINVAL;

 out:
	pr_debug("governor: change or update limits\n");
	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}
/**
 *	cpufreq_update_policy - re-evaluate an existing cpufreq policy
 *	@cpu: CPU which shall be re-evaluated
 *
 *	Useful for policy notifiers which have different necessities
 *	at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy)
		return -ENODEV;

	down_write(&policy->rwsem);

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;
	new_policy.policy = policy->user_policy.policy;
	new_policy.governor = policy->user_policy.governor;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		new_policy.cur = cpufreq_driver->get(cpu);
		if (WARN_ON(!new_policy.cur)) {
			ret = -EIO;
			goto unlock;
		}

		if (!policy->cur) {
			pr_debug("Driver did not initialize current freq\n");
			policy->cur = new_policy.cur;
		} else {
			if (policy->cur != new_policy.cur && has_target())
				cpufreq_out_of_sync(policy, new_policy.cur);
		}
	}

	ret = cpufreq_set_policy(policy, &new_policy);

unlock:
	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
static int cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (dev) {
		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_ONLINE:
			__cpufreq_add_dev(dev, NULL);
			break;

		case CPU_DOWN_PREPARE:
			__cpufreq_remove_dev_prepare(dev, NULL);
			break;

		case CPU_POST_DEAD:
			__cpufreq_remove_dev_finish(dev, NULL);
			break;

		case CPU_DOWN_FAILED:
			__cpufreq_add_dev(dev, NULL);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_policy(policy) {
		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (freq_table) {
			ret = cpufreq_frequency_table_cpuinfo(policy,
							freq_table);
			if (ret) {
				pr_err("%s: Policy frequency update failed\n",
				       __func__);
				break;
			}
			policy->user_policy.max = policy->max;
			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		}
	}

	return ret;
}
int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}
int cpufreq_boost_supported(void)
{
	if (likely(cpufreq_driver))
		return cpufreq_driver->boost_supported;

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_supported);

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EEXIST;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		/*
		 * Check if driver provides function to enable boost -
		 * if not, use cpufreq_boost_set_sw as default
		 */
		if (!cpufreq_driver->set_boost)
			cpufreq_driver->set_boost = cpufreq_boost_set_sw;

		ret = cpufreq_sysfs_create_file(&boost.attr);
		if (ret) {
			pr_err("%s: cannot register global BOOST sysfs file\n",
			       __func__);
			goto err_null_driver;
		}
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
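/*
 * Illustrative sketch: the smallest ->target_index style driver that
 * passes the validation above (verify + init + exactly one of
 * setpolicy/target/target_index). "example_" names are hypothetical;
 * example_cpufreq_init and example_freq_table are assumed to exist as
 * in the sketch near cpufreq_generic_init().
 */
#if 0
static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* program the hardware to example_freq_table[index].frequency */
	return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,
	.init		= example_cpufreq_init,
	.attr		= cpufreq_generic_attr,
};

static int __init example_driver_init(void)
{
	return cpufreq_register_driver(&example_cpufreq_driver);
}
#endif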
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);

	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);