act_bpf: properly support late binding of bpf action to a classifier
[linux/fpc-iii.git] / drivers / cpufreq / cpufreq.c
blob7a3c30c4336f3cb9b7b67d5965b77175b4cad53f
1 /*
2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/suspend.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/tick.h>
32 #include <trace/events/power.h>
34 static LIST_HEAD(cpufreq_policy_list);
36 static inline bool policy_is_inactive(struct cpufreq_policy *policy)
38 return cpumask_empty(policy->cpus);
41 static bool suitable_policy(struct cpufreq_policy *policy, bool active)
43 return active == !policy_is_inactive(policy);
46 /* Finds Next Acive/Inactive policy */
47 static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
48 bool active)
50 do {
51 policy = list_next_entry(policy, policy_list);
53 /* No more policies in the list */
54 if (&policy->policy_list == &cpufreq_policy_list)
55 return NULL;
56 } while (!suitable_policy(policy, active));
58 return policy;
61 static struct cpufreq_policy *first_policy(bool active)
63 struct cpufreq_policy *policy;
65 /* No policies in the list */
66 if (list_empty(&cpufreq_policy_list))
67 return NULL;
69 policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
70 policy_list);
72 if (!suitable_policy(policy, active))
73 policy = next_policy(policy, active);
75 return policy;
78 /* Macros to iterate over CPU policies */
79 #define for_each_suitable_policy(__policy, __active) \
80 for (__policy = first_policy(__active); \
81 __policy; \
82 __policy = next_policy(__policy, __active))
84 #define for_each_active_policy(__policy) \
85 for_each_suitable_policy(__policy, true)
86 #define for_each_inactive_policy(__policy) \
87 for_each_suitable_policy(__policy, false)
89 #define for_each_policy(__policy) \
90 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
92 /* Iterate over governors */
93 static LIST_HEAD(cpufreq_governor_list);
94 #define for_each_governor(__governor) \
95 list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
97 /**
98 * The "cpufreq driver" - the arch- or hardware-dependent low
99 * level driver of CPUFreq support, and its spinlock. This lock
100 * also protects the cpufreq_cpu_data array.
102 static struct cpufreq_driver *cpufreq_driver;
103 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
104 static DEFINE_RWLOCK(cpufreq_driver_lock);
105 DEFINE_MUTEX(cpufreq_governor_lock);
107 /* Flag to suspend/resume CPUFreq governors */
108 static bool cpufreq_suspended;
110 static inline bool has_target(void)
112 return cpufreq_driver->target_index || cpufreq_driver->target;
116 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
117 * sections
119 static DECLARE_RWSEM(cpufreq_rwsem);
121 /* internal prototypes */
122 static int __cpufreq_governor(struct cpufreq_policy *policy,
123 unsigned int event);
124 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
125 static void handle_update(struct work_struct *work);
128 * Two notifier lists: the "policy" list is involved in the
129 * validation process for a new CPU frequency policy; the
130 * "transition" list for kernel code that needs to handle
131 * changes to devices when the CPU clock speed changes.
132 * The mutex locks both lists.
134 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
135 static struct srcu_notifier_head cpufreq_transition_notifier_list;
137 static bool init_cpufreq_transition_notifier_list_called;
138 static int __init init_cpufreq_transition_notifier_list(void)
140 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
141 init_cpufreq_transition_notifier_list_called = true;
142 return 0;
144 pure_initcall(init_cpufreq_transition_notifier_list);
146 static int off __read_mostly;
147 static int cpufreq_disabled(void)
149 return off;
151 void disable_cpufreq(void)
153 off = 1;
155 static DEFINE_MUTEX(cpufreq_governor_mutex);
157 bool have_governor_per_policy(void)
159 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
161 EXPORT_SYMBOL_GPL(have_governor_per_policy);
163 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
165 if (have_governor_per_policy())
166 return &policy->kobj;
167 else
168 return cpufreq_global_kobject;
170 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
172 struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
174 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
176 return policy && !policy_is_inactive(policy) ?
177 policy->freq_table : NULL;
179 EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
181 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
183 u64 idle_time;
184 u64 cur_wall_time;
185 u64 busy_time;
187 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
189 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
190 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
191 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
192 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
193 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
194 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
196 idle_time = cur_wall_time - busy_time;
197 if (wall)
198 *wall = cputime_to_usecs(cur_wall_time);
200 return cputime_to_usecs(idle_time);
203 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
205 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
207 if (idle_time == -1ULL)
208 return get_cpu_idle_time_jiffy(cpu, wall);
209 else if (!io_busy)
210 idle_time += get_cpu_iowait_time_us(cpu, wall);
212 return idle_time;
214 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
217 * This is a generic cpufreq init() routine which can be used by cpufreq
218 * drivers of SMP systems. It will do following:
219 * - validate & show freq table passed
220 * - set policies transition latency
221 * - policy->cpus with all possible CPUs
223 int cpufreq_generic_init(struct cpufreq_policy *policy,
224 struct cpufreq_frequency_table *table,
225 unsigned int transition_latency)
227 int ret;
229 ret = cpufreq_table_validate_and_show(policy, table);
230 if (ret) {
231 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
232 return ret;
235 policy->cpuinfo.transition_latency = transition_latency;
238 * The driver only supports the SMP configuration where all processors
239 * share the clock and voltage and clock.
241 cpumask_setall(policy->cpus);
243 return 0;
245 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
247 /* Only for cpufreq core internal use */
248 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
250 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
252 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
255 unsigned int cpufreq_generic_get(unsigned int cpu)
257 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
259 if (!policy || IS_ERR(policy->clk)) {
260 pr_err("%s: No %s associated to cpu: %d\n",
261 __func__, policy ? "clk" : "policy", cpu);
262 return 0;
265 return clk_get_rate(policy->clk) / 1000;
267 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
270 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
272 * @cpu: cpu to find policy for.
274 * This returns policy for 'cpu', returns NULL if it doesn't exist.
275 * It also increments the kobject reference count to mark it busy and so would
276 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
277 * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
278 * freed as that depends on the kobj count.
280 * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
281 * valid policy is found. This is done to make sure the driver doesn't get
282 * unregistered while the policy is being used.
284 * Return: A valid policy on success, otherwise NULL on failure.
286 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
288 struct cpufreq_policy *policy = NULL;
289 unsigned long flags;
291 if (WARN_ON(cpu >= nr_cpu_ids))
292 return NULL;
294 if (!down_read_trylock(&cpufreq_rwsem))
295 return NULL;
297 /* get the cpufreq driver */
298 read_lock_irqsave(&cpufreq_driver_lock, flags);
300 if (cpufreq_driver) {
301 /* get the CPU */
302 policy = cpufreq_cpu_get_raw(cpu);
303 if (policy)
304 kobject_get(&policy->kobj);
307 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
309 if (!policy)
310 up_read(&cpufreq_rwsem);
312 return policy;
314 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
317 * cpufreq_cpu_put: Decrements the usage count of a policy
319 * @policy: policy earlier returned by cpufreq_cpu_get().
321 * This decrements the kobject reference count incremented earlier by calling
322 * cpufreq_cpu_get().
324 * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
326 void cpufreq_cpu_put(struct cpufreq_policy *policy)
328 kobject_put(&policy->kobj);
329 up_read(&cpufreq_rwsem);
331 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
333 /*********************************************************************
334 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
335 *********************************************************************/
338 * adjust_jiffies - adjust the system "loops_per_jiffy"
340 * This function alters the system "loops_per_jiffy" for the clock
341 * speed change. Note that loops_per_jiffy cannot be updated on SMP
342 * systems as each CPU might be scaled differently. So, use the arch
343 * per-CPU loops_per_jiffy value wherever possible.
345 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
347 #ifndef CONFIG_SMP
348 static unsigned long l_p_j_ref;
349 static unsigned int l_p_j_ref_freq;
351 if (ci->flags & CPUFREQ_CONST_LOOPS)
352 return;
354 if (!l_p_j_ref_freq) {
355 l_p_j_ref = loops_per_jiffy;
356 l_p_j_ref_freq = ci->old;
357 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
358 l_p_j_ref, l_p_j_ref_freq);
360 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
361 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
362 ci->new);
363 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
364 loops_per_jiffy, ci->new);
366 #endif
369 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
370 struct cpufreq_freqs *freqs, unsigned int state)
372 BUG_ON(irqs_disabled());
374 if (cpufreq_disabled())
375 return;
377 freqs->flags = cpufreq_driver->flags;
378 pr_debug("notification %u of frequency transition to %u kHz\n",
379 state, freqs->new);
381 switch (state) {
383 case CPUFREQ_PRECHANGE:
384 /* detect if the driver reported a value as "old frequency"
385 * which is not equal to what the cpufreq core thinks is
386 * "old frequency".
388 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
389 if ((policy) && (policy->cpu == freqs->cpu) &&
390 (policy->cur) && (policy->cur != freqs->old)) {
391 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
392 freqs->old, policy->cur);
393 freqs->old = policy->cur;
396 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
397 CPUFREQ_PRECHANGE, freqs);
398 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
399 break;
401 case CPUFREQ_POSTCHANGE:
402 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
403 pr_debug("FREQ: %lu - CPU: %lu\n",
404 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
405 trace_cpu_frequency(freqs->new, freqs->cpu);
406 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
407 CPUFREQ_POSTCHANGE, freqs);
408 if (likely(policy) && likely(policy->cpu == freqs->cpu))
409 policy->cur = freqs->new;
410 break;
415 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
416 * on frequency transition.
418 * This function calls the transition notifiers and the "adjust_jiffies"
419 * function. It is called twice on all CPU frequency changes that have
420 * external effects.
422 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
423 struct cpufreq_freqs *freqs, unsigned int state)
425 for_each_cpu(freqs->cpu, policy->cpus)
426 __cpufreq_notify_transition(policy, freqs, state);
429 /* Do post notifications when there are chances that transition has failed */
430 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
431 struct cpufreq_freqs *freqs, int transition_failed)
433 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
434 if (!transition_failed)
435 return;
437 swap(freqs->old, freqs->new);
438 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
439 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
442 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
443 struct cpufreq_freqs *freqs)
447 * Catch double invocations of _begin() which lead to self-deadlock.
448 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
449 * doesn't invoke _begin() on their behalf, and hence the chances of
450 * double invocations are very low. Moreover, there are scenarios
451 * where these checks can emit false-positive warnings in these
452 * drivers; so we avoid that by skipping them altogether.
454 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
455 && current == policy->transition_task);
457 wait:
458 wait_event(policy->transition_wait, !policy->transition_ongoing);
460 spin_lock(&policy->transition_lock);
462 if (unlikely(policy->transition_ongoing)) {
463 spin_unlock(&policy->transition_lock);
464 goto wait;
467 policy->transition_ongoing = true;
468 policy->transition_task = current;
470 spin_unlock(&policy->transition_lock);
472 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
474 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
476 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
477 struct cpufreq_freqs *freqs, int transition_failed)
479 if (unlikely(WARN_ON(!policy->transition_ongoing)))
480 return;
482 cpufreq_notify_post_transition(policy, freqs, transition_failed);
484 policy->transition_ongoing = false;
485 policy->transition_task = NULL;
487 wake_up(&policy->transition_wait);
489 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
492 /*********************************************************************
493 * SYSFS INTERFACE *
494 *********************************************************************/
495 static ssize_t show_boost(struct kobject *kobj,
496 struct attribute *attr, char *buf)
498 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
501 static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
502 const char *buf, size_t count)
504 int ret, enable;
506 ret = sscanf(buf, "%d", &enable);
507 if (ret != 1 || enable < 0 || enable > 1)
508 return -EINVAL;
510 if (cpufreq_boost_trigger_state(enable)) {
511 pr_err("%s: Cannot %s BOOST!\n",
512 __func__, enable ? "enable" : "disable");
513 return -EINVAL;
516 pr_debug("%s: cpufreq BOOST %s\n",
517 __func__, enable ? "enabled" : "disabled");
519 return count;
521 define_one_global_rw(boost);
523 static struct cpufreq_governor *find_governor(const char *str_governor)
525 struct cpufreq_governor *t;
527 for_each_governor(t)
528 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
529 return t;
531 return NULL;
535 * cpufreq_parse_governor - parse a governor string
537 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
538 struct cpufreq_governor **governor)
540 int err = -EINVAL;
542 if (!cpufreq_driver)
543 goto out;
545 if (cpufreq_driver->setpolicy) {
546 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
547 *policy = CPUFREQ_POLICY_PERFORMANCE;
548 err = 0;
549 } else if (!strncasecmp(str_governor, "powersave",
550 CPUFREQ_NAME_LEN)) {
551 *policy = CPUFREQ_POLICY_POWERSAVE;
552 err = 0;
554 } else {
555 struct cpufreq_governor *t;
557 mutex_lock(&cpufreq_governor_mutex);
559 t = find_governor(str_governor);
561 if (t == NULL) {
562 int ret;
564 mutex_unlock(&cpufreq_governor_mutex);
565 ret = request_module("cpufreq_%s", str_governor);
566 mutex_lock(&cpufreq_governor_mutex);
568 if (ret == 0)
569 t = find_governor(str_governor);
572 if (t != NULL) {
573 *governor = t;
574 err = 0;
577 mutex_unlock(&cpufreq_governor_mutex);
579 out:
580 return err;
584 * cpufreq_per_cpu_attr_read() / show_##file_name() -
585 * print out cpufreq information
587 * Write out information from cpufreq_driver->policy[cpu]; object must be
588 * "unsigned int".
591 #define show_one(file_name, object) \
592 static ssize_t show_##file_name \
593 (struct cpufreq_policy *policy, char *buf) \
595 return sprintf(buf, "%u\n", policy->object); \
598 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
599 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
600 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
601 show_one(scaling_min_freq, min);
602 show_one(scaling_max_freq, max);
604 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
606 ssize_t ret;
608 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
609 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
610 else
611 ret = sprintf(buf, "%u\n", policy->cur);
612 return ret;
615 static int cpufreq_set_policy(struct cpufreq_policy *policy,
616 struct cpufreq_policy *new_policy);
619 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
621 #define store_one(file_name, object) \
622 static ssize_t store_##file_name \
623 (struct cpufreq_policy *policy, const char *buf, size_t count) \
625 int ret, temp; \
626 struct cpufreq_policy new_policy; \
628 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
629 if (ret) \
630 return -EINVAL; \
632 ret = sscanf(buf, "%u", &new_policy.object); \
633 if (ret != 1) \
634 return -EINVAL; \
636 temp = new_policy.object; \
637 ret = cpufreq_set_policy(policy, &new_policy); \
638 if (!ret) \
639 policy->user_policy.object = temp; \
641 return ret ? ret : count; \
644 store_one(scaling_min_freq, min);
645 store_one(scaling_max_freq, max);
648 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
650 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
651 char *buf)
653 unsigned int cur_freq = __cpufreq_get(policy);
654 if (!cur_freq)
655 return sprintf(buf, "<unknown>");
656 return sprintf(buf, "%u\n", cur_freq);
660 * show_scaling_governor - show the current policy for the specified CPU
662 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
664 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
665 return sprintf(buf, "powersave\n");
666 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
667 return sprintf(buf, "performance\n");
668 else if (policy->governor)
669 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
670 policy->governor->name);
671 return -EINVAL;
675 * store_scaling_governor - store policy for the specified CPU
677 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
678 const char *buf, size_t count)
680 int ret;
681 char str_governor[16];
682 struct cpufreq_policy new_policy;
684 ret = cpufreq_get_policy(&new_policy, policy->cpu);
685 if (ret)
686 return ret;
688 ret = sscanf(buf, "%15s", str_governor);
689 if (ret != 1)
690 return -EINVAL;
692 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
693 &new_policy.governor))
694 return -EINVAL;
696 ret = cpufreq_set_policy(policy, &new_policy);
698 policy->user_policy.policy = policy->policy;
699 policy->user_policy.governor = policy->governor;
701 if (ret)
702 return ret;
703 else
704 return count;
708 * show_scaling_driver - show the cpufreq driver currently loaded
710 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
712 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
716 * show_scaling_available_governors - show the available CPUfreq governors
718 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
719 char *buf)
721 ssize_t i = 0;
722 struct cpufreq_governor *t;
724 if (!has_target()) {
725 i += sprintf(buf, "performance powersave");
726 goto out;
729 for_each_governor(t) {
730 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
731 - (CPUFREQ_NAME_LEN + 2)))
732 goto out;
733 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
735 out:
736 i += sprintf(&buf[i], "\n");
737 return i;
740 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
742 ssize_t i = 0;
743 unsigned int cpu;
745 for_each_cpu(cpu, mask) {
746 if (i)
747 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
748 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
749 if (i >= (PAGE_SIZE - 5))
750 break;
752 i += sprintf(&buf[i], "\n");
753 return i;
755 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
758 * show_related_cpus - show the CPUs affected by each transition even if
759 * hw coordination is in use
761 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
763 return cpufreq_show_cpus(policy->related_cpus, buf);
767 * show_affected_cpus - show the CPUs affected by each transition
769 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
771 return cpufreq_show_cpus(policy->cpus, buf);
774 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
775 const char *buf, size_t count)
777 unsigned int freq = 0;
778 unsigned int ret;
780 if (!policy->governor || !policy->governor->store_setspeed)
781 return -EINVAL;
783 ret = sscanf(buf, "%u", &freq);
784 if (ret != 1)
785 return -EINVAL;
787 policy->governor->store_setspeed(policy, freq);
789 return count;
792 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
794 if (!policy->governor || !policy->governor->show_setspeed)
795 return sprintf(buf, "<unsupported>\n");
797 return policy->governor->show_setspeed(policy, buf);
801 * show_bios_limit - show the current cpufreq HW/BIOS limitation
803 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
805 unsigned int limit;
806 int ret;
807 if (cpufreq_driver->bios_limit) {
808 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
809 if (!ret)
810 return sprintf(buf, "%u\n", limit);
812 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
815 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
816 cpufreq_freq_attr_ro(cpuinfo_min_freq);
817 cpufreq_freq_attr_ro(cpuinfo_max_freq);
818 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
819 cpufreq_freq_attr_ro(scaling_available_governors);
820 cpufreq_freq_attr_ro(scaling_driver);
821 cpufreq_freq_attr_ro(scaling_cur_freq);
822 cpufreq_freq_attr_ro(bios_limit);
823 cpufreq_freq_attr_ro(related_cpus);
824 cpufreq_freq_attr_ro(affected_cpus);
825 cpufreq_freq_attr_rw(scaling_min_freq);
826 cpufreq_freq_attr_rw(scaling_max_freq);
827 cpufreq_freq_attr_rw(scaling_governor);
828 cpufreq_freq_attr_rw(scaling_setspeed);
830 static struct attribute *default_attrs[] = {
831 &cpuinfo_min_freq.attr,
832 &cpuinfo_max_freq.attr,
833 &cpuinfo_transition_latency.attr,
834 &scaling_min_freq.attr,
835 &scaling_max_freq.attr,
836 &affected_cpus.attr,
837 &related_cpus.attr,
838 &scaling_governor.attr,
839 &scaling_driver.attr,
840 &scaling_available_governors.attr,
841 &scaling_setspeed.attr,
842 NULL
845 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
846 #define to_attr(a) container_of(a, struct freq_attr, attr)
848 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
850 struct cpufreq_policy *policy = to_policy(kobj);
851 struct freq_attr *fattr = to_attr(attr);
852 ssize_t ret;
854 if (!down_read_trylock(&cpufreq_rwsem))
855 return -EINVAL;
857 down_read(&policy->rwsem);
859 if (fattr->show)
860 ret = fattr->show(policy, buf);
861 else
862 ret = -EIO;
864 up_read(&policy->rwsem);
865 up_read(&cpufreq_rwsem);
867 return ret;
870 static ssize_t store(struct kobject *kobj, struct attribute *attr,
871 const char *buf, size_t count)
873 struct cpufreq_policy *policy = to_policy(kobj);
874 struct freq_attr *fattr = to_attr(attr);
875 ssize_t ret = -EINVAL;
877 get_online_cpus();
879 if (!cpu_online(policy->cpu))
880 goto unlock;
882 if (!down_read_trylock(&cpufreq_rwsem))
883 goto unlock;
885 down_write(&policy->rwsem);
887 /* Updating inactive policies is invalid, so avoid doing that. */
888 if (unlikely(policy_is_inactive(policy))) {
889 ret = -EBUSY;
890 goto unlock_policy_rwsem;
893 if (fattr->store)
894 ret = fattr->store(policy, buf, count);
895 else
896 ret = -EIO;
898 unlock_policy_rwsem:
899 up_write(&policy->rwsem);
901 up_read(&cpufreq_rwsem);
902 unlock:
903 put_online_cpus();
905 return ret;
908 static void cpufreq_sysfs_release(struct kobject *kobj)
910 struct cpufreq_policy *policy = to_policy(kobj);
911 pr_debug("last reference is dropped\n");
912 complete(&policy->kobj_unregister);
915 static const struct sysfs_ops sysfs_ops = {
916 .show = show,
917 .store = store,
920 static struct kobj_type ktype_cpufreq = {
921 .sysfs_ops = &sysfs_ops,
922 .default_attrs = default_attrs,
923 .release = cpufreq_sysfs_release,
926 struct kobject *cpufreq_global_kobject;
927 EXPORT_SYMBOL(cpufreq_global_kobject);
929 static int cpufreq_global_kobject_usage;
931 int cpufreq_get_global_kobject(void)
933 if (!cpufreq_global_kobject_usage++)
934 return kobject_add(cpufreq_global_kobject,
935 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
937 return 0;
939 EXPORT_SYMBOL(cpufreq_get_global_kobject);
941 void cpufreq_put_global_kobject(void)
943 if (!--cpufreq_global_kobject_usage)
944 kobject_del(cpufreq_global_kobject);
946 EXPORT_SYMBOL(cpufreq_put_global_kobject);
948 int cpufreq_sysfs_create_file(const struct attribute *attr)
950 int ret = cpufreq_get_global_kobject();
952 if (!ret) {
953 ret = sysfs_create_file(cpufreq_global_kobject, attr);
954 if (ret)
955 cpufreq_put_global_kobject();
958 return ret;
960 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
962 void cpufreq_sysfs_remove_file(const struct attribute *attr)
964 sysfs_remove_file(cpufreq_global_kobject, attr);
965 cpufreq_put_global_kobject();
967 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
969 static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
971 struct device *cpu_dev;
973 pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);
975 if (!policy)
976 return 0;
978 cpu_dev = get_cpu_device(cpu);
979 if (WARN_ON(!cpu_dev))
980 return 0;
982 return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
985 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
987 struct device *cpu_dev;
989 pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);
991 cpu_dev = get_cpu_device(cpu);
992 if (WARN_ON(!cpu_dev))
993 return;
995 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
998 /* Add/remove symlinks for all related CPUs */
999 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
1001 unsigned int j;
1002 int ret = 0;
1004 /* Some related CPUs might not be present (physically hotplugged) */
1005 for_each_cpu(j, policy->real_cpus) {
1006 if (j == policy->kobj_cpu)
1007 continue;
1009 ret = add_cpu_dev_symlink(policy, j);
1010 if (ret)
1011 break;
1014 return ret;
1017 static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
1019 unsigned int j;
1021 /* Some related CPUs might not be present (physically hotplugged) */
1022 for_each_cpu(j, policy->real_cpus) {
1023 if (j == policy->kobj_cpu)
1024 continue;
1026 remove_cpu_dev_symlink(policy, j);
1030 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
1031 struct device *dev)
1033 struct freq_attr **drv_attr;
1034 int ret = 0;
1036 /* set up files for this cpu device */
1037 drv_attr = cpufreq_driver->attr;
1038 while (drv_attr && *drv_attr) {
1039 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1040 if (ret)
1041 return ret;
1042 drv_attr++;
1044 if (cpufreq_driver->get) {
1045 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1046 if (ret)
1047 return ret;
1050 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1051 if (ret)
1052 return ret;
1054 if (cpufreq_driver->bios_limit) {
1055 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1056 if (ret)
1057 return ret;
1060 return cpufreq_add_dev_symlink(policy);
1063 static void cpufreq_init_policy(struct cpufreq_policy *policy)
1065 struct cpufreq_governor *gov = NULL;
1066 struct cpufreq_policy new_policy;
1067 int ret = 0;
1069 memcpy(&new_policy, policy, sizeof(*policy));
1071 /* Update governor of new_policy to the governor used before hotplug */
1072 gov = find_governor(policy->last_governor);
1073 if (gov)
1074 pr_debug("Restoring governor %s for cpu %d\n",
1075 policy->governor->name, policy->cpu);
1076 else
1077 gov = CPUFREQ_DEFAULT_GOVERNOR;
1079 new_policy.governor = gov;
1081 /* Use the default policy if its valid. */
1082 if (cpufreq_driver->setpolicy)
1083 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
1085 /* set default policy */
1086 ret = cpufreq_set_policy(policy, &new_policy);
1087 if (ret) {
1088 pr_debug("setting policy failed\n");
1089 if (cpufreq_driver->exit)
1090 cpufreq_driver->exit(policy);
1094 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
1095 unsigned int cpu, struct device *dev)
1097 int ret = 0;
1099 /* Has this CPU been taken care of already? */
1100 if (cpumask_test_cpu(cpu, policy->cpus))
1101 return 0;
1103 if (has_target()) {
1104 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1105 if (ret) {
1106 pr_err("%s: Failed to stop governor\n", __func__);
1107 return ret;
1111 down_write(&policy->rwsem);
1112 cpumask_set_cpu(cpu, policy->cpus);
1113 up_write(&policy->rwsem);
1115 if (has_target()) {
1116 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1117 if (!ret)
1118 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1120 if (ret) {
1121 pr_err("%s: Failed to start governor\n", __func__);
1122 return ret;
1126 return 0;
1129 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
1131 struct cpufreq_policy *policy;
1132 unsigned long flags;
1134 read_lock_irqsave(&cpufreq_driver_lock, flags);
1135 policy = per_cpu(cpufreq_cpu_data, cpu);
1136 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1138 if (likely(policy)) {
1139 /* Policy should be inactive here */
1140 WARN_ON(!policy_is_inactive(policy));
1142 down_write(&policy->rwsem);
1143 policy->cpu = cpu;
1144 policy->governor = NULL;
1145 up_write(&policy->rwsem);
1148 return policy;
1151 static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
1153 struct cpufreq_policy *policy;
1154 int ret;
1156 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1157 if (!policy)
1158 return NULL;
1160 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1161 goto err_free_policy;
1163 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1164 goto err_free_cpumask;
1166 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1167 goto err_free_rcpumask;
1169 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
1170 "cpufreq");
1171 if (ret) {
1172 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
1173 goto err_free_real_cpus;
1176 INIT_LIST_HEAD(&policy->policy_list);
1177 init_rwsem(&policy->rwsem);
1178 spin_lock_init(&policy->transition_lock);
1179 init_waitqueue_head(&policy->transition_wait);
1180 init_completion(&policy->kobj_unregister);
1181 INIT_WORK(&policy->update, handle_update);
1183 policy->cpu = dev->id;
1185 /* Set this once on allocation */
1186 policy->kobj_cpu = dev->id;
1188 return policy;
1190 err_free_real_cpus:
1191 free_cpumask_var(policy->real_cpus);
1192 err_free_rcpumask:
1193 free_cpumask_var(policy->related_cpus);
1194 err_free_cpumask:
1195 free_cpumask_var(policy->cpus);
1196 err_free_policy:
1197 kfree(policy);
1199 return NULL;
1202 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
1204 struct kobject *kobj;
1205 struct completion *cmp;
1207 if (notify)
1208 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1209 CPUFREQ_REMOVE_POLICY, policy);
1211 down_write(&policy->rwsem);
1212 cpufreq_remove_dev_symlink(policy);
1213 kobj = &policy->kobj;
1214 cmp = &policy->kobj_unregister;
1215 up_write(&policy->rwsem);
1216 kobject_put(kobj);
1219 * We need to make sure that the underlying kobj is
1220 * actually not referenced anymore by anybody before we
1221 * proceed with unloading.
1223 pr_debug("waiting for dropping of refcount\n");
1224 wait_for_completion(cmp);
1225 pr_debug("wait complete\n");
1228 static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
1230 unsigned long flags;
1231 int cpu;
1233 /* Remove policy from list */
1234 write_lock_irqsave(&cpufreq_driver_lock, flags);
1235 list_del(&policy->policy_list);
1237 for_each_cpu(cpu, policy->related_cpus)
1238 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1239 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1241 cpufreq_policy_put_kobj(policy, notify);
1242 free_cpumask_var(policy->real_cpus);
1243 free_cpumask_var(policy->related_cpus);
1244 free_cpumask_var(policy->cpus);
1245 kfree(policy);
1249 * cpufreq_add_dev - add a CPU device
1251 * Adds the cpufreq interface for a CPU device.
1253 * The Oracle says: try running cpufreq registration/unregistration concurrently
1254 * with with cpu hotplugging and all hell will break loose. Tried to clean this
1255 * mess up, but more thorough testing is needed. - Mathieu
1257 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1259 unsigned int j, cpu = dev->id;
1260 int ret = -ENOMEM;
1261 struct cpufreq_policy *policy;
1262 unsigned long flags;
1263 bool recover_policy = !sif;
1265 pr_debug("adding CPU %u\n", cpu);
1267 if (cpu_is_offline(cpu)) {
1269 * Only possible if we are here from the subsys_interface add
1270 * callback. A hotplug notifier will follow and we will handle
1271 * it as CPU online then. For now, just create the sysfs link,
1272 * unless there is no policy or the link is already present.
1274 policy = per_cpu(cpufreq_cpu_data, cpu);
1275 return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
1276 ? add_cpu_dev_symlink(policy, cpu) : 0;
1279 if (!down_read_trylock(&cpufreq_rwsem))
1280 return 0;
1282 /* Check if this CPU already has a policy to manage it */
1283 policy = per_cpu(cpufreq_cpu_data, cpu);
1284 if (policy && !policy_is_inactive(policy)) {
1285 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1286 ret = cpufreq_add_policy_cpu(policy, cpu, dev);
1287 up_read(&cpufreq_rwsem);
1288 return ret;
1292 * Restore the saved policy when doing light-weight init and fall back
1293 * to the full init if that fails.
1295 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
1296 if (!policy) {
1297 recover_policy = false;
1298 policy = cpufreq_policy_alloc(dev);
1299 if (!policy)
1300 goto nomem_out;
1303 cpumask_copy(policy->cpus, cpumask_of(cpu));
1305 /* call driver. From then on the cpufreq must be able
1306 * to accept all calls to ->verify and ->setpolicy for this CPU
1308 ret = cpufreq_driver->init(policy);
1309 if (ret) {
1310 pr_debug("initialization failed\n");
1311 goto err_set_policy_cpu;
1314 down_write(&policy->rwsem);
1316 /* related cpus should atleast have policy->cpus */
1317 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1319 /* Remember which CPUs have been present at the policy creation time. */
1320 if (!recover_policy)
1321 cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
1324 * affected cpus must always be the one, which are online. We aren't
1325 * managing offline cpus here.
1327 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1329 if (!recover_policy) {
1330 policy->user_policy.min = policy->min;
1331 policy->user_policy.max = policy->max;
1333 write_lock_irqsave(&cpufreq_driver_lock, flags);
1334 for_each_cpu(j, policy->related_cpus)
1335 per_cpu(cpufreq_cpu_data, j) = policy;
1336 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1339 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1340 policy->cur = cpufreq_driver->get(policy->cpu);
1341 if (!policy->cur) {
1342 pr_err("%s: ->get() failed\n", __func__);
1343 goto err_get_freq;
1348 * Sometimes boot loaders set CPU frequency to a value outside of
1349 * frequency table present with cpufreq core. In such cases CPU might be
1350 * unstable if it has to run on that frequency for long duration of time
1351 * and so its better to set it to a frequency which is specified in
1352 * freq-table. This also makes cpufreq stats inconsistent as
1353 * cpufreq-stats would fail to register because current frequency of CPU
1354 * isn't found in freq-table.
1356 * Because we don't want this change to effect boot process badly, we go
1357 * for the next freq which is >= policy->cur ('cur' must be set by now,
1358 * otherwise we will end up setting freq to lowest of the table as 'cur'
1359 * is initialized to zero).
1361 * We are passing target-freq as "policy->cur - 1" otherwise
1362 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1363 * equal to target-freq.
1365 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1366 && has_target()) {
1367 /* Are we running at unknown frequency ? */
1368 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1369 if (ret == -EINVAL) {
1370 /* Warn user and fix it */
1371 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1372 __func__, policy->cpu, policy->cur);
1373 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1374 CPUFREQ_RELATION_L);
1377 * Reaching here after boot in a few seconds may not
1378 * mean that system will remain stable at "unknown"
1379 * frequency for longer duration. Hence, a BUG_ON().
1381 BUG_ON(ret);
1382 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1383 __func__, policy->cpu, policy->cur);
1387 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1388 CPUFREQ_START, policy);
1390 if (!recover_policy) {
1391 ret = cpufreq_add_dev_interface(policy, dev);
1392 if (ret)
1393 goto err_out_unregister;
1394 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1395 CPUFREQ_CREATE_POLICY, policy);
1397 write_lock_irqsave(&cpufreq_driver_lock, flags);
1398 list_add(&policy->policy_list, &cpufreq_policy_list);
1399 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1402 cpufreq_init_policy(policy);
1404 if (!recover_policy) {
1405 policy->user_policy.policy = policy->policy;
1406 policy->user_policy.governor = policy->governor;
1408 up_write(&policy->rwsem);
1410 kobject_uevent(&policy->kobj, KOBJ_ADD);
1412 up_read(&cpufreq_rwsem);
1414 /* Callback for handling stuff after policy is ready */
1415 if (cpufreq_driver->ready)
1416 cpufreq_driver->ready(policy);
1418 pr_debug("initialization complete\n");
1420 return 0;
1422 err_out_unregister:
1423 err_get_freq:
1424 up_write(&policy->rwsem);
1426 if (cpufreq_driver->exit)
1427 cpufreq_driver->exit(policy);
1428 err_set_policy_cpu:
1429 cpufreq_policy_free(policy, recover_policy);
1430 nomem_out:
1431 up_read(&cpufreq_rwsem);
1433 return ret;
1436 static int __cpufreq_remove_dev_prepare(struct device *dev)
1438 unsigned int cpu = dev->id;
1439 int ret = 0;
1440 struct cpufreq_policy *policy;
1442 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1444 policy = cpufreq_cpu_get_raw(cpu);
1445 if (!policy) {
1446 pr_debug("%s: No cpu_data found\n", __func__);
1447 return -EINVAL;
1450 if (has_target()) {
1451 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1452 if (ret)
1453 pr_err("%s: Failed to stop governor\n", __func__);
1456 down_write(&policy->rwsem);
1457 cpumask_clear_cpu(cpu, policy->cpus);
1459 if (policy_is_inactive(policy)) {
1460 if (has_target())
1461 strncpy(policy->last_governor, policy->governor->name,
1462 CPUFREQ_NAME_LEN);
1463 } else if (cpu == policy->cpu) {
1464 /* Nominate new CPU */
1465 policy->cpu = cpumask_any(policy->cpus);
1467 up_write(&policy->rwsem);
1469 /* Start governor again for active policy */
1470 if (!policy_is_inactive(policy)) {
1471 if (has_target()) {
1472 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1473 if (!ret)
1474 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1476 if (ret)
1477 pr_err("%s: Failed to start governor\n", __func__);
1479 } else if (cpufreq_driver->stop_cpu) {
1480 cpufreq_driver->stop_cpu(policy);
1483 return ret;
1486 static int __cpufreq_remove_dev_finish(struct device *dev)
1488 unsigned int cpu = dev->id;
1489 int ret;
1490 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1492 if (!policy) {
1493 pr_debug("%s: No cpu_data found\n", __func__);
1494 return -EINVAL;
1497 /* Only proceed for inactive policies */
1498 if (!policy_is_inactive(policy))
1499 return 0;
1501 /* If cpu is last user of policy, free policy */
1502 if (has_target()) {
1503 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
1504 if (ret)
1505 pr_err("%s: Failed to exit governor\n", __func__);
1509 * Perform the ->exit() even during light-weight tear-down,
1510 * since this is a core component, and is essential for the
1511 * subsequent light-weight ->init() to succeed.
1513 if (cpufreq_driver->exit)
1514 cpufreq_driver->exit(policy);
1516 return 0;
1520 * cpufreq_remove_dev - remove a CPU device
1522 * Removes the cpufreq interface for a CPU device.
1524 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1526 unsigned int cpu = dev->id;
1527 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1529 if (!policy)
1530 return 0;
1532 if (cpu_online(cpu)) {
1533 __cpufreq_remove_dev_prepare(dev);
1534 __cpufreq_remove_dev_finish(dev);
1537 cpumask_clear_cpu(cpu, policy->real_cpus);
1539 if (cpumask_empty(policy->real_cpus)) {
1540 cpufreq_policy_free(policy, true);
1541 return 0;
1544 if (cpu != policy->kobj_cpu) {
1545 remove_cpu_dev_symlink(policy, cpu);
1546 } else {
1548 * The CPU owning the policy object is going away. Move it to
1549 * another suitable CPU.
1551 unsigned int new_cpu = cpumask_first(policy->real_cpus);
1552 struct device *new_dev = get_cpu_device(new_cpu);
1554 dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);
1556 sysfs_remove_link(&new_dev->kobj, "cpufreq");
1557 policy->kobj_cpu = new_cpu;
1558 WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
1561 return 0;
1564 static void handle_update(struct work_struct *work)
1566 struct cpufreq_policy *policy =
1567 container_of(work, struct cpufreq_policy, update);
1568 unsigned int cpu = policy->cpu;
1569 pr_debug("handle_update for cpu %u called\n", cpu);
1570 cpufreq_update_policy(cpu);
1574 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1575 * in deep trouble.
1576 * @policy: policy managing CPUs
1577 * @new_freq: CPU frequency the CPU actually runs at
1579 * We adjust to current frequency first, and need to clean up later.
1580 * So either call to cpufreq_update_policy() or schedule handle_update()).
1582 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1583 unsigned int new_freq)
1585 struct cpufreq_freqs freqs;
1587 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1588 policy->cur, new_freq);
1590 freqs.old = policy->cur;
1591 freqs.new = new_freq;
1593 cpufreq_freq_transition_begin(policy, &freqs);
1594 cpufreq_freq_transition_end(policy, &freqs, 0);
1598 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1599 * @cpu: CPU number
1601 * This is the last known freq, without actually getting it from the driver.
1602 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1604 unsigned int cpufreq_quick_get(unsigned int cpu)
1606 struct cpufreq_policy *policy;
1607 unsigned int ret_freq = 0;
1609 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1610 return cpufreq_driver->get(cpu);
1612 policy = cpufreq_cpu_get(cpu);
1613 if (policy) {
1614 ret_freq = policy->cur;
1615 cpufreq_cpu_put(policy);
1618 return ret_freq;
1620 EXPORT_SYMBOL(cpufreq_quick_get);
1623 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1624 * @cpu: CPU number
1626 * Just return the max possible frequency for a given CPU.
1628 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1630 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1631 unsigned int ret_freq = 0;
1633 if (policy) {
1634 ret_freq = policy->max;
1635 cpufreq_cpu_put(policy);
1638 return ret_freq;
1640 EXPORT_SYMBOL(cpufreq_quick_get_max);
1642 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1644 unsigned int ret_freq = 0;
1646 if (!cpufreq_driver->get)
1647 return ret_freq;
1649 ret_freq = cpufreq_driver->get(policy->cpu);
1651 /* Updating inactive policies is invalid, so avoid doing that. */
1652 if (unlikely(policy_is_inactive(policy)))
1653 return ret_freq;
1655 if (ret_freq && policy->cur &&
1656 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1657 /* verify no discrepancy between actual and
1658 saved value exists */
1659 if (unlikely(ret_freq != policy->cur)) {
1660 cpufreq_out_of_sync(policy, ret_freq);
1661 schedule_work(&policy->update);
1665 return ret_freq;
1669 * cpufreq_get - get the current CPU frequency (in kHz)
1670 * @cpu: CPU number
1672 * Get the CPU current (static) CPU frequency
1674 unsigned int cpufreq_get(unsigned int cpu)
1676 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1677 unsigned int ret_freq = 0;
1679 if (policy) {
1680 down_read(&policy->rwsem);
1681 ret_freq = __cpufreq_get(policy);
1682 up_read(&policy->rwsem);
1684 cpufreq_cpu_put(policy);
1687 return ret_freq;
1689 EXPORT_SYMBOL(cpufreq_get);
1691 static struct subsys_interface cpufreq_interface = {
1692 .name = "cpufreq",
1693 .subsys = &cpu_subsys,
1694 .add_dev = cpufreq_add_dev,
1695 .remove_dev = cpufreq_remove_dev,
1699 * In case platform wants some specific frequency to be configured
1700 * during suspend..
1702 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1704 int ret;
1706 if (!policy->suspend_freq) {
1707 pr_err("%s: suspend_freq can't be zero\n", __func__);
1708 return -EINVAL;
1711 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1712 policy->suspend_freq);
1714 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1715 CPUFREQ_RELATION_H);
1716 if (ret)
1717 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1718 __func__, policy->suspend_freq, ret);
1720 return ret;
1722 EXPORT_SYMBOL(cpufreq_generic_suspend);
1725 * cpufreq_suspend() - Suspend CPUFreq governors
1727 * Called during system wide Suspend/Hibernate cycles for suspending governors
1728 * as some platforms can't change frequency after this point in suspend cycle.
1729 * Because some of the devices (like: i2c, regulators, etc) they use for
1730 * changing frequency are suspended quickly after this point.
1732 void cpufreq_suspend(void)
1734 struct cpufreq_policy *policy;
1736 if (!cpufreq_driver)
1737 return;
1739 if (!has_target())
1740 goto suspend;
1742 pr_debug("%s: Suspending Governors\n", __func__);
1744 for_each_active_policy(policy) {
1745 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1746 pr_err("%s: Failed to stop governor for policy: %p\n",
1747 __func__, policy);
1748 else if (cpufreq_driver->suspend
1749 && cpufreq_driver->suspend(policy))
1750 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1751 policy);
1754 suspend:
1755 cpufreq_suspended = true;
1759 * cpufreq_resume() - Resume CPUFreq governors
1761 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1762 * are suspended with cpufreq_suspend().
1764 void cpufreq_resume(void)
1766 struct cpufreq_policy *policy;
1768 if (!cpufreq_driver)
1769 return;
1771 cpufreq_suspended = false;
1773 if (!has_target())
1774 return;
1776 pr_debug("%s: Resuming Governors\n", __func__);
1778 for_each_active_policy(policy) {
1779 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1780 pr_err("%s: Failed to resume driver: %p\n", __func__,
1781 policy);
1782 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1783 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1784 pr_err("%s: Failed to start governor for policy: %p\n",
1785 __func__, policy);
1789 * schedule call cpufreq_update_policy() for first-online CPU, as that
1790 * wouldn't be hotplugged-out on suspend. It will verify that the
1791 * current freq is in sync with what we believe it to be.
1793 policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1794 if (WARN_ON(!policy))
1795 return;
1797 schedule_work(&policy->update);
1801 * cpufreq_get_current_driver - return current driver's name
1803 * Return the name string of the currently loaded cpufreq driver
1804 * or NULL, if none.
1806 const char *cpufreq_get_current_driver(void)
1808 if (cpufreq_driver)
1809 return cpufreq_driver->name;
1811 return NULL;
1813 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1816 * cpufreq_get_driver_data - return current driver data
1818 * Return the private data of the currently loaded cpufreq
1819 * driver, or NULL if no cpufreq driver is loaded.
1821 void *cpufreq_get_driver_data(void)
1823 if (cpufreq_driver)
1824 return cpufreq_driver->driver_data;
1826 return NULL;
1828 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1830 /*********************************************************************
1831 * NOTIFIER LISTS INTERFACE *
1832 *********************************************************************/
1835 * cpufreq_register_notifier - register a driver with cpufreq
1836 * @nb: notifier function to register
1837 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1839 * Add a driver to one of two lists: either a list of drivers that
1840 * are notified about clock rate changes (once before and once after
1841 * the transition), or a list of drivers that are notified about
1842 * changes in cpufreq policy.
1844 * This function may sleep, and has the same return conditions as
1845 * blocking_notifier_chain_register.
1847 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1849 int ret;
1851 if (cpufreq_disabled())
1852 return -EINVAL;
1854 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1856 switch (list) {
1857 case CPUFREQ_TRANSITION_NOTIFIER:
1858 ret = srcu_notifier_chain_register(
1859 &cpufreq_transition_notifier_list, nb);
1860 break;
1861 case CPUFREQ_POLICY_NOTIFIER:
1862 ret = blocking_notifier_chain_register(
1863 &cpufreq_policy_notifier_list, nb);
1864 break;
1865 default:
1866 ret = -EINVAL;
1869 return ret;
1871 EXPORT_SYMBOL(cpufreq_register_notifier);
1874 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1875 * @nb: notifier block to be unregistered
1876 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1878 * Remove a driver from the CPU frequency notifier list.
1880 * This function may sleep, and has the same return conditions as
1881 * blocking_notifier_chain_unregister.
1883 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1885 int ret;
1887 if (cpufreq_disabled())
1888 return -EINVAL;
1890 switch (list) {
1891 case CPUFREQ_TRANSITION_NOTIFIER:
1892 ret = srcu_notifier_chain_unregister(
1893 &cpufreq_transition_notifier_list, nb);
1894 break;
1895 case CPUFREQ_POLICY_NOTIFIER:
1896 ret = blocking_notifier_chain_unregister(
1897 &cpufreq_policy_notifier_list, nb);
1898 break;
1899 default:
1900 ret = -EINVAL;
1903 return ret;
1905 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1908 /*********************************************************************
1909 * GOVERNORS *
1910 *********************************************************************/
1912 /* Must set freqs->new to intermediate frequency */
1913 static int __target_intermediate(struct cpufreq_policy *policy,
1914 struct cpufreq_freqs *freqs, int index)
1916 int ret;
1918 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1920 /* We don't need to switch to intermediate freq */
1921 if (!freqs->new)
1922 return 0;
1924 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1925 __func__, policy->cpu, freqs->old, freqs->new);
1927 cpufreq_freq_transition_begin(policy, freqs);
1928 ret = cpufreq_driver->target_intermediate(policy, index);
1929 cpufreq_freq_transition_end(policy, freqs, ret);
1931 if (ret)
1932 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1933 __func__, ret);
1935 return ret;
1938 static int __target_index(struct cpufreq_policy *policy,
1939 struct cpufreq_frequency_table *freq_table, int index)
1941 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1942 unsigned int intermediate_freq = 0;
1943 int retval = -EINVAL;
1944 bool notify;
1946 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1947 if (notify) {
1948 /* Handle switching to intermediate frequency */
1949 if (cpufreq_driver->get_intermediate) {
1950 retval = __target_intermediate(policy, &freqs, index);
1951 if (retval)
1952 return retval;
1954 intermediate_freq = freqs.new;
1955 /* Set old freq to intermediate */
1956 if (intermediate_freq)
1957 freqs.old = freqs.new;
1960 freqs.new = freq_table[index].frequency;
1961 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1962 __func__, policy->cpu, freqs.old, freqs.new);
1964 cpufreq_freq_transition_begin(policy, &freqs);
1967 retval = cpufreq_driver->target_index(policy, index);
1968 if (retval)
1969 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1970 retval);
1972 if (notify) {
1973 cpufreq_freq_transition_end(policy, &freqs, retval);
1976 * Failed after setting to intermediate freq? Driver should have
1977 * reverted back to initial frequency and so should we. Check
1978 * here for intermediate_freq instead of get_intermediate, in
1979 * case we haven't switched to intermediate freq at all.
1981 if (unlikely(retval && intermediate_freq)) {
1982 freqs.old = intermediate_freq;
1983 freqs.new = policy->restore_freq;
1984 cpufreq_freq_transition_begin(policy, &freqs);
1985 cpufreq_freq_transition_end(policy, &freqs, 0);
1989 return retval;
1992 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1993 unsigned int target_freq,
1994 unsigned int relation)
1996 unsigned int old_target_freq = target_freq;
1997 int retval = -EINVAL;
1999 if (cpufreq_disabled())
2000 return -ENODEV;
2002 /* Make sure that target_freq is within supported range */
2003 if (target_freq > policy->max)
2004 target_freq = policy->max;
2005 if (target_freq < policy->min)
2006 target_freq = policy->min;
2008 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
2009 policy->cpu, target_freq, relation, old_target_freq);
2012 * This might look like a redundant call as we are checking it again
2013 * after finding index. But it is left intentionally for cases where
2014 * exactly same freq is called again and so we can save on few function
2015 * calls.
2017 if (target_freq == policy->cur)
2018 return 0;
2020 /* Save last value to restore later on errors */
2021 policy->restore_freq = policy->cur;
2023 if (cpufreq_driver->target)
2024 retval = cpufreq_driver->target(policy, target_freq, relation);
2025 else if (cpufreq_driver->target_index) {
2026 struct cpufreq_frequency_table *freq_table;
2027 int index;
2029 freq_table = cpufreq_frequency_get_table(policy->cpu);
2030 if (unlikely(!freq_table)) {
2031 pr_err("%s: Unable to find freq_table\n", __func__);
2032 goto out;
2035 retval = cpufreq_frequency_table_target(policy, freq_table,
2036 target_freq, relation, &index);
2037 if (unlikely(retval)) {
2038 pr_err("%s: Unable to find matching freq\n", __func__);
2039 goto out;
2042 if (freq_table[index].frequency == policy->cur) {
2043 retval = 0;
2044 goto out;
2047 retval = __target_index(policy, freq_table, index);
2050 out:
2051 return retval;
2053 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2055 int cpufreq_driver_target(struct cpufreq_policy *policy,
2056 unsigned int target_freq,
2057 unsigned int relation)
2059 int ret = -EINVAL;
2061 down_write(&policy->rwsem);
2063 ret = __cpufreq_driver_target(policy, target_freq, relation);
2065 up_write(&policy->rwsem);
2067 return ret;
2069 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
2071 static int __cpufreq_governor(struct cpufreq_policy *policy,
2072 unsigned int event)
2074 int ret;
2076 /* Only must be defined when default governor is known to have latency
2077 restrictions, like e.g. conservative or ondemand.
2078 That this is the case is already ensured in Kconfig
2080 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
2081 struct cpufreq_governor *gov = &cpufreq_gov_performance;
2082 #else
2083 struct cpufreq_governor *gov = NULL;
2084 #endif
2086 /* Don't start any governor operations if we are entering suspend */
2087 if (cpufreq_suspended)
2088 return 0;
2090 * Governor might not be initiated here if ACPI _PPC changed
2091 * notification happened, so check it.
2093 if (!policy->governor)
2094 return -EINVAL;
2096 if (policy->governor->max_transition_latency &&
2097 policy->cpuinfo.transition_latency >
2098 policy->governor->max_transition_latency) {
2099 if (!gov)
2100 return -EINVAL;
2101 else {
2102 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2103 policy->governor->name, gov->name);
2104 policy->governor = gov;
2108 if (event == CPUFREQ_GOV_POLICY_INIT)
2109 if (!try_module_get(policy->governor->owner))
2110 return -EINVAL;
2112 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
2113 policy->cpu, event);
2115 mutex_lock(&cpufreq_governor_lock);
2116 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
2117 || (!policy->governor_enabled
2118 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
2119 mutex_unlock(&cpufreq_governor_lock);
2120 return -EBUSY;
2123 if (event == CPUFREQ_GOV_STOP)
2124 policy->governor_enabled = false;
2125 else if (event == CPUFREQ_GOV_START)
2126 policy->governor_enabled = true;
2128 mutex_unlock(&cpufreq_governor_lock);
2130 ret = policy->governor->governor(policy, event);
2132 if (!ret) {
2133 if (event == CPUFREQ_GOV_POLICY_INIT)
2134 policy->governor->initialized++;
2135 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2136 policy->governor->initialized--;
2137 } else {
2138 /* Restore original values */
2139 mutex_lock(&cpufreq_governor_lock);
2140 if (event == CPUFREQ_GOV_STOP)
2141 policy->governor_enabled = true;
2142 else if (event == CPUFREQ_GOV_START)
2143 policy->governor_enabled = false;
2144 mutex_unlock(&cpufreq_governor_lock);
2147 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2148 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
2149 module_put(policy->governor->owner);
2151 return ret;
2154 int cpufreq_register_governor(struct cpufreq_governor *governor)
2156 int err;
2158 if (!governor)
2159 return -EINVAL;
2161 if (cpufreq_disabled())
2162 return -ENODEV;
2164 mutex_lock(&cpufreq_governor_mutex);
2166 governor->initialized = 0;
2167 err = -EBUSY;
2168 if (!find_governor(governor->name)) {
2169 err = 0;
2170 list_add(&governor->governor_list, &cpufreq_governor_list);
2173 mutex_unlock(&cpufreq_governor_mutex);
2174 return err;
2176 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
2178 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2180 struct cpufreq_policy *policy;
2181 unsigned long flags;
2183 if (!governor)
2184 return;
2186 if (cpufreq_disabled())
2187 return;
2189 /* clear last_governor for all inactive policies */
2190 read_lock_irqsave(&cpufreq_driver_lock, flags);
2191 for_each_inactive_policy(policy) {
2192 if (!strcmp(policy->last_governor, governor->name)) {
2193 policy->governor = NULL;
2194 strcpy(policy->last_governor, "\0");
2197 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2199 mutex_lock(&cpufreq_governor_mutex);
2200 list_del(&governor->governor_list);
2201 mutex_unlock(&cpufreq_governor_mutex);
2202 return;
2204 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2207 /*********************************************************************
2208 * POLICY INTERFACE *
2209 *********************************************************************/
2212 * cpufreq_get_policy - get the current cpufreq_policy
2213 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2214 * is written
2216 * Reads the current cpufreq policy.
2218 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2220 struct cpufreq_policy *cpu_policy;
2221 if (!policy)
2222 return -EINVAL;
2224 cpu_policy = cpufreq_cpu_get(cpu);
2225 if (!cpu_policy)
2226 return -EINVAL;
2228 memcpy(policy, cpu_policy, sizeof(*policy));
2230 cpufreq_cpu_put(cpu_policy);
2231 return 0;
2233 EXPORT_SYMBOL(cpufreq_get_policy);
2236 * policy : current policy.
2237 * new_policy: policy to be set.
2239 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2240 struct cpufreq_policy *new_policy)
2242 struct cpufreq_governor *old_gov;
2243 int ret;
2245 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2246 new_policy->cpu, new_policy->min, new_policy->max);
2248 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2250 if (new_policy->min > policy->max || new_policy->max < policy->min)
2251 return -EINVAL;
2253 /* verify the cpu speed can be set within this limit */
2254 ret = cpufreq_driver->verify(new_policy);
2255 if (ret)
2256 return ret;
2258 /* adjust if necessary - all reasons */
2259 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2260 CPUFREQ_ADJUST, new_policy);
2262 /* adjust if necessary - hardware incompatibility*/
2263 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2264 CPUFREQ_INCOMPATIBLE, new_policy);
2267 * verify the cpu speed can be set within this limit, which might be
2268 * different to the first one
2270 ret = cpufreq_driver->verify(new_policy);
2271 if (ret)
2272 return ret;
2274 /* notification of the new policy */
2275 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2276 CPUFREQ_NOTIFY, new_policy);
2278 policy->min = new_policy->min;
2279 policy->max = new_policy->max;
2281 pr_debug("new min and max freqs are %u - %u kHz\n",
2282 policy->min, policy->max);
2284 if (cpufreq_driver->setpolicy) {
2285 policy->policy = new_policy->policy;
2286 pr_debug("setting range\n");
2287 return cpufreq_driver->setpolicy(new_policy);
2290 if (new_policy->governor == policy->governor)
2291 goto out;
2293 pr_debug("governor switch\n");
2295 /* save old, working values */
2296 old_gov = policy->governor;
2297 /* end old governor */
2298 if (old_gov) {
2299 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2300 up_write(&policy->rwsem);
2301 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2302 down_write(&policy->rwsem);
2305 /* start new governor */
2306 policy->governor = new_policy->governor;
2307 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2308 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2309 goto out;
2311 up_write(&policy->rwsem);
2312 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2313 down_write(&policy->rwsem);
2316 /* new governor failed, so re-start old one */
2317 pr_debug("starting governor %s failed\n", policy->governor->name);
2318 if (old_gov) {
2319 policy->governor = old_gov;
2320 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2321 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2324 return -EINVAL;
2326 out:
2327 pr_debug("governor: change or update limits\n");
2328 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2332 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2333 * @cpu: CPU which shall be re-evaluated
2335 * Useful for policy notifiers which have different necessities
2336 * at different times.
2338 int cpufreq_update_policy(unsigned int cpu)
2340 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2341 struct cpufreq_policy new_policy;
2342 int ret;
2344 if (!policy)
2345 return -ENODEV;
2347 down_write(&policy->rwsem);
2349 pr_debug("updating policy for CPU %u\n", cpu);
2350 memcpy(&new_policy, policy, sizeof(*policy));
2351 new_policy.min = policy->user_policy.min;
2352 new_policy.max = policy->user_policy.max;
2353 new_policy.policy = policy->user_policy.policy;
2354 new_policy.governor = policy->user_policy.governor;
2357 * BIOS might change freq behind our back
2358 * -> ask driver for current freq and notify governors about a change
2360 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2361 new_policy.cur = cpufreq_driver->get(cpu);
2362 if (WARN_ON(!new_policy.cur)) {
2363 ret = -EIO;
2364 goto unlock;
2367 if (!policy->cur) {
2368 pr_debug("Driver did not initialize current freq\n");
2369 policy->cur = new_policy.cur;
2370 } else {
2371 if (policy->cur != new_policy.cur && has_target())
2372 cpufreq_out_of_sync(policy, new_policy.cur);
2376 ret = cpufreq_set_policy(policy, &new_policy);
2378 unlock:
2379 up_write(&policy->rwsem);
2381 cpufreq_cpu_put(policy);
2382 return ret;
2384 EXPORT_SYMBOL(cpufreq_update_policy);
2386 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2387 unsigned long action, void *hcpu)
2389 unsigned int cpu = (unsigned long)hcpu;
2390 struct device *dev;
2392 dev = get_cpu_device(cpu);
2393 if (dev) {
2394 switch (action & ~CPU_TASKS_FROZEN) {
2395 case CPU_ONLINE:
2396 cpufreq_add_dev(dev, NULL);
2397 break;
2399 case CPU_DOWN_PREPARE:
2400 __cpufreq_remove_dev_prepare(dev);
2401 break;
2403 case CPU_POST_DEAD:
2404 __cpufreq_remove_dev_finish(dev);
2405 break;
2407 case CPU_DOWN_FAILED:
2408 cpufreq_add_dev(dev, NULL);
2409 break;
2412 return NOTIFY_OK;
2415 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2416 .notifier_call = cpufreq_cpu_callback,
2419 /*********************************************************************
2420 * BOOST *
2421 *********************************************************************/
2422 static int cpufreq_boost_set_sw(int state)
2424 struct cpufreq_frequency_table *freq_table;
2425 struct cpufreq_policy *policy;
2426 int ret = -EINVAL;
2428 for_each_active_policy(policy) {
2429 freq_table = cpufreq_frequency_get_table(policy->cpu);
2430 if (freq_table) {
2431 ret = cpufreq_frequency_table_cpuinfo(policy,
2432 freq_table);
2433 if (ret) {
2434 pr_err("%s: Policy frequency update failed\n",
2435 __func__);
2436 break;
2438 policy->user_policy.max = policy->max;
2439 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2443 return ret;
2446 int cpufreq_boost_trigger_state(int state)
2448 unsigned long flags;
2449 int ret = 0;
2451 if (cpufreq_driver->boost_enabled == state)
2452 return 0;
2454 write_lock_irqsave(&cpufreq_driver_lock, flags);
2455 cpufreq_driver->boost_enabled = state;
2456 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2458 ret = cpufreq_driver->set_boost(state);
2459 if (ret) {
2460 write_lock_irqsave(&cpufreq_driver_lock, flags);
2461 cpufreq_driver->boost_enabled = !state;
2462 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2464 pr_err("%s: Cannot %s BOOST\n",
2465 __func__, state ? "enable" : "disable");
2468 return ret;
2471 int cpufreq_boost_supported(void)
2473 if (likely(cpufreq_driver))
2474 return cpufreq_driver->boost_supported;
2476 return 0;
2478 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2480 int cpufreq_boost_enabled(void)
2482 return cpufreq_driver->boost_enabled;
2484 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2486 /*********************************************************************
2487 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2488 *********************************************************************/
2491 * cpufreq_register_driver - register a CPU Frequency driver
2492 * @driver_data: A struct cpufreq_driver containing the values#
2493 * submitted by the CPU Frequency driver.
2495 * Registers a CPU Frequency driver to this core code. This code
2496 * returns zero on success, -EBUSY when another driver got here first
2497 * (and isn't unregistered in the meantime).
2500 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2502 unsigned long flags;
2503 int ret;
2505 if (cpufreq_disabled())
2506 return -ENODEV;
2508 if (!driver_data || !driver_data->verify || !driver_data->init ||
2509 !(driver_data->setpolicy || driver_data->target_index ||
2510 driver_data->target) ||
2511 (driver_data->setpolicy && (driver_data->target_index ||
2512 driver_data->target)) ||
2513 (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2514 return -EINVAL;
2516 pr_debug("trying to register driver %s\n", driver_data->name);
2518 write_lock_irqsave(&cpufreq_driver_lock, flags);
2519 if (cpufreq_driver) {
2520 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2521 return -EEXIST;
2523 cpufreq_driver = driver_data;
2524 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2526 if (driver_data->setpolicy)
2527 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2529 if (cpufreq_boost_supported()) {
2531 * Check if driver provides function to enable boost -
2532 * if not, use cpufreq_boost_set_sw as default
2534 if (!cpufreq_driver->set_boost)
2535 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2537 ret = cpufreq_sysfs_create_file(&boost.attr);
2538 if (ret) {
2539 pr_err("%s: cannot register global BOOST sysfs file\n",
2540 __func__);
2541 goto err_null_driver;
2545 ret = subsys_interface_register(&cpufreq_interface);
2546 if (ret)
2547 goto err_boost_unreg;
2549 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2550 list_empty(&cpufreq_policy_list)) {
2551 /* if all ->init() calls failed, unregister */
2552 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2553 driver_data->name);
2554 goto err_if_unreg;
2557 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2558 pr_debug("driver %s up and running\n", driver_data->name);
2560 return 0;
2561 err_if_unreg:
2562 subsys_interface_unregister(&cpufreq_interface);
2563 err_boost_unreg:
2564 if (cpufreq_boost_supported())
2565 cpufreq_sysfs_remove_file(&boost.attr);
2566 err_null_driver:
2567 write_lock_irqsave(&cpufreq_driver_lock, flags);
2568 cpufreq_driver = NULL;
2569 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2570 return ret;
2572 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2575 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2577 * Unregister the current CPUFreq driver. Only call this if you have
2578 * the right to do so, i.e. if you have succeeded in initialising before!
2579 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2580 * currently not initialised.
2582 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2584 unsigned long flags;
2586 if (!cpufreq_driver || (driver != cpufreq_driver))
2587 return -EINVAL;
2589 pr_debug("unregistering driver %s\n", driver->name);
2591 subsys_interface_unregister(&cpufreq_interface);
2592 if (cpufreq_boost_supported())
2593 cpufreq_sysfs_remove_file(&boost.attr);
2595 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2597 down_write(&cpufreq_rwsem);
2598 write_lock_irqsave(&cpufreq_driver_lock, flags);
2600 cpufreq_driver = NULL;
2602 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2603 up_write(&cpufreq_rwsem);
2605 return 0;
2607 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2610 * Stop cpufreq at shutdown to make sure it isn't holding any locks
2611 * or mutexes when secondary CPUs are halted.
2613 static struct syscore_ops cpufreq_syscore_ops = {
2614 .shutdown = cpufreq_suspend,
2617 static int __init cpufreq_core_init(void)
2619 if (cpufreq_disabled())
2620 return -ENODEV;
2622 cpufreq_global_kobject = kobject_create();
2623 BUG_ON(!cpufreq_global_kobject);
2625 register_syscore_ops(&cpufreq_syscore_ops);
2627 return 0;
2629 core_initcall(cpufreq_core_init);