treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / drivers / devfreq / devfreq.c
blobcceee8bc3c2f745a02ab7b648d29458cbeaf2283
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
4 * for Non-CPU Devices.
6 * Copyright (C) 2011 Samsung Electronics
7 * MyungJoo Ham <myungjoo.ham@samsung.com>
8 */
10 #include <linux/kernel.h>
11 #include <linux/kmod.h>
12 #include <linux/sched.h>
13 #include <linux/debugfs.h>
14 #include <linux/errno.h>
15 #include <linux/err.h>
16 #include <linux/init.h>
17 #include <linux/export.h>
18 #include <linux/slab.h>
19 #include <linux/stat.h>
20 #include <linux/pm_opp.h>
21 #include <linux/devfreq.h>
22 #include <linux/workqueue.h>
23 #include <linux/platform_device.h>
24 #include <linux/list.h>
25 #include <linux/printk.h>
26 #include <linux/hrtimer.h>
27 #include <linux/of.h>
28 #include <linux/pm_qos.h>
29 #include "governor.h"
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/devfreq.h>
34 #define HZ_PER_KHZ 1000
36 static struct class *devfreq_class;
37 static struct dentry *devfreq_debugfs;
40 * devfreq core provides delayed work based load monitoring helper
41 * functions. Governors can use these or can implement their own
42 * monitoring mechanism.
44 static struct workqueue_struct *devfreq_wq;
46 /* The list of all device-devfreq governors */
47 static LIST_HEAD(devfreq_governor_list);
48 /* The list of all device-devfreq */
49 static LIST_HEAD(devfreq_list);
50 static DEFINE_MUTEX(devfreq_list_lock);
52 /**
53 * find_device_devfreq() - find devfreq struct using device pointer
54 * @dev: device pointer used to lookup device devfreq.
56 * Search the list of device devfreqs and return the matched device's
57 * devfreq info. devfreq_list_lock should be held by the caller.
59 static struct devfreq *find_device_devfreq(struct device *dev)
61 	struct devfreq *tmp_devfreq;
/* Reject NULL or ERR-encoded device pointers from careless callers. */
 63 	if (IS_ERR_OR_NULL(dev)) {
 64 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
 65 		return ERR_PTR(-EINVAL);
/* The list may change under us otherwise; the caller must hold the lock. */
 67 	WARN(!mutex_is_locked(&devfreq_list_lock),
 68 		"devfreq_list_lock must be locked.");
/* Linear scan: each devfreq records its owning device as dev.parent. */
 70 	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
 71 		if (tmp_devfreq->dev.parent == dev)
 72 			return tmp_devfreq;
/* No devfreq has been registered for this device. */
 75 	return ERR_PTR(-ENODEV);
78 static unsigned long find_available_min_freq(struct devfreq *devfreq)
80 struct dev_pm_opp *opp;
81 unsigned long min_freq = 0;
83 opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
84 if (IS_ERR(opp))
85 min_freq = 0;
86 else
87 dev_pm_opp_put(opp);
89 return min_freq;
92 static unsigned long find_available_max_freq(struct devfreq *devfreq)
94 struct dev_pm_opp *opp;
95 unsigned long max_freq = ULONG_MAX;
97 opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
98 if (IS_ERR(opp))
99 max_freq = 0;
100 else
101 dev_pm_opp_put(opp);
103 return max_freq;
107 * get_freq_range() - Get the current freq range
108 * @devfreq: the devfreq instance
109 * @min_freq: the min frequency
110 * @max_freq: the max frequency
112 * This takes into consideration all constraints.
114 static void get_freq_range(struct devfreq *devfreq,
115 			unsigned long *min_freq,
116 			unsigned long *max_freq)
118 	unsigned long *freq_table = devfreq->profile->freq_table;
119 	s32 qos_min_freq, qos_max_freq;
/* Caller must hold devfreq->lock (scaling_{min,max}_freq are read below). */
 121 	lockdep_assert_held(&devfreq->lock);
 124 	 * Initialize minimum/maximum frequency from freq table.
 125 	 * The devfreq drivers can initialize this in either ascending or
 126 	 * descending order and devfreq core supports both.
 128 	if (freq_table[0] < freq_table[devfreq->profile->max_state - 1]) {
 129 		*min_freq = freq_table[0];
 130 		*max_freq = freq_table[devfreq->profile->max_state - 1];
 131 	} else {
 132 		*min_freq = freq_table[devfreq->profile->max_state - 1];
 133 		*max_freq = freq_table[0];
 136 	/* Apply constraints from PM QoS */
 137 	qos_min_freq = dev_pm_qos_read_value(devfreq->dev.parent,
 138 					     DEV_PM_QOS_MIN_FREQUENCY);
 139 	qos_max_freq = dev_pm_qos_read_value(devfreq->dev.parent,
 140 					     DEV_PM_QOS_MAX_FREQUENCY);
/* QoS values are expressed in kHz; scale to Hz before comparing. */
 141 	*min_freq = max(*min_freq, (unsigned long)HZ_PER_KHZ * qos_min_freq);
/* The default max ("no request") must not clamp; only apply a real request. */
 142 	if (qos_max_freq != PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE)
 143 		*max_freq = min(*max_freq,
 144 				(unsigned long)HZ_PER_KHZ * qos_max_freq);
 146 	/* Apply constraints from OPP interface */
 147 	*min_freq = max(*min_freq, devfreq->scaling_min_freq);
 148 	*max_freq = min(*max_freq, devfreq->scaling_max_freq);
/* If the constraints cross, the max constraint wins. */
 150 	if (*min_freq > *max_freq)
 151 		*min_freq = *max_freq;
155 * devfreq_get_freq_level() - Lookup freq_table for the frequency
156 * @devfreq: the devfreq instance
157 * @freq: the target frequency
159 static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
161 int lev;
163 for (lev = 0; lev < devfreq->profile->max_state; lev++)
164 if (freq == devfreq->profile->freq_table[lev])
165 return lev;
167 return -EINVAL;
170 static int set_freq_table(struct devfreq *devfreq)
172 	struct devfreq_dev_profile *profile = devfreq->profile;
173 	struct dev_pm_opp *opp;
174 	unsigned long freq;
175 	int i, count;
 177 	/* Initialize the freq_table from OPP table */
 178 	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
 179 	if (count <= 0)
 180 		return -EINVAL;
 182 	profile->max_state = count;
 183 	profile->freq_table = devm_kcalloc(devfreq->dev.parent,
 184 					   profile->max_state,
 185 					   sizeof(*profile->freq_table),
 186 					   GFP_KERNEL);
/* On allocation failure, leave the profile in a consistent empty state. */
 187 	if (!profile->freq_table) {
 188 		profile->max_state = 0;
 189 		return -ENOMEM;
/* Walk OPPs in ascending order: ceil search from freq+1 yields the next one. */
 192 	for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
 193 		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
/* OPP table shrank underneath us; roll back the partial table. */
 194 		if (IS_ERR(opp)) {
 195 			devm_kfree(devfreq->dev.parent, profile->freq_table);
 196 			profile->max_state = 0;
 197 			return PTR_ERR(opp);
 199 		dev_pm_opp_put(opp);
 200 		profile->freq_table[i] = freq;
 203 	return 0;
207 * devfreq_update_status() - Update statistics of devfreq behavior
208 * @devfreq: the devfreq instance
209 * @freq: the update target frequency
211 int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
213 	int lev, prev_lev, ret = 0;
214 	u64 cur_time;
/* Caller must hold devfreq->lock: stats and previous_freq are shared. */
 216 	lockdep_assert_held(&devfreq->lock);
 217 	cur_time = get_jiffies_64();
 219 	/* Immediately exit if previous_freq is not initialized yet. */
 220 	if (!devfreq->previous_freq)
 221 		goto out;
 223 	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
 224 	if (prev_lev < 0) {
 225 		ret = prev_lev;
 226 		goto out;
/* Credit the elapsed time to the level we are leaving. */
 229 	devfreq->stats.time_in_state[prev_lev] +=
 230 			cur_time - devfreq->stats.last_update;
 232 	lev = devfreq_get_freq_level(devfreq, freq);
 233 	if (lev < 0) {
 234 		ret = lev;
 235 		goto out;
/* Record the transition only when the level actually changed. */
 238 	if (lev != prev_lev) {
 239 		devfreq->stats.trans_table[
 240 			(prev_lev * devfreq->profile->max_state) + lev]++;
 241 		devfreq->stats.total_trans++;
/* last_update advances even on error paths so time is never double-counted. */
 244 out:
 245 	devfreq->stats.last_update = cur_time;
 246 	return ret;
251 * find_devfreq_governor() - find devfreq governor from name
252 * @name: name of the governor
254 * Search the list of devfreq governors and return the matched
255 * governor's pointer. devfreq_list_lock should be held by the caller.
257 static struct devfreq_governor *find_devfreq_governor(const char *name)
259 	struct devfreq_governor *tmp_governor;
/* Reject NULL or ERR-encoded name pointers. */
 261 	if (IS_ERR_OR_NULL(name)) {
 262 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
 263 		return ERR_PTR(-EINVAL);
/* The governor list may change otherwise; caller must hold the lock. */
 265 	WARN(!mutex_is_locked(&devfreq_list_lock),
 266 	     "devfreq_list_lock must be locked.");
/* Bounded compare: governor names are at most DEVFREQ_NAME_LEN bytes. */
 268 	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
 269 		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
 270 			return tmp_governor;
 273 	return ERR_PTR(-ENODEV);
277 * try_then_request_governor() - Try to find the governor and request the
278 * module if is not found.
279 * @name: name of the governor
281 * Search the list of devfreq governors and request the module and try again
282 * if is not found. This can happen when both drivers (the governor driver
283 * and the driver that call devfreq_add_device) are built as modules.
284 * devfreq_list_lock should be held by the caller. Returns the matched
285 * governor's pointer or an error pointer.
287 static struct devfreq_governor *try_then_request_governor(const char *name)
289 	struct devfreq_governor *governor;
290 	int err = 0;
 292 	if (IS_ERR_OR_NULL(name)) {
 293 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
 294 		return ERR_PTR(-EINVAL);
 296 	WARN(!mutex_is_locked(&devfreq_list_lock),
 297 	     "devfreq_list_lock must be locked.");
 299 	governor = find_devfreq_governor(name);
/* Not loaded yet: drop the lock so request_module() can sleep safely. */
 300 	if (IS_ERR(governor)) {
 301 		mutex_unlock(&devfreq_list_lock);
/* simple_ondemand's module is named governor_simpleondemand, not its gov name. */
 303 		if (!strncmp(name, DEVFREQ_GOV_SIMPLE_ONDEMAND,
 304 			     DEVFREQ_NAME_LEN))
 305 			err = request_module("governor_%s", "simpleondemand");
 306 		else
 307 			err = request_module("governor_%s", name);
 308 		/* Restore previous state before return */
 309 		mutex_lock(&devfreq_list_lock);
 310 		if (err)
 311 			return (err < 0) ? ERR_PTR(err) : ERR_PTR(-EINVAL);
/* The freshly loaded module should have registered the governor. */
 313 		governor = find_devfreq_governor(name);
 316 	return governor;
319 static int devfreq_notify_transition(struct devfreq *devfreq,
320 struct devfreq_freqs *freqs, unsigned int state)
322 if (!devfreq)
323 return -EINVAL;
325 switch (state) {
326 case DEVFREQ_PRECHANGE:
327 srcu_notifier_call_chain(&devfreq->transition_notifier_list,
328 DEVFREQ_PRECHANGE, freqs);
329 break;
331 case DEVFREQ_POSTCHANGE:
332 srcu_notifier_call_chain(&devfreq->transition_notifier_list,
333 DEVFREQ_POSTCHANGE, freqs);
334 break;
335 default:
336 return -EINVAL;
339 return 0;
342 static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
343 			      u32 flags)
345 	struct devfreq_freqs freqs;
346 	unsigned long cur_freq;
347 	int err = 0;
/* Prefer the driver's live frequency; fall back to the cached value. */
 349 	if (devfreq->profile->get_cur_freq)
 350 		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
 351 	else
 352 		cur_freq = devfreq->previous_freq;
 354 	freqs.old = cur_freq;
 355 	freqs.new = new_freq;
 356 	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
/* target() may round new_freq to a supported OPP (GLB/LUB per flags). */
 358 	err = devfreq->profile->target(devfreq->dev.parent, &new_freq, flags);
/* On failure, POSTCHANGE still fires but reports the unchanged frequency. */
 359 	if (err) {
 360 		freqs.new = cur_freq;
 361 		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
 362 		return err;
 365 	freqs.new = new_freq;
 366 	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
/* Stats failure is logged but does not fail the frequency change itself. */
 368 	if (devfreq_update_status(devfreq, new_freq))
 369 		dev_err(&devfreq->dev,
 370 			"Couldn't update frequency transition information.\n");
 372 	devfreq->previous_freq = new_freq;
/* Remember where to return to after a system resume, if suspend_freq is set. */
 374 	if (devfreq->suspend_freq)
 375 		devfreq->resume_freq = cur_freq;
 377 	return err;
380 /* Load monitoring helper functions for governors use */
383 * update_devfreq() - Reevaluate the device and configure frequency.
384 * @devfreq: the devfreq instance.
386 * Note: Lock devfreq->lock before calling update_devfreq
387 * This function is exported for governors.
389 int update_devfreq(struct devfreq *devfreq)
391 	unsigned long freq, min_freq, max_freq;
392 	int err = 0;
393 	u32 flags = 0;
/* Contract: the caller holds devfreq->lock; warn and bail out otherwise. */
 395 	if (!mutex_is_locked(&devfreq->lock)) {
 396 		WARN(true, "devfreq->lock must be locked by the caller.\n");
 397 		return -EINVAL;
 400 	if (!devfreq->governor)
 401 		return -EINVAL;
 403 	/* Reevaluate the proper frequency */
 404 	err = devfreq->governor->get_target_freq(devfreq, &freq);
 405 	if (err)
 406 		return err;
 407 	get_freq_range(devfreq, &min_freq, &max_freq);
/* Clamp to the constrained range; pick the rounding direction via flags. */
 409 	if (freq < min_freq) {
 410 		freq = min_freq;
 411 		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
 413 	if (freq > max_freq) {
 414 		freq = max_freq;
 415 		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
 418 	return devfreq_set_target(devfreq, freq, flags);
424 * devfreq_monitor() - Periodically poll devfreq objects.
425 * @work: the work struct used to run devfreq_monitor periodically.
428 static void devfreq_monitor(struct work_struct *work)
430 	int err;
431 	struct devfreq *devfreq = container_of(work,
432 					struct devfreq, work.work);
 434 	mutex_lock(&devfreq->lock);
 435 	err = update_devfreq(devfreq);
/* DVFS errors are logged but polling continues; next period may succeed. */
 436 	if (err)
 437 		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
/* Re-arm ourselves for the next polling interval. */
 439 	queue_delayed_work(devfreq_wq, &devfreq->work,
 440 				msecs_to_jiffies(devfreq->profile->polling_ms));
 441 	mutex_unlock(&devfreq->lock);
/* Tracepoint fires outside the lock deliberately. */
 443 	trace_devfreq_monitor(devfreq);
447 * devfreq_monitor_start() - Start load monitoring of devfreq instance
448 * @devfreq: the devfreq instance.
450 * Helper function for starting devfreq device load monitoring. By
451 * default delayed work based monitoring is supported. Function
452 * to be called from governor in response to DEVFREQ_GOV_START
453 * event when device is added to devfreq framework.
455 void devfreq_monitor_start(struct devfreq *devfreq)
457 if (devfreq->governor->interrupt_driven)
458 return;
460 INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
461 if (devfreq->profile->polling_ms)
462 queue_delayed_work(devfreq_wq, &devfreq->work,
463 msecs_to_jiffies(devfreq->profile->polling_ms));
465 EXPORT_SYMBOL(devfreq_monitor_start);
468 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
469 * @devfreq: the devfreq instance.
471 * Helper function to stop devfreq device load monitoring. Function
472 * to be called from governor in response to DEVFREQ_GOV_STOP
473 * event when device is removed from devfreq framework.
475 void devfreq_monitor_stop(struct devfreq *devfreq)
477 if (devfreq->governor->interrupt_driven)
478 return;
480 cancel_delayed_work_sync(&devfreq->work);
482 EXPORT_SYMBOL(devfreq_monitor_stop);
485 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
486 * @devfreq: the devfreq instance.
488 * Helper function to suspend devfreq device load monitoring. Function
489 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
490 * event or when polling interval is set to zero.
492 * Note: Though this function is same as devfreq_monitor_stop(),
493 * intentionally kept separate to provide hooks for collecting
494 * transition statistics.
496 void devfreq_monitor_suspend(struct devfreq *devfreq)
498 	mutex_lock(&devfreq->lock);
/* Already suspended: nothing to do. */
 499 	if (devfreq->stop_polling) {
 500 		mutex_unlock(&devfreq->lock);
 501 		return;
/* Flush time-in-state accounting before we stop updating it. */
 504 	devfreq_update_status(devfreq, devfreq->previous_freq);
 505 	devfreq->stop_polling = true;
 506 	mutex_unlock(&devfreq->lock);
 508 	if (devfreq->governor->interrupt_driven)
 509 		return;
/* Cancel outside the lock: the work handler itself takes devfreq->lock. */
 511 	cancel_delayed_work_sync(&devfreq->work);
516 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
517 * @devfreq: the devfreq instance.
519 * Helper function to resume devfreq device load monitoring. Function
520 * to be called from governor in response to DEVFREQ_GOV_RESUME
521 * event or when polling interval is set to non-zero.
523 void devfreq_monitor_resume(struct devfreq *devfreq)
525 	unsigned long freq;
 527 	mutex_lock(&devfreq->lock);
/* Not suspended: nothing to resume. */
 528 	if (!devfreq->stop_polling)
 529 		goto out;
/* Interrupt-driven governors need only the bookkeeping below, no work item. */
 531 	if (devfreq->governor->interrupt_driven)
 532 		goto out_update;
 534 	if (!delayed_work_pending(&devfreq->work) &&
 535 			devfreq->profile->polling_ms)
 536 		queue_delayed_work(devfreq_wq, &devfreq->work,
 537 			msecs_to_jiffies(devfreq->profile->polling_ms));
/* Restart stats accounting from now, not from before the suspend. */
 539 out_update:
 540 	devfreq->stats.last_update = get_jiffies_64();
 541 	devfreq->stop_polling = false;
/* Refresh the cached frequency; the driver may have changed it while idle. */
 543 	if (devfreq->profile->get_cur_freq &&
 544 		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
 545 		devfreq->previous_freq = freq;
 547 out:
 548 	mutex_unlock(&devfreq->lock);
553 * devfreq_interval_update() - Update device devfreq monitoring interval
554 * @devfreq: the devfreq instance.
555 * @delay: new polling interval to be set.
557 * Helper function to set new load monitoring polling interval. Function
558 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
560 void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
562 	unsigned int cur_delay = devfreq->profile->polling_ms;
563 	unsigned int new_delay = *delay;
 565 	mutex_lock(&devfreq->lock);
 566 	devfreq->profile->polling_ms = new_delay;
/* While suspended, only record the new interval; resume will apply it. */
 568 	if (devfreq->stop_polling)
 569 		goto out;
 571 	if (devfreq->governor->interrupt_driven)
 572 		goto out;
 574 	/* if new delay is zero, stop polling */
 575 	if (!new_delay) {
/* Drop the lock before the sync cancel: the work handler takes this lock. */
 576 		mutex_unlock(&devfreq->lock);
 577 		cancel_delayed_work_sync(&devfreq->work);
 578 		return;
 581 	/* if current delay is zero, start polling with new delay */
 582 	if (!cur_delay) {
 583 		queue_delayed_work(devfreq_wq, &devfreq->work,
 584 			msecs_to_jiffies(devfreq->profile->polling_ms));
 585 		goto out;
 588 	/* if current delay is greater than new delay, restart polling */
 589 	if (cur_delay > new_delay) {
 590 		mutex_unlock(&devfreq->lock);
 591 		cancel_delayed_work_sync(&devfreq->work);
 592 		mutex_lock(&devfreq->lock);
/* stop_polling may have been set while the lock was dropped; re-check. */
 593 		if (!devfreq->stop_polling)
 594 			queue_delayed_work(devfreq_wq, &devfreq->work,
 595 				msecs_to_jiffies(devfreq->profile->polling_ms));
 597 out:
 598 	mutex_unlock(&devfreq->lock);
603 * devfreq_notifier_call() - Notify that the device frequency requirements
604 * has been changed out of devfreq framework.
605 * @nb: the notifier_block (supposed to be devfreq->nb)
606 * @type: not used
607 * @devp: not used
609 * Called by a notifier that uses devfreq->nb.
611 static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
612 				 void *devp)
614 	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
615 	int err = -EINVAL;
 617 	mutex_lock(&devfreq->lock);
/* Re-derive scaling limits from the (possibly changed) OPP table. */
 619 	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
 620 	if (!devfreq->scaling_min_freq)
 621 		goto out;
 623 	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
/* No usable max: fall back to "unconstrained" rather than 0. */
 624 	if (!devfreq->scaling_max_freq) {
 625 		devfreq->scaling_max_freq = ULONG_MAX;
 626 		goto out;
 629 	err = update_devfreq(devfreq);
 631 out:
 632 	mutex_unlock(&devfreq->lock);
 633 	if (err)
 634 		dev_err(devfreq->dev.parent,
 635 			"failed to update frequency from OPP notifier (%d)\n",
 636 			err);
/* Notifier chains expect NOTIFY_OK even when the update itself failed. */
 638 	return NOTIFY_OK;
642 * qos_notifier_call() - Common handler for QoS constraints.
643 * @devfreq: the devfreq instance.
645 static int qos_notifier_call(struct devfreq *devfreq)
647 int err;
649 mutex_lock(&devfreq->lock);
650 err = update_devfreq(devfreq);
651 mutex_unlock(&devfreq->lock);
652 if (err)
653 dev_err(devfreq->dev.parent,
654 "failed to update frequency from PM QoS (%d)\n",
655 err);
657 return NOTIFY_OK;
661 * qos_min_notifier_call() - Callback for QoS min_freq changes.
662 * @nb: Should be devfreq->nb_min
664 static int qos_min_notifier_call(struct notifier_block *nb,
665 unsigned long val, void *ptr)
667 return qos_notifier_call(container_of(nb, struct devfreq, nb_min));
671 * qos_max_notifier_call() - Callback for QoS max_freq changes.
672 * @nb: Should be devfreq->nb_max
674 static int qos_max_notifier_call(struct notifier_block *nb,
675 unsigned long val, void *ptr)
677 return qos_notifier_call(container_of(nb, struct devfreq, nb_max));
681 * devfreq_dev_release() - Callback for struct device to release the device.
682 * @dev: the devfreq device
684 * Remove devfreq from the list and release its resources.
686 static void devfreq_dev_release(struct device *dev)
688 	struct devfreq *devfreq = to_devfreq(dev);
689 	int err;
 691 	mutex_lock(&devfreq_list_lock);
 692 	list_del(&devfreq->node);
 693 	mutex_unlock(&devfreq_list_lock);
/* -ENOENT means the notifier was never added (early add_device failure). */
 695 	err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_max,
 696 					 DEV_PM_QOS_MAX_FREQUENCY);
 697 	if (err && err != -ENOENT)
 698 		dev_warn(dev->parent,
 699 			"Failed to remove max_freq notifier: %d\n", err);
 700 	err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_min,
 701 					 DEV_PM_QOS_MIN_FREQUENCY);
 702 	if (err && err != -ENOENT)
 703 		dev_warn(dev->parent,
 704 			"Failed to remove min_freq notifier: %d\n", err);
/* User QoS requests may likewise not exist yet; check before removing. */
 706 	if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) {
 707 		err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req);
 708 		if (err)
 709 			dev_warn(dev->parent,
 710 				"Failed to remove max_freq request: %d\n", err);
 712 	if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) {
 713 		err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req);
 714 		if (err)
 715 			dev_warn(dev->parent,
 716 				"Failed to remove min_freq request: %d\n", err);
/* Give the driver a chance to release its own resources. */
 719 	if (devfreq->profile->exit)
 720 		devfreq->profile->exit(devfreq->dev.parent);
 722 	mutex_destroy(&devfreq->lock);
 723 	kfree(devfreq);
727 * devfreq_add_device() - Add devfreq feature to the device
728 * @dev: the device to add devfreq feature.
729 * @profile: device-specific profile to run devfreq.
730 * @governor_name: name of the policy to choose frequency.
731 * @data: private data for the governor. The devfreq framework does not
732 * touch this value.
734 struct devfreq *devfreq_add_device(struct device *dev,
735 				   struct devfreq_dev_profile *profile,
736 				   const char *governor_name,
737 				   void *data)
739 	struct devfreq *devfreq;
740 	struct devfreq_governor *governor;
/* Function-static counter gives each devfreq a unique "devfreq%d" name. */
 741 	static atomic_t devfreq_no = ATOMIC_INIT(-1);
 742 	int err = 0;
 744 	if (!dev || !profile || !governor_name) {
 745 		dev_err(dev, "%s: Invalid parameters.\n", __func__);
 746 		return ERR_PTR(-EINVAL);
/* Only one devfreq instance is allowed per device. */
 749 	mutex_lock(&devfreq_list_lock);
 750 	devfreq = find_device_devfreq(dev);
 751 	mutex_unlock(&devfreq_list_lock);
 752 	if (!IS_ERR(devfreq)) {
 753 		dev_err(dev, "%s: devfreq device already exists!\n",
 754 			__func__);
 755 		err = -EINVAL;
 756 		goto err_out;
 759 	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
 760 	if (!devfreq) {
 761 		err = -ENOMEM;
 762 		goto err_out;
 765 	mutex_init(&devfreq->lock);
 766 	mutex_lock(&devfreq->lock);
 767 	devfreq->dev.parent = dev;
 768 	devfreq->dev.class = devfreq_class;
 769 	devfreq->dev.release = devfreq_dev_release;
 770 	INIT_LIST_HEAD(&devfreq->node);
 771 	devfreq->profile = profile;
 772 	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
 773 	devfreq->previous_freq = profile->initial_freq;
 774 	devfreq->last_status.current_frequency = profile->initial_freq;
 775 	devfreq->data = data;
 776 	devfreq->nb.notifier_call = devfreq_notifier_call;
/* No driver-supplied table: build one from OPPs. set_freq_table() may sleep,
 * so drop our lock around it. */
 778 	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
 779 		mutex_unlock(&devfreq->lock);
 780 		err = set_freq_table(devfreq);
 781 		if (err < 0)
 782 			goto err_dev;
 783 		mutex_lock(&devfreq->lock);
 786 	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
 787 	if (!devfreq->scaling_min_freq) {
 788 		mutex_unlock(&devfreq->lock);
 789 		err = -EINVAL;
 790 		goto err_dev;
 793 	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
 794 	if (!devfreq->scaling_max_freq) {
 795 		mutex_unlock(&devfreq->lock);
 796 		err = -EINVAL;
 797 		goto err_dev;
 800 	devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
 801 	atomic_set(&devfreq->suspend_count, 0);
 803 	dev_set_name(&devfreq->dev, "devfreq%d",
 804 				atomic_inc_return(&devfreq_no));
 805 	err = device_register(&devfreq->dev);
/* After a failed device_register(), put_device() drops the last reference
 * and the release callback frees devfreq — do not fall through to kfree. */
 806 	if (err) {
 807 		mutex_unlock(&devfreq->lock);
 808 		put_device(&devfreq->dev);
 809 		goto err_out;
/* Stats buffers are devm-managed: freed automatically with devfreq->dev. */
 812 	devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev,
 813 			array3_size(sizeof(unsigned int),
 814 				    devfreq->profile->max_state,
 815 				    devfreq->profile->max_state),
 816 			GFP_KERNEL);
 817 	if (!devfreq->stats.trans_table) {
 818 		mutex_unlock(&devfreq->lock);
 819 		err = -ENOMEM;
 820 		goto err_devfreq;
 823 	devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev,
 824 			devfreq->profile->max_state,
 825 			sizeof(*devfreq->stats.time_in_state),
 826 			GFP_KERNEL);
 827 	if (!devfreq->stats.time_in_state) {
 828 		mutex_unlock(&devfreq->lock);
 829 		err = -ENOMEM;
 830 		goto err_devfreq;
 833 	devfreq->stats.total_trans = 0;
 834 	devfreq->stats.last_update = get_jiffies_64();
 836 	srcu_init_notifier_head(&devfreq->transition_notifier_list);
 838 	mutex_unlock(&devfreq->lock);
/* User-tunable min/max QoS requests back the sysfs min_freq/max_freq knobs. */
 840 	err = dev_pm_qos_add_request(dev, &devfreq->user_min_freq_req,
 841 				     DEV_PM_QOS_MIN_FREQUENCY, 0);
 842 	if (err < 0)
 843 		goto err_devfreq;
 844 	err = dev_pm_qos_add_request(dev, &devfreq->user_max_freq_req,
 845 				     DEV_PM_QOS_MAX_FREQUENCY,
 846 				     PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
 847 	if (err < 0)
 848 		goto err_devfreq;
 850 	devfreq->nb_min.notifier_call = qos_min_notifier_call;
 851 	err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_min,
 852 				      DEV_PM_QOS_MIN_FREQUENCY);
 853 	if (err)
 854 		goto err_devfreq;
 856 	devfreq->nb_max.notifier_call = qos_max_notifier_call;
 857 	err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_max,
 858 				      DEV_PM_QOS_MAX_FREQUENCY);
 859 	if (err)
 860 		goto err_devfreq;
 862 	mutex_lock(&devfreq_list_lock);
/* May drop and retake devfreq_list_lock internally to load the module. */
 864 	governor = try_then_request_governor(devfreq->governor_name);
 865 	if (IS_ERR(governor)) {
 866 		dev_err(dev, "%s: Unable to find governor for the device\n",
 867 			__func__);
 868 		err = PTR_ERR(governor);
 869 		goto err_init;
 872 	devfreq->governor = governor;
 873 	err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
 874 						NULL);
 875 	if (err) {
 876 		dev_err(dev, "%s: Unable to start governor for the device\n",
 877 			__func__);
 878 		goto err_init;
 881 	list_add(&devfreq->node, &devfreq_list);
 883 	mutex_unlock(&devfreq_list_lock);
 885 	return devfreq;
/* err_init falls through to err_devfreq after releasing the list lock;
 * devfreq_remove_device() unregisters the device, whose release callback
 * frees devfreq, hence the NULL before the (now no-op) kfree at err_dev. */
 887 err_init:
 888 	mutex_unlock(&devfreq_list_lock);
 889 err_devfreq:
 890 	devfreq_remove_device(devfreq);
 891 	devfreq = NULL;
 892 err_dev:
 893 	kfree(devfreq);
 894 err_out:
 895 	return ERR_PTR(err);
900 * devfreq_remove_device() - Remove devfreq feature from a device.
901 * @devfreq: the devfreq instance to be removed
903 * The opposite of devfreq_add_device().
905 int devfreq_remove_device(struct devfreq *devfreq)
907 if (!devfreq)
908 return -EINVAL;
910 if (devfreq->governor)
911 devfreq->governor->event_handler(devfreq,
912 DEVFREQ_GOV_STOP, NULL);
913 device_unregister(&devfreq->dev);
915 return 0;
917 EXPORT_SYMBOL(devfreq_remove_device);
/*
 * devres match callback: true when the managed resource wraps @data.
 * A NULL resource slot indicates devres corruption; warn and report
 * no match.
 */
static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
{
	struct devfreq **slot = res;

	if (WARN_ON(!slot || !*slot))
		return 0;

	return *slot == data;
}
/* devres release callback: remove the devfreq stored in the resource. */
static void devm_devfreq_dev_release(struct device *dev, void *res)
{
	struct devfreq **slot = res;

	devfreq_remove_device(*slot);
}
935 * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
936 * @dev: the device to add devfreq feature.
937 * @profile: device-specific profile to run devfreq.
938 * @governor_name: name of the policy to choose frequency.
939 * @data: private data for the governor. The devfreq framework does not
940 * touch this value.
942 * This function manages automatically the memory of devfreq device using device
943 * resource management and simplify the free operation for memory of devfreq
944 * device.
946 struct devfreq *devm_devfreq_add_device(struct device *dev,
947 					struct devfreq_dev_profile *profile,
948 					const char *governor_name,
949 					void *data)
951 	struct devfreq **ptr, *devfreq;
/* Allocate the devres slot first so a failure here leaves nothing to undo. */
 953 	ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
 954 	if (!ptr)
 955 		return ERR_PTR(-ENOMEM);
 957 	devfreq = devfreq_add_device(dev, profile, governor_name, data);
/* Propagate the original error pointer; free only our own devres slot. */
 958 	if (IS_ERR(devfreq)) {
 959 		devres_free(ptr);
 960 		return devfreq;
/* Hand ownership to devres: removal happens automatically on dev teardown. */
 963 	*ptr = devfreq;
 964 	devres_add(dev, ptr);
 966 	return devfreq;
970 #ifdef CONFIG_OF
972 * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
973 * @dev - instance to the given device
974 * @index - index into list of devfreq
976 * return the instance of devfreq device
978 struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
980 	struct device_node *node;
981 	struct devfreq *devfreq;
 983 	if (!dev)
 984 		return ERR_PTR(-EINVAL);
 986 	if (!dev->of_node)
 987 		return ERR_PTR(-EINVAL);
/* Resolve the index-th entry of the "devfreq" phandle list. */
 989 	node = of_parse_phandle(dev->of_node, "devfreq", index);
 990 	if (!node)
 991 		return ERR_PTR(-ENODEV);
/* Match the phandle target against each devfreq's parent device node. */
 993 	mutex_lock(&devfreq_list_lock);
 994 	list_for_each_entry(devfreq, &devfreq_list, node) {
 995 		if (devfreq->dev.parent
 996 			&& devfreq->dev.parent->of_node == node) {
 997 			mutex_unlock(&devfreq_list_lock);
 998 			of_node_put(node);
 999 			return devfreq;
 1002 	mutex_unlock(&devfreq_list_lock);
 1003 	of_node_put(node);
/* The target device may simply not have probed yet; let the caller retry. */
 1005 	return ERR_PTR(-EPROBE_DEFER);
1007 #else
/* Without CONFIG_OF there is no devicetree to look up. */
1008 struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
 1010 	return ERR_PTR(-ENODEV);
 1012 #endif /* CONFIG_OF */
 1013 EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
1016 * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
1017 * @dev: the device from which to remove devfreq feature.
1018 * @devfreq: the devfreq instance to be removed
1020 void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
/* devres_release() finds the matching slot and runs the release callback;
 * a nonzero return means @devfreq was never devm-registered on @dev. */
 1022 	WARN_ON(devres_release(dev, devm_devfreq_dev_release,
 1023 			       devm_devfreq_dev_match, devfreq));
 1025 EXPORT_SYMBOL(devm_devfreq_remove_device);
1028 * devfreq_suspend_device() - Suspend devfreq of a device.
1029 * @devfreq: the devfreq instance to be suspended
1031 * This function is intended to be called by the pm callbacks
1032 * (e.g., runtime_suspend, suspend) of the device driver that
1033 * holds the devfreq.
1035 int devfreq_suspend_device(struct devfreq *devfreq)
1037 	int ret;
 1039 	if (!devfreq)
 1040 		return -EINVAL;
/* Nested suspends are counted; only the first one does real work. */
 1042 	if (atomic_inc_return(&devfreq->suspend_count) > 1)
 1043 		return 0;
 1045 	if (devfreq->governor) {
 1046 		ret = devfreq->governor->event_handler(devfreq,
 1047 					DEVFREQ_GOV_SUSPEND, NULL);
 1048 		if (ret)
 1049 			return ret;
/* Optionally park the device at its designated suspend OPP frequency. */
 1052 	if (devfreq->suspend_freq) {
 1053 		mutex_lock(&devfreq->lock);
 1054 		ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
 1055 		mutex_unlock(&devfreq->lock);
 1056 		if (ret)
 1057 			return ret;
 1060 	return 0;
1065 * devfreq_resume_device() - Resume devfreq of a device.
1066 * @devfreq: the devfreq instance to be resumed
1068 * This function is intended to be called by the pm callbacks
1069 * (e.g., runtime_resume, resume) of the device driver that
1070 * holds the devfreq.
1072 int devfreq_resume_device(struct devfreq *devfreq)
1074 	int ret;
 1076 	if (!devfreq)
 1077 		return -EINVAL;
/* Balance nested suspends; only the last resume does real work. */
 1079 	if (atomic_dec_return(&devfreq->suspend_count) >= 1)
 1080 		return 0;
/* Restore the frequency recorded by devfreq_set_target() at suspend time. */
 1082 	if (devfreq->resume_freq) {
 1083 		mutex_lock(&devfreq->lock);
 1084 		ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
 1085 		mutex_unlock(&devfreq->lock);
 1086 		if (ret)
 1087 			return ret;
 1090 	if (devfreq->governor) {
 1091 		ret = devfreq->governor->event_handler(devfreq,
 1092 					DEVFREQ_GOV_RESUME, NULL);
 1093 		if (ret)
 1094 			return ret;
 1097 	return 0;
1102 * devfreq_suspend() - Suspend devfreq governors and devices
1104 * Called during system wide Suspend/Hibernate cycles for suspending governors
1105 * and devices preserving the state for resume. On some platforms the devfreq
1106 * device must have precise state (frequency) after resume in order to provide
1107 * fully operating setup.
1109 void devfreq_suspend(void)
1111 struct devfreq *devfreq;
1112 int ret;
1114 mutex_lock(&devfreq_list_lock);
1115 list_for_each_entry(devfreq, &devfreq_list, node) {
1116 ret = devfreq_suspend_device(devfreq);
1117 if (ret)
1118 dev_err(&devfreq->dev,
1119 "failed to suspend devfreq device\n");
1121 mutex_unlock(&devfreq_list_lock);
1125 * devfreq_resume() - Resume devfreq governors and devices
1127 * Called during system wide Suspend/Hibernate cycle for resuming governors and
1128 * devices that are suspended with devfreq_suspend().
1130 void devfreq_resume(void)
1132 struct devfreq *devfreq;
1133 int ret;
1135 mutex_lock(&devfreq_list_lock);
1136 list_for_each_entry(devfreq, &devfreq_list, node) {
1137 ret = devfreq_resume_device(devfreq);
1138 if (ret)
1139 dev_warn(&devfreq->dev,
1140 "failed to resume devfreq device\n");
1142 mutex_unlock(&devfreq_list_lock);
1146 * devfreq_add_governor() - Add devfreq governor
1147 * @governor: the devfreq governor to be added
1149 int devfreq_add_governor(struct devfreq_governor *governor)
1151 	struct devfreq_governor *g;
1152 	struct devfreq *devfreq;
1153 	int err = 0;
 1155 	if (!governor) {
 1156 		pr_err("%s: Invalid parameters.\n", __func__);
 1157 		return -EINVAL;
/* Governor names are unique; reject duplicate registration. */
 1160 	mutex_lock(&devfreq_list_lock);
 1161 	g = find_devfreq_governor(governor->name);
 1162 	if (!IS_ERR(g)) {
 1163 		pr_err("%s: governor %s already registered\n", __func__,
 1164 		       g->name);
 1165 		err = -EINVAL;
 1166 		goto err_out;
 1169 	list_add(&governor->node, &devfreq_governor_list);
/* Attach the new governor to any devfreq that was waiting for it by name. */
 1171 	list_for_each_entry(devfreq, &devfreq_list, node) {
 1172 		int ret = 0;
 1173 		struct device *dev = devfreq->dev.parent;
 1175 		if (!strncmp(devfreq->governor_name, governor->name,
 1176 			     DEVFREQ_NAME_LEN)) {
 1177 			/* The following should never occur */
 1178 			if (devfreq->governor) {
 1179 				dev_warn(dev,
 1180 					 "%s: Governor %s already present\n",
 1181 					 __func__, devfreq->governor->name);
/* Best effort: stop the stale governor before replacing it. */
 1182 				ret = devfreq->governor->event_handler(devfreq,
 1183 							DEVFREQ_GOV_STOP, NULL);
 1184 				if (ret) {
 1185 					dev_warn(dev,
 1186 						 "%s: Governor %s stop = %d\n",
 1187 						 __func__,
 1188 						 devfreq->governor->name, ret);
 1190 				/* Fall through */
 1192 			devfreq->governor = governor;
 1193 			ret = devfreq->governor->event_handler(devfreq,
 1194 						DEVFREQ_GOV_START, NULL);
/* A start failure is logged, not propagated; registration still succeeds. */
 1195 			if (ret) {
 1196 				dev_warn(dev, "%s: Governor %s start=%d\n",
 1197 					 __func__, devfreq->governor->name,
 1198 					 ret);
 1203 err_out:
 1204 	mutex_unlock(&devfreq_list_lock);
 1206 	return err;
/**
 * devfreq_remove_governor() - Remove devfreq feature from a device.
 * @governor:	the devfreq governor to be removed
 *
 * Stops @governor on every devfreq device currently using it, clears the
 * devices' governor pointers, and removes @governor from the global list.
 *
 * Return: 0 on success, -EINVAL on a NULL argument, or the lookup error if
 * @governor was never registered.
 */
int devfreq_remove_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (IS_ERR(g)) {
		pr_err("%s: governor %s not registered\n", __func__,
		       governor->name);
		err = PTR_ERR(g);
		goto err_out;
	}
	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* we should have a devfreq governor! */
			if (!devfreq->governor) {
				dev_warn(dev, "%s: Governor %s NOT present\n",
					 __func__, governor->name);
				continue;
				/* Fall through */
			}
			/* Stop failures are logged, but removal proceeds. */
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_STOP, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s stop=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
			devfreq->governor = NULL;
		}
	}

	list_del(&governor->node);
err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_remove_governor);
1265 static ssize_t name_show(struct device *dev,
1266 struct device_attribute *attr, char *buf)
1268 struct devfreq *devfreq = to_devfreq(dev);
1269 return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent));
1271 static DEVICE_ATTR_RO(name);
1273 static ssize_t governor_show(struct device *dev,
1274 struct device_attribute *attr, char *buf)
1276 if (!to_devfreq(dev)->governor)
1277 return -EINVAL;
1279 return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
1282 static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
1283 const char *buf, size_t count)
1285 struct devfreq *df = to_devfreq(dev);
1286 int ret;
1287 char str_governor[DEVFREQ_NAME_LEN + 1];
1288 const struct devfreq_governor *governor, *prev_governor;
1290 ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
1291 if (ret != 1)
1292 return -EINVAL;
1294 mutex_lock(&devfreq_list_lock);
1295 governor = try_then_request_governor(str_governor);
1296 if (IS_ERR(governor)) {
1297 ret = PTR_ERR(governor);
1298 goto out;
1300 if (df->governor == governor) {
1301 ret = 0;
1302 goto out;
1303 } else if ((df->governor && df->governor->immutable) ||
1304 governor->immutable) {
1305 ret = -EINVAL;
1306 goto out;
1309 if (df->governor) {
1310 ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
1311 if (ret) {
1312 dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
1313 __func__, df->governor->name, ret);
1314 goto out;
1317 prev_governor = df->governor;
1318 df->governor = governor;
1319 strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
1320 ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1321 if (ret) {
1322 dev_warn(dev, "%s: Governor %s not started(%d)\n",
1323 __func__, df->governor->name, ret);
1324 df->governor = prev_governor;
1325 strncpy(df->governor_name, prev_governor->name,
1326 DEVFREQ_NAME_LEN);
1327 ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1328 if (ret) {
1329 dev_err(dev,
1330 "%s: reverting to Governor %s failed (%d)\n",
1331 __func__, df->governor_name, ret);
1332 df->governor = NULL;
1335 out:
1336 mutex_unlock(&devfreq_list_lock);
1338 if (!ret)
1339 ret = count;
1340 return ret;
1342 static DEVICE_ATTR_RW(governor);
/* sysfs "available_governors": space-separated list of selectable governors. */
static ssize_t available_governors_show(struct device *d,
					struct device_attribute *attr,
					char *buf)
{
	struct devfreq *df = to_devfreq(d);
	ssize_t count = 0;

	mutex_lock(&devfreq_list_lock);

	/*
	 * The devfreq with immutable governor (e.g., passive) shows
	 * only own governor.
	 */
	if (df->governor && df->governor->immutable) {
		count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
				  "%s ", df->governor_name);
	/*
	 * The devfreq device shows the registered governor except for
	 * immutable governors such as passive governor .
	 */
	} else {
		struct devfreq_governor *governor;

		list_for_each_entry(governor, &devfreq_governor_list, node) {
			if (governor->immutable)
				continue;
			/* -2 reserves room for the final "\n" and the NUL. */
			count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
					   "%s ", governor->name);
		}
	}

	mutex_unlock(&devfreq_list_lock);

	/* Truncate the trailing space */
	if (count)
		count--;
	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_governors);
1387 static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
1388 char *buf)
1390 unsigned long freq;
1391 struct devfreq *devfreq = to_devfreq(dev);
1393 if (devfreq->profile->get_cur_freq &&
1394 !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
1395 return sprintf(buf, "%lu\n", freq);
1397 return sprintf(buf, "%lu\n", devfreq->previous_freq);
1399 static DEVICE_ATTR_RO(cur_freq);
1401 static ssize_t target_freq_show(struct device *dev,
1402 struct device_attribute *attr, char *buf)
1404 return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
1406 static DEVICE_ATTR_RO(target_freq);
1408 static ssize_t polling_interval_show(struct device *dev,
1409 struct device_attribute *attr, char *buf)
1411 return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
1414 static ssize_t polling_interval_store(struct device *dev,
1415 struct device_attribute *attr,
1416 const char *buf, size_t count)
1418 struct devfreq *df = to_devfreq(dev);
1419 unsigned int value;
1420 int ret;
1422 if (!df->governor)
1423 return -EINVAL;
1425 ret = sscanf(buf, "%u", &value);
1426 if (ret != 1)
1427 return -EINVAL;
1429 df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
1430 ret = count;
1432 return ret;
1434 static DEVICE_ATTR_RW(polling_interval);
1436 static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
1437 const char *buf, size_t count)
1439 struct devfreq *df = to_devfreq(dev);
1440 unsigned long value;
1441 int ret;
1444 * Protect against theoretical sysfs writes between
1445 * device_add and dev_pm_qos_add_request
1447 if (!dev_pm_qos_request_active(&df->user_min_freq_req))
1448 return -EAGAIN;
1450 ret = sscanf(buf, "%lu", &value);
1451 if (ret != 1)
1452 return -EINVAL;
1454 /* Round down to kHz for PM QoS */
1455 ret = dev_pm_qos_update_request(&df->user_min_freq_req,
1456 value / HZ_PER_KHZ);
1457 if (ret < 0)
1458 return ret;
1460 return count;
1463 static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
1464 char *buf)
1466 struct devfreq *df = to_devfreq(dev);
1467 unsigned long min_freq, max_freq;
1469 mutex_lock(&df->lock);
1470 get_freq_range(df, &min_freq, &max_freq);
1471 mutex_unlock(&df->lock);
1473 return sprintf(buf, "%lu\n", min_freq);
1475 static DEVICE_ATTR_RW(min_freq);
1477 static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
1478 const char *buf, size_t count)
1480 struct devfreq *df = to_devfreq(dev);
1481 unsigned long value;
1482 int ret;
1485 * Protect against theoretical sysfs writes between
1486 * device_add and dev_pm_qos_add_request
1488 if (!dev_pm_qos_request_active(&df->user_max_freq_req))
1489 return -EINVAL;
1491 ret = sscanf(buf, "%lu", &value);
1492 if (ret != 1)
1493 return -EINVAL;
1496 * PM QoS frequencies are in kHz so we need to convert. Convert by
1497 * rounding upwards so that the acceptable interval never shrinks.
1499 * For example if the user writes "666666666" to sysfs this value will
1500 * be converted to 666667 kHz and back to 666667000 Hz before an OPP
1501 * lookup, this ensures that an OPP of 666666666Hz is still accepted.
1503 * A value of zero means "no limit".
1505 if (value)
1506 value = DIV_ROUND_UP(value, HZ_PER_KHZ);
1507 else
1508 value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
1510 ret = dev_pm_qos_update_request(&df->user_max_freq_req, value);
1511 if (ret < 0)
1512 return ret;
1514 return count;
1517 static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
1518 char *buf)
1520 struct devfreq *df = to_devfreq(dev);
1521 unsigned long min_freq, max_freq;
1523 mutex_lock(&df->lock);
1524 get_freq_range(df, &min_freq, &max_freq);
1525 mutex_unlock(&df->lock);
1527 return sprintf(buf, "%lu\n", max_freq);
1529 static DEVICE_ATTR_RW(max_freq);
1531 static ssize_t available_frequencies_show(struct device *d,
1532 struct device_attribute *attr,
1533 char *buf)
1535 struct devfreq *df = to_devfreq(d);
1536 ssize_t count = 0;
1537 int i;
1539 mutex_lock(&df->lock);
1541 for (i = 0; i < df->profile->max_state; i++)
1542 count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
1543 "%lu ", df->profile->freq_table[i]);
1545 mutex_unlock(&df->lock);
1546 /* Truncate the trailing space */
1547 if (count)
1548 count--;
1550 count += sprintf(&buf[count], "\n");
1552 return count;
1554 static DEVICE_ATTR_RO(available_frequencies);
1556 static ssize_t trans_stat_show(struct device *dev,
1557 struct device_attribute *attr, char *buf)
1559 struct devfreq *devfreq = to_devfreq(dev);
1560 ssize_t len;
1561 int i, j;
1562 unsigned int max_state = devfreq->profile->max_state;
1564 if (max_state == 0)
1565 return sprintf(buf, "Not Supported.\n");
1567 mutex_lock(&devfreq->lock);
1568 if (!devfreq->stop_polling &&
1569 devfreq_update_status(devfreq, devfreq->previous_freq)) {
1570 mutex_unlock(&devfreq->lock);
1571 return 0;
1573 mutex_unlock(&devfreq->lock);
1575 len = sprintf(buf, " From : To\n");
1576 len += sprintf(buf + len, " :");
1577 for (i = 0; i < max_state; i++)
1578 len += sprintf(buf + len, "%10lu",
1579 devfreq->profile->freq_table[i]);
1581 len += sprintf(buf + len, " time(ms)\n");
1583 for (i = 0; i < max_state; i++) {
1584 if (devfreq->profile->freq_table[i]
1585 == devfreq->previous_freq) {
1586 len += sprintf(buf + len, "*");
1587 } else {
1588 len += sprintf(buf + len, " ");
1590 len += sprintf(buf + len, "%10lu:",
1591 devfreq->profile->freq_table[i]);
1592 for (j = 0; j < max_state; j++)
1593 len += sprintf(buf + len, "%10u",
1594 devfreq->stats.trans_table[(i * max_state) + j]);
1596 len += sprintf(buf + len, "%10llu\n", (u64)
1597 jiffies64_to_msecs(devfreq->stats.time_in_state[i]));
1600 len += sprintf(buf + len, "Total transition : %u\n",
1601 devfreq->stats.total_trans);
1602 return len;
1605 static ssize_t trans_stat_store(struct device *dev,
1606 struct device_attribute *attr,
1607 const char *buf, size_t count)
1609 struct devfreq *df = to_devfreq(dev);
1610 int err, value;
1612 if (df->profile->max_state == 0)
1613 return count;
1615 err = kstrtoint(buf, 10, &value);
1616 if (err || value != 0)
1617 return -EINVAL;
1619 mutex_lock(&df->lock);
1620 memset(df->stats.time_in_state, 0, (df->profile->max_state *
1621 sizeof(*df->stats.time_in_state)));
1622 memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int),
1623 df->profile->max_state,
1624 df->profile->max_state));
1625 df->stats.total_trans = 0;
1626 df->stats.last_update = get_jiffies_64();
1627 mutex_unlock(&df->lock);
1629 return count;
1631 static DEVICE_ATTR_RW(trans_stat);
/* Attributes exposed for every device under /sys/class/devfreq/<dev>/. */
static struct attribute *devfreq_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_polling_interval.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devfreq);
/**
 * devfreq_summary_show() - Show the summary of the devfreq devices
 * @s:		seq_file instance to show the summary of devfreq devices
 * @data:	not used
 *
 * Show the summary of the devfreq devices via 'devfreq_summary' debugfs file.
 * It helps that user can know the detailed information of the devfreq devices.
 *
 * Return 0 always because it shows the information without any data change.
 */
static int devfreq_summary_show(struct seq_file *s, void *data)
{
	struct devfreq *devfreq;
	struct devfreq *p_devfreq = NULL;
	unsigned long cur_freq, min_freq, max_freq;
	unsigned int polling_ms;

	seq_printf(s, "%-30s %-10s %-10s %-15s %10s %12s %12s %12s\n",
			"dev_name",
			"dev",
			"parent_dev",
			"governor",
			"polling_ms",
			"cur_freq_Hz",
			"min_freq_Hz",
			"max_freq_Hz");
	seq_printf(s, "%30s %10s %10s %15s %10s %12s %12s %12s\n",
			"------------------------------",
			"----------",
			"----------",
			"---------------",
			"----------",
			"------------",
			"------------",
			"------------");

	mutex_lock(&devfreq_list_lock);

	list_for_each_entry_reverse(devfreq, &devfreq_list, node) {
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
		/* Passive devices track a parent; resolve it for the table. */
		if (!strncmp(devfreq->governor_name, DEVFREQ_GOV_PASSIVE,
							DEVFREQ_NAME_LEN)) {
			struct devfreq_passive_data *data = devfreq->data;

			if (data)
				p_devfreq = data->parent;
		} else {
			p_devfreq = NULL;
		}
#endif

		mutex_lock(&devfreq->lock);
		/* NOTE(review): comma operators below look like typos for
		 * ';' but are behaviorally identical here — left as-is. */
		cur_freq = devfreq->previous_freq,
		get_freq_range(devfreq, &min_freq, &max_freq);
		polling_ms = devfreq->profile->polling_ms,
		mutex_unlock(&devfreq->lock);

		seq_printf(s,
			"%-30s %-10s %-10s %-15s %10d %12ld %12ld %12ld\n",
			dev_name(devfreq->dev.parent),
			dev_name(&devfreq->dev),
			p_devfreq ? dev_name(&p_devfreq->dev) : "null",
			devfreq->governor_name,
			polling_ms,
			cur_freq,
			min_freq,
			max_freq);
	}

	mutex_unlock(&devfreq_list_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(devfreq_summary);
/*
 * Subsystem init: create the devfreq class, the freezable monitoring
 * workqueue, and the debugfs summary file.  Ordering matters: the class
 * must exist before the workqueue check can tear it down on failure,
 * and dev_groups must be set before devices are added to the class.
 */
static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}

	/* Freezable so monitoring pauses across system suspend. */
	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	if (!devfreq_wq) {
		class_destroy(devfreq_class);
		pr_err("%s: couldn't create workqueue\n", __FILE__);
		return -ENOMEM;
	}
	devfreq_class->dev_groups = devfreq_groups;

	/* debugfs errors are intentionally ignored (debug-only feature). */
	devfreq_debugfs = debugfs_create_dir("devfreq", NULL);
	debugfs_create_file("devfreq_summary", 0444,
				devfreq_debugfs, NULL,
				&devfreq_summary_fops);

	return 0;
}
subsys_initcall(devfreq_init);
1749 * The following are helper functions for devfreq user device drivers with
1750 * OPP framework.
1754 * devfreq_recommended_opp() - Helper function to get proper OPP for the
1755 * freq value given to target callback.
1756 * @dev: The devfreq user device. (parent of devfreq)
1757 * @freq: The frequency given to target function
1758 * @flags: Flags handed from devfreq framework.
1760 * The callers are required to call dev_pm_opp_put() for the returned OPP after
1761 * use.
1763 struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
1764 unsigned long *freq,
1765 u32 flags)
1767 struct dev_pm_opp *opp;
1769 if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
1770 /* The freq is an upper bound. opp should be lower */
1771 opp = dev_pm_opp_find_freq_floor(dev, freq);
1773 /* If not available, use the closest opp */
1774 if (opp == ERR_PTR(-ERANGE))
1775 opp = dev_pm_opp_find_freq_ceil(dev, freq);
1776 } else {
1777 /* The freq is an lower bound. opp should be higher */
1778 opp = dev_pm_opp_find_freq_ceil(dev, freq);
1780 /* If not available, use the closest opp */
1781 if (opp == ERR_PTR(-ERANGE))
1782 opp = dev_pm_opp_find_freq_floor(dev, freq);
1785 return opp;
1787 EXPORT_SYMBOL(devfreq_recommended_opp);
/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *				     for any changes in the OPP availability
 *				     changes
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 *
 * Return: 0 on success, or the error from dev_pm_opp_register_notifier().
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	return dev_pm_opp_register_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_register_opp_notifier);
/**
 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
 *				       notified for any changes in the OPP
 *				       availability changes anymore.
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 *
 * At exit() callback of devfreq_dev_profile, this must be included if
 * devfreq_recommended_opp is used.
 *
 * Return: 0 on success, or the error from dev_pm_opp_unregister_notifier().
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_unregister_opp_notifier);
/* devres release callback: drop the OPP notifier for the stored devfreq. */
static void devm_devfreq_opp_release(struct device *dev, void *res)
{
	devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
}
1824 * devm_devfreq_register_opp_notifier() - Resource-managed
1825 * devfreq_register_opp_notifier()
1826 * @dev: The devfreq user device. (parent of devfreq)
1827 * @devfreq: The devfreq object.
1829 int devm_devfreq_register_opp_notifier(struct device *dev,
1830 struct devfreq *devfreq)
1832 struct devfreq **ptr;
1833 int ret;
1835 ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
1836 if (!ptr)
1837 return -ENOMEM;
1839 ret = devfreq_register_opp_notifier(dev, devfreq);
1840 if (ret) {
1841 devres_free(ptr);
1842 return ret;
1845 *ptr = devfreq;
1846 devres_add(dev, ptr);
1848 return 0;
1850 EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
/**
 * devm_devfreq_unregister_opp_notifier() - Resource-managed
 *					    devfreq_unregister_opp_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
void devm_devfreq_unregister_opp_notifier(struct device *dev,
					  struct devfreq *devfreq)
{
	/* Releasing the devres entry runs devm_devfreq_opp_release(). */
	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
1867 * devfreq_register_notifier() - Register a driver with devfreq
1868 * @devfreq: The devfreq object.
1869 * @nb: The notifier block to register.
1870 * @list: DEVFREQ_TRANSITION_NOTIFIER.
1872 int devfreq_register_notifier(struct devfreq *devfreq,
1873 struct notifier_block *nb,
1874 unsigned int list)
1876 int ret = 0;
1878 if (!devfreq)
1879 return -EINVAL;
1881 switch (list) {
1882 case DEVFREQ_TRANSITION_NOTIFIER:
1883 ret = srcu_notifier_chain_register(
1884 &devfreq->transition_notifier_list, nb);
1885 break;
1886 default:
1887 ret = -EINVAL;
1890 return ret;
1892 EXPORT_SYMBOL(devfreq_register_notifier);
1895 * devfreq_unregister_notifier() - Unregister a driver with devfreq
1896 * @devfreq: The devfreq object.
1897 * @nb: The notifier block to be unregistered.
1898 * @list: DEVFREQ_TRANSITION_NOTIFIER.
1900 int devfreq_unregister_notifier(struct devfreq *devfreq,
1901 struct notifier_block *nb,
1902 unsigned int list)
1904 int ret = 0;
1906 if (!devfreq)
1907 return -EINVAL;
1909 switch (list) {
1910 case DEVFREQ_TRANSITION_NOTIFIER:
1911 ret = srcu_notifier_chain_unregister(
1912 &devfreq->transition_notifier_list, nb);
1913 break;
1914 default:
1915 ret = -EINVAL;
1918 return ret;
1920 EXPORT_SYMBOL(devfreq_unregister_notifier);
/* devres payload recording how to undo a devfreq notifier registration. */
struct devfreq_notifier_devres {
	struct devfreq *devfreq;
	struct notifier_block *nb;
	unsigned int list;
};

/* devres release callback: unregister the recorded notifier. */
static void devm_devfreq_notifier_release(struct device *dev, void *res)
{
	struct devfreq_notifier_devres *this = res;

	devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
}
1936 * devm_devfreq_register_notifier()
1937 * - Resource-managed devfreq_register_notifier()
1938 * @dev: The devfreq user device. (parent of devfreq)
1939 * @devfreq: The devfreq object.
1940 * @nb: The notifier block to be unregistered.
1941 * @list: DEVFREQ_TRANSITION_NOTIFIER.
1943 int devm_devfreq_register_notifier(struct device *dev,
1944 struct devfreq *devfreq,
1945 struct notifier_block *nb,
1946 unsigned int list)
1948 struct devfreq_notifier_devres *ptr;
1949 int ret;
1951 ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
1952 GFP_KERNEL);
1953 if (!ptr)
1954 return -ENOMEM;
1956 ret = devfreq_register_notifier(devfreq, nb, list);
1957 if (ret) {
1958 devres_free(ptr);
1959 return ret;
1962 ptr->devfreq = devfreq;
1963 ptr->nb = nb;
1964 ptr->list = list;
1965 devres_add(dev, ptr);
1967 return 0;
1969 EXPORT_SYMBOL(devm_devfreq_register_notifier);
/**
 * devm_devfreq_unregister_notifier()
 *	- Resource-managed devfreq_unregister_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be unregistered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
void devm_devfreq_unregister_notifier(struct device *dev,
				      struct devfreq *devfreq,
				      struct notifier_block *nb,
				      unsigned int list)
{
	/* Releasing the devres entry runs devm_devfreq_notifier_release(). */
	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_notifier);