// SPDX-License-Identifier: GPL-2.0-only
/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 */

#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/of.h>
#include <linux/pm_qos.h>
#include "governor.h"

#define CREATE_TRACE_POINTS
#include <trace/events/devfreq.h>

#define HZ_PER_KHZ	1000
static struct class *devfreq_class;
static struct dentry *devfreq_debugfs;

/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
static struct workqueue_struct *devfreq_wq;

/* The list of all device-devfreq governors */
static LIST_HEAD(devfreq_governor_list);
/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);
/**
 * find_device_devfreq() - find devfreq struct using device pointer
 * @dev:	device pointer used to lookup device devfreq.
 *
 * Search the list of device devfreqs and return the matched device's
 * devfreq info. devfreq_list_lock should be held by the caller.
 */
static struct devfreq *find_device_devfreq(struct device *dev)
{
	struct devfreq *tmp_devfreq;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
		if (tmp_devfreq->dev.parent == dev)
			return tmp_devfreq;
	}

	return ERR_PTR(-ENODEV);
}
static unsigned long find_available_min_freq(struct devfreq *devfreq)
{
	struct dev_pm_opp *opp;
	unsigned long min_freq = 0;

	opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
	if (IS_ERR(opp))
		min_freq = 0;
	else
		dev_pm_opp_put(opp);

	return min_freq;
}
static unsigned long find_available_max_freq(struct devfreq *devfreq)
{
	struct dev_pm_opp *opp;
	unsigned long max_freq = ULONG_MAX;

	opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
	if (IS_ERR(opp))
		max_freq = 0;
	else
		dev_pm_opp_put(opp);

	return max_freq;
}
/**
 * get_freq_range() - Get the current freq range
 * @devfreq:	the devfreq instance
 * @min_freq:	the min frequency
 * @max_freq:	the max frequency
 *
 * This takes into consideration all constraints.
 */
static void get_freq_range(struct devfreq *devfreq,
			   unsigned long *min_freq,
			   unsigned long *max_freq)
{
	unsigned long *freq_table = devfreq->profile->freq_table;
	s32 qos_min_freq, qos_max_freq;

	lockdep_assert_held(&devfreq->lock);

	/*
	 * Initialize minimum/maximum frequency from freq table.
	 * The devfreq drivers can initialize this in either ascending or
	 * descending order and devfreq core supports both.
	 */
	if (freq_table[0] < freq_table[devfreq->profile->max_state - 1]) {
		*min_freq = freq_table[0];
		*max_freq = freq_table[devfreq->profile->max_state - 1];
	} else {
		*min_freq = freq_table[devfreq->profile->max_state - 1];
		*max_freq = freq_table[0];
	}

	/* Apply constraints from PM QoS */
	qos_min_freq = dev_pm_qos_read_value(devfreq->dev.parent,
					     DEV_PM_QOS_MIN_FREQUENCY);
	qos_max_freq = dev_pm_qos_read_value(devfreq->dev.parent,
					     DEV_PM_QOS_MAX_FREQUENCY);
	*min_freq = max(*min_freq, (unsigned long)HZ_PER_KHZ * qos_min_freq);
	if (qos_max_freq != PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE)
		*max_freq = min(*max_freq,
				(unsigned long)HZ_PER_KHZ * qos_max_freq);

	/* Apply constraints from OPP interface */
	*min_freq = max(*min_freq, devfreq->scaling_min_freq);
	*max_freq = min(*max_freq, devfreq->scaling_max_freq);

	if (*min_freq > *max_freq)
		*min_freq = *max_freq;
}
/**
 * devfreq_get_freq_level() - Lookup freq_table for the frequency
 * @devfreq:	the devfreq instance
 * @freq:	the target frequency
 */
static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
{
	int lev;

	for (lev = 0; lev < devfreq->profile->max_state; lev++)
		if (freq == devfreq->profile->freq_table[lev])
			return lev;

	return -EINVAL;
}
static int set_freq_table(struct devfreq *devfreq)
{
	struct devfreq_dev_profile *profile = devfreq->profile;
	struct dev_pm_opp *opp;
	unsigned long freq;
	int i, count;

	/* Initialize the freq_table from OPP table */
	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
	if (count <= 0)
		return -EINVAL;

	profile->max_state = count;
	profile->freq_table = devm_kcalloc(devfreq->dev.parent,
					profile->max_state,
					sizeof(*profile->freq_table),
					GFP_KERNEL);
	if (!profile->freq_table) {
		profile->max_state = 0;
		return -ENOMEM;
	}

	for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
		if (IS_ERR(opp)) {
			devm_kfree(devfreq->dev.parent, profile->freq_table);
			profile->max_state = 0;
			return PTR_ERR(opp);
		}
		dev_pm_opp_put(opp);
		profile->freq_table[i] = freq;
	}

	return 0;
}
/**
 * devfreq_update_status() - Update statistics of devfreq behavior
 * @devfreq:	the devfreq instance
 * @freq:	the update target frequency
 */
int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
	int lev, prev_lev, ret = 0;
	u64 cur_time;

	lockdep_assert_held(&devfreq->lock);
	cur_time = get_jiffies_64();

	/* Immediately exit if previous_freq is not initialized yet. */
	if (!devfreq->previous_freq)
		goto out;

	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
	if (prev_lev < 0) {
		ret = prev_lev;
		goto out;
	}

	devfreq->stats.time_in_state[prev_lev] +=
			cur_time - devfreq->stats.last_update;

	lev = devfreq_get_freq_level(devfreq, freq);
	if (lev < 0) {
		ret = lev;
		goto out;
	}

	if (lev != prev_lev) {
		devfreq->stats.trans_table[
			(prev_lev * devfreq->profile->max_state) + lev]++;
		devfreq->stats.total_trans++;
	}

out:
	devfreq->stats.last_update = cur_time;
	return ret;
}
EXPORT_SYMBOL(devfreq_update_status);
/**
 * find_devfreq_governor() - find devfreq governor from name
 * @name:	name of the governor
 *
 * Search the list of devfreq governors and return the matched
 * governor's pointer. devfreq_list_lock should be held by the caller.
 */
static struct devfreq_governor *find_devfreq_governor(const char *name)
{
	struct devfreq_governor *tmp_governor;

	if (IS_ERR_OR_NULL(name)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
			return tmp_governor;
	}

	return ERR_PTR(-ENODEV);
}
/**
 * try_then_request_governor() - Try to find the governor and request the
 *				 module if it is not found.
 * @name:	name of the governor
 *
 * Search the list of devfreq governors and, if it is not found, request the
 * module and try again. This can happen when both drivers (the governor
 * driver and the driver that calls devfreq_add_device) are built as modules.
 * devfreq_list_lock should be held by the caller. Returns the matched
 * governor's pointer or an error pointer.
 */
static struct devfreq_governor *try_then_request_governor(const char *name)
{
	struct devfreq_governor *governor;
	int err = 0;

	if (IS_ERR_OR_NULL(name)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	governor = find_devfreq_governor(name);
	if (IS_ERR(governor)) {
		mutex_unlock(&devfreq_list_lock);

		if (!strncmp(name, DEVFREQ_GOV_SIMPLE_ONDEMAND,
			     DEVFREQ_NAME_LEN))
			err = request_module("governor_%s", "simpleondemand");
		else
			err = request_module("governor_%s", name);
		/* Restore previous state before return */
		mutex_lock(&devfreq_list_lock);
		if (err)
			return (err < 0) ? ERR_PTR(err) : ERR_PTR(-EINVAL);

		governor = find_devfreq_governor(name);
	}

	return governor;
}
static int devfreq_notify_transition(struct devfreq *devfreq,
		struct devfreq_freqs *freqs, unsigned int state)
{
	if (!devfreq)
		return -EINVAL;

	switch (state) {
	case DEVFREQ_PRECHANGE:
		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
				DEVFREQ_PRECHANGE, freqs);
		break;

	case DEVFREQ_POSTCHANGE:
		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
				DEVFREQ_POSTCHANGE, freqs);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
			      u32 flags)
{
	struct devfreq_freqs freqs;
	unsigned long cur_freq;
	int err = 0;

	if (devfreq->profile->get_cur_freq)
		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
	else
		cur_freq = devfreq->previous_freq;

	freqs.old = cur_freq;
	freqs.new = new_freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);

	err = devfreq->profile->target(devfreq->dev.parent, &new_freq, flags);
	if (err) {
		freqs.new = cur_freq;
		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
		return err;
	}

	freqs.new = new_freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);

	if (devfreq_update_status(devfreq, new_freq))
		dev_err(&devfreq->dev,
			"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = new_freq;

	if (devfreq->suspend_freq)
		devfreq->resume_freq = cur_freq;

	return err;
}
/* Load monitoring helper functions for governors use */

/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq:	the devfreq instance.
 *
 * Note: Lock devfreq->lock before calling update_devfreq
 *	 This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	unsigned long freq, min_freq, max_freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	if (!devfreq->governor)
		return -EINVAL;

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;
	get_freq_range(devfreq, &min_freq, &max_freq);

	if (freq < min_freq) {
		freq = min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (freq > max_freq) {
		freq = max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	return devfreq_set_target(devfreq, freq, flags);
}
EXPORT_SYMBOL(update_devfreq);
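
/*
 * Example (illustrative sketch, not part of the original file): a governor
 * that wants to force a re-evaluation outside of the periodic work item is
 * expected to take devfreq->lock around update_devfreq(), as the comment
 * above requires. The helper name below is hypothetical.
 *
 *	static void my_gov_kick(struct devfreq *devfreq)
 *	{
 *		mutex_lock(&devfreq->lock);
 *		update_devfreq(devfreq);
 *		mutex_unlock(&devfreq->lock);
 *	}
 */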
/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work:	the work struct used to run devfreq_monitor periodically.
 */
static void devfreq_monitor(struct work_struct *work)
{
	int err;
	struct devfreq *devfreq = container_of(work,
					struct devfreq, work.work);

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	if (err)
		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);

	queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	mutex_unlock(&devfreq->lock);

	trace_devfreq_monitor(devfreq);
}
/**
 * devfreq_monitor_start() - Start load monitoring of devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function for starting devfreq device load monitoring. By
 * default delayed work based monitoring is supported. Function
 * to be called from governor in response to DEVFREQ_GOV_START
 * event when device is added to devfreq framework.
 */
void devfreq_monitor_start(struct devfreq *devfreq)
{
	if (devfreq->governor->interrupt_driven)
		return;

	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
	if (devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
}
EXPORT_SYMBOL(devfreq_monitor_start);
/**
 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to stop devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_STOP
 * event when device is removed from devfreq framework.
 */
void devfreq_monitor_stop(struct devfreq *devfreq)
{
	if (devfreq->governor->interrupt_driven)
		return;

	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);
/**
 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to suspend devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
 * event or when polling interval is set to zero.
 *
 * Note: Though this function is the same as devfreq_monitor_stop(), it is
 * intentionally kept separate to provide hooks for collecting
 * transition statistics.
 */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	if (devfreq->stop_polling) {
		mutex_unlock(&devfreq->lock);
		return;
	}

	devfreq_update_status(devfreq, devfreq->previous_freq);
	devfreq->stop_polling = true;
	mutex_unlock(&devfreq->lock);

	if (devfreq->governor->interrupt_driven)
		return;

	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);
/**
 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to resume devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_RESUME
 * event or when polling interval is set to non-zero.
 */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	unsigned long freq;

	mutex_lock(&devfreq->lock);
	if (!devfreq->stop_polling)
		goto out;

	if (devfreq->governor->interrupt_driven)
		goto out_update;

	if (!delayed_work_pending(&devfreq->work) &&
			devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));

out_update:
	devfreq->stats.last_update = get_jiffies_64();
	devfreq->stop_polling = false;

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		devfreq->previous_freq = freq;

out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_resume);
/**
 * devfreq_update_interval() - Update device devfreq monitoring interval
 * @devfreq:	the devfreq instance.
 * @delay:	new polling interval to be set.
 *
 * Helper function to set new load monitoring polling interval. Function
 * to be called from governor in response to DEVFREQ_GOV_UPDATE_INTERVAL event.
 */
void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	if (devfreq->stop_polling)
		goto out;

	if (devfreq->governor->interrupt_driven)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_update_interval);
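
/*
 * Example (illustrative sketch, not part of the original file): a simple
 * polling governor typically maps the governor events onto the monitoring
 * helpers above. Names prefixed with "my_" are hypothetical.
 *
 *	static int my_gov_event_handler(struct devfreq *devfreq,
 *					unsigned int event, void *data)
 *	{
 *		switch (event) {
 *		case DEVFREQ_GOV_START:
 *			devfreq_monitor_start(devfreq);
 *			break;
 *		case DEVFREQ_GOV_STOP:
 *			devfreq_monitor_stop(devfreq);
 *			break;
 *		case DEVFREQ_GOV_UPDATE_INTERVAL:
 *			devfreq_update_interval(devfreq, (unsigned int *)data);
 *			break;
 *		case DEVFREQ_GOV_SUSPEND:
 *			devfreq_monitor_suspend(devfreq);
 *			break;
 *		case DEVFREQ_GOV_RESUME:
 *			devfreq_monitor_resume(devfreq);
 *			break;
 *		}
 *		return 0;
 *	}
 */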
/**
 * devfreq_notifier_call() - Notify that the device frequency requirements
 *			     have been changed outside of the devfreq framework.
 * @nb:		the notifier_block (supposed to be devfreq->nb)
 * @type:	not used
 * @devp:	not used
 *
 * Called by a notifier that uses devfreq->nb.
 */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int err = -EINVAL;

	mutex_lock(&devfreq->lock);

	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
	if (!devfreq->scaling_min_freq)
		goto out;

	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
	if (!devfreq->scaling_max_freq) {
		devfreq->scaling_max_freq = ULONG_MAX;
		goto out;
	}

	err = update_devfreq(devfreq);

out:
	mutex_unlock(&devfreq->lock);
	if (err)
		dev_err(devfreq->dev.parent,
			"failed to update frequency from OPP notifier (%d)\n",
			err);

	return NOTIFY_OK;
}
/**
 * qos_notifier_call() - Common handler for QoS constraints.
 * @devfreq:	the devfreq instance.
 */
static int qos_notifier_call(struct devfreq *devfreq)
{
	int err;

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);
	if (err)
		dev_err(devfreq->dev.parent,
			"failed to update frequency from PM QoS (%d)\n",
			err);

	return NOTIFY_OK;
}

/**
 * qos_min_notifier_call() - Callback for QoS min_freq changes.
 * @nb:		Should be devfreq->nb_min
 */
static int qos_min_notifier_call(struct notifier_block *nb,
				 unsigned long val, void *ptr)
{
	return qos_notifier_call(container_of(nb, struct devfreq, nb_min));
}

/**
 * qos_max_notifier_call() - Callback for QoS max_freq changes.
 * @nb:		Should be devfreq->nb_max
 */
static int qos_max_notifier_call(struct notifier_block *nb,
				 unsigned long val, void *ptr)
{
	return qos_notifier_call(container_of(nb, struct devfreq, nb_max));
}
/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev:	the devfreq device
 *
 * Remove devfreq from the list and release its resources.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);
	int err;

	mutex_lock(&devfreq_list_lock);
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_max,
					 DEV_PM_QOS_MAX_FREQUENCY);
	if (err && err != -ENOENT)
		dev_warn(dev->parent,
			"Failed to remove max_freq notifier: %d\n", err);
	err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_min,
					 DEV_PM_QOS_MIN_FREQUENCY);
	if (err && err != -ENOENT)
		dev_warn(dev->parent,
			"Failed to remove min_freq notifier: %d\n", err);

	if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) {
		err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req);
		if (err < 0)
			dev_warn(dev->parent,
				"Failed to remove max_freq request: %d\n", err);
	}
	if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) {
		err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req);
		if (err < 0)
			dev_warn(dev->parent,
				"Failed to remove min_freq request: %d\n", err);
	}

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}
/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor_name:	name of the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const char *governor_name,
				   void *data)
{
	struct devfreq *devfreq;
	struct devfreq_governor *governor;
	int err = 0;

	if (!dev || !profile || !governor_name) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&devfreq_list_lock);
	devfreq = find_device_devfreq(dev);
	mutex_unlock(&devfreq_list_lock);
	if (!IS_ERR(devfreq)) {
		dev_err(dev, "%s: devfreq device already exists!\n",
			__func__);
		err = -EINVAL;
		goto err_out;
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	INIT_LIST_HEAD(&devfreq->node);
	devfreq->profile = profile;
	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
	devfreq->previous_freq = profile->initial_freq;
	devfreq->last_status.current_frequency = profile->initial_freq;
	devfreq->data = data;
	devfreq->nb.notifier_call = devfreq_notifier_call;

	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
		mutex_unlock(&devfreq->lock);
		err = set_freq_table(devfreq);
		if (err < 0)
			goto err_dev;
		mutex_lock(&devfreq->lock);
	}

	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
	if (!devfreq->scaling_min_freq) {
		mutex_unlock(&devfreq->lock);
		err = -EINVAL;
		goto err_dev;
	}

	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
	if (!devfreq->scaling_max_freq) {
		mutex_unlock(&devfreq->lock);
		err = -EINVAL;
		goto err_dev;
	}

	devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
	atomic_set(&devfreq->suspend_count, 0);

	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
	err = device_register(&devfreq->dev);
	if (err) {
		mutex_unlock(&devfreq->lock);
		put_device(&devfreq->dev);
		goto err_out;
	}

	devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev,
			array3_size(sizeof(unsigned int),
				    devfreq->profile->max_state,
				    devfreq->profile->max_state),
			GFP_KERNEL);
	if (!devfreq->stats.trans_table) {
		mutex_unlock(&devfreq->lock);
		err = -ENOMEM;
		goto err_devfreq;
	}

	devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev,
			devfreq->profile->max_state,
			sizeof(*devfreq->stats.time_in_state),
			GFP_KERNEL);
	if (!devfreq->stats.time_in_state) {
		mutex_unlock(&devfreq->lock);
		err = -ENOMEM;
		goto err_devfreq;
	}

	devfreq->stats.total_trans = 0;
	devfreq->stats.last_update = get_jiffies_64();

	srcu_init_notifier_head(&devfreq->transition_notifier_list);

	mutex_unlock(&devfreq->lock);

	err = dev_pm_qos_add_request(dev, &devfreq->user_min_freq_req,
				     DEV_PM_QOS_MIN_FREQUENCY, 0);
	if (err < 0)
		goto err_devfreq;
	err = dev_pm_qos_add_request(dev, &devfreq->user_max_freq_req,
				     DEV_PM_QOS_MAX_FREQUENCY,
				     PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
	if (err < 0)
		goto err_devfreq;

	devfreq->nb_min.notifier_call = qos_min_notifier_call;
	err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_min,
				      DEV_PM_QOS_MIN_FREQUENCY);
	if (err)
		goto err_devfreq;

	devfreq->nb_max.notifier_call = qos_max_notifier_call;
	err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_max,
				      DEV_PM_QOS_MAX_FREQUENCY);
	if (err)
		goto err_devfreq;

	mutex_lock(&devfreq_list_lock);

	governor = try_then_request_governor(devfreq->governor_name);
	if (IS_ERR(governor)) {
		dev_err(dev, "%s: Unable to find governor for the device\n",
			__func__);
		err = PTR_ERR(governor);
		goto err_init;
	}

	devfreq->governor = governor;
	err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
						NULL);
	if (err) {
		dev_err(dev, "%s: Unable to start governor for the device\n",
			__func__);
		goto err_init;
	}

	list_add(&devfreq->node, &devfreq_list);

	mutex_unlock(&devfreq_list_lock);

	return devfreq;

err_init:
	mutex_unlock(&devfreq_list_lock);
err_devfreq:
	devfreq_remove_device(devfreq);
	devfreq = NULL;
err_dev:
	kfree(devfreq);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(devfreq_add_device);
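
/*
 * Example (illustrative sketch, not part of the original file): a device
 * driver usually registers with devfreq from its probe() path by filling a
 * struct devfreq_dev_profile and calling devfreq_add_device(). The names
 * "my_target", "my_get_dev_status" and "my_pdev_probe" are hypothetical.
 *
 *	static struct devfreq_dev_profile my_profile = {
 *		.initial_freq	= 400000000,
 *		.polling_ms	= 100,
 *		.target		= my_target,
 *		.get_dev_status	= my_get_dev_status,
 *	};
 *
 *	static int my_pdev_probe(struct platform_device *pdev)
 *	{
 *		struct devfreq *devfreq;
 *
 *		devfreq = devfreq_add_device(&pdev->dev, &my_profile,
 *					     DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
 *		if (IS_ERR(devfreq))
 *			return PTR_ERR(devfreq);
 *		return 0;
 *	}
 */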
/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq:	the devfreq instance to be removed
 *
 * The opposite of devfreq_add_device().
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (devfreq->governor)
		devfreq->governor->event_handler(devfreq,
						 DEVFREQ_GOV_STOP, NULL);
	device_unregister(&devfreq->dev);

	return 0;
}
EXPORT_SYMBOL(devfreq_remove_device);
static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
{
	struct devfreq **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

static void devm_devfreq_dev_release(struct device *dev, void *res)
{
	devfreq_remove_device(*(struct devfreq **)res);
}

/**
 * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor_name:	name of the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 *
 * This function automatically manages the memory of the devfreq device using
 * device resource management and simplifies freeing that memory.
 */
struct devfreq *devm_devfreq_add_device(struct device *dev,
					struct devfreq_dev_profile *profile,
					const char *governor_name,
					void *data)
{
	struct devfreq **ptr, *devfreq;

	ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	devfreq = devfreq_add_device(dev, profile, governor_name, data);
	if (IS_ERR(devfreq)) {
		devres_free(ptr);
		return devfreq;
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return devfreq;
}
EXPORT_SYMBOL(devm_devfreq_add_device);
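
/*
 * Example (illustrative sketch, not part of the original file): the devm_
 * variant is used the same way as devfreq_add_device() above, but the
 * devfreq instance is removed automatically when the parent device is
 * unbound, so no explicit devfreq_remove_device() call is needed:
 *
 *	devfreq = devm_devfreq_add_device(&pdev->dev, &my_profile,
 *					  DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
 */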
#ifdef CONFIG_OF
/*
 * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
 * @dev - instance to the given device
 * @index - index into list of devfreq
 *
 * return the instance of devfreq device
 */
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	struct device_node *node;
	struct devfreq *devfreq;

	if (!dev)
		return ERR_PTR(-EINVAL);

	if (!dev->of_node)
		return ERR_PTR(-EINVAL);

	node = of_parse_phandle(dev->of_node, "devfreq", index);
	if (!node)
		return ERR_PTR(-ENODEV);

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(devfreq, &devfreq_list, node) {
		if (devfreq->dev.parent
			&& devfreq->dev.parent->of_node == node) {
			mutex_unlock(&devfreq_list_lock);
			of_node_put(node);
			return devfreq;
		}
	}
	mutex_unlock(&devfreq_list_lock);
	of_node_put(node);

	return ERR_PTR(-EPROBE_DEFER);
}
#else
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
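
/*
 * Example (illustrative sketch, not part of the original file): a consumer
 * driver whose device tree node carries a "devfreq" phandle property can
 * look up the referenced devfreq instance like this; -EPROBE_DEFER simply
 * means the provider has not been registered yet.
 *
 *	parent_devfreq = devfreq_get_devfreq_by_phandle(&pdev->dev, 0);
 *	if (IS_ERR(parent_devfreq))
 *		return PTR_ERR(parent_devfreq);
 */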
/**
 * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
 * @dev:	the device from which to remove devfreq feature.
 * @devfreq:	the devfreq instance to be removed
 */
void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_dev_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_remove_device);
/**
 * devfreq_suspend_device() - Suspend devfreq of a device.
 * @devfreq:	the devfreq instance to be suspended
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_suspend, suspend) of the device driver that
 * holds the devfreq.
 */
int devfreq_suspend_device(struct devfreq *devfreq)
{
	int ret;

	if (!devfreq)
		return -EINVAL;

	if (atomic_inc_return(&devfreq->suspend_count) > 1)
		return 0;

	if (devfreq->governor) {
		ret = devfreq->governor->event_handler(devfreq,
					DEVFREQ_GOV_SUSPEND, NULL);
		if (ret)
			return ret;
	}

	if (devfreq->suspend_freq) {
		mutex_lock(&devfreq->lock);
		ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
		mutex_unlock(&devfreq->lock);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(devfreq_suspend_device);
/**
 * devfreq_resume_device() - Resume devfreq of a device.
 * @devfreq:	the devfreq instance to be resumed
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_resume, resume) of the device driver that
 * holds the devfreq.
 */
int devfreq_resume_device(struct devfreq *devfreq)
{
	int ret;

	if (!devfreq)
		return -EINVAL;

	if (atomic_dec_return(&devfreq->suspend_count) >= 1)
		return 0;

	if (devfreq->resume_freq) {
		mutex_lock(&devfreq->lock);
		ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
		mutex_unlock(&devfreq->lock);
		if (ret)
			return ret;
	}

	if (devfreq->governor) {
		ret = devfreq->governor->event_handler(devfreq,
					DEVFREQ_GOV_RESUME, NULL);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(devfreq_resume_device);
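
/*
 * Example (illustrative sketch, not part of the original file): the driver
 * that owns the devfreq instance typically wires these helpers into its
 * runtime PM callbacks. "struct my_dev" and the callback names are
 * hypothetical.
 *
 *	static int my_runtime_suspend(struct device *dev)
 *	{
 *		struct my_dev *priv = dev_get_drvdata(dev);
 *
 *		return devfreq_suspend_device(priv->devfreq);
 *	}
 *
 *	static int my_runtime_resume(struct device *dev)
 *	{
 *		struct my_dev *priv = dev_get_drvdata(dev);
 *
 *		return devfreq_resume_device(priv->devfreq);
 *	}
 */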
/**
 * devfreq_suspend() - Suspend devfreq governors and devices
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors
 * and devices preserving the state for resume. On some platforms the devfreq
 * device must have precise state (frequency) after resume in order to provide
 * fully operating setup.
 */
void devfreq_suspend(void)
{
	struct devfreq *devfreq;
	int ret;

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(devfreq, &devfreq_list, node) {
		ret = devfreq_suspend_device(devfreq);
		if (ret)
			dev_err(&devfreq->dev,
				"failed to suspend devfreq device\n");
	}
	mutex_unlock(&devfreq_list_lock);
}
/**
 * devfreq_resume() - Resume devfreq governors and devices
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors and
 * devices that are suspended with devfreq_suspend().
 */
void devfreq_resume(void)
{
	struct devfreq *devfreq;
	int ret;

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(devfreq, &devfreq_list, node) {
		ret = devfreq_resume_device(devfreq);
		if (ret)
			dev_warn(&devfreq->dev,
				 "failed to resume devfreq device\n");
	}
	mutex_unlock(&devfreq_list_lock);
}
/**
 * devfreq_add_governor() - Add devfreq governor
 * @governor:	the devfreq governor to be added
 */
int devfreq_add_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (!IS_ERR(g)) {
		pr_err("%s: governor %s already registered\n", __func__,
		       g->name);
		err = -EINVAL;
		goto err_out;
	}

	list_add(&governor->node, &devfreq_governor_list);

	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret = 0;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* The following should never occur */
			if (devfreq->governor) {
				dev_warn(dev,
					 "%s: Governor %s already present\n",
					 __func__, devfreq->governor->name);
				ret = devfreq->governor->event_handler(devfreq,
							DEVFREQ_GOV_STOP, NULL);
				if (ret) {
					dev_warn(dev,
						 "%s: Governor %s stop = %d\n",
						 __func__,
						 devfreq->governor->name, ret);
				}
			}
			devfreq->governor = governor;
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_START, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s start=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
		}
	}

err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_add_governor);
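
/*
 * Example (illustrative sketch, not part of the original file): a governor
 * module registers itself with devfreq_add_governor() at init time and
 * removes itself on exit. The governor structure shown here is hypothetical;
 * see the in-tree governors for complete implementations.
 *
 *	static struct devfreq_governor my_governor = {
 *		.name		 = "my_governor",
 *		.get_target_freq = my_gov_get_target_freq,
 *		.event_handler	 = my_gov_event_handler,
 *	};
 *
 *	static int __init my_gov_init(void)
 *	{
 *		return devfreq_add_governor(&my_governor);
 *	}
 *	subsys_initcall(my_gov_init);
 */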
/**
 * devfreq_remove_governor() - Remove devfreq governor
 * @governor:	the devfreq governor to be removed
 */
int devfreq_remove_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (IS_ERR(g)) {
		pr_err("%s: governor %s not registered\n", __func__,
		       governor->name);
		err = PTR_ERR(g);
		goto err_out;
	}

	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* we should have a devfreq governor! */
			if (!devfreq->governor) {
				dev_warn(dev, "%s: Governor %s NOT present\n",
					 __func__, governor->name);
				continue;
			}
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_STOP, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s stop=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
			devfreq->governor = NULL;
		}
	}

	list_del(&governor->node);
err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_remove_governor);
static ssize_t name_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent));
}
static DEVICE_ATTR_RO(name);
static ssize_t governor_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	if (!to_devfreq(dev)->governor)
		return -EINVAL;

	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}

static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	int ret;
	char str_governor[DEVFREQ_NAME_LEN + 1];
	const struct devfreq_governor *governor, *prev_governor;

	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&devfreq_list_lock);
	governor = try_then_request_governor(str_governor);
	if (IS_ERR(governor)) {
		ret = PTR_ERR(governor);
		goto out;
	}
	if (df->governor == governor) {
		ret = 0;
		goto out;
	} else if ((df->governor && df->governor->immutable) ||
					governor->immutable) {
		ret = -EINVAL;
		goto out;
	}

	ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
	if (ret) {
		dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
			 __func__, df->governor->name, ret);
		goto out;
	}

	prev_governor = df->governor;
	df->governor = governor;
	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
	if (ret) {
		dev_warn(dev, "%s: Governor %s not started(%d)\n",
			 __func__, df->governor->name, ret);
		df->governor = prev_governor;
		strncpy(df->governor_name, prev_governor->name,
			DEVFREQ_NAME_LEN);
		ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
		if (ret) {
			dev_err(dev,
				"%s: reverting to Governor %s failed (%d)\n",
				__func__, df->governor_name, ret);
			df->governor = NULL;
		}
	}
out:
	mutex_unlock(&devfreq_list_lock);

	if (!ret)
		ret = count;
	return ret;
}
static DEVICE_ATTR_RW(governor);
static ssize_t available_governors_show(struct device *d,
					struct device_attribute *attr,
					char *buf)
{
	struct devfreq *df = to_devfreq(d);
	ssize_t count = 0;

	mutex_lock(&devfreq_list_lock);

	/*
	 * A devfreq with an immutable governor (e.g., passive) shows
	 * only its own governor.
	 */
	if (df->governor && df->governor->immutable) {
		count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
				  "%s ", df->governor_name);
	/*
	 * Otherwise the devfreq device shows the registered governors,
	 * except for immutable governors such as the passive governor.
	 */
	} else {
		struct devfreq_governor *governor;

		list_for_each_entry(governor, &devfreq_governor_list, node) {
			if (governor->immutable)
				continue;
			count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
					   "%s ", governor->name);
		}
	}

	mutex_unlock(&devfreq_list_lock);

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_governors);
static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	unsigned long freq;
	struct devfreq *devfreq = to_devfreq(dev);

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		return sprintf(buf, "%lu\n", freq);

	return sprintf(buf, "%lu\n", devfreq->previous_freq);
}
static DEVICE_ATTR_RO(cur_freq);

static ssize_t target_freq_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}
static DEVICE_ATTR_RO(target_freq);
static ssize_t polling_interval_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}

static ssize_t polling_interval_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned int value;
	int ret;

	if (!df->governor)
		return -EINVAL;

	ret = sscanf(buf, "%u", &value);
	if (ret != 1)
		return -EINVAL;

	df->governor->event_handler(df, DEVFREQ_GOV_UPDATE_INTERVAL, &value);
	ret = count;

	return ret;
}
static DEVICE_ATTR_RW(polling_interval);
static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;

	/*
	 * Protect against theoretical sysfs writes between
	 * device_add and dev_pm_qos_add_request
	 */
	if (!dev_pm_qos_request_active(&df->user_min_freq_req))
		return -EAGAIN;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	/* Round down to kHz for PM QoS */
	ret = dev_pm_qos_update_request(&df->user_min_freq_req,
					value / HZ_PER_KHZ);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long min_freq, max_freq;

	mutex_lock(&df->lock);
	get_freq_range(df, &min_freq, &max_freq);
	mutex_unlock(&df->lock);

	return sprintf(buf, "%lu\n", min_freq);
}
static DEVICE_ATTR_RW(min_freq);
static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;

	/*
	 * Protect against theoretical sysfs writes between
	 * device_add and dev_pm_qos_add_request
	 */
	if (!dev_pm_qos_request_active(&df->user_max_freq_req))
		return -EINVAL;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	/*
	 * PM QoS frequencies are in kHz so we need to convert. Convert by
	 * rounding upwards so that the acceptable interval never shrinks.
	 *
	 * For example if the user writes "666666666" to sysfs this value will
	 * be converted to 666667 kHz and back to 666667000 Hz before an OPP
	 * lookup, this ensures that an OPP of 666666666Hz is still accepted.
	 *
	 * A value of zero means "no limit".
	 */
	if (value)
		value = DIV_ROUND_UP(value, HZ_PER_KHZ);
	else
		value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;

	ret = dev_pm_qos_update_request(&df->user_max_freq_req, value);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long min_freq, max_freq;

	mutex_lock(&df->lock);
	get_freq_range(df, &min_freq, &max_freq);
	mutex_unlock(&df->lock);

	return sprintf(buf, "%lu\n", max_freq);
}
static DEVICE_ATTR_RW(max_freq);
static ssize_t available_frequencies_show(struct device *d,
					  struct device_attribute *attr,
					  char *buf)
{
	struct devfreq *df = to_devfreq(d);
	ssize_t count = 0;
	int i;

	mutex_lock(&df->lock);

	for (i = 0; i < df->profile->max_state; i++)
		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				"%lu ", df->profile->freq_table[i]);

	mutex_unlock(&df->lock);
	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_frequencies);
static ssize_t trans_stat_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	ssize_t len;
	int i, j;
	unsigned int max_state = devfreq->profile->max_state;

	if (max_state == 0)
		return sprintf(buf, "Not Supported.\n");

	mutex_lock(&devfreq->lock);
	if (!devfreq->stop_polling &&
			devfreq_update_status(devfreq, devfreq->previous_freq)) {
		mutex_unlock(&devfreq->lock);
		return 0;
	}
	mutex_unlock(&devfreq->lock);

	len = sprintf(buf, "     From  :   To\n");
	len += sprintf(buf + len, "           :");
	for (i = 0; i < max_state; i++)
		len += sprintf(buf + len, "%10lu",
				devfreq->profile->freq_table[i]);

	len += sprintf(buf + len, "   time(ms)\n");

	for (i = 0; i < max_state; i++) {
		if (devfreq->profile->freq_table[i]
					== devfreq->previous_freq) {
			len += sprintf(buf + len, "*");
		} else {
			len += sprintf(buf + len, " ");
		}
		len += sprintf(buf + len, "%10lu:",
				devfreq->profile->freq_table[i]);
		for (j = 0; j < max_state; j++)
			len += sprintf(buf + len, "%10u",
				devfreq->stats.trans_table[(i * max_state) + j]);

		len += sprintf(buf + len, "%10llu\n", (u64)
			jiffies64_to_msecs(devfreq->stats.time_in_state[i]));
	}

	len += sprintf(buf + len, "Total transition : %u\n",
					devfreq->stats.total_trans);
	return len;
}

static ssize_t trans_stat_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	int err, value;

	if (df->profile->max_state == 0)
		return count;

	err = kstrtoint(buf, 10, &value);
	if (err || value != 0)
		return -EINVAL;

	mutex_lock(&df->lock);
	memset(df->stats.time_in_state, 0, (df->profile->max_state *
					sizeof(*df->stats.time_in_state)));
	memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int),
					df->profile->max_state,
					df->profile->max_state));
	df->stats.total_trans = 0;
	df->stats.last_update = get_jiffies_64();
	mutex_unlock(&df->lock);

	return count;
}
static DEVICE_ATTR_RW(trans_stat);
static struct attribute *devfreq_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_polling_interval.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devfreq);
/**
 * devfreq_summary_show() - Show the summary of the devfreq devices
 * @s:		seq_file instance to show the summary of devfreq devices
 * @data:	not used
 *
 * Show the summary of the devfreq devices via 'devfreq_summary' debugfs file.
 * It helps users see detailed information about the devfreq devices.
 *
 * Return 0 always because it shows the information without any data change.
 */
static int devfreq_summary_show(struct seq_file *s, void *data)
{
	struct devfreq *devfreq;
	struct devfreq *p_devfreq = NULL;
	unsigned long cur_freq, min_freq, max_freq;
	unsigned int polling_ms;

	seq_printf(s, "%-30s %-10s %-10s %-15s %10s %12s %12s %12s\n",
			"dev_name",
			"dev",
			"parent_dev",
			"governor",
			"polling_ms",
			"cur_freq_Hz",
			"min_freq_Hz",
			"max_freq_Hz");
	seq_printf(s, "%30s %10s %10s %15s %10s %12s %12s %12s\n",
			"------------------------------",
			"----------",
			"----------",
			"---------------",
			"----------",
			"------------",
			"------------",
			"------------");

	mutex_lock(&devfreq_list_lock);

	list_for_each_entry_reverse(devfreq, &devfreq_list, node) {
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
		if (!strncmp(devfreq->governor_name, DEVFREQ_GOV_PASSIVE,
							DEVFREQ_NAME_LEN)) {
			struct devfreq_passive_data *data = devfreq->data;

			if (data)
				p_devfreq = data->parent;
		} else {
			p_devfreq = NULL;
		}
#endif

		mutex_lock(&devfreq->lock);
		cur_freq = devfreq->previous_freq;
		get_freq_range(devfreq, &min_freq, &max_freq);
		polling_ms = devfreq->profile->polling_ms;
		mutex_unlock(&devfreq->lock);

		seq_printf(s,
			"%-30s %-10s %-10s %-15s %10d %12ld %12ld %12ld\n",
			dev_name(devfreq->dev.parent),
			dev_name(&devfreq->dev),
			p_devfreq ? dev_name(&p_devfreq->dev) : "null",
			devfreq->governor_name,
			polling_ms,
			cur_freq,
			min_freq,
			max_freq);
	}

	mutex_unlock(&devfreq_list_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(devfreq_summary);
static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}

	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	if (!devfreq_wq) {
		class_destroy(devfreq_class);
		pr_err("%s: couldn't create workqueue\n", __FILE__);
		return -ENOMEM;
	}
	devfreq_class->dev_groups = devfreq_groups;

	devfreq_debugfs = debugfs_create_dir("devfreq", NULL);
	debugfs_create_file("devfreq_summary", 0444,
				devfreq_debugfs, NULL,
				&devfreq_summary_fops);

	return 0;
}
subsys_initcall(devfreq_init);
/*
 * The following are helper functions for devfreq user device drivers with
 * OPP framework.
 */

/**
 * devfreq_recommended_opp() - Helper function to get the proper OPP for the
 *			       freq value given to the target callback.
 * @dev:	The devfreq user device. (parent of devfreq)
 * @freq:	The frequency given to target function
 * @flags:	Flags handed from devfreq framework.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
					   unsigned long *freq,
					   u32 flags)
{
	struct dev_pm_opp *opp;

	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
		/* The freq is an upper bound. opp should be lower */
		opp = dev_pm_opp_find_freq_floor(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_ceil(dev, freq);
	} else {
		/* The freq is a lower bound. opp should be higher */
		opp = dev_pm_opp_find_freq_ceil(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_floor(dev, freq);
	}

	return opp;
}
EXPORT_SYMBOL(devfreq_recommended_opp);
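
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * devfreq_dev_profile->target() callback uses this helper to map the
 * requested frequency onto a valid OPP before programming the hardware.
 * "my_set_rate" is a hypothetical hardware-specific helper.
 *
 *	static int my_target(struct device *dev, unsigned long *freq, u32 flags)
 *	{
 *		struct dev_pm_opp *opp;
 *
 *		opp = devfreq_recommended_opp(dev, freq, flags);
 *		if (IS_ERR(opp))
 *			return PTR_ERR(opp);
 *		dev_pm_opp_put(opp);
 *
 *		return my_set_rate(dev, *freq);
 *	}
 */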
/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *				     of any changes in the OPP availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	return dev_pm_opp_register_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_register_opp_notifier);

/**
 * devfreq_unregister_opp_notifier() - Helper function to stop devfreq from
 *				       being notified of any changes in the
 *				       OPP availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 *
 * This must be called from the exit() callback of devfreq_dev_profile if
 * devfreq_recommended_opp() is used.
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_unregister_opp_notifier);
static void devm_devfreq_opp_release(struct device *dev, void *res)
{
	devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
}

/**
 * devm_devfreq_register_opp_notifier() - Resource-managed
 *					  devfreq_register_opp_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devm_devfreq_register_opp_notifier(struct device *dev,
				       struct devfreq *devfreq)
{
	struct devfreq **ptr;
	int ret;

	ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_opp_notifier(dev, devfreq);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);

/**
 * devm_devfreq_unregister_opp_notifier() - Resource-managed
 *					    devfreq_unregister_opp_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
void devm_devfreq_unregister_opp_notifier(struct device *dev,
					  struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
/**
 * devfreq_register_notifier() - Register a driver with devfreq
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to register.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devfreq_register_notifier(struct devfreq *devfreq,
			      struct notifier_block *nb,
			      unsigned int list)
{
	int ret = 0;

	if (!devfreq)
		return -EINVAL;

	switch (list) {
	case DEVFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&devfreq->transition_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(devfreq_register_notifier);
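
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * interested in frequency transitions registers a notifier block whose
 * callback receives a struct devfreq_freqs with the old and new frequency.
 * "my_trans_notifier" and "my_nb" are hypothetical.
 *
 *	static int my_trans_notifier(struct notifier_block *nb,
 *				     unsigned long event, void *ptr)
 *	{
 *		struct devfreq_freqs *freqs = ptr;
 *
 *		if (event == DEVFREQ_POSTCHANGE)
 *			pr_debug("%lu -> %lu\n", freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	devfreq_register_notifier(devfreq, &my_nb, DEVFREQ_TRANSITION_NOTIFIER);
 */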
/*
 * devfreq_unregister_notifier() - Unregister a driver with devfreq
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be unregistered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devfreq_unregister_notifier(struct devfreq *devfreq,
				struct notifier_block *nb,
				unsigned int list)
{
	int ret = 0;

	if (!devfreq)
		return -EINVAL;

	switch (list) {
	case DEVFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&devfreq->transition_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(devfreq_unregister_notifier);
struct devfreq_notifier_devres {
	struct devfreq *devfreq;
	struct notifier_block *nb;
	unsigned int list;
};

static void devm_devfreq_notifier_release(struct device *dev, void *res)
{
	struct devfreq_notifier_devres *this = res;

	devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
}

/**
 * devm_devfreq_register_notifier()
 *	- Resource-managed devfreq_register_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be registered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devm_devfreq_register_notifier(struct device *dev,
				   struct devfreq *devfreq,
				   struct notifier_block *nb,
				   unsigned int list)
{
	struct devfreq_notifier_devres *ptr;
	int ret;

	ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
				GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_notifier(devfreq, nb, list);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	ptr->devfreq = devfreq;
	ptr->nb = nb;
	ptr->list = list;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_notifier);

/**
 * devm_devfreq_unregister_notifier()
 *	- Resource-managed devfreq_unregister_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be unregistered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
void devm_devfreq_unregister_notifier(struct device *dev,
				      struct devfreq *devfreq,
				      struct notifier_block *nb,
				      unsigned int list)
{
	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_notifier);