// SPDX-License-Identifier: GPL-2.0-only
/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 */
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/of.h>
#include <linux/pm_qos.h>
#include "governor.h"

#define CREATE_TRACE_POINTS
#include <trace/events/devfreq.h>
#define HZ_PER_KHZ	1000

static struct class *devfreq_class;
static struct dentry *devfreq_debugfs;

/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
static struct workqueue_struct *devfreq_wq;

/* The list of all device-devfreq governors */
static LIST_HEAD(devfreq_governor_list);
/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);
/**
 * find_device_devfreq() - find devfreq struct using device pointer
 * @dev:	device pointer used to lookup device devfreq.
 *
 * Search the list of device devfreqs and return the matched device's
 * devfreq info. devfreq_list_lock should be held by the caller.
 */
static struct devfreq *find_device_devfreq(struct device *dev)
{
	struct devfreq *tmp_devfreq;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
		if (tmp_devfreq->dev.parent == dev)
			return tmp_devfreq;
	}

	return ERR_PTR(-ENODEV);
}
static unsigned long find_available_min_freq(struct devfreq *devfreq)
{
	struct dev_pm_opp *opp;
	unsigned long min_freq = 0;

	opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
	if (IS_ERR(opp))
		min_freq = 0;
	else
		dev_pm_opp_put(opp);

	return min_freq;
}
static unsigned long find_available_max_freq(struct devfreq *devfreq)
{
	struct dev_pm_opp *opp;
	unsigned long max_freq = ULONG_MAX;

	opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
	if (IS_ERR(opp))
		max_freq = 0;
	else
		dev_pm_opp_put(opp);

	return max_freq;
}
/**
 * get_freq_range() - Get the current freq range
 * @devfreq:	the devfreq instance
 * @min_freq:	the min frequency
 * @max_freq:	the max frequency
 *
 * This takes into consideration all constraints.
 */
static void get_freq_range(struct devfreq *devfreq,
			   unsigned long *min_freq,
			   unsigned long *max_freq)
{
	unsigned long *freq_table = devfreq->profile->freq_table;
	s32 qos_min_freq, qos_max_freq;

	lockdep_assert_held(&devfreq->lock);

	/*
	 * Initialize minimum/maximum frequency from freq table.
	 * The devfreq drivers can initialize this in either ascending or
	 * descending order and devfreq core supports both.
	 */
	if (freq_table[0] < freq_table[devfreq->profile->max_state - 1]) {
		*min_freq = freq_table[0];
		*max_freq = freq_table[devfreq->profile->max_state - 1];
	} else {
		*min_freq = freq_table[devfreq->profile->max_state - 1];
		*max_freq = freq_table[0];
	}

	/* Apply constraints from PM QoS */
	qos_min_freq = dev_pm_qos_read_value(devfreq->dev.parent,
					     DEV_PM_QOS_MIN_FREQUENCY);
	qos_max_freq = dev_pm_qos_read_value(devfreq->dev.parent,
					     DEV_PM_QOS_MAX_FREQUENCY);
	*min_freq = max(*min_freq, (unsigned long)HZ_PER_KHZ * qos_min_freq);
	if (qos_max_freq != PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE)
		*max_freq = min(*max_freq,
				(unsigned long)HZ_PER_KHZ * qos_max_freq);

	/* Apply constraints from OPP interface */
	*min_freq = max(*min_freq, devfreq->scaling_min_freq);
	*max_freq = min(*max_freq, devfreq->scaling_max_freq);

	if (*min_freq > *max_freq)
		*min_freq = *max_freq;
}
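/*
 * Worked example (illustrative only, the numbers are hypothetical): with a
 * freq_table of { 200 MHz, 400 MHz, 600 MHz, 800 MHz }, a PM QoS
 * MIN_FREQUENCY request of 400000 kHz and scaling_max_freq of 600 MHz,
 * get_freq_range() returns *min_freq = 400 MHz and *max_freq = 600 MHz.
 * If the constraints ever cross (min > max), the range collapses onto the
 * max constraint.
 */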
/**
 * devfreq_get_freq_level() - Lookup freq_table for the frequency
 * @devfreq:	the devfreq instance
 * @freq:	the target frequency
 */
static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
{
	int lev;

	for (lev = 0; lev < devfreq->profile->max_state; lev++)
		if (freq == devfreq->profile->freq_table[lev])
			return lev;

	return -EINVAL;
}
static int set_freq_table(struct devfreq *devfreq)
{
	struct devfreq_dev_profile *profile = devfreq->profile;
	struct dev_pm_opp *opp;
	unsigned long freq;
	int i, count;

	/* Initialize the freq_table from OPP table */
	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
	if (count <= 0)
		return -EINVAL;

	profile->max_state = count;
	profile->freq_table = devm_kcalloc(devfreq->dev.parent,
					profile->max_state,
					sizeof(*profile->freq_table),
					GFP_KERNEL);
	if (!profile->freq_table) {
		profile->max_state = 0;
		return -ENOMEM;
	}

	for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
		if (IS_ERR(opp)) {
			devm_kfree(devfreq->dev.parent, profile->freq_table);
			profile->max_state = 0;
			return PTR_ERR(opp);
		}
		dev_pm_opp_put(opp);
		profile->freq_table[i] = freq;
	}

	return 0;
}
/**
 * devfreq_update_status() - Update statistics of devfreq behavior
 * @devfreq:	the devfreq instance
 * @freq:	the update target frequency
 */
int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
	int lev, prev_lev, ret = 0;
	u64 cur_time;

	lockdep_assert_held(&devfreq->lock);
	cur_time = get_jiffies_64();

	/* Immediately exit if previous_freq is not initialized yet. */
	if (!devfreq->previous_freq)
		goto out;

	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
	if (prev_lev < 0) {
		ret = prev_lev;
		goto out;
	}

	devfreq->stats.time_in_state[prev_lev] +=
			cur_time - devfreq->stats.last_update;

	lev = devfreq_get_freq_level(devfreq, freq);
	if (lev < 0) {
		ret = lev;
		goto out;
	}

	if (lev != prev_lev) {
		devfreq->stats.trans_table[
			(prev_lev * devfreq->profile->max_state) + lev]++;
		devfreq->stats.total_trans++;
	}

out:
	devfreq->stats.last_update = cur_time;

	return ret;
}
EXPORT_SYMBOL(devfreq_update_status);
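/*
 * Illustrative sketch (not part of this file): for a hypothetical device with
 * max_state == 3, the statistics updated above are kept in a flattened,
 * row-major 3x3 matrix, so a switch from level 1 to level 2 is counted as:
 *
 *	unsigned int max_state = 3;
 *	unsigned int prev_lev = 1, lev = 2;
 *
 *	devfreq->stats.trans_table[(prev_lev * max_state) + lev]++;
 *	// i.e. element [1][2] of the transition matrix
 */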
/**
 * find_devfreq_governor() - find devfreq governor from name
 * @name:	name of the governor
 *
 * Search the list of devfreq governors and return the matched
 * governor's pointer. devfreq_list_lock should be held by the caller.
 */
static struct devfreq_governor *find_devfreq_governor(const char *name)
{
	struct devfreq_governor *tmp_governor;

	if (IS_ERR_OR_NULL(name)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
			return tmp_governor;
	}

	return ERR_PTR(-ENODEV);
}
/**
 * try_then_request_governor() - Try to find the governor and request the
 *				 module if is not found.
 * @name:	name of the governor
 *
 * Search the list of devfreq governors and request the module and try again
 * if is not found. This can happen when both drivers (the governor driver
 * and the driver that calls devfreq_add_device) are built as modules.
 * devfreq_list_lock should be held by the caller. Returns the matched
 * governor's pointer or an error pointer.
 */
static struct devfreq_governor *try_then_request_governor(const char *name)
{
	struct devfreq_governor *governor;
	int err = 0;

	if (IS_ERR_OR_NULL(name)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	governor = find_devfreq_governor(name);
	if (IS_ERR(governor)) {
		mutex_unlock(&devfreq_list_lock);

		if (!strncmp(name, DEVFREQ_GOV_SIMPLE_ONDEMAND,
			     DEVFREQ_NAME_LEN))
			err = request_module("governor_%s", "simpleondemand");
		else
			err = request_module("governor_%s", name);
		/* Restore previous state before return */
		mutex_lock(&devfreq_list_lock);
		if (err)
			return (err < 0) ? ERR_PTR(err) : ERR_PTR(-EINVAL);

		governor = find_devfreq_governor(name);
	}

	return governor;
}
static int devfreq_notify_transition(struct devfreq *devfreq,
		struct devfreq_freqs *freqs, unsigned int state)
{
	if (!devfreq)
		return -EINVAL;

	switch (state) {
	case DEVFREQ_PRECHANGE:
		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
				DEVFREQ_PRECHANGE, freqs);
		break;

	case DEVFREQ_POSTCHANGE:
		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
				DEVFREQ_POSTCHANGE, freqs);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
			      u32 flags)
{
	struct devfreq_freqs freqs;
	unsigned long cur_freq;
	int err = 0;

	if (devfreq->profile->get_cur_freq)
		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
	else
		cur_freq = devfreq->previous_freq;

	freqs.old = cur_freq;
	freqs.new = new_freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);

	err = devfreq->profile->target(devfreq->dev.parent, &new_freq, flags);
	if (err) {
		freqs.new = cur_freq;
		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
		return err;
	}

	freqs.new = new_freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);

	if (devfreq_update_status(devfreq, new_freq))
		dev_err(&devfreq->dev,
			"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = new_freq;

	if (devfreq->suspend_freq)
		devfreq->resume_freq = cur_freq;

	return err;
}
/* Load monitoring helper functions for governors use */

/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq:	the devfreq instance.
 *
 * Note: Lock devfreq->lock before calling update_devfreq
 *	 This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	unsigned long freq, min_freq, max_freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	if (!devfreq->governor)
		return -EINVAL;

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;
	get_freq_range(devfreq, &min_freq, &max_freq);

	if (freq < min_freq) {
		freq = min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (freq > max_freq) {
		freq = max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	return devfreq_set_target(devfreq, freq, flags);
}
EXPORT_SYMBOL(update_devfreq);
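/*
 * Illustrative sketch (not part of this file): a minimal governor only has to
 * provide get_target_freq() and an event_handler(); update_devfreq() then
 * clamps the proposed frequency to the current range and calls
 * devfreq_set_target(). The callback below mirrors what a performance-style
 * governor does and relies on update_devfreq() for the clamping:
 *
 *	static int sample_get_target_freq(struct devfreq *df,
 *					  unsigned long *freq)
 *	{
 *		*freq = DEVFREQ_MAX_FREQ;	// clamped by update_devfreq()
 *		return 0;
 *	}
 */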
/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work:	the work struct used to run devfreq_monitor periodically.
 */
static void devfreq_monitor(struct work_struct *work)
{
	int err;
	struct devfreq *devfreq = container_of(work,
					struct devfreq, work.work);

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	if (err)
		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);

	queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	mutex_unlock(&devfreq->lock);

	trace_devfreq_monitor(devfreq);
}
/**
 * devfreq_monitor_start() - Start load monitoring of devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function for starting devfreq device load monitoring. By
 * default delayed work based monitoring is supported. Function
 * to be called from governor in response to DEVFREQ_GOV_START
 * event when device is added to devfreq framework.
 */
void devfreq_monitor_start(struct devfreq *devfreq)
{
	if (devfreq->governor->interrupt_driven)
		return;

	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
	if (devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
}
EXPORT_SYMBOL(devfreq_monitor_start);
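/*
 * Illustrative sketch (not part of this file): a polling governor typically
 * wires the monitoring helpers into its event_handler(). The "sample"
 * governor below is hypothetical and error handling is simplified:
 *
 *	static int sample_governor_event_handler(struct devfreq *devfreq,
 *						 unsigned int event, void *data)
 *	{
 *		switch (event) {
 *		case DEVFREQ_GOV_START:
 *			devfreq_monitor_start(devfreq);
 *			break;
 *		case DEVFREQ_GOV_STOP:
 *			devfreq_monitor_stop(devfreq);
 *			break;
 *		case DEVFREQ_GOV_INTERVAL:
 *			devfreq_interval_update(devfreq, (unsigned int *)data);
 *			break;
 *		case DEVFREQ_GOV_SUSPEND:
 *			devfreq_monitor_suspend(devfreq);
 *			break;
 *		case DEVFREQ_GOV_RESUME:
 *			devfreq_monitor_resume(devfreq);
 *			break;
 *		default:
 *			break;
 *		}
 *		return 0;
 *	}
 */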
/**
 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to stop devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_STOP
 * event when device is removed from devfreq framework.
 */
void devfreq_monitor_stop(struct devfreq *devfreq)
{
	if (devfreq->governor->interrupt_driven)
		return;

	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);
/**
 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to suspend devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
 * event or when polling interval is set to zero.
 *
 * Note: Though this function is same as devfreq_monitor_stop(),
 * intentionally kept separate to provide hooks for collecting
 * transition statistics.
 */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	if (devfreq->stop_polling) {
		mutex_unlock(&devfreq->lock);
		return;
	}

	devfreq_update_status(devfreq, devfreq->previous_freq);
	devfreq->stop_polling = true;
	mutex_unlock(&devfreq->lock);

	if (devfreq->governor->interrupt_driven)
		return;

	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);
/**
 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to resume devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_RESUME
 * event or when polling interval is set to non-zero.
 */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	unsigned long freq;

	mutex_lock(&devfreq->lock);
	if (!devfreq->stop_polling)
		goto out;

	if (devfreq->governor->interrupt_driven)
		goto out_update;

	if (!delayed_work_pending(&devfreq->work) &&
			devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));

out_update:
	devfreq->stats.last_update = get_jiffies_64();
	devfreq->stop_polling = false;

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		devfreq->previous_freq = freq;

out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_resume);
/**
 * devfreq_interval_update() - Update device devfreq monitoring interval
 * @devfreq:	the devfreq instance.
 * @delay:	new polling interval to be set.
 *
 * Helper function to set new load monitoring polling interval. Function
 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
 */
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	if (devfreq->stop_polling)
		goto out;

	if (devfreq->governor->interrupt_driven)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_interval_update);
/**
 * devfreq_notifier_call() - Notify that the device frequency requirements
 *			     have been changed out of devfreq framework.
 * @nb:		the notifier_block (supposed to be devfreq->nb)
 * @type:	not used
 * @devp:	not used
 *
 * Called by a notifier that uses devfreq->nb.
 */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int err = -EINVAL;

	mutex_lock(&devfreq->lock);

	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
	if (!devfreq->scaling_min_freq)
		goto out;

	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
	if (!devfreq->scaling_max_freq) {
		devfreq->scaling_max_freq = ULONG_MAX;
		goto out;
	}

	err = update_devfreq(devfreq);

out:
	mutex_unlock(&devfreq->lock);
	if (err)
		dev_err(devfreq->dev.parent,
			"failed to update frequency from OPP notifier (%d)\n",
			err);

	return NOTIFY_OK;
}
/**
 * qos_notifier_call() - Common handler for QoS constraints.
 * @devfreq:	the devfreq instance.
 */
static int qos_notifier_call(struct devfreq *devfreq)
{
	int err;

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);
	if (err)
		dev_err(devfreq->dev.parent,
			"failed to update frequency from PM QoS (%d)\n",
			err);

	return NOTIFY_OK;
}
/**
 * qos_min_notifier_call() - Callback for QoS min_freq changes.
 * @nb:		Should be devfreq->nb_min
 */
static int qos_min_notifier_call(struct notifier_block *nb,
				 unsigned long val, void *ptr)
{
	return qos_notifier_call(container_of(nb, struct devfreq, nb_min));
}
/**
 * qos_max_notifier_call() - Callback for QoS max_freq changes.
 * @nb:		Should be devfreq->nb_max
 */
static int qos_max_notifier_call(struct notifier_block *nb,
				 unsigned long val, void *ptr)
{
	return qos_notifier_call(container_of(nb, struct devfreq, nb_max));
}
/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev:	the devfreq device
 *
 * Remove devfreq from the list and release its resources.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);
	int err;

	mutex_lock(&devfreq_list_lock);
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_max,
					 DEV_PM_QOS_MAX_FREQUENCY);
	if (err && err != -ENOENT)
		dev_warn(dev->parent,
			"Failed to remove max_freq notifier: %d\n", err);
	err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_min,
					 DEV_PM_QOS_MIN_FREQUENCY);
	if (err && err != -ENOENT)
		dev_warn(dev->parent,
			"Failed to remove min_freq notifier: %d\n", err);

	if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) {
		err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req);
		if (err < 0)
			dev_warn(dev->parent,
				"Failed to remove max_freq request: %d\n", err);
	}
	if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) {
		err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req);
		if (err < 0)
			dev_warn(dev->parent,
				"Failed to remove min_freq request: %d\n", err);
	}

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}
/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor_name:	name of the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const char *governor_name,
				   void *data)
{
	struct devfreq *devfreq;
	struct devfreq_governor *governor;
	static atomic_t devfreq_no = ATOMIC_INIT(-1);
	int err = 0;

	if (!dev || !profile || !governor_name) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&devfreq_list_lock);
	devfreq = find_device_devfreq(dev);
	mutex_unlock(&devfreq_list_lock);
	if (!IS_ERR(devfreq)) {
		dev_err(dev, "%s: devfreq device already exists!\n",
			__func__);
		err = -EINVAL;
		goto err_out;
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	INIT_LIST_HEAD(&devfreq->node);
	devfreq->profile = profile;
	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
	devfreq->previous_freq = profile->initial_freq;
	devfreq->last_status.current_frequency = profile->initial_freq;
	devfreq->data = data;
	devfreq->nb.notifier_call = devfreq_notifier_call;

	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
		mutex_unlock(&devfreq->lock);
		err = set_freq_table(devfreq);
		if (err < 0)
			goto err_dev;
		mutex_lock(&devfreq->lock);
	}

	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
	if (!devfreq->scaling_min_freq) {
		mutex_unlock(&devfreq->lock);
		err = -EINVAL;
		goto err_dev;
	}

	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
	if (!devfreq->scaling_max_freq) {
		mutex_unlock(&devfreq->lock);
		err = -EINVAL;
		goto err_dev;
	}

	devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
	atomic_set(&devfreq->suspend_count, 0);

	dev_set_name(&devfreq->dev, "devfreq%d",
				atomic_inc_return(&devfreq_no));
	err = device_register(&devfreq->dev);
	if (err) {
		mutex_unlock(&devfreq->lock);
		put_device(&devfreq->dev);
		goto err_out;
	}

	devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev,
			array3_size(sizeof(unsigned int),
				    devfreq->profile->max_state,
				    devfreq->profile->max_state),
			GFP_KERNEL);
	if (!devfreq->stats.trans_table) {
		mutex_unlock(&devfreq->lock);
		err = -ENOMEM;
		goto err_devfreq;
	}

	devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev,
			devfreq->profile->max_state,
			sizeof(*devfreq->stats.time_in_state),
			GFP_KERNEL);
	if (!devfreq->stats.time_in_state) {
		mutex_unlock(&devfreq->lock);
		err = -ENOMEM;
		goto err_devfreq;
	}

	devfreq->stats.total_trans = 0;
	devfreq->stats.last_update = get_jiffies_64();

	srcu_init_notifier_head(&devfreq->transition_notifier_list);

	mutex_unlock(&devfreq->lock);

	err = dev_pm_qos_add_request(dev, &devfreq->user_min_freq_req,
				     DEV_PM_QOS_MIN_FREQUENCY, 0);
	if (err < 0)
		goto err_devfreq;
	err = dev_pm_qos_add_request(dev, &devfreq->user_max_freq_req,
				     DEV_PM_QOS_MAX_FREQUENCY,
				     PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
	if (err < 0)
		goto err_devfreq;

	devfreq->nb_min.notifier_call = qos_min_notifier_call;
	err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_min,
				      DEV_PM_QOS_MIN_FREQUENCY);
	if (err)
		goto err_devfreq;

	devfreq->nb_max.notifier_call = qos_max_notifier_call;
	err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_max,
				      DEV_PM_QOS_MAX_FREQUENCY);
	if (err)
		goto err_devfreq;

	mutex_lock(&devfreq_list_lock);

	governor = try_then_request_governor(devfreq->governor_name);
	if (IS_ERR(governor)) {
		dev_err(dev, "%s: Unable to find governor for the device\n",
			__func__);
		err = PTR_ERR(governor);
		goto err_init;
	}

	devfreq->governor = governor;
	err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
						NULL);
	if (err) {
		dev_err(dev, "%s: Unable to start governor for the device\n",
			__func__);
		goto err_init;
	}

	list_add(&devfreq->node, &devfreq_list);

	mutex_unlock(&devfreq_list_lock);

	return devfreq;

err_init:
	mutex_unlock(&devfreq_list_lock);
err_devfreq:
	devfreq_remove_device(devfreq);
	devfreq = NULL;
err_dev:
	kfree(devfreq);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(devfreq_add_device);
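/*
 * Illustrative sketch (not part of this file): a device driver usually fills
 * in a struct devfreq_dev_profile and registers it from probe(). The names
 * "foo", foo_target() and foo_get_dev_status() below are hypothetical:
 *
 *	static struct devfreq_dev_profile foo_profile = {
 *		.initial_freq	= 400000000,
 *		.polling_ms	= 100,
 *		.target		= foo_target,
 *		.get_dev_status	= foo_get_dev_status,
 *	};
 *
 *	foo->devfreq = devfreq_add_device(dev, &foo_profile,
 *					  DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
 *	if (IS_ERR(foo->devfreq))
 *		return PTR_ERR(foo->devfreq);
 *
 * The devm_devfreq_add_device() wrapper below does the same but removes the
 * devfreq instance automatically when the driver is unbound.
 */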
/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq:	the devfreq instance to be removed
 *
 * The opposite of devfreq_add_device().
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (devfreq->governor)
		devfreq->governor->event_handler(devfreq,
						 DEVFREQ_GOV_STOP, NULL);
	device_unregister(&devfreq->dev);

	return 0;
}
EXPORT_SYMBOL(devfreq_remove_device);
static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
{
	struct devfreq **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

static void devm_devfreq_dev_release(struct device *dev, void *res)
{
	devfreq_remove_device(*(struct devfreq **)res);
}
/**
 * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor_name:	name of the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 *
 * This function manages the memory of the devfreq device automatically using
 * device resource management and simplifies the freeing of the devfreq
 * device.
 */
struct devfreq *devm_devfreq_add_device(struct device *dev,
					struct devfreq_dev_profile *profile,
					const char *governor_name,
					void *data)
{
	struct devfreq **ptr, *devfreq;

	ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	devfreq = devfreq_add_device(dev, profile, governor_name, data);
	if (IS_ERR(devfreq)) {
		devres_free(ptr);
		return devfreq;
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return devfreq;
}
EXPORT_SYMBOL(devm_devfreq_add_device);
#if IS_ENABLED(CONFIG_OF)
/*
 * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
 * @dev:	instance to the given device
 * @index:	index into list of devfreq
 *
 * Return the instance of devfreq device.
 */
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	struct device_node *node;
	struct devfreq *devfreq;

	if (!dev)
		return ERR_PTR(-EINVAL);

	if (!dev->of_node)
		return ERR_PTR(-EINVAL);

	node = of_parse_phandle(dev->of_node, "devfreq", index);
	if (!node)
		return ERR_PTR(-ENODEV);

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(devfreq, &devfreq_list, node) {
		if (devfreq->dev.parent
			&& devfreq->dev.parent->of_node == node) {
			mutex_unlock(&devfreq_list_lock);
			of_node_put(node);
			return devfreq;
		}
	}
	mutex_unlock(&devfreq_list_lock);
	of_node_put(node);

	return ERR_PTR(-EPROBE_DEFER);
}
#else
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
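/*
 * Illustrative sketch (not part of this file): a consumer whose device tree
 * node carries a "devfreq = <&bus>" phandle property (a hypothetical binding)
 * can defer its probe until the referenced devfreq device is registered:
 *
 *	struct devfreq *parent_devfreq;
 *
 *	parent_devfreq = devfreq_get_devfreq_by_phandle(dev, 0);
 *	if (IS_ERR(parent_devfreq))
 *		return PTR_ERR(parent_devfreq);	// may be -EPROBE_DEFER
 */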
/**
 * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
 * @dev:	the device from which to remove devfreq feature.
 * @devfreq:	the devfreq instance to be removed
 */
void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_dev_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_remove_device);
/**
 * devfreq_suspend_device() - Suspend devfreq of a device.
 * @devfreq:	the devfreq instance to be suspended
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_suspend, suspend) of the device driver that
 * holds the devfreq.
 */
int devfreq_suspend_device(struct devfreq *devfreq)
{
	int ret;

	if (!devfreq)
		return -EINVAL;

	if (atomic_inc_return(&devfreq->suspend_count) > 1)
		return 0;

	if (devfreq->governor) {
		ret = devfreq->governor->event_handler(devfreq,
					DEVFREQ_GOV_SUSPEND, NULL);
		if (ret)
			return ret;
	}

	if (devfreq->suspend_freq) {
		mutex_lock(&devfreq->lock);
		ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
		mutex_unlock(&devfreq->lock);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(devfreq_suspend_device);
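/*
 * Illustrative sketch (not part of this file): a driver that owns a devfreq
 * instance typically brackets its PM callbacks with this helper and its
 * counterpart below. The foo_runtime_suspend()/foo_runtime_resume() names
 * are hypothetical:
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return devfreq_suspend_device(foo->devfreq);
 *	}
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return devfreq_resume_device(foo->devfreq);
 *	}
 */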
/**
 * devfreq_resume_device() - Resume devfreq of a device.
 * @devfreq:	the devfreq instance to be resumed
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_resume, resume) of the device driver that
 * holds the devfreq.
 */
int devfreq_resume_device(struct devfreq *devfreq)
{
	int ret;

	if (!devfreq)
		return -EINVAL;

	if (atomic_dec_return(&devfreq->suspend_count) >= 1)
		return 0;

	if (devfreq->resume_freq) {
		mutex_lock(&devfreq->lock);
		ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
		mutex_unlock(&devfreq->lock);
		if (ret)
			return ret;
	}

	if (devfreq->governor) {
		ret = devfreq->governor->event_handler(devfreq,
					DEVFREQ_GOV_RESUME, NULL);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(devfreq_resume_device);
/**
 * devfreq_suspend() - Suspend devfreq governors and devices
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors
 * and devices preserving the state for resume. On some platforms the devfreq
 * device must have precise state (frequency) after resume in order to provide
 * fully operating setup.
 */
void devfreq_suspend(void)
{
	struct devfreq *devfreq;
	int ret;

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(devfreq, &devfreq_list, node) {
		ret = devfreq_suspend_device(devfreq);
		if (ret)
			dev_err(&devfreq->dev,
				"failed to suspend devfreq device\n");
	}
	mutex_unlock(&devfreq_list_lock);
}
/**
 * devfreq_resume() - Resume devfreq governors and devices
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors and
 * devices that are suspended with devfreq_suspend().
 */
void devfreq_resume(void)
{
	struct devfreq *devfreq;
	int ret;

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(devfreq, &devfreq_list, node) {
		ret = devfreq_resume_device(devfreq);
		if (ret)
			dev_warn(&devfreq->dev,
				 "failed to resume devfreq device\n");
	}
	mutex_unlock(&devfreq_list_lock);
}
/**
 * devfreq_add_governor() - Add devfreq governor
 * @governor:	the devfreq governor to be added
 */
int devfreq_add_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (!IS_ERR(g)) {
		pr_err("%s: governor %s already registered\n", __func__,
		       g->name);
		err = -EINVAL;
		goto err_out;
	}

	list_add(&governor->node, &devfreq_governor_list);

	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret = 0;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* The following should never occur */
			if (devfreq->governor) {
				dev_warn(dev,
					 "%s: Governor %s already present\n",
					 __func__, devfreq->governor->name);
				ret = devfreq->governor->event_handler(devfreq,
							DEVFREQ_GOV_STOP, NULL);
				if (ret) {
					dev_warn(dev,
						 "%s: Governor %s stop = %d\n",
						 __func__,
						 devfreq->governor->name, ret);
				}
				/* Fall through */
			}
			devfreq->governor = governor;
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_START, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s start=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
		}
	}

err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_add_governor);
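/*
 * Illustrative sketch (not part of this file): governor modules register
 * themselves with the core from their init hook, e.g. a hypothetical
 * "sample" governor reusing the callbacks sketched earlier in this file:
 *
 *	static struct devfreq_governor devfreq_sample = {
 *		.name		 = "sample",
 *		.get_target_freq = sample_get_target_freq,
 *		.event_handler	 = sample_governor_event_handler,
 *	};
 *
 *	static int __init devfreq_sample_init(void)
 *	{
 *		return devfreq_add_governor(&devfreq_sample);
 *	}
 *	subsys_initcall(devfreq_sample_init);
 */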
/**
 * devfreq_remove_governor() - Remove devfreq governor
 * @governor:	the devfreq governor to be removed
 */
int devfreq_remove_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (IS_ERR(g)) {
		pr_err("%s: governor %s not registered\n", __func__,
		       governor->name);
		err = PTR_ERR(g);
		goto err_out;
	}
	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* we should have a devfreq governor! */
			if (!devfreq->governor) {
				dev_warn(dev, "%s: Governor %s NOT present\n",
					 __func__, governor->name);
				continue;
				/* Fall through */
			}
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_STOP, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s stop=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
			devfreq->governor = NULL;
		}
	}

	list_del(&governor->node);
err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_remove_governor);
static ssize_t name_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent));
}
static DEVICE_ATTR_RO(name);
static ssize_t governor_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	if (!to_devfreq(dev)->governor)
		return -EINVAL;

	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}
static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	int ret;
	char str_governor[DEVFREQ_NAME_LEN + 1];
	const struct devfreq_governor *governor, *prev_governor;

	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&devfreq_list_lock);
	governor = try_then_request_governor(str_governor);
	if (IS_ERR(governor)) {
		ret = PTR_ERR(governor);
		goto out;
	}
	if (df->governor == governor) {
		ret = 0;
		goto out;
	} else if ((df->governor && df->governor->immutable) ||
					governor->immutable) {
		ret = -EINVAL;
		goto out;
	}

	if (df->governor) {
		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
		if (ret) {
			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
				 __func__, df->governor->name, ret);
			goto out;
		}
	}
	prev_governor = df->governor;
	df->governor = governor;
	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
	if (ret) {
		dev_warn(dev, "%s: Governor %s not started(%d)\n",
			 __func__, df->governor->name, ret);
		df->governor = prev_governor;
		strncpy(df->governor_name, prev_governor->name,
			DEVFREQ_NAME_LEN);
		ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
		if (ret) {
			dev_err(dev,
				"%s: reverting to Governor %s failed (%d)\n",
				__func__, df->governor_name, ret);
			df->governor = NULL;
		}
	}
out:
	mutex_unlock(&devfreq_list_lock);

	if (!ret)
		ret = count;
	return ret;
}
static DEVICE_ATTR_RW(governor);
static ssize_t available_governors_show(struct device *d,
					struct device_attribute *attr,
					char *buf)
{
	struct devfreq *df = to_devfreq(d);
	ssize_t count = 0;

	mutex_lock(&devfreq_list_lock);

	/*
	 * The devfreq with immutable governor (e.g., passive) shows
	 * only own governor.
	 */
	if (df->governor && df->governor->immutable) {
		count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
				  "%s ", df->governor_name);
	/*
	 * The devfreq device shows the registered governors except for
	 * immutable governors such as the passive governor.
	 */
	} else {
		struct devfreq_governor *governor;

		list_for_each_entry(governor, &devfreq_governor_list, node) {
			if (governor->immutable)
				continue;
			count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
					   "%s ", governor->name);
		}
	}

	mutex_unlock(&devfreq_list_lock);

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_governors);
static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	unsigned long freq;
	struct devfreq *devfreq = to_devfreq(dev);

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		return sprintf(buf, "%lu\n", freq);

	return sprintf(buf, "%lu\n", devfreq->previous_freq);
}
static DEVICE_ATTR_RO(cur_freq);
static ssize_t target_freq_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}
static DEVICE_ATTR_RO(target_freq);
static ssize_t polling_interval_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}

static ssize_t polling_interval_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned int value;
	int ret;

	if (!df->governor)
		return -EINVAL;

	ret = sscanf(buf, "%u", &value);
	if (ret != 1)
		return -EINVAL;

	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
	ret = count;

	return ret;
}
static DEVICE_ATTR_RW(polling_interval);
static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;

	/*
	 * Protect against theoretical sysfs writes between
	 * device_add and dev_pm_qos_add_request
	 */
	if (!dev_pm_qos_request_active(&df->user_min_freq_req))
		return -EAGAIN;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	/* Round down to kHz for PM QoS */
	ret = dev_pm_qos_update_request(&df->user_min_freq_req,
					value / HZ_PER_KHZ);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long min_freq, max_freq;

	mutex_lock(&df->lock);
	get_freq_range(df, &min_freq, &max_freq);
	mutex_unlock(&df->lock);

	return sprintf(buf, "%lu\n", min_freq);
}
static DEVICE_ATTR_RW(min_freq);
static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;

	/*
	 * Protect against theoretical sysfs writes between
	 * device_add and dev_pm_qos_add_request
	 */
	if (!dev_pm_qos_request_active(&df->user_max_freq_req))
		return -EAGAIN;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	/*
	 * PM QoS frequencies are in kHz so we need to convert. Convert by
	 * rounding upwards so that the acceptable interval never shrinks.
	 *
	 * For example if the user writes "666666666" to sysfs this value will
	 * be converted to 666667 kHz and back to 666667000 Hz before an OPP
	 * lookup, this ensures that an OPP of 666666666Hz is still accepted.
	 *
	 * A value of zero means "no limit".
	 */
	if (value)
		value = DIV_ROUND_UP(value, HZ_PER_KHZ);
	else
		value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;

	ret = dev_pm_qos_update_request(&df->user_max_freq_req, value);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long min_freq, max_freq;

	mutex_lock(&df->lock);
	get_freq_range(df, &min_freq, &max_freq);
	mutex_unlock(&df->lock);

	return sprintf(buf, "%lu\n", max_freq);
}
static DEVICE_ATTR_RW(max_freq);
static ssize_t available_frequencies_show(struct device *d,
					  struct device_attribute *attr,
					  char *buf)
{
	struct devfreq *df = to_devfreq(d);
	ssize_t count = 0;
	int i;

	mutex_lock(&df->lock);

	for (i = 0; i < df->profile->max_state; i++)
		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				"%lu ", df->profile->freq_table[i]);

	mutex_unlock(&df->lock);
	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_frequencies);
static ssize_t trans_stat_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	ssize_t len;
	int i, j;
	unsigned int max_state = devfreq->profile->max_state;

	if (max_state == 0)
		return sprintf(buf, "Not Supported.\n");

	mutex_lock(&devfreq->lock);
	if (!devfreq->stop_polling &&
			devfreq_update_status(devfreq, devfreq->previous_freq)) {
		mutex_unlock(&devfreq->lock);
		return 0;
	}
	mutex_unlock(&devfreq->lock);

	len = sprintf(buf, "     From  :   To\n");
	len += sprintf(buf + len, "           :");
	for (i = 0; i < max_state; i++)
		len += sprintf(buf + len, "%10lu",
				devfreq->profile->freq_table[i]);

	len += sprintf(buf + len, "   time(ms)\n");

	for (i = 0; i < max_state; i++) {
		if (devfreq->profile->freq_table[i]
					== devfreq->previous_freq) {
			len += sprintf(buf + len, "*");
		} else {
			len += sprintf(buf + len, " ");
		}
		len += sprintf(buf + len, "%10lu:",
				devfreq->profile->freq_table[i]);
		for (j = 0; j < max_state; j++)
			len += sprintf(buf + len, "%10u",
				devfreq->stats.trans_table[(i * max_state) + j]);

		len += sprintf(buf + len, "%10llu\n", (u64)
			jiffies64_to_msecs(devfreq->stats.time_in_state[i]));
	}

	len += sprintf(buf + len, "Total transition : %u\n",
					devfreq->stats.total_trans);
	return len;
}
static ssize_t trans_stat_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	int err, value;

	if (df->profile->max_state == 0)
		return count;

	err = kstrtoint(buf, 10, &value);
	if (err || value != 0)
		return -EINVAL;

	mutex_lock(&df->lock);
	memset(df->stats.time_in_state, 0, (df->profile->max_state *
					sizeof(*df->stats.time_in_state)));
	memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int),
					df->profile->max_state,
					df->profile->max_state));
	df->stats.total_trans = 0;
	df->stats.last_update = get_jiffies_64();
	mutex_unlock(&df->lock);

	return count;
}
static DEVICE_ATTR_RW(trans_stat);
static struct attribute *devfreq_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_polling_interval.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devfreq);
/**
 * devfreq_summary_show() - Show the summary of the devfreq devices
 * @s:		seq_file instance to show the summary of devfreq devices
 * @data:	not used
 *
 * Show the summary of the devfreq devices via 'devfreq_summary' debugfs file.
 * It helps users to get the detailed information of the devfreq devices.
 *
 * Return 0 always because it shows the information without any data change.
 */
static int devfreq_summary_show(struct seq_file *s, void *data)
{
	struct devfreq *devfreq;
	struct devfreq *p_devfreq = NULL;
	unsigned long cur_freq, min_freq, max_freq;
	unsigned int polling_ms;

	seq_printf(s, "%-30s %-10s %-10s %-15s %10s %12s %12s %12s\n",
			"dev_name",
			"dev",
			"parent_dev",
			"governor",
			"polling_ms",
			"cur_freq_Hz",
			"min_freq_Hz",
			"max_freq_Hz");
	seq_printf(s, "%30s %10s %10s %15s %10s %12s %12s %12s\n",
			"------------------------------",
			"----------",
			"----------",
			"---------------",
			"----------",
			"------------",
			"------------",
			"------------");

	mutex_lock(&devfreq_list_lock);

	list_for_each_entry_reverse(devfreq, &devfreq_list, node) {
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
		if (!strncmp(devfreq->governor_name, DEVFREQ_GOV_PASSIVE,
							DEVFREQ_NAME_LEN)) {
			struct devfreq_passive_data *data = devfreq->data;

			if (data)
				p_devfreq = data->parent;
		} else {
			p_devfreq = NULL;
		}
#endif

		mutex_lock(&devfreq->lock);
		cur_freq = devfreq->previous_freq;
		get_freq_range(devfreq, &min_freq, &max_freq);
		polling_ms = devfreq->profile->polling_ms;
		mutex_unlock(&devfreq->lock);

		seq_printf(s,
			"%-30s %-10s %-10s %-15s %10d %12ld %12ld %12ld\n",
			dev_name(devfreq->dev.parent),
			dev_name(&devfreq->dev),
			p_devfreq ? dev_name(&p_devfreq->dev) : "null",
			devfreq->governor_name,
			polling_ms,
			cur_freq,
			min_freq,
			max_freq);
	}

	mutex_unlock(&devfreq_list_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(devfreq_summary);
static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}

	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	if (!devfreq_wq) {
		class_destroy(devfreq_class);
		pr_err("%s: couldn't create workqueue\n", __FILE__);
		return -ENOMEM;
	}
	devfreq_class->dev_groups = devfreq_groups;

	devfreq_debugfs = debugfs_create_dir("devfreq", NULL);
	debugfs_create_file("devfreq_summary", 0444,
				devfreq_debugfs, NULL,
				&devfreq_summary_fops);

	return 0;
}
subsys_initcall(devfreq_init);
/*
 * The following are helper functions for devfreq user device drivers with
 * OPP framework.
 */

/**
 * devfreq_recommended_opp() - Helper function to get proper OPP for the
 *			       freq value given to target callback.
 * @dev:	The devfreq user device. (parent of devfreq)
 * @freq:	The frequency given to target function
 * @flags:	Flags handed from devfreq framework.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
					   unsigned long *freq,
					   u32 flags)
{
	struct dev_pm_opp *opp;

	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
		/* The freq is an upper bound. opp should be lower */
		opp = dev_pm_opp_find_freq_floor(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_ceil(dev, freq);
	} else {
		/* The freq is a lower bound. opp should be higher */
		opp = dev_pm_opp_find_freq_ceil(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_floor(dev, freq);
	}

	return opp;
}
EXPORT_SYMBOL(devfreq_recommended_opp);
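/*
 * Illustrative sketch (not part of this file): a driver's target() callback
 * normally funnels the requested frequency through devfreq_recommended_opp()
 * so the chosen OPP honours the LEAST_UPPER_BOUND flag. foo_set_rate() is a
 * hypothetical helper of such a driver:
 *
 *	static int foo_target(struct device *dev, unsigned long *freq, u32 flags)
 *	{
 *		struct dev_pm_opp *opp;
 *
 *		opp = devfreq_recommended_opp(dev, freq, flags);
 *		if (IS_ERR(opp))
 *			return PTR_ERR(opp);
 *		dev_pm_opp_put(opp);
 *
 *		return foo_set_rate(dev, *freq);
 *	}
 */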
/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *				     for any changes in the OPP availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	return dev_pm_opp_register_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_register_opp_notifier);
/**
 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
 *				       notified for any changes in the OPP
 *				       availability changes anymore.
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 *
 * At exit() callback of devfreq_dev_profile, this must be included if
 * devfreq_recommended_opp is used.
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_unregister_opp_notifier);
static void devm_devfreq_opp_release(struct device *dev, void *res)
{
	devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
}
/**
 * devm_devfreq_register_opp_notifier() - Resource-managed
 *					   devfreq_register_opp_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devm_devfreq_register_opp_notifier(struct device *dev,
				       struct devfreq *devfreq)
{
	struct devfreq **ptr;
	int ret;

	ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_opp_notifier(dev, devfreq);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
/**
 * devm_devfreq_unregister_opp_notifier() - Resource-managed
 *					     devfreq_unregister_opp_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
void devm_devfreq_unregister_opp_notifier(struct device *dev,
					  struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
/**
 * devfreq_register_notifier() - Register a driver with devfreq
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to register.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devfreq_register_notifier(struct devfreq *devfreq,
			      struct notifier_block *nb,
			      unsigned int list)
{
	int ret = 0;

	if (!devfreq)
		return -EINVAL;

	switch (list) {
	case DEVFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&devfreq->transition_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(devfreq_register_notifier);
/**
 * devfreq_unregister_notifier() - Unregister a driver with devfreq
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be unregistered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devfreq_unregister_notifier(struct devfreq *devfreq,
				struct notifier_block *nb,
				unsigned int list)
{
	int ret = 0;

	if (!devfreq)
		return -EINVAL;

	switch (list) {
	case DEVFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&devfreq->transition_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(devfreq_unregister_notifier);
struct devfreq_notifier_devres {
	struct devfreq *devfreq;
	struct notifier_block *nb;
	unsigned int list;
};

static void devm_devfreq_notifier_release(struct device *dev, void *res)
{
	struct devfreq_notifier_devres *this = res;

	devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
}
/**
 * devm_devfreq_register_notifier()
 *	- Resource-managed devfreq_register_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be registered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devm_devfreq_register_notifier(struct device *dev,
				   struct devfreq *devfreq,
				   struct notifier_block *nb,
				   unsigned int list)
{
	struct devfreq_notifier_devres *ptr;
	int ret;

	ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
				GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_notifier(devfreq, nb, list);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	ptr->devfreq = devfreq;
	ptr->nb = nb;
	ptr->list = list;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_notifier);
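/*
 * Illustrative sketch (not part of this file): a consumer interested in
 * frequency transitions registers a notifier block against
 * DEVFREQ_TRANSITION_NOTIFIER and receives a struct devfreq_freqs carrying
 * the old and new frequency. foo_transition_notifier() and foo_nb are
 * hypothetical:
 *
 *	static int foo_transition_notifier(struct notifier_block *nb,
 *					   unsigned long event, void *ptr)
 *	{
 *		struct devfreq_freqs *freqs = ptr;
 *
 *		if (event == DEVFREQ_POSTCHANGE)
 *			pr_debug("devfreq: %lu -> %lu\n",
 *				 freqs->old, freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_transition_notifier,
 *	};
 *
 *	ret = devm_devfreq_register_notifier(dev, devfreq, &foo_nb,
 *					     DEVFREQ_TRANSITION_NOTIFIER);
 */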
/**
 * devm_devfreq_unregister_notifier()
 *	- Resource-managed devfreq_unregister_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be unregistered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
void devm_devfreq_unregister_notifier(struct device *dev,
				      struct devfreq *devfreq,
				      struct notifier_block *nb,
				      unsigned int list)
{
	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_notifier);