/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 #include <linux/clk.h>
17 #include <linux/errno.h>
18 #include <linux/err.h>
19 #include <linux/slab.h>
20 #include <linux/device.h>
21 #include <linux/export.h>
22 #include <linux/pm_domain.h>
23 #include <linux/regulator/consumer.h>
/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
36 static struct opp_device
*_find_opp_dev(const struct device
*dev
,
37 struct opp_table
*opp_table
)
39 struct opp_device
*opp_dev
;
41 list_for_each_entry(opp_dev
, &opp_table
->dev_list
, node
)
42 if (opp_dev
->dev
== dev
)
48 static struct opp_table
*_find_opp_table_unlocked(struct device
*dev
)
50 struct opp_table
*opp_table
;
53 list_for_each_entry(opp_table
, &opp_tables
, node
) {
54 mutex_lock(&opp_table
->lock
);
55 found
= !!_find_opp_dev(dev
, opp_table
);
56 mutex_unlock(&opp_table
->lock
);
59 _get_opp_table_kref(opp_table
);
65 return ERR_PTR(-ENODEV
);
69 * _find_opp_table() - find opp_table struct using device pointer
70 * @dev: device pointer used to lookup OPP table
72 * Search OPP table for one containing matching device.
74 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
75 * -EINVAL based on type of error.
77 * The callers must call dev_pm_opp_put_opp_table() after the table is used.
79 struct opp_table
*_find_opp_table(struct device
*dev
)
81 struct opp_table
*opp_table
;
83 if (IS_ERR_OR_NULL(dev
)) {
84 pr_err("%s: Invalid parameters\n", __func__
);
85 return ERR_PTR(-EINVAL
);
88 mutex_lock(&opp_table_lock
);
89 opp_table
= _find_opp_table_unlocked(dev
);
90 mutex_unlock(&opp_table_lock
);
96 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
97 * @opp: opp for which voltage has to be returned for
99 * Return: voltage in micro volt corresponding to the opp, else
102 * This is useful only for devices with single power supply.
104 unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp
*opp
)
106 if (IS_ERR_OR_NULL(opp
)) {
107 pr_err("%s: Invalid parameters\n", __func__
);
111 return opp
->supplies
[0].u_volt
;
113 EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage
);
116 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
117 * @opp: opp for which frequency has to be returned for
119 * Return: frequency in hertz corresponding to the opp, else
122 unsigned long dev_pm_opp_get_freq(struct dev_pm_opp
*opp
)
124 if (IS_ERR_OR_NULL(opp
) || !opp
->available
) {
125 pr_err("%s: Invalid parameters\n", __func__
);
131 EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq
);
134 * dev_pm_opp_get_level() - Gets the level corresponding to an available opp
135 * @opp: opp for which level value has to be returned for
137 * Return: level read from device tree corresponding to the opp, else
140 unsigned int dev_pm_opp_get_level(struct dev_pm_opp
*opp
)
142 if (IS_ERR_OR_NULL(opp
) || !opp
->available
) {
143 pr_err("%s: Invalid parameters\n", __func__
);
149 EXPORT_SYMBOL_GPL(dev_pm_opp_get_level
);
152 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
153 * @opp: opp for which turbo mode is being verified
155 * Turbo OPPs are not for normal use, and can be enabled (under certain
156 * conditions) for short duration of times to finish high throughput work
157 * quickly. Running on them for longer times may overheat the chip.
159 * Return: true if opp is turbo opp, else false.
161 bool dev_pm_opp_is_turbo(struct dev_pm_opp
*opp
)
163 if (IS_ERR_OR_NULL(opp
) || !opp
->available
) {
164 pr_err("%s: Invalid parameters\n", __func__
);
170 EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo
);
173 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
174 * @dev: device for which we do this operation
176 * Return: This function returns the max clock latency in nanoseconds.
178 unsigned long dev_pm_opp_get_max_clock_latency(struct device
*dev
)
180 struct opp_table
*opp_table
;
181 unsigned long clock_latency_ns
;
183 opp_table
= _find_opp_table(dev
);
184 if (IS_ERR(opp_table
))
187 clock_latency_ns
= opp_table
->clock_latency_ns_max
;
189 dev_pm_opp_put_opp_table(opp_table
);
191 return clock_latency_ns
;
193 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency
);
196 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
197 * @dev: device for which we do this operation
199 * Return: This function returns the max voltage latency in nanoseconds.
201 unsigned long dev_pm_opp_get_max_volt_latency(struct device
*dev
)
203 struct opp_table
*opp_table
;
204 struct dev_pm_opp
*opp
;
205 struct regulator
*reg
;
206 unsigned long latency_ns
= 0;
213 opp_table
= _find_opp_table(dev
);
214 if (IS_ERR(opp_table
))
217 /* Regulator may not be required for the device */
218 if (!opp_table
->regulators
)
221 count
= opp_table
->regulator_count
;
223 uV
= kmalloc_array(count
, sizeof(*uV
), GFP_KERNEL
);
227 mutex_lock(&opp_table
->lock
);
229 for (i
= 0; i
< count
; i
++) {
233 list_for_each_entry(opp
, &opp_table
->opp_list
, node
) {
237 if (opp
->supplies
[i
].u_volt_min
< uV
[i
].min
)
238 uV
[i
].min
= opp
->supplies
[i
].u_volt_min
;
239 if (opp
->supplies
[i
].u_volt_max
> uV
[i
].max
)
240 uV
[i
].max
= opp
->supplies
[i
].u_volt_max
;
244 mutex_unlock(&opp_table
->lock
);
247 * The caller needs to ensure that opp_table (and hence the regulator)
248 * isn't freed, while we are executing this routine.
250 for (i
= 0; i
< count
; i
++) {
251 reg
= opp_table
->regulators
[i
];
252 ret
= regulator_set_voltage_time(reg
, uV
[i
].min
, uV
[i
].max
);
254 latency_ns
+= ret
* 1000;
259 dev_pm_opp_put_opp_table(opp_table
);
263 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency
);
/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to other.
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	return dev_pm_opp_get_max_volt_latency(dev) +
		dev_pm_opp_get_max_clock_latency(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
281 * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
282 * @dev: device for which we do this operation
284 * Return: This function returns the frequency of the OPP marked as suspend_opp
285 * if one is available, else returns 0;
287 unsigned long dev_pm_opp_get_suspend_opp_freq(struct device
*dev
)
289 struct opp_table
*opp_table
;
290 unsigned long freq
= 0;
292 opp_table
= _find_opp_table(dev
);
293 if (IS_ERR(opp_table
))
296 if (opp_table
->suspend_opp
&& opp_table
->suspend_opp
->available
)
297 freq
= dev_pm_opp_get_freq(opp_table
->suspend_opp
);
299 dev_pm_opp_put_opp_table(opp_table
);
303 EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq
);
305 int _get_opp_count(struct opp_table
*opp_table
)
307 struct dev_pm_opp
*opp
;
310 mutex_lock(&opp_table
->lock
);
312 list_for_each_entry(opp
, &opp_table
->opp_list
, node
) {
317 mutex_unlock(&opp_table
->lock
);
/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
 * @dev: device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * else returns 0 if none or the corresponding error value.
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct opp_table *opp_table;
	int count;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		count = PTR_ERR(opp_table);
		dev_dbg(dev, "%s: OPP table not found (%d)\n",
			__func__, count);
		return count;
	}

	count = _get_opp_count(opp_table);
	dev_pm_opp_put_opp_table(opp_table);

	return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
350 * dev_pm_opp_find_freq_exact() - search for an exact frequency
351 * @dev: device for which we do this operation
352 * @freq: frequency to search for
353 * @available: true/false - match for available opp
355 * Return: Searches for exact match in the opp table and returns pointer to the
356 * matching opp if found, else returns ERR_PTR in case of error and should
357 * be handled using IS_ERR. Error return values can be:
358 * EINVAL: for bad pointer
359 * ERANGE: no match found for search
360 * ENODEV: if device not found in list of registered devices
362 * Note: available is a modifier for the search. if available=true, then the
363 * match is for exact matching frequency and is available in the stored OPP
364 * table. if false, the match is for exact frequency which is not available.
366 * This provides a mechanism to enable an opp which is not available currently
367 * or the opposite as well.
369 * The callers are required to call dev_pm_opp_put() for the returned OPP after
372 struct dev_pm_opp
*dev_pm_opp_find_freq_exact(struct device
*dev
,
376 struct opp_table
*opp_table
;
377 struct dev_pm_opp
*temp_opp
, *opp
= ERR_PTR(-ERANGE
);
379 opp_table
= _find_opp_table(dev
);
380 if (IS_ERR(opp_table
)) {
381 int r
= PTR_ERR(opp_table
);
383 dev_err(dev
, "%s: OPP table not found (%d)\n", __func__
, r
);
387 mutex_lock(&opp_table
->lock
);
389 list_for_each_entry(temp_opp
, &opp_table
->opp_list
, node
) {
390 if (temp_opp
->available
== available
&&
391 temp_opp
->rate
== freq
) {
394 /* Increment the reference count of OPP */
400 mutex_unlock(&opp_table
->lock
);
401 dev_pm_opp_put_opp_table(opp_table
);
405 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact
);
407 static noinline
struct dev_pm_opp
*_find_freq_ceil(struct opp_table
*opp_table
,
410 struct dev_pm_opp
*temp_opp
, *opp
= ERR_PTR(-ERANGE
);
412 mutex_lock(&opp_table
->lock
);
414 list_for_each_entry(temp_opp
, &opp_table
->opp_list
, node
) {
415 if (temp_opp
->available
&& temp_opp
->rate
>= *freq
) {
419 /* Increment the reference count of OPP */
425 mutex_unlock(&opp_table
->lock
);
431 * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
432 * @dev: device for which we do this operation
433 * @freq: Start frequency
435 * Search for the matching ceil *available* OPP from a starting freq
438 * Return: matching *opp and refreshes *freq accordingly, else returns
439 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
441 * EINVAL: for bad pointer
442 * ERANGE: no match found for search
443 * ENODEV: if device not found in list of registered devices
445 * The callers are required to call dev_pm_opp_put() for the returned OPP after
448 struct dev_pm_opp
*dev_pm_opp_find_freq_ceil(struct device
*dev
,
451 struct opp_table
*opp_table
;
452 struct dev_pm_opp
*opp
;
455 dev_err(dev
, "%s: Invalid argument freq=%p\n", __func__
, freq
);
456 return ERR_PTR(-EINVAL
);
459 opp_table
= _find_opp_table(dev
);
460 if (IS_ERR(opp_table
))
461 return ERR_CAST(opp_table
);
463 opp
= _find_freq_ceil(opp_table
, freq
);
465 dev_pm_opp_put_opp_table(opp_table
);
469 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil
);
472 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
473 * @dev: device for which we do this operation
474 * @freq: Start frequency
476 * Search for the matching floor *available* OPP from a starting freq
479 * Return: matching *opp and refreshes *freq accordingly, else returns
480 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
482 * EINVAL: for bad pointer
483 * ERANGE: no match found for search
484 * ENODEV: if device not found in list of registered devices
486 * The callers are required to call dev_pm_opp_put() for the returned OPP after
489 struct dev_pm_opp
*dev_pm_opp_find_freq_floor(struct device
*dev
,
492 struct opp_table
*opp_table
;
493 struct dev_pm_opp
*temp_opp
, *opp
= ERR_PTR(-ERANGE
);
496 dev_err(dev
, "%s: Invalid argument freq=%p\n", __func__
, freq
);
497 return ERR_PTR(-EINVAL
);
500 opp_table
= _find_opp_table(dev
);
501 if (IS_ERR(opp_table
))
502 return ERR_CAST(opp_table
);
504 mutex_lock(&opp_table
->lock
);
506 list_for_each_entry(temp_opp
, &opp_table
->opp_list
, node
) {
507 if (temp_opp
->available
) {
508 /* go to the next node, before choosing prev */
509 if (temp_opp
->rate
> *freq
)
516 /* Increment the reference count of OPP */
519 mutex_unlock(&opp_table
->lock
);
520 dev_pm_opp_put_opp_table(opp_table
);
527 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor
);
529 static int _set_opp_voltage(struct device
*dev
, struct regulator
*reg
,
530 struct dev_pm_opp_supply
*supply
)
534 /* Regulator not available for device */
536 dev_dbg(dev
, "%s: regulator not available: %ld\n", __func__
,
541 dev_dbg(dev
, "%s: voltages (mV): %lu %lu %lu\n", __func__
,
542 supply
->u_volt_min
, supply
->u_volt
, supply
->u_volt_max
);
544 ret
= regulator_set_voltage_triplet(reg
, supply
->u_volt_min
,
545 supply
->u_volt
, supply
->u_volt_max
);
547 dev_err(dev
, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
548 __func__
, supply
->u_volt_min
, supply
->u_volt
,
549 supply
->u_volt_max
, ret
);
/* Change only the clock rate; log on failure. */
static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
					    unsigned long freq)
{
	int ret;

	ret = clk_set_rate(clk, freq);
	if (ret) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);
	}

	return ret;
}
568 static int _generic_set_opp_regulator(const struct opp_table
*opp_table
,
570 unsigned long old_freq
,
572 struct dev_pm_opp_supply
*old_supply
,
573 struct dev_pm_opp_supply
*new_supply
)
575 struct regulator
*reg
= opp_table
->regulators
[0];
578 /* This function only supports single regulator per device */
579 if (WARN_ON(opp_table
->regulator_count
> 1)) {
580 dev_err(dev
, "multiple regulators are not supported\n");
584 /* Scaling up? Scale voltage before frequency */
585 if (freq
>= old_freq
) {
586 ret
= _set_opp_voltage(dev
, reg
, new_supply
);
588 goto restore_voltage
;
591 /* Change frequency */
592 ret
= _generic_set_opp_clk_only(dev
, opp_table
->clk
, freq
);
594 goto restore_voltage
;
596 /* Scaling down? Scale voltage after frequency */
597 if (freq
< old_freq
) {
598 ret
= _set_opp_voltage(dev
, reg
, new_supply
);
606 if (_generic_set_opp_clk_only(dev
, opp_table
->clk
, old_freq
))
607 dev_err(dev
, "%s: failed to restore old-freq (%lu Hz)\n",
610 /* This shouldn't harm even if the voltages weren't updated earlier */
612 _set_opp_voltage(dev
, reg
, old_supply
);
617 static int _set_opp_custom(const struct opp_table
*opp_table
,
618 struct device
*dev
, unsigned long old_freq
,
620 struct dev_pm_opp_supply
*old_supply
,
621 struct dev_pm_opp_supply
*new_supply
)
623 struct dev_pm_set_opp_data
*data
;
626 data
= opp_table
->set_opp_data
;
627 data
->regulators
= opp_table
->regulators
;
628 data
->regulator_count
= opp_table
->regulator_count
;
629 data
->clk
= opp_table
->clk
;
632 data
->old_opp
.rate
= old_freq
;
633 size
= sizeof(*old_supply
) * opp_table
->regulator_count
;
634 if (IS_ERR(old_supply
))
635 memset(data
->old_opp
.supplies
, 0, size
);
637 memcpy(data
->old_opp
.supplies
, old_supply
, size
);
639 data
->new_opp
.rate
= freq
;
640 memcpy(data
->new_opp
.supplies
, new_supply
, size
);
642 return opp_table
->set_opp(data
);
645 /* This is only called for PM domain for now */
646 static int _set_required_opps(struct device
*dev
,
647 struct opp_table
*opp_table
,
648 struct dev_pm_opp
*opp
)
650 struct opp_table
**required_opp_tables
= opp_table
->required_opp_tables
;
651 struct device
**genpd_virt_devs
= opp_table
->genpd_virt_devs
;
655 if (!required_opp_tables
)
658 /* Single genpd case */
659 if (!genpd_virt_devs
) {
660 pstate
= opp
->required_opps
[0]->pstate
;
661 ret
= dev_pm_genpd_set_performance_state(dev
, pstate
);
663 dev_err(dev
, "Failed to set performance state of %s: %d (%d)\n",
664 dev_name(dev
), pstate
, ret
);
669 /* Multiple genpd case */
672 * Acquire genpd_virt_dev_lock to make sure we don't use a genpd_dev
673 * after it is freed from another thread.
675 mutex_lock(&opp_table
->genpd_virt_dev_lock
);
677 for (i
= 0; i
< opp_table
->required_opp_count
; i
++) {
678 pstate
= opp
->required_opps
[i
]->pstate
;
680 if (!genpd_virt_devs
[i
])
683 ret
= dev_pm_genpd_set_performance_state(genpd_virt_devs
[i
], pstate
);
685 dev_err(dev
, "Failed to set performance rate of %s: %d (%d)\n",
686 dev_name(genpd_virt_devs
[i
]), pstate
, ret
);
690 mutex_unlock(&opp_table
->genpd_virt_dev_lock
);
696 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
697 * @dev: device for which we do this operation
698 * @target_freq: frequency to achieve
700 * This configures the power-supplies and clock source to the levels specified
701 * by the OPP corresponding to the target_freq.
703 int dev_pm_opp_set_rate(struct device
*dev
, unsigned long target_freq
)
705 struct opp_table
*opp_table
;
706 unsigned long freq
, old_freq
;
707 struct dev_pm_opp
*old_opp
, *opp
;
711 if (unlikely(!target_freq
)) {
712 dev_err(dev
, "%s: Invalid target frequency %lu\n", __func__
,
717 opp_table
= _find_opp_table(dev
);
718 if (IS_ERR(opp_table
)) {
719 dev_err(dev
, "%s: device opp doesn't exist\n", __func__
);
720 return PTR_ERR(opp_table
);
723 clk
= opp_table
->clk
;
725 dev_err(dev
, "%s: No clock available for the device\n",
731 freq
= clk_round_rate(clk
, target_freq
);
735 old_freq
= clk_get_rate(clk
);
737 /* Return early if nothing to do */
738 if (old_freq
== freq
) {
739 dev_dbg(dev
, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
745 old_opp
= _find_freq_ceil(opp_table
, &old_freq
);
746 if (IS_ERR(old_opp
)) {
747 dev_err(dev
, "%s: failed to find current OPP for freq %lu (%ld)\n",
748 __func__
, old_freq
, PTR_ERR(old_opp
));
751 opp
= _find_freq_ceil(opp_table
, &freq
);
754 dev_err(dev
, "%s: failed to find OPP for freq %lu (%d)\n",
755 __func__
, freq
, ret
);
759 dev_dbg(dev
, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__
,
762 /* Scaling up? Configure required OPPs before frequency */
763 if (freq
>= old_freq
) {
764 ret
= _set_required_opps(dev
, opp_table
, opp
);
769 if (opp_table
->set_opp
) {
770 ret
= _set_opp_custom(opp_table
, dev
, old_freq
, freq
,
771 IS_ERR(old_opp
) ? NULL
: old_opp
->supplies
,
773 } else if (opp_table
->regulators
) {
774 ret
= _generic_set_opp_regulator(opp_table
, dev
, old_freq
, freq
,
775 IS_ERR(old_opp
) ? NULL
: old_opp
->supplies
,
778 /* Only frequency scaling */
779 ret
= _generic_set_opp_clk_only(dev
, clk
, freq
);
782 /* Scaling down? Configure required OPPs after frequency */
783 if (!ret
&& freq
< old_freq
) {
784 ret
= _set_required_opps(dev
, opp_table
, opp
);
786 dev_err(dev
, "Failed to set required opps: %d\n", ret
);
792 if (!IS_ERR(old_opp
))
793 dev_pm_opp_put(old_opp
);
795 dev_pm_opp_put_opp_table(opp_table
);
798 EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate
);
800 /* OPP-dev Helpers */
801 static void _remove_opp_dev(struct opp_device
*opp_dev
,
802 struct opp_table
*opp_table
)
804 opp_debug_unregister(opp_dev
, opp_table
);
805 list_del(&opp_dev
->node
);
809 static struct opp_device
*_add_opp_dev_unlocked(const struct device
*dev
,
810 struct opp_table
*opp_table
)
812 struct opp_device
*opp_dev
;
814 opp_dev
= kzalloc(sizeof(*opp_dev
), GFP_KERNEL
);
818 /* Initialize opp-dev */
821 list_add(&opp_dev
->node
, &opp_table
->dev_list
);
823 /* Create debugfs entries for the opp_table */
824 opp_debug_register(opp_dev
, opp_table
);
829 struct opp_device
*_add_opp_dev(const struct device
*dev
,
830 struct opp_table
*opp_table
)
832 struct opp_device
*opp_dev
;
834 mutex_lock(&opp_table
->lock
);
835 opp_dev
= _add_opp_dev_unlocked(dev
, opp_table
);
836 mutex_unlock(&opp_table
->lock
);
841 static struct opp_table
*_allocate_opp_table(struct device
*dev
, int index
)
843 struct opp_table
*opp_table
;
844 struct opp_device
*opp_dev
;
848 * Allocate a new OPP table. In the infrequent case where a new
849 * device is needed to be added, we pay this penalty.
851 opp_table
= kzalloc(sizeof(*opp_table
), GFP_KERNEL
);
855 mutex_init(&opp_table
->lock
);
856 mutex_init(&opp_table
->genpd_virt_dev_lock
);
857 INIT_LIST_HEAD(&opp_table
->dev_list
);
859 /* Mark regulator count uninitialized */
860 opp_table
->regulator_count
= -1;
862 opp_dev
= _add_opp_dev(dev
, opp_table
);
868 _of_init_opp_table(opp_table
, dev
, index
);
870 /* Find clk for the device */
871 opp_table
->clk
= clk_get(dev
, NULL
);
872 if (IS_ERR(opp_table
->clk
)) {
873 ret
= PTR_ERR(opp_table
->clk
);
874 if (ret
!= -EPROBE_DEFER
)
875 dev_dbg(dev
, "%s: Couldn't find clock: %d\n", __func__
,
879 BLOCKING_INIT_NOTIFIER_HEAD(&opp_table
->head
);
880 INIT_LIST_HEAD(&opp_table
->opp_list
);
881 kref_init(&opp_table
->kref
);
883 /* Secure the device table modification */
884 list_add(&opp_table
->node
, &opp_tables
);
888 void _get_opp_table_kref(struct opp_table
*opp_table
)
890 kref_get(&opp_table
->kref
);
893 static struct opp_table
*_opp_get_opp_table(struct device
*dev
, int index
)
895 struct opp_table
*opp_table
;
897 /* Hold our table modification lock here */
898 mutex_lock(&opp_table_lock
);
900 opp_table
= _find_opp_table_unlocked(dev
);
901 if (!IS_ERR(opp_table
))
904 opp_table
= _managed_opp(dev
, index
);
906 if (!_add_opp_dev_unlocked(dev
, opp_table
)) {
907 dev_pm_opp_put_opp_table(opp_table
);
913 opp_table
= _allocate_opp_table(dev
, index
);
916 mutex_unlock(&opp_table_lock
);
/* Get (or create) the OPP table for @dev at index 0. */
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
	return _opp_get_opp_table(dev, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
/* Get (or create) the OPP table for @dev at the given DT @index. */
struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev,
						   int index)
{
	return _opp_get_opp_table(dev, index);
}
933 static void _opp_table_kref_release(struct kref
*kref
)
935 struct opp_table
*opp_table
= container_of(kref
, struct opp_table
, kref
);
936 struct opp_device
*opp_dev
, *temp
;
938 _of_clear_opp_table(opp_table
);
941 if (!IS_ERR(opp_table
->clk
))
942 clk_put(opp_table
->clk
);
944 WARN_ON(!list_empty(&opp_table
->opp_list
));
946 list_for_each_entry_safe(opp_dev
, temp
, &opp_table
->dev_list
, node
) {
948 * The OPP table is getting removed, drop the performance state
951 if (opp_table
->genpd_performance_state
)
952 dev_pm_genpd_set_performance_state((struct device
*)(opp_dev
->dev
), 0);
954 _remove_opp_dev(opp_dev
, opp_table
);
957 mutex_destroy(&opp_table
->genpd_virt_dev_lock
);
958 mutex_destroy(&opp_table
->lock
);
959 list_del(&opp_table
->node
);
962 mutex_unlock(&opp_table_lock
);
965 void _opp_remove_all_static(struct opp_table
*opp_table
)
967 struct dev_pm_opp
*opp
, *tmp
;
969 list_for_each_entry_safe(opp
, tmp
, &opp_table
->opp_list
, node
) {
974 opp_table
->parsed_static_opps
= false;
977 static void _opp_table_list_kref_release(struct kref
*kref
)
979 struct opp_table
*opp_table
= container_of(kref
, struct opp_table
,
982 _opp_remove_all_static(opp_table
);
983 mutex_unlock(&opp_table_lock
);
986 void _put_opp_list_kref(struct opp_table
*opp_table
)
988 kref_put_mutex(&opp_table
->list_kref
, _opp_table_list_kref_release
,
992 void dev_pm_opp_put_opp_table(struct opp_table
*opp_table
)
994 kref_put_mutex(&opp_table
->kref
, _opp_table_kref_release
,
997 EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table
);
/* Free an OPP allocated by _opp_allocate() but never added to a table. */
void _opp_free(struct dev_pm_opp *opp)
{
	kfree(opp);
}
1004 static void _opp_kref_release(struct dev_pm_opp
*opp
,
1005 struct opp_table
*opp_table
)
1008 * Notify the changes in the availability of the operable
1009 * frequency/voltage list.
1011 blocking_notifier_call_chain(&opp_table
->head
, OPP_EVENT_REMOVE
, opp
);
1012 _of_opp_free_required_opps(opp_table
, opp
);
1013 opp_debug_remove_one(opp
);
1014 list_del(&opp
->node
);
1018 static void _opp_kref_release_unlocked(struct kref
*kref
)
1020 struct dev_pm_opp
*opp
= container_of(kref
, struct dev_pm_opp
, kref
);
1021 struct opp_table
*opp_table
= opp
->opp_table
;
1023 _opp_kref_release(opp
, opp_table
);
1026 static void _opp_kref_release_locked(struct kref
*kref
)
1028 struct dev_pm_opp
*opp
= container_of(kref
, struct dev_pm_opp
, kref
);
1029 struct opp_table
*opp_table
= opp
->opp_table
;
1031 _opp_kref_release(opp
, opp_table
);
1032 mutex_unlock(&opp_table
->lock
);
1035 void dev_pm_opp_get(struct dev_pm_opp
*opp
)
1037 kref_get(&opp
->kref
);
1040 void dev_pm_opp_put(struct dev_pm_opp
*opp
)
1042 kref_put_mutex(&opp
->kref
, _opp_kref_release_locked
,
1043 &opp
->opp_table
->lock
);
1045 EXPORT_SYMBOL_GPL(dev_pm_opp_put
);
1047 static void dev_pm_opp_put_unlocked(struct dev_pm_opp
*opp
)
1049 kref_put(&opp
->kref
, _opp_kref_release_unlocked
);
1053 * dev_pm_opp_remove() - Remove an OPP from OPP table
1054 * @dev: device for which we do this operation
1055 * @freq: OPP to remove with matching 'freq'
1057 * This function removes an opp from the opp table.
1059 void dev_pm_opp_remove(struct device
*dev
, unsigned long freq
)
1061 struct dev_pm_opp
*opp
;
1062 struct opp_table
*opp_table
;
1065 opp_table
= _find_opp_table(dev
);
1066 if (IS_ERR(opp_table
))
1069 mutex_lock(&opp_table
->lock
);
1071 list_for_each_entry(opp
, &opp_table
->opp_list
, node
) {
1072 if (opp
->rate
== freq
) {
1078 mutex_unlock(&opp_table
->lock
);
1081 dev_pm_opp_put(opp
);
1083 /* Drop the reference taken by dev_pm_opp_add() */
1084 dev_pm_opp_put_opp_table(opp_table
);
1086 dev_warn(dev
, "%s: Couldn't find OPP with freq: %lu\n",
1090 /* Drop the reference taken by _find_opp_table() */
1091 dev_pm_opp_put_opp_table(opp_table
);
1093 EXPORT_SYMBOL_GPL(dev_pm_opp_remove
);
1096 * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
1097 * @dev: device for which we do this operation
1099 * This function removes all dynamically created OPPs from the opp table.
1101 void dev_pm_opp_remove_all_dynamic(struct device
*dev
)
1103 struct opp_table
*opp_table
;
1104 struct dev_pm_opp
*opp
, *temp
;
1107 opp_table
= _find_opp_table(dev
);
1108 if (IS_ERR(opp_table
))
1111 mutex_lock(&opp_table
->lock
);
1112 list_for_each_entry_safe(opp
, temp
, &opp_table
->opp_list
, node
) {
1114 dev_pm_opp_put_unlocked(opp
);
1118 mutex_unlock(&opp_table
->lock
);
1120 /* Drop the references taken by dev_pm_opp_add() */
1122 dev_pm_opp_put_opp_table(opp_table
);
1124 /* Drop the reference taken by _find_opp_table() */
1125 dev_pm_opp_put_opp_table(opp_table
);
1127 EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic
);
1129 struct dev_pm_opp
*_opp_allocate(struct opp_table
*table
)
1131 struct dev_pm_opp
*opp
;
1132 int count
, supply_size
;
1134 /* Allocate space for at least one supply */
1135 count
= table
->regulator_count
> 0 ? table
->regulator_count
: 1;
1136 supply_size
= sizeof(*opp
->supplies
) * count
;
1138 /* allocate new OPP node and supplies structures */
1139 opp
= kzalloc(sizeof(*opp
) + supply_size
, GFP_KERNEL
);
1143 /* Put the supplies at the end of the OPP structure as an empty array */
1144 opp
->supplies
= (struct dev_pm_opp_supply
*)(opp
+ 1);
1145 INIT_LIST_HEAD(&opp
->node
);
1150 static bool _opp_supported_by_regulators(struct dev_pm_opp
*opp
,
1151 struct opp_table
*opp_table
)
1153 struct regulator
*reg
;
1156 if (!opp_table
->regulators
)
1159 for (i
= 0; i
< opp_table
->regulator_count
; i
++) {
1160 reg
= opp_table
->regulators
[i
];
1162 if (!regulator_is_supported_voltage(reg
,
1163 opp
->supplies
[i
].u_volt_min
,
1164 opp
->supplies
[i
].u_volt_max
)) {
1165 pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
1166 __func__
, opp
->supplies
[i
].u_volt_min
,
1167 opp
->supplies
[i
].u_volt_max
);
1175 static int _opp_is_duplicate(struct device
*dev
, struct dev_pm_opp
*new_opp
,
1176 struct opp_table
*opp_table
,
1177 struct list_head
**head
)
1179 struct dev_pm_opp
*opp
;
1182 * Insert new OPP in order of increasing frequency and discard if
1185 * Need to use &opp_table->opp_list in the condition part of the 'for'
1186 * loop, don't replace it with head otherwise it will become an infinite
1189 list_for_each_entry(opp
, &opp_table
->opp_list
, node
) {
1190 if (new_opp
->rate
> opp
->rate
) {
1195 if (new_opp
->rate
< opp
->rate
)
1198 /* Duplicate OPPs */
1199 dev_warn(dev
, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
1200 __func__
, opp
->rate
, opp
->supplies
[0].u_volt
,
1201 opp
->available
, new_opp
->rate
,
1202 new_opp
->supplies
[0].u_volt
, new_opp
->available
);
1204 /* Should we compare voltages for all regulators here ? */
1205 return opp
->available
&&
1206 new_opp
->supplies
[0].u_volt
== opp
->supplies
[0].u_volt
? -EBUSY
: -EEXIST
;
1214 * 0: On success. And appropriate error message for duplicate OPPs.
1215 * -EBUSY: For OPP with same freq/volt and is available. The callers of
1216 * _opp_add() must return 0 if they receive -EBUSY from it. This is to make
1217 * sure we don't print error messages unnecessarily if different parts of
1218 * kernel try to initialize the OPP table.
1219 * -EEXIST: For OPP with same freq but different volt or is unavailable. This
1220 * should be considered an error by the callers of _opp_add().
1222 int _opp_add(struct device
*dev
, struct dev_pm_opp
*new_opp
,
1223 struct opp_table
*opp_table
, bool rate_not_available
)
1225 struct list_head
*head
;
1228 mutex_lock(&opp_table
->lock
);
1229 head
= &opp_table
->opp_list
;
1231 if (likely(!rate_not_available
)) {
1232 ret
= _opp_is_duplicate(dev
, new_opp
, opp_table
, &head
);
1234 mutex_unlock(&opp_table
->lock
);
1239 list_add(&new_opp
->node
, head
);
1240 mutex_unlock(&opp_table
->lock
);
1242 new_opp
->opp_table
= opp_table
;
1243 kref_init(&new_opp
->kref
);
1245 opp_debug_create_one(new_opp
, opp_table
);
1247 if (!_opp_supported_by_regulators(new_opp
, opp_table
)) {
1248 new_opp
->available
= false;
1249 dev_warn(dev
, "%s: OPP not supported by regulators (%lu)\n",
1250 __func__
, new_opp
->rate
);
1257 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
1258 * @opp_table: OPP table
1259 * @dev: device for which we do this operation
1260 * @freq: Frequency in Hz for this OPP
1261 * @u_volt: Voltage in uVolts for this OPP
1262 * @dynamic: Dynamically added OPPs.
1264 * This function adds an opp definition to the opp table and returns status.
1265 * The opp is made available by default and it can be controlled using
1266 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
1268 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
1269 * and freed by dev_pm_opp_of_remove_table.
1273 * Duplicate OPPs (both freq and volt are same) and opp->available
1274 * -EEXIST Freq are same and volt are different OR
1275 * Duplicate OPPs (both freq and volt are same) and !opp->available
1276 * -ENOMEM Memory allocation failure
1278 int _opp_add_v1(struct opp_table
*opp_table
, struct device
*dev
,
1279 unsigned long freq
, long u_volt
, bool dynamic
)
1281 struct dev_pm_opp
*new_opp
;
1285 new_opp
= _opp_allocate(opp_table
);
1289 /* populate the opp table */
1290 new_opp
->rate
= freq
;
1291 tol
= u_volt
* opp_table
->voltage_tolerance_v1
/ 100;
1292 new_opp
->supplies
[0].u_volt
= u_volt
;
1293 new_opp
->supplies
[0].u_volt_min
= u_volt
- tol
;
1294 new_opp
->supplies
[0].u_volt_max
= u_volt
+ tol
;
1295 new_opp
->available
= true;
1296 new_opp
->dynamic
= dynamic
;
1298 ret
= _opp_add(dev
, new_opp
, opp_table
, false);
1300 /* Don't return error for duplicate OPPs */
1307 * Notify the changes in the availability of the operable
1308 * frequency/voltage list.
1310 blocking_notifier_call_chain(&opp_table
->head
, OPP_EVENT_ADD
, new_opp
);
1320 * dev_pm_opp_set_supported_hw() - Set supported platforms
1321 * @dev: Device for which supported-hw has to be set.
1322 * @versions: Array of hierarchy of versions to match.
1323 * @count: Number of elements in the array.
1325 * This is required only for the V2 bindings, and it enables a platform to
1326 * specify the hierarchy of versions it supports. OPP layer will then enable
1327 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
1330 struct opp_table
*dev_pm_opp_set_supported_hw(struct device
*dev
,
1331 const u32
*versions
, unsigned int count
)
1333 struct opp_table
*opp_table
;
1335 opp_table
= dev_pm_opp_get_opp_table(dev
);
1337 return ERR_PTR(-ENOMEM
);
1339 /* Make sure there are no concurrent readers while updating opp_table */
1340 WARN_ON(!list_empty(&opp_table
->opp_list
));
1342 /* Another CPU that shares the OPP table has set the property ? */
1343 if (opp_table
->supported_hw
)
1346 opp_table
->supported_hw
= kmemdup(versions
, count
* sizeof(*versions
),
1348 if (!opp_table
->supported_hw
) {
1349 dev_pm_opp_put_opp_table(opp_table
);
1350 return ERR_PTR(-ENOMEM
);
1353 opp_table
->supported_hw_count
= count
;
1357 EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw
);
1360 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
1361 * @opp_table: OPP table returned by dev_pm_opp_set_supported_hw().
1363 * This is required only for the V2 bindings, and is called for a matching
1364 * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
1365 * will not be freed.
1367 void dev_pm_opp_put_supported_hw(struct opp_table
*opp_table
)
1369 /* Make sure there are no concurrent readers while updating opp_table */
1370 WARN_ON(!list_empty(&opp_table
->opp_list
));
1372 kfree(opp_table
->supported_hw
);
1373 opp_table
->supported_hw
= NULL
;
1374 opp_table
->supported_hw_count
= 0;
1376 dev_pm_opp_put_opp_table(opp_table
);
1378 EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw
);
1381 * dev_pm_opp_set_prop_name() - Set prop-extn name
1382 * @dev: Device for which the prop-name has to be set.
1383 * @name: name to postfix to properties.
1385 * This is required only for the V2 bindings, and it enables a platform to
1386 * specify the extn to be used for certain property names. The properties to
1387 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
1388 * should postfix the property name with -<name> while looking for them.
1390 struct opp_table
*dev_pm_opp_set_prop_name(struct device
*dev
, const char *name
)
1392 struct opp_table
*opp_table
;
1394 opp_table
= dev_pm_opp_get_opp_table(dev
);
1396 return ERR_PTR(-ENOMEM
);
1398 /* Make sure there are no concurrent readers while updating opp_table */
1399 WARN_ON(!list_empty(&opp_table
->opp_list
));
1401 /* Another CPU that shares the OPP table has set the property ? */
1402 if (opp_table
->prop_name
)
1405 opp_table
->prop_name
= kstrdup(name
, GFP_KERNEL
);
1406 if (!opp_table
->prop_name
) {
1407 dev_pm_opp_put_opp_table(opp_table
);
1408 return ERR_PTR(-ENOMEM
);
1413 EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name
);
1416 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
1417 * @opp_table: OPP table returned by dev_pm_opp_set_prop_name().
1419 * This is required only for the V2 bindings, and is called for a matching
1420 * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
1421 * will not be freed.
1423 void dev_pm_opp_put_prop_name(struct opp_table
*opp_table
)
1425 /* Make sure there are no concurrent readers while updating opp_table */
1426 WARN_ON(!list_empty(&opp_table
->opp_list
));
1428 kfree(opp_table
->prop_name
);
1429 opp_table
->prop_name
= NULL
;
1431 dev_pm_opp_put_opp_table(opp_table
);
1433 EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name
);
1435 static int _allocate_set_opp_data(struct opp_table
*opp_table
)
1437 struct dev_pm_set_opp_data
*data
;
1438 int len
, count
= opp_table
->regulator_count
;
1440 if (WARN_ON(!opp_table
->regulators
))
1443 /* space for set_opp_data */
1444 len
= sizeof(*data
);
1446 /* space for old_opp.supplies and new_opp.supplies */
1447 len
+= 2 * sizeof(struct dev_pm_opp_supply
) * count
;
1449 data
= kzalloc(len
, GFP_KERNEL
);
1453 data
->old_opp
.supplies
= (void *)(data
+ 1);
1454 data
->new_opp
.supplies
= data
->old_opp
.supplies
+ count
;
1456 opp_table
->set_opp_data
= data
;
1461 static void _free_set_opp_data(struct opp_table
*opp_table
)
1463 kfree(opp_table
->set_opp_data
);
1464 opp_table
->set_opp_data
= NULL
;
1468 * dev_pm_opp_set_regulators() - Set regulator names for the device
1469 * @dev: Device for which regulator name is being set.
1470 * @names: Array of pointers to the names of the regulator.
1471 * @count: Number of regulators.
1473 * In order to support OPP switching, OPP layer needs to know the name of the
1474 * device's regulators, as the core would be required to switch voltages as
1477 * This must be called before any OPPs are initialized for the device.
1479 struct opp_table
*dev_pm_opp_set_regulators(struct device
*dev
,
1480 const char * const names
[],
1483 struct opp_table
*opp_table
;
1484 struct regulator
*reg
;
1487 opp_table
= dev_pm_opp_get_opp_table(dev
);
1489 return ERR_PTR(-ENOMEM
);
1491 /* This should be called before OPPs are initialized */
1492 if (WARN_ON(!list_empty(&opp_table
->opp_list
))) {
1497 /* Another CPU that shares the OPP table has set the regulators ? */
1498 if (opp_table
->regulators
)
1501 opp_table
->regulators
= kmalloc_array(count
,
1502 sizeof(*opp_table
->regulators
),
1504 if (!opp_table
->regulators
) {
1509 for (i
= 0; i
< count
; i
++) {
1510 reg
= regulator_get_optional(dev
, names
[i
]);
1513 if (ret
!= -EPROBE_DEFER
)
1514 dev_err(dev
, "%s: no regulator (%s) found: %d\n",
1515 __func__
, names
[i
], ret
);
1516 goto free_regulators
;
1519 opp_table
->regulators
[i
] = reg
;
1522 opp_table
->regulator_count
= count
;
1524 /* Allocate block only once to pass to set_opp() routines */
1525 ret
= _allocate_set_opp_data(opp_table
);
1527 goto free_regulators
;
1533 regulator_put(opp_table
->regulators
[--i
]);
1535 kfree(opp_table
->regulators
);
1536 opp_table
->regulators
= NULL
;
1537 opp_table
->regulator_count
= -1;
1539 dev_pm_opp_put_opp_table(opp_table
);
1541 return ERR_PTR(ret
);
1543 EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators
);
1546 * dev_pm_opp_put_regulators() - Releases resources blocked for regulator
1547 * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
1549 void dev_pm_opp_put_regulators(struct opp_table
*opp_table
)
1553 if (!opp_table
->regulators
)
1556 /* Make sure there are no concurrent readers while updating opp_table */
1557 WARN_ON(!list_empty(&opp_table
->opp_list
));
1559 for (i
= opp_table
->regulator_count
- 1; i
>= 0; i
--)
1560 regulator_put(opp_table
->regulators
[i
]);
1562 _free_set_opp_data(opp_table
);
1564 kfree(opp_table
->regulators
);
1565 opp_table
->regulators
= NULL
;
1566 opp_table
->regulator_count
= -1;
1569 dev_pm_opp_put_opp_table(opp_table
);
1571 EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators
);
1574 * dev_pm_opp_set_clkname() - Set clk name for the device
1575 * @dev: Device for which clk name is being set.
1578 * In order to support OPP switching, OPP layer needs to get pointer to the
1579 * clock for the device. Simple cases work fine without using this routine (i.e.
1580 * by passing connection-id as NULL), but for a device with multiple clocks
1581 * available, the OPP core needs to know the exact name of the clk to use.
1583 * This must be called before any OPPs are initialized for the device.
1585 struct opp_table
*dev_pm_opp_set_clkname(struct device
*dev
, const char *name
)
1587 struct opp_table
*opp_table
;
1590 opp_table
= dev_pm_opp_get_opp_table(dev
);
1592 return ERR_PTR(-ENOMEM
);
1594 /* This should be called before OPPs are initialized */
1595 if (WARN_ON(!list_empty(&opp_table
->opp_list
))) {
1600 /* Already have default clk set, free it */
1601 if (!IS_ERR(opp_table
->clk
))
1602 clk_put(opp_table
->clk
);
1604 /* Find clk for the device */
1605 opp_table
->clk
= clk_get(dev
, name
);
1606 if (IS_ERR(opp_table
->clk
)) {
1607 ret
= PTR_ERR(opp_table
->clk
);
1608 if (ret
!= -EPROBE_DEFER
) {
1609 dev_err(dev
, "%s: Couldn't find clock: %d\n", __func__
,
1618 dev_pm_opp_put_opp_table(opp_table
);
1620 return ERR_PTR(ret
);
1622 EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname
);
1625 * dev_pm_opp_put_clkname() - Releases resources blocked for clk.
1626 * @opp_table: OPP table returned from dev_pm_opp_set_clkname().
1628 void dev_pm_opp_put_clkname(struct opp_table
*opp_table
)
1630 /* Make sure there are no concurrent readers while updating opp_table */
1631 WARN_ON(!list_empty(&opp_table
->opp_list
));
1633 clk_put(opp_table
->clk
);
1634 opp_table
->clk
= ERR_PTR(-EINVAL
);
1636 dev_pm_opp_put_opp_table(opp_table
);
1638 EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname
);
1641 * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
1642 * @dev: Device for which the helper is getting registered.
1643 * @set_opp: Custom set OPP helper.
1645 * This is useful to support complex platforms (like platforms with multiple
1646 * regulators per device), instead of the generic OPP set rate helper.
1648 * This must be called before any OPPs are initialized for the device.
1650 struct opp_table
*dev_pm_opp_register_set_opp_helper(struct device
*dev
,
1651 int (*set_opp
)(struct dev_pm_set_opp_data
*data
))
1653 struct opp_table
*opp_table
;
1656 return ERR_PTR(-EINVAL
);
1658 opp_table
= dev_pm_opp_get_opp_table(dev
);
1660 return ERR_PTR(-ENOMEM
);
1662 /* This should be called before OPPs are initialized */
1663 if (WARN_ON(!list_empty(&opp_table
->opp_list
))) {
1664 dev_pm_opp_put_opp_table(opp_table
);
1665 return ERR_PTR(-EBUSY
);
1668 /* Another CPU that shares the OPP table has set the helper ? */
1669 if (!opp_table
->set_opp
)
1670 opp_table
->set_opp
= set_opp
;
1674 EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper
);
1677 * dev_pm_opp_unregister_set_opp_helper() - Releases resources blocked for
1679 * @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper().
1681 * Release resources blocked for platform specific set_opp helper.
1683 void dev_pm_opp_unregister_set_opp_helper(struct opp_table
*opp_table
)
1685 /* Make sure there are no concurrent readers while updating opp_table */
1686 WARN_ON(!list_empty(&opp_table
->opp_list
));
1688 opp_table
->set_opp
= NULL
;
1689 dev_pm_opp_put_opp_table(opp_table
);
1691 EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper
);
1694 * dev_pm_opp_set_genpd_virt_dev - Set virtual genpd device for an index
1695 * @dev: Consumer device for which the genpd device is getting set.
1696 * @virt_dev: virtual genpd device.
1699 * Multiple generic power domains for a device are supported with the help of
1700 * virtual genpd devices, which are created for each consumer device - genpd
1701 * pair. These are the device structures which are attached to the power domain
1702 * and are required by the OPP core to set the performance state of the genpd.
1704 * This helper will normally be called by the consumer driver of the device
1705 * "dev", as only that has details of the genpd devices.
1707 * This helper needs to be called once for each of those virtual devices, but
1708 * only if multiple domains are available for a device. Otherwise the original
1709 * device structure will be used instead by the OPP core.
1711 struct opp_table
*dev_pm_opp_set_genpd_virt_dev(struct device
*dev
,
1712 struct device
*virt_dev
,
1715 struct opp_table
*opp_table
;
1717 opp_table
= dev_pm_opp_get_opp_table(dev
);
1719 return ERR_PTR(-ENOMEM
);
1721 mutex_lock(&opp_table
->genpd_virt_dev_lock
);
1723 if (unlikely(!opp_table
->genpd_virt_devs
||
1724 index
>= opp_table
->required_opp_count
||
1725 opp_table
->genpd_virt_devs
[index
])) {
1727 dev_err(dev
, "Invalid request to set required device\n");
1728 dev_pm_opp_put_opp_table(opp_table
);
1729 mutex_unlock(&opp_table
->genpd_virt_dev_lock
);
1731 return ERR_PTR(-EINVAL
);
1734 opp_table
->genpd_virt_devs
[index
] = virt_dev
;
1735 mutex_unlock(&opp_table
->genpd_virt_dev_lock
);
1741 * dev_pm_opp_put_genpd_virt_dev() - Releases resources blocked for genpd device.
1742 * @opp_table: OPP table returned by dev_pm_opp_set_genpd_virt_dev().
1743 * @virt_dev: virtual genpd device.
1745 * This releases the resource previously acquired with a call to
1746 * dev_pm_opp_set_genpd_virt_dev(). The consumer driver shall call this helper
1747 * if it doesn't want OPP core to update performance state of a power domain
1750 void dev_pm_opp_put_genpd_virt_dev(struct opp_table
*opp_table
,
1751 struct device
*virt_dev
)
1756 * Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
1759 mutex_lock(&opp_table
->genpd_virt_dev_lock
);
1761 for (i
= 0; i
< opp_table
->required_opp_count
; i
++) {
1762 if (opp_table
->genpd_virt_devs
[i
] != virt_dev
)
1765 opp_table
->genpd_virt_devs
[i
] = NULL
;
1766 dev_pm_opp_put_opp_table(opp_table
);
1769 dev_pm_genpd_set_performance_state(virt_dev
, 0);
1773 mutex_unlock(&opp_table
->genpd_virt_dev_lock
);
1775 if (unlikely(i
== opp_table
->required_opp_count
))
1776 dev_err(virt_dev
, "Failed to find required device entry\n");
1780 * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
1781 * @src_table: OPP table which has dst_table as one of its required OPP table.
1782 * @dst_table: Required OPP table of the src_table.
1783 * @pstate: Current performance state of the src_table.
1785 * This Returns pstate of the OPP (present in @dst_table) pointed out by the
1786 * "required-opps" property of the OPP (present in @src_table) which has
1787 * performance state set to @pstate.
1789 * Return: Zero or positive performance state on success, otherwise negative
1792 int dev_pm_opp_xlate_performance_state(struct opp_table
*src_table
,
1793 struct opp_table
*dst_table
,
1794 unsigned int pstate
)
1796 struct dev_pm_opp
*opp
;
1797 int dest_pstate
= -EINVAL
;
1804 * Normally the src_table will have the "required_opps" property set to
1805 * point to one of the OPPs in the dst_table, but in some cases the
1806 * genpd and its master have one to one mapping of performance states
1807 * and so none of them have the "required-opps" property set. Return the
1808 * pstate of the src_table as it is in such cases.
1810 if (!src_table
->required_opp_count
)
1813 for (i
= 0; i
< src_table
->required_opp_count
; i
++) {
1814 if (src_table
->required_opp_tables
[i
]->np
== dst_table
->np
)
1818 if (unlikely(i
== src_table
->required_opp_count
)) {
1819 pr_err("%s: Couldn't find matching OPP table (%p: %p)\n",
1820 __func__
, src_table
, dst_table
);
1824 mutex_lock(&src_table
->lock
);
1826 list_for_each_entry(opp
, &src_table
->opp_list
, node
) {
1827 if (opp
->pstate
== pstate
) {
1828 dest_pstate
= opp
->required_opps
[i
]->pstate
;
1833 pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__
, src_table
,
1837 mutex_unlock(&src_table
->lock
);
1843 * dev_pm_opp_add() - Add an OPP table from a table definitions
1844 * @dev: device for which we do this operation
1845 * @freq: Frequency in Hz for this OPP
1846 * @u_volt: Voltage in uVolts for this OPP
1848 * This function adds an opp definition to the opp table and returns status.
1849 * The opp is made available by default and it can be controlled using
1850 * dev_pm_opp_enable/disable functions.
1854 * Duplicate OPPs (both freq and volt are same) and opp->available
1855 * -EEXIST Freq are same and volt are different OR
1856 * Duplicate OPPs (both freq and volt are same) and !opp->available
1857 * -ENOMEM Memory allocation failure
1859 int dev_pm_opp_add(struct device
*dev
, unsigned long freq
, unsigned long u_volt
)
1861 struct opp_table
*opp_table
;
1864 opp_table
= dev_pm_opp_get_opp_table(dev
);
1868 /* Fix regulator count for dynamic OPPs */
1869 opp_table
->regulator_count
= 1;
1871 ret
= _opp_add_v1(opp_table
, dev
, freq
, u_volt
, true);
1873 dev_pm_opp_put_opp_table(opp_table
);
1877 EXPORT_SYMBOL_GPL(dev_pm_opp_add
);
1880 * _opp_set_availability() - helper to set the availability of an opp
1881 * @dev: device for which we do this operation
1882 * @freq: OPP frequency to modify availability
1883 * @availability_req: availability status requested for this opp
1885 * Set the availability of an OPP, opp_{enable,disable} share a common logic
1886 * which is isolated here.
1888 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1889 * copy operation, returns 0 if no modification was done OR modification was
1892 static int _opp_set_availability(struct device
*dev
, unsigned long freq
,
1893 bool availability_req
)
1895 struct opp_table
*opp_table
;
1896 struct dev_pm_opp
*tmp_opp
, *opp
= ERR_PTR(-ENODEV
);
1899 /* Find the opp_table */
1900 opp_table
= _find_opp_table(dev
);
1901 if (IS_ERR(opp_table
)) {
1902 r
= PTR_ERR(opp_table
);
1903 dev_warn(dev
, "%s: Device OPP not found (%d)\n", __func__
, r
);
1907 mutex_lock(&opp_table
->lock
);
1909 /* Do we have the frequency? */
1910 list_for_each_entry(tmp_opp
, &opp_table
->opp_list
, node
) {
1911 if (tmp_opp
->rate
== freq
) {
1922 /* Is update really needed? */
1923 if (opp
->available
== availability_req
)
1926 opp
->available
= availability_req
;
1928 dev_pm_opp_get(opp
);
1929 mutex_unlock(&opp_table
->lock
);
1931 /* Notify the change of the OPP availability */
1932 if (availability_req
)
1933 blocking_notifier_call_chain(&opp_table
->head
, OPP_EVENT_ENABLE
,
1936 blocking_notifier_call_chain(&opp_table
->head
,
1937 OPP_EVENT_DISABLE
, opp
);
1939 dev_pm_opp_put(opp
);
1943 mutex_unlock(&opp_table
->lock
);
1945 dev_pm_opp_put_opp_table(opp_table
);
1950 * dev_pm_opp_enable() - Enable a specific OPP
1951 * @dev: device for which we do this operation
1952 * @freq: OPP frequency to enable
1954 * Enables a provided opp. If the operation is valid, this returns 0, else the
1955 * corresponding error value. It is meant to be used for users an OPP available
1956 * after being temporarily made unavailable with dev_pm_opp_disable.
1958 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1959 * copy operation, returns 0 if no modification was done OR modification was
1962 int dev_pm_opp_enable(struct device
*dev
, unsigned long freq
)
1964 return _opp_set_availability(dev
, freq
, true);
1966 EXPORT_SYMBOL_GPL(dev_pm_opp_enable
);
1969 * dev_pm_opp_disable() - Disable a specific OPP
1970 * @dev: device for which we do this operation
1971 * @freq: OPP frequency to disable
1973 * Disables a provided opp. If the operation is valid, this returns
1974 * 0, else the corresponding error value. It is meant to be a temporary
1975 * control by users to make this OPP not available until the circumstances are
1976 * right to make it available again (with a call to dev_pm_opp_enable).
1978 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1979 * copy operation, returns 0 if no modification was done OR modification was
1982 int dev_pm_opp_disable(struct device
*dev
, unsigned long freq
)
1984 return _opp_set_availability(dev
, freq
, false);
1986 EXPORT_SYMBOL_GPL(dev_pm_opp_disable
);
1989 * dev_pm_opp_register_notifier() - Register OPP notifier for the device
1990 * @dev: Device for which notifier needs to be registered
1991 * @nb: Notifier block to be registered
1993 * Return: 0 on success or a negative error value.
1995 int dev_pm_opp_register_notifier(struct device
*dev
, struct notifier_block
*nb
)
1997 struct opp_table
*opp_table
;
2000 opp_table
= _find_opp_table(dev
);
2001 if (IS_ERR(opp_table
))
2002 return PTR_ERR(opp_table
);
2004 ret
= blocking_notifier_chain_register(&opp_table
->head
, nb
);
2006 dev_pm_opp_put_opp_table(opp_table
);
2010 EXPORT_SYMBOL(dev_pm_opp_register_notifier
);
2013 * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
2014 * @dev: Device for which notifier needs to be unregistered
2015 * @nb: Notifier block to be unregistered
2017 * Return: 0 on success or a negative error value.
2019 int dev_pm_opp_unregister_notifier(struct device
*dev
,
2020 struct notifier_block
*nb
)
2022 struct opp_table
*opp_table
;
2025 opp_table
= _find_opp_table(dev
);
2026 if (IS_ERR(opp_table
))
2027 return PTR_ERR(opp_table
);
2029 ret
= blocking_notifier_chain_unregister(&opp_table
->head
, nb
);
2031 dev_pm_opp_put_opp_table(opp_table
);
2035 EXPORT_SYMBOL(dev_pm_opp_unregister_notifier
);
2037 void _dev_pm_opp_find_and_remove_table(struct device
*dev
)
2039 struct opp_table
*opp_table
;
2041 /* Check for existing table for 'dev' */
2042 opp_table
= _find_opp_table(dev
);
2043 if (IS_ERR(opp_table
)) {
2044 int error
= PTR_ERR(opp_table
);
2046 if (error
!= -ENODEV
)
2047 WARN(1, "%s: opp_table: %d\n",
2048 IS_ERR_OR_NULL(dev
) ?
2049 "Invalid device" : dev_name(dev
),
2054 _put_opp_list_kref(opp_table
);
2056 /* Drop reference taken by _find_opp_table() */
2057 dev_pm_opp_put_opp_table(opp_table
);
2059 /* Drop reference taken while the OPP table was added */
2060 dev_pm_opp_put_opp_table(opp_table
);
/**
 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
 * @dev:	device pointer used to lookup OPP table.
 *
 * Free both OPPs created using static entries present in DT and the
 * dynamically added entries.
 */
void dev_pm_opp_remove_table(struct device *dev)
{
	_dev_pm_opp_find_and_remove_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);