/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/export.h>

#include "opp.h"
/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opps it supports in
 * various states of availability.
 */
static LIST_HEAD(dev_opp_list);

/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(dev_opp_list_lock);
#define opp_rcu_lockdep_assert()					\
do {									\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
				!lockdep_is_held(&dev_opp_list_lock),	\
			   "Missing rcu_read_lock() or "		\
			   "dev_opp_list_lock protection");		\
} while (0)
static struct device_list_opp *_find_list_dev(const struct device *dev,
					      struct device_opp *dev_opp)
{
	struct device_list_opp *list_dev;

	list_for_each_entry(list_dev, &dev_opp->dev_list, node)
		if (list_dev->dev == dev)
			return list_dev;

	return NULL;
}
static struct device_opp *_managed_opp(const struct device_node *np)
{
	struct device_opp *dev_opp;

	list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
		if (dev_opp->np == np) {
			/*
			 * Multiple devices can point to the same OPP table and
			 * so will have the same node-pointer, np.
			 *
			 * But the OPPs will be considered as shared only if the
			 * OPP table contains an "opp-shared" property.
			 */
			return dev_opp->shared_opp ? dev_opp : NULL;
		}
	}

	return NULL;
}
/**
 * _find_device_opp() - find device_opp struct using device pointer
 * @dev:	device pointer used to lookup device OPPs
 *
 * Search list of device OPPs for one containing matching device. Does an RCU
 * reader operation to grab the pointer needed.
 *
 * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * Locking: For readers, this function must be called under rcu_read_lock().
 * device_opp is an RCU protected pointer, which means that device_opp is valid
 * as long as we are under RCU lock.
 *
 * For Writers, this function must be called with dev_opp_list_lock held.
 */
struct device_opp *_find_device_opp(struct device *dev)
{
	struct device_opp *dev_opp;

	opp_rcu_lockdep_assert();

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
		if (_find_list_dev(dev, dev_opp))
			return dev_opp;

	return ERR_PTR(-ENODEV);
}
/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
 * @opp:	opp for which the voltage has to be returned
 *
 * Return: voltage in micro volts corresponding to the opp, else
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;
	unsigned long v = 0;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (IS_ERR_OR_NULL(tmp_opp))
		pr_err("%s: Invalid parameters\n", __func__);
	else
		v = tmp_opp->u_volt;

	return v;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
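
/*
 * Illustrative usage sketch (not part of this file's API): a consumer should
 * look an OPP up and read its voltage/frequency inside a single RCU read-side
 * section, since the opp pointer is only guaranteed valid under the lock.
 * 'dev' and the 800 MHz target below are assumptions of the example.
 *
 *	struct dev_pm_opp *opp;
 *	unsigned long freq = 800000000, volt;
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *	if (!IS_ERR(opp)) {
 *		volt = dev_pm_opp_get_voltage(opp);
 *		freq = dev_pm_opp_get_freq(opp);
 *	}
 *	rcu_read_unlock();
 */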
/**
 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp:	opp for which the frequency has to be returned
 *
 * Return: frequency in hertz corresponding to the opp, else
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;
	unsigned long f = 0;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		f = tmp_opp->rate;

	return f;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
/**
 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 * @opp:	opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short durations to finish high throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return false;
	}

	return tmp_opp->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
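
/*
 * Illustrative sketch ('dev' and 'freq' are assumed caller-provided values):
 * a governor that wants to avoid turbo OPPs for sustained load can check the
 * flag while still inside the RCU section that produced the opp pointer.
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
 *	if (!IS_ERR(opp) && dev_pm_opp_is_turbo(opp))
 *		dev_info(dev, "%lu Hz is a turbo OPP\n", freq);
 *	rcu_read_unlock();
 */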
/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
	struct device_opp *dev_opp;
	unsigned long clock_latency_ns;

	rcu_read_lock();

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		clock_latency_ns = 0;
	else
		clock_latency_ns = dev_opp->clock_latency_ns_max;

	rcu_read_unlock();
	return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
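
/*
 * Illustrative sketch: the caller does not need any locking here, the helper
 * takes rcu_read_lock() itself. A cpufreq driver would typically feed the
 * value into its transition latency ('dev' is an assumed device pointer).
 *
 *	unsigned long latency_ns = dev_pm_opp_get_max_clock_latency(dev);
 */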
/**
 * dev_pm_opp_get_suspend_opp() - Get suspend opp
 * @dev:	device for which we do this operation
 *
 * Return: This function returns a pointer to the suspend opp if it is
 * defined and available, otherwise it returns NULL.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
{
	struct device_opp *dev_opp;

	opp_rcu_lockdep_assert();

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
	    !dev_opp->suspend_opp->available)
		return NULL;

	return dev_opp->suspend_opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
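
/*
 * Illustrative sketch, assuming 'dev' comes from the caller: fetch the
 * frequency to program before entering suspend, all under one RCU section.
 *
 *	struct dev_pm_opp *suspend_opp;
 *	unsigned long suspend_freq = 0;
 *
 *	rcu_read_lock();
 *	suspend_opp = dev_pm_opp_get_suspend_opp(dev);
 *	if (suspend_opp)
 *		suspend_freq = dev_pm_opp_get_freq(suspend_opp);
 *	rcu_read_unlock();
 */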
/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * else returns 0 if none or the corresponding error value.
 *
 * Locking: This function takes rcu_read_lock().
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp;
	int count = 0;

	rcu_read_lock();

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		count = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n",
			__func__, count);
		goto out_unlock;
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available)
			count++;
	}

out_unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
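
/*
 * Illustrative sketch: the count is commonly used to size a frequency table.
 * 'dev' and 'table' are assumptions of the example; errors are negative.
 *
 *	int num = dev_pm_opp_get_opp_count(dev);
 *
 *	if (num <= 0)
 *		return num ? num : -ENODEV;
 *	table = kcalloc(num + 1, sizeof(*table), GFP_KERNEL);
 */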
/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev:		device for which we do this operation
 * @freq:		frequency to search for
 * @available:		true/false - match for available opp
 *
 * Return: Searches for exact match in the opp list and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. If available=true, then the
 * match is for an exact matching frequency which is available in the stored
 * OPP table. If false, the match is for an exact frequency which is not
 * available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);

		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available == available &&
				temp_opp->rate == freq) {
			opp = temp_opp;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
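
/*
 * Illustrative sketch ('dev' and 'freq' assumed): exact lookup is the usual
 * first step before toggling an OPP, since it can also match entries that are
 * currently unavailable (available=false).
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
 *	if (IS_ERR(opp))
 *		dev_dbg(dev, "no available OPP at %lu Hz\n", freq);
 *	rcu_read_unlock();
 */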
/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
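
/*
 * Illustrative sketch: walking all available OPPs in ascending order by
 * repeatedly asking for the ceiling of (previous rate + 1). 'dev' is an
 * assumed device pointer.
 *
 *	unsigned long rate = 0;
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(dev, &rate))) {
 *		pr_info("OPP: %lu Hz, %lu uV\n", rate,
 *			dev_pm_opp_get_voltage(opp));
 *		rate++;
 *	}
 *	rcu_read_unlock();
 */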
/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}
	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
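
/*
 * Illustrative sketch: the floor variant is handy for picking the highest OPP
 * that does not exceed a cap. 'dev', 'max_freq' and 'volt' are assumptions of
 * the example.
 *
 *	unsigned long target = max_freq;
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_floor(dev, &target);
 *	if (!IS_ERR(opp))
 *		volt = dev_pm_opp_get_voltage(opp);
 *	rcu_read_unlock();
 */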
/* List-dev Helpers */
static void _kfree_list_dev_rcu(struct rcu_head *head)
{
	struct device_list_opp *list_dev;

	list_dev = container_of(head, struct device_list_opp, rcu_head);
	kfree_rcu(list_dev, rcu_head);
}
static void _remove_list_dev(struct device_list_opp *list_dev,
			     struct device_opp *dev_opp)
{
	list_del(&list_dev->node);
	call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
		  _kfree_list_dev_rcu);
}
struct device_list_opp *_add_list_dev(const struct device *dev,
				      struct device_opp *dev_opp)
{
	struct device_list_opp *list_dev;

	list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
	if (!list_dev)
		return NULL;

	/* Initialize list-dev */
	list_dev->dev = dev;
	list_add_rcu(&list_dev->node, &dev_opp->dev_list);

	return list_dev;
}
/**
 * _add_device_opp() - Find device OPP table or allocate a new one
 * @dev:	device for which we do this operation
 *
 * It tries to find an existing table first; if it couldn't find one, it
 * allocates a new OPP table and returns that.
 *
 * Return: valid device_opp pointer if success, else NULL.
 */
static struct device_opp *_add_device_opp(struct device *dev)
{
	struct device_opp *dev_opp;
	struct device_list_opp *list_dev;

	/* Check for existing list for 'dev' first */
	dev_opp = _find_device_opp(dev);
	if (!IS_ERR(dev_opp))
		return dev_opp;

	/*
	 * Allocate a new device OPP table. In the infrequent case where a new
	 * device needs to be added, we pay this penalty.
	 */
	dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
	if (!dev_opp)
		return NULL;

	INIT_LIST_HEAD(&dev_opp->dev_list);

	list_dev = _add_list_dev(dev, dev_opp);
	if (!list_dev) {
		kfree(dev_opp);
		return NULL;
	}

	srcu_init_notifier_head(&dev_opp->srcu_head);
	INIT_LIST_HEAD(&dev_opp->opp_list);

	/* Secure the device list modification */
	list_add_rcu(&dev_opp->node, &dev_opp_list);
	return dev_opp;
}
/**
 * _kfree_device_rcu() - Free device_opp RCU handler
 * @head:	RCU head
 */
static void _kfree_device_rcu(struct rcu_head *head)
{
	struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);

	kfree_rcu(device_opp, rcu_head);
}
/**
 * _remove_device_opp() - Removes a device OPP table
 * @dev_opp: device OPP table to be removed.
 *
 * Removes/frees device OPP table if it doesn't contain any OPPs.
 */
static void _remove_device_opp(struct device_opp *dev_opp)
{
	struct device_list_opp *list_dev;

	if (!list_empty(&dev_opp->opp_list))
		return;

	list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
				    node);

	_remove_list_dev(list_dev, dev_opp);

	/* dev_list must be empty now */
	WARN_ON(!list_empty(&dev_opp->dev_list));

	list_del_rcu(&dev_opp->node);
	call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
		  _kfree_device_rcu);
}
/**
 * _kfree_opp_rcu() - Free OPP RCU handler
 * @head:	RCU head
 */
static void _kfree_opp_rcu(struct rcu_head *head)
{
	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);

	kfree_rcu(opp, rcu_head);
}
/**
 * _opp_remove() - Remove an OPP from a table definition
 * @dev_opp:	points back to the device_opp struct this opp belongs to
 * @opp:	pointer to the OPP to remove
 * @notify:	OPP_EVENT_REMOVE notification should be sent or not
 *
 * This function removes an opp definition from the opp list.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * It is assumed that the caller holds required mutex for an RCU updater
 * strategy.
 */
static void _opp_remove(struct device_opp *dev_opp,
			struct dev_pm_opp *opp, bool notify)
{
	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	if (notify)
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
	list_del_rcu(&opp->node);
	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	_remove_device_opp(dev_opp);
}
/**
 * dev_pm_opp_remove() - Remove an OPP from OPP list
 * @dev:	device for which we do this operation
 * @freq:	OPP to remove with matching 'freq'
 *
 * This function removes an opp from the opp list.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct device_opp *dev_opp;
	bool found = false;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		goto unlock;

	list_for_each_entry(opp, &dev_opp->opp_list, node) {
		if (opp->rate == freq) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
		goto unlock;
	}

	_opp_remove(dev_opp, opp, true);
unlock:
	mutex_unlock(&dev_opp_list_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
static struct dev_pm_opp *_allocate_opp(struct device *dev,
					struct device_opp **dev_opp)
{
	struct dev_pm_opp *opp;

	/* allocate new OPP node */
	opp = kzalloc(sizeof(*opp), GFP_KERNEL);
	if (!opp)
		return NULL;

	INIT_LIST_HEAD(&opp->node);

	*dev_opp = _add_device_opp(dev);
	if (!*dev_opp) {
		kfree(opp);
		return NULL;
	}

	return opp;
}
static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
		    struct device_opp *dev_opp)
{
	struct dev_pm_opp *opp;
	struct list_head *head = &dev_opp->opp_list;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &dev_opp->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an
	 * infinite loop.
	 */
	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
		if (new_opp->rate > opp->rate) {
			head = &opp->node;
			continue;
		}

		if (new_opp->rate < opp->rate)
			break;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->u_volt, opp->available,
			 new_opp->rate, new_opp->u_volt, new_opp->available);

		return opp->available && new_opp->u_volt == opp->u_volt ?
			0 : -EEXIST;
	}

	new_opp->dev_opp = dev_opp;
	list_add_rcu(&new_opp->node, head);

	return 0;
}
/**
 * _opp_add_v1() - Allocate an OPP based on v1 bindings.
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 * @dynamic:	Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
 * and freed by dev_pm_opp_of_remove_table.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
		       bool dynamic)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp;
	int ret;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	new_opp = _allocate_opp(dev, &dev_opp);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* populate the opp table */
	new_opp->rate = freq;
	new_opp->u_volt = u_volt;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, dev_opp);
	if (ret)
		goto free_opp;

	mutex_unlock(&dev_opp_list_lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_remove(dev_opp, new_opp, false);
unlock:
	mutex_unlock(&dev_opp_list_lock);
	return ret;
}
/* TODO: Support multiple regulators */
static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
{
	u32 microvolt[3] = {0};
	u32 val;
	int count, ret;

	/* Missing property isn't a problem, but an invalid entry is */
	if (!of_find_property(opp->np, "opp-microvolt", NULL))
		return 0;

	count = of_property_count_u32_elems(opp->np, "opp-microvolt");
	if (count < 0) {
		dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n",
			__func__, count);
		return count;
	}

	/* There can be one or three elements here */
	if (count != 1 && count != 3) {
		dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
			__func__, count);
		return -EINVAL;
	}

	ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt,
					 count);
	if (ret) {
		dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__,
			ret);
		return -EINVAL;
	}

	opp->u_volt = microvolt[0];
	opp->u_volt_min = microvolt[1];
	opp->u_volt_max = microvolt[2];

	if (!of_property_read_u32(opp->np, "opp-microamp", &val))
		opp->u_amp = val;

	return 0;
}
/**
 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
 * @dev:	device for which we do this operation
 * @np:		device node
 *
 * This function adds an opp definition to the opp list and returns status. The
 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
 * removed by dev_pm_opp_remove.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -EINVAL	Failed parsing the OPP node
 */
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp;
	u64 rate;
	u32 val;
	int ret;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	new_opp = _allocate_opp(dev, &dev_opp);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = of_property_read_u64(np, "opp-hz", &rate);
	if (ret < 0) {
		dev_err(dev, "%s: opp-hz not found\n", __func__);
		goto free_opp;
	}

	/*
	 * Rate is defined as an unsigned long in clk API, and so casting
	 * explicitly to its type. Must be fixed once rate is 64 bit
	 * guaranteed in clk API.
	 */
	new_opp->rate = (unsigned long)rate;
	new_opp->turbo = of_property_read_bool(np, "turbo-mode");

	new_opp->np = np;
	new_opp->dynamic = false;
	new_opp->available = true;

	if (!of_property_read_u32(np, "clock-latency-ns", &val))
		new_opp->clock_latency_ns = val;

	ret = opp_parse_supplies(new_opp, dev);
	if (ret)
		goto free_opp;

	ret = _opp_add(dev, new_opp, dev_opp);
	if (ret)
		goto free_opp;

	/* OPP to select on device suspend */
	if (of_property_read_bool(np, "opp-suspend")) {
		if (dev_opp->suspend_opp)
			dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
				 __func__, dev_opp->suspend_opp->rate,
				 new_opp->rate);
		else
			dev_opp->suspend_opp = new_opp;
	}

	if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
		dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;

	mutex_unlock(&dev_opp_list_lock);

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
		 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
		 new_opp->u_volt_min, new_opp->u_volt_max,
		 new_opp->clock_latency_ns);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_remove(dev_opp, new_opp, false);
unlock:
	mutex_unlock(&dev_opp_list_lock);
	return ret;
}
/**
 * dev_pm_opp_add() - Add an OPP table from a table definition
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	return _opp_add_v1(dev, freq, u_volt, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
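
/*
 * Illustrative sketch: a platform driver without DT data can register its
 * OPPs directly from probe(). The frequency/voltage pairs below are made-up
 * example values; 'dev' is the driver's device pointer.
 *
 *	ret = dev_pm_opp_add(dev, 500000000, 975000);
 *	if (!ret)
 *		ret = dev_pm_opp_add(dev, 1000000000, 1075000);
 *	if (ret)
 *		dev_err(dev, "failed to add OPPs: %d\n", ret);
 */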
/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev:		device for which we do this operation
 * @freq:		OPP frequency to modify availability
 * @availability_req:	availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* keep the node allocated */
	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
	if (!new_opp)
		return -ENOMEM;

	mutex_lock(&dev_opp_list_lock);

	/* Find the device_opp */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		r = PTR_ERR(dev_opp);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&dev_opp_list_lock);
	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
					 new_opp);
	else
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
					 new_opp);

	return 0;

unlock:
	mutex_unlock(&dev_opp_list_lock);
	kfree(new_opp);
	return r;
}
/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after it has been temporarily made unavailable with
 * dev_pm_opp_disable.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to dev_pm_opp_enable).
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
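
/*
 * Illustrative sketch: enable/disable pair up to gate an OPP temporarily,
 * e.g. under a thermal constraint. 'dev', 'overheating' and the 1 GHz value
 * are assumptions of the example.
 *
 *	if (overheating)
 *		ret = dev_pm_opp_disable(dev, 1000000000);
 *	else
 *		ret = dev_pm_opp_enable(dev, 1000000000);
 */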
/**
 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
 * @dev:	device pointer used to lookup device OPPs.
 *
 * Return: pointer to notifier head if found, otherwise -ENODEV or
 * -EINVAL based on type of error cast as pointer. The value must be checked
 * with IS_ERR to determine whether it is a valid pointer or an error result.
 *
 * Locking: This function must be called under rcu_read_lock(). dev_opp is an
 * RCU protected pointer. The reason for the same is that the opp pointer which
 * is returned will remain valid for use with opp_get_{voltage, freq} only
 * while under the locked area. The pointer returned must be used prior to
 * unlocking with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
{
	struct device_opp *dev_opp = _find_device_opp(dev);

	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp); /* matching type */

	return &dev_opp->srcu_head;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
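
/*
 * Illustrative sketch: a consumer interested in OPP_EVENT_* notifications
 * registers its notifier block on the returned head while still under RCU.
 * 'dev' and 'my_nb' (a caller-provided struct notifier_block) are assumed.
 *
 *	struct srcu_notifier_head *nh;
 *	int ret = 0;
 *
 *	rcu_read_lock();
 *	nh = dev_pm_opp_get_notifier(dev);
 *	if (IS_ERR(nh))
 *		ret = PTR_ERR(nh);
 *	else
 *		ret = srcu_notifier_chain_register(nh, &my_nb);
 *	rcu_read_unlock();
 */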
/**
 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
 *				  entries
 * @dev:	device pointer used to lookup device OPPs.
 *
 * Free OPPs created using static entries present in DT.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_of_remove_table(struct device *dev)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *opp, *tmp;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	/* Check for existing list for 'dev' */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int error = PTR_ERR(dev_opp);

		if (error != -ENODEV)
			WARN(1, "%s: dev_opp: %d\n",
			     IS_ERR_OR_NULL(dev) ?
					"Invalid device" : dev_name(dev),
			     error);
		goto unlock;
	}

	/* Find if dev_opp manages a single device */
	if (list_is_singular(&dev_opp->dev_list)) {
		/* Free static OPPs */
		list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
			if (!opp->dynamic)
				_opp_remove(dev_opp, opp, true);
		}
	} else {
		_remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
	}

unlock:
	mutex_unlock(&dev_opp_list_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
/* Returns opp descriptor node for a device, caller must do of_node_put() */
struct device_node *_of_get_opp_desc_node(struct device *dev)
{
	/*
	 * TODO: Support for multiple OPP tables.
	 *
	 * There should be only ONE phandle present in "operating-points-v2"
	 * property.
	 */

	return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
}
/* Initializes OPP tables based on new bindings */
static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
{
	struct device_node *np;
	struct device_opp *dev_opp;
	int ret = 0, count = 0;

	mutex_lock(&dev_opp_list_lock);

	dev_opp = _managed_opp(opp_np);
	if (dev_opp) {
		/* OPPs are already managed */
		if (!_add_list_dev(dev, dev_opp))
			ret = -ENOMEM;
		mutex_unlock(&dev_opp_list_lock);
		return ret;
	}
	mutex_unlock(&dev_opp_list_lock);

	/* We have opp-list node now, iterate over it and add OPPs */
	for_each_available_child_of_node(opp_np, np) {
		count++;

		ret = _opp_add_static_v2(dev, np);
		if (ret) {
			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
				ret);
			goto free_table;
		}
	}

	/* There should be one or more OPPs defined */
	if (WARN_ON(!count))
		return -ENOENT;

	mutex_lock(&dev_opp_list_lock);

	dev_opp = _find_device_opp(dev);
	if (WARN_ON(IS_ERR(dev_opp))) {
		ret = PTR_ERR(dev_opp);
		mutex_unlock(&dev_opp_list_lock);
		goto free_table;
	}

	dev_opp->np = opp_np;
	dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");

	mutex_unlock(&dev_opp_list_lock);

	return 0;

free_table:
	dev_pm_opp_of_remove_table(dev);

	return ret;
}
/* Initializes OPP tables based on old-deprecated bindings */
static int _of_add_opp_table_v1(struct device *dev)
{
	const struct property *prop;
	const __be32 *val;
	int nr;

	prop = of_find_property(dev->of_node, "operating-points", NULL);
	if (!prop)
		return -ENODEV;
	if (!prop->value)
		return -ENODATA;

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz vol-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2) {
		dev_err(dev, "%s: Invalid OPP list\n", __func__);
		return -EINVAL;
	}

	val = prop->value;
	while (nr) {
		unsigned long freq = be32_to_cpup(val++) * 1000;
		unsigned long volt = be32_to_cpup(val++);

		if (_opp_add_v1(dev, freq, volt, false))
			dev_warn(dev, "%s: Failed to add OPP %ld\n",
				 __func__, freq);
		nr -= 2;
	}

	return 0;
}
/**
 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
 * @dev:	device pointer used to lookup device OPPs.
 *
 * Register the initial OPP table with the OPP library for given device.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when 'operating-points' property is not found or is invalid data
 *		in device node.
 * -ENODATA	when empty 'operating-points' property is found
 * -EINVAL	when invalid entries are found in opp-v2 table
 */
int dev_pm_opp_of_add_table(struct device *dev)
{
	struct device_node *opp_np;
	int ret;

	/*
	 * OPPs have two versions of bindings now. The older one is deprecated,
	 * try for the new binding first.
	 */
	opp_np = _of_get_opp_desc_node(dev);
	if (!opp_np) {
		/*
		 * Try old-deprecated bindings for backward compatibility with
		 * older dtbs.
		 */
		return _of_add_opp_table_v1(dev);
	}

	ret = _of_add_opp_table_v2(dev, opp_np);
	of_node_put(opp_np);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
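
/*
 * Illustrative sketch: a typical driver probe() builds its OPP table from DT
 * and tears it down again on the error/remove path. Only helpers exported by
 * this file are used; the surrounding probe/remove functions are assumed.
 *
 *	ret = dev_pm_opp_of_add_table(dev);
 *	if (ret)
 *		dev_err(dev, "failed to init OPP table: %d\n", ret);
 *
 * and later, in remove() or on failure:
 *
 *	dev_pm_opp_of_remove_table(dev);
 */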