/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/opp.h>
/*
 * Internal data structure organization with the OPP layer library is as
 * follows:
 * dev_opp_list (root)
 *	|- device 1 (represents voltage domain 1)
 *	|	|- opp 1 (availability, freq, voltage)
 *	|	...
 *	|- device 2 (represents the next voltage domain)
 *	...
 *	`- device m (represents mth voltage domain)
 * device 1, 2.. are represented by dev_opp structure while each opp
 * is represented by the opp structure.
 */
/**
 * struct opp - Generic OPP description structure
 * @node:	opp list node. The nodes are maintained throughout the lifetime
 *		of boot. It is expected only an optimal set of OPPs are
 *		added to the library by the SoC framework.
 *		RCU usage: opp list is traversed with RCU locks. node
 *		modification is possible realtime, hence the modifications
 *		are protected by the dev_opp_list_lock for integrity.
 *		IMPORTANT: the opp nodes should be maintained in increasing
 *		order.
 * @available:	true/false - marks if this OPP is available or not
 * @rate:	Frequency in hertz
 * @u_volt:	Nominal voltage in microvolts corresponding to this OPP
 * @dev_opp:	points back to the device_opp struct this opp belongs to
 *
 * This structure stores the OPP information for a given device.
 */
struct opp {
	struct list_head node;

	bool available;
	unsigned long rate;
	unsigned long u_volt;

	struct device_opp *dev_opp;
};
/**
 * struct device_opp - Device opp structure
 * @node:	list node - contains the devices with OPPs that
 *		have been registered. Nodes once added are not modified in this
 *		list.
 *		RCU usage: nodes are not modified in the list of device_opp,
 *		however addition is possible and is secured by dev_opp_list_lock
 * @dev:	device pointer
 * @head:	notifier head to notify the OPP availability changes.
 * @opp_list:	list of opps
 *
 * This is an internal data structure maintaining the link to opps attached to
 * a device. This structure is not meant to be shared with users as it is
 * meant for book keeping and is private to the OPP library.
 */
struct device_opp {
	struct list_head node;

	struct device *dev;
	struct srcu_notifier_head head;
	struct list_head opp_list;
};
/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opps it supports in
 * various states of availability.
 */
static LIST_HEAD(dev_opp_list);

/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);
/**
 * find_device_opp() - find device_opp struct using device pointer
 * @dev:	device pointer used to lookup device OPPs
 *
 * Search list of device OPPs for one containing matching device. Does an RCU
 * reader operation to grab the pointer needed.
 *
 * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * Locking: This function must be called under rcu_read_lock(). device_opp
 * is an RCU protected pointer. This means that device_opp is valid as long
 * as we are under RCU lock.
 */
static struct device_opp *find_device_opp(struct device *dev)
{
	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);

	if (unlikely(IS_ERR_OR_NULL(dev))) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
		if (tmp_dev_opp->dev == dev) {
			dev_opp = tmp_dev_opp;
			break;
		}
	}

	return dev_opp;
}
/**
 * opp_get_voltage() - Gets the voltage corresponding to an available opp
 * @opp:	opp for which the voltage has to be returned
 *
 * Return voltage in micro volt corresponding to the opp, else
 * return 0.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long opp_get_voltage(struct opp *opp)
{
	struct opp *tmp_opp;
	unsigned long v = 0;

	tmp_opp = rcu_dereference(opp);
	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		v = tmp_opp->u_volt;

	return v;
}
/**
 * opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp:	opp for which the frequency has to be returned
 *
 * Return frequency in hertz corresponding to the opp, else
 * return 0.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long opp_get_freq(struct opp *opp)
{
	struct opp *tmp_opp;
	unsigned long f = 0;

	tmp_opp = rcu_dereference(opp);
	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		f = tmp_opp->rate;

	return f;
}
/**
 * opp_get_opp_count() - Get number of opps available in the opp list
 * @dev:	device for which we do this operation
 *
 * This function returns the number of available opps if there are any,
 * else returns 0 if none or the corresponding error value.
 *
 * Locking: This function must be called under rcu_read_lock(). This function
 * internally references two RCU protected structures: device_opp and opp which
 * are safe as long as we are under a common RCU locked section.
 */
int opp_get_opp_count(struct device *dev)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp;
	int count = 0;

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return r;
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available)
			count++;
	}

	return count;
}
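/*
 * Usage sketch (illustrative only): a consumer could size a private table
 * from the current number of available OPPs. Note that the count is only
 * stable for as long as the RCU read section is held; "example_count_opps"
 * is a hypothetical helper.
 *
 *	static int example_count_opps(struct device *dev)
 *	{
 *		int count;
 *
 *		rcu_read_lock();
 *		count = opp_get_opp_count(dev);
 *		rcu_read_unlock();
 *
 *		return count;
 *	}
 */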
/**
 * opp_find_freq_exact() - search for an exact frequency
 * @dev:	device for which we do this operation
 * @freq:	frequency to search for
 * @available:	true/false - match for available opp
 *
 * Searches for exact match in the opp list and returns pointer to the matching
 * opp if found, else returns ERR_PTR in case of error and should be handled
 * using IS_ERR.
 *
 * Note: available is a modifier for the search. if available=true, then the
 * match is for exact matching frequency and is available in the stored OPP
 * table. if false, the match is for exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
				bool available)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available == available &&
				temp_opp->rate == freq) {
			opp = temp_opp;
			break;
		}
	}

	return opp;
}
/**
 * opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Returns matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;
			break;
		}
	}

	return opp;
}
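/*
 * Usage sketch (illustrative only): opp_find_freq_ceil() can be used to walk
 * all available OPPs in ascending order by restarting the search just above
 * the previous match. "example_walk_opps" is a hypothetical helper.
 *
 *	static void example_walk_opps(struct device *dev)
 *	{
 *		struct opp *opp;
 *		unsigned long freq = 0;
 *
 *		rcu_read_lock();
 *		while (!IS_ERR(opp = opp_find_freq_ceil(dev, &freq))) {
 *			pr_info("OPP: %lu Hz, %lu uV\n",
 *				freq, opp_get_voltage(opp));
 *			freq++;
 *		}
 *		rcu_read_unlock();
 *	}
 */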
/**
 * opp_find_freq_floor() - Search for a rounded floor freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Returns matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}
	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
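/*
 * Usage sketch (illustrative only): searching down from ULONG_MAX with
 * opp_find_freq_floor() yields the highest available OPP. The helper name
 * is hypothetical.
 *
 *	static unsigned long example_max_available_freq(struct device *dev)
 *	{
 *		struct opp *opp;
 *		unsigned long freq = ULONG_MAX;
 *
 *		rcu_read_lock();
 *		opp = opp_find_freq_floor(dev, &freq);
 *		rcu_read_unlock();
 *
 *		return IS_ERR(opp) ? 0 : freq;
 *	}
 */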
/**
 * opp_add() - Add an OPP definition for a device
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * opp_enable/disable functions.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	struct device_opp *dev_opp = NULL;
	struct opp *opp, *new_opp;
	struct list_head *head;

	/* allocate new OPP node */
	new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
	if (!new_opp) {
		dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
		return -ENOMEM;
	}

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	/* Check for existing list for 'dev' */
	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		/*
		 * Allocate a new device OPP table. In the infrequent case
		 * where a new device is needed to be added, we pay this
		 * penalty.
		 */
		dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
		if (!dev_opp) {
			mutex_unlock(&dev_opp_list_lock);
			kfree(new_opp);
			dev_warn(dev,
				"%s: Unable to create device OPP structure\n",
				__func__);
			return -ENOMEM;
		}

		dev_opp->dev = dev;
		srcu_init_notifier_head(&dev_opp->head);
		INIT_LIST_HEAD(&dev_opp->opp_list);

		/* Secure the device list modification */
		list_add_rcu(&dev_opp->node, &dev_opp_list);
	}

	/* populate the opp table */
	new_opp->dev_opp = dev_opp;
	new_opp->rate = freq;
	new_opp->u_volt = u_volt;
	new_opp->available = true;

	/* Insert new OPP in order of increasing frequency */
	head = &dev_opp->opp_list;
	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
		if (new_opp->rate < opp->rate)
			break;
		else
			head = &opp->node;
	}

	list_add_rcu(&new_opp->node, head);
	mutex_unlock(&dev_opp_list_lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
	return 0;
}
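/*
 * Usage sketch (illustrative only): SoC framework code would typically
 * register its OPP table once at init time. The helper name and the
 * frequency/voltage pairs below are hypothetical.
 *
 *	static int __init example_register_opps(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = opp_add(dev, 300000000, 1025000);
 *		if (!ret)
 *			ret = opp_add(dev, 600000000, 1200000);
 *		if (!ret)
 *			ret = opp_add(dev, 800000000, 1313000);
 *
 *		return ret;
 *	}
 */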
/**
 * opp_set_availability() - helper to set the availability of an opp
 * @dev:		device for which we do this operation
 * @freq:		OPP frequency to modify availability
 * @availability_req:	availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int opp_set_availability(struct device *dev, unsigned long freq,
		bool availability_req)
{
	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
	struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* keep the node allocated */
	new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
	if (!new_opp) {
		dev_warn(dev, "%s: Unable to create OPP\n", __func__);
		return -ENOMEM;
	}

	mutex_lock(&dev_opp_list_lock);

	/* Find the device_opp */
	list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
		if (dev == tmp_dev_opp->dev) {
			dev_opp = tmp_dev_opp;
			break;
		}
	}
	if (IS_ERR(dev_opp)) {
		r = PTR_ERR(dev_opp);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&dev_opp_list_lock);
	synchronize_rcu();

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE,
					 new_opp);
	else
		srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
					 new_opp);

	/* clean up old opp */
	kfree(opp);

	return 0;

unlock:
	mutex_unlock(&dev_opp_list_lock);
	kfree(new_opp);
	return r;
}
/**
 * opp_enable() - Enable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after being temporarily made unavailable with opp_disable.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
int opp_enable(struct device *dev, unsigned long freq)
{
	return opp_set_availability(dev, freq, true);
}
/**
 * opp_disable() - Disable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to opp_enable).
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
int opp_disable(struct device *dev, unsigned long freq)
{
	return opp_set_availability(dev, freq, false);
}
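/*
 * Usage sketch (illustrative only): a platform could temporarily pull a
 * high OPP out of circulation on a thermal alert and restore it later.
 * The frequency value and helper name are hypothetical.
 *
 *	static int example_thermal_throttle(struct device *dev, bool hot)
 *	{
 *		if (hot)
 *			return opp_disable(dev, 800000000);
 *
 *		return opp_enable(dev, 800000000);
 *	}
 */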
#ifdef CONFIG_CPU_FREQ
/**
 * opp_init_cpufreq_table() - create a cpufreq table for a device
 * @dev:	device for which we do this operation
 * @table:	Cpufreq table returned back to caller
 *
 * Generate a cpufreq table for a provided device - this assumes that the
 * opp list is already initialized and ready for usage.
 *
 * This function allocates required memory for the cpufreq table. It is
 * expected that the caller does the required maintenance such as freeing
 * the table as required.
 *
 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
 * if no memory available for the operation (table is not populated), returns 0
 * if successful and table is populated.
 *
 * WARNING: It is important for the callers to ensure refreshing their copy of
 * the table if any of the mentioned functions have been invoked in the interim.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * To simplify the logic, we pretend we are updater and hold relevant mutex here.
 * Callers should ensure that this function is *NOT* called under RCU protection
 * or in contexts where mutex locking cannot be used.
 */
int opp_init_cpufreq_table(struct device *dev,
			    struct cpufreq_frequency_table **table)
{
	struct device_opp *dev_opp;
	struct opp *opp;
	struct cpufreq_frequency_table *freq_table;
	int i = 0;

	/* Pretend as if I am an updater */
	mutex_lock(&dev_opp_list_lock);

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		mutex_unlock(&dev_opp_list_lock);
		dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		return r;
	}

	freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
			     (opp_get_opp_count(dev) + 1), GFP_KERNEL);
	if (!freq_table) {
		mutex_unlock(&dev_opp_list_lock);
		dev_warn(dev, "%s: Unable to allocate frequency table\n",
			__func__);
		return -ENOMEM;
	}

	list_for_each_entry(opp, &dev_opp->opp_list, node) {
		if (opp->available) {
			freq_table[i].index = i;
			freq_table[i].frequency = opp->rate / 1000;
			i++;
		}
	}
	mutex_unlock(&dev_opp_list_lock);

	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;

	*table = &freq_table[0];

	return 0;
}
/**
 * opp_free_cpufreq_table() - free the cpufreq table
 * @dev:	device for which we do this operation
 * @table:	table to free
 *
 * Free up the table allocated by opp_init_cpufreq_table
 */
void opp_free_cpufreq_table(struct device *dev,
				struct cpufreq_frequency_table **table)
{
	if (!*table)
		return;

	kfree(*table);
	*table = NULL;
}
#endif		/* CONFIG_CPU_FREQ */
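/*
 * Usage sketch (illustrative only): a cpufreq driver could build its
 * frequency table from the registered OPPs in its ->init() callback and
 * release it in ->exit(). The "example_*" names are hypothetical.
 *
 *	static struct cpufreq_frequency_table *example_freq_table;
 *
 *	static int example_cpufreq_init(struct device *cpu_dev)
 *	{
 *		return opp_init_cpufreq_table(cpu_dev, &example_freq_table);
 *	}
 *
 *	static void example_cpufreq_exit(struct device *cpu_dev)
 *	{
 *		opp_free_cpufreq_table(cpu_dev, &example_freq_table);
 *	}
 */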
/**
 * opp_get_notifier() - find notifier_head of the device with opp
 * @dev:	device pointer used to lookup device OPPs.
 */
struct srcu_notifier_head *opp_get_notifier(struct device *dev)
{
	struct device_opp *dev_opp = find_device_opp(dev);

	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp); /* matching type */

	return &dev_opp->head;
}
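/*
 * Usage sketch (illustrative only): a consumer interested in OPP
 * availability changes can register against the per-device notifier head.
 * "example_opp_notify" and its registration helper are hypothetical.
 *
 *	static int example_opp_notify(struct notifier_block *nb,
 *				      unsigned long event, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_opp_notify,
 *	};
 *
 *	static int example_register_notifier(struct device *dev)
 *	{
 *		struct srcu_notifier_head *nh = opp_get_notifier(dev);
 *
 *		if (IS_ERR(nh))
 *			return PTR_ERR(nh);
 *
 *		return srcu_notifier_chain_register(nh, &example_nb);
 *	}
 */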