PM / sleep: Asynchronous threads for suspend_noirq
[linux/fpc-iii.git] / drivers / base / power / opp.c
blobfa41874184401cd46c155526102fb72aed99e329
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
14 #include <linux/kernel.h>
15 #include <linux/errno.h>
16 #include <linux/err.h>
17 #include <linux/init.h>
18 #include <linux/slab.h>
19 #include <linux/cpufreq.h>
20 #include <linux/device.h>
21 #include <linux/list.h>
22 #include <linux/rculist.h>
23 #include <linux/rcupdate.h>
24 #include <linux/pm_opp.h>
25 #include <linux/of.h>
26 #include <linux/export.h>
/*
 * Internal data structure organization with the OPP layer library is as
 * follows:
 * dev_opp_list (root)
 *	|- device 1 (represents voltage domain 1)
 *	|	|- opp 1 (availability, freq, voltage)
 *	|	|- opp 2 ..
 *	...	...
 *	|	`- opp n ..
 *	|- device 2 (represents the next voltage domain)
 *	...
 *	`- device m (represents mth voltage domain)
 * device 1, 2.. are represented by dev_opp structure while each opp
 * is represented by the opp structure.
 */
44 /**
45 * struct dev_pm_opp - Generic OPP description structure
46 * @node: opp list node. The nodes are maintained throughout the lifetime
47 * of boot. It is expected only an optimal set of OPPs are
48 * added to the library by the SoC framework.
49 * RCU usage: opp list is traversed with RCU locks. node
50 * modification is possible realtime, hence the modifications
51 * are protected by the dev_opp_list_lock for integrity.
52 * IMPORTANT: the opp nodes should be maintained in increasing
53 * order.
54 * @available: true/false - marks if this OPP as available or not
55 * @rate: Frequency in hertz
56 * @u_volt: Nominal voltage in microvolts corresponding to this OPP
57 * @dev_opp: points back to the device_opp struct this opp belongs to
58 * @head: RCU callback head used for deferred freeing
60 * This structure stores the OPP information for a given device.
62 struct dev_pm_opp {
63 struct list_head node;
65 bool available;
66 unsigned long rate;
67 unsigned long u_volt;
69 struct device_opp *dev_opp;
70 struct rcu_head head;
73 /**
74 * struct device_opp - Device opp structure
75 * @node: list node - contains the devices with OPPs that
76 * have been registered. Nodes once added are not modified in this
77 * list.
78 * RCU usage: nodes are not modified in the list of device_opp,
79 * however addition is possible and is secured by dev_opp_list_lock
80 * @dev: device pointer
81 * @head: notifier head to notify the OPP availability changes.
82 * @opp_list: list of opps
84 * This is an internal data structure maintaining the link to opps attached to
85 * a device. This structure is not meant to be shared to users as it is
86 * meant for book keeping and private to OPP library
88 struct device_opp {
89 struct list_head node;
91 struct device *dev;
92 struct srcu_notifier_head head;
93 struct list_head opp_list;
/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opp it supports in
 * various states of availability.
 */
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);
106 * find_device_opp() - find device_opp struct using device pointer
107 * @dev: device pointer used to lookup device OPPs
109 * Search list of device OPPs for one containing matching device. Does a RCU
110 * reader operation to grab the pointer needed.
112 * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or
113 * -EINVAL based on type of error.
115 * Locking: This function must be called under rcu_read_lock(). device_opp
116 * is a RCU protected pointer. This means that device_opp is valid as long
117 * as we are under RCU lock.
119 static struct device_opp *find_device_opp(struct device *dev)
121 struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
123 if (unlikely(IS_ERR_OR_NULL(dev))) {
124 pr_err("%s: Invalid parameters\n", __func__);
125 return ERR_PTR(-EINVAL);
128 list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
129 if (tmp_dev_opp->dev == dev) {
130 dev_opp = tmp_dev_opp;
131 break;
135 return dev_opp;
139 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
140 * @opp: opp for which voltage has to be returned for
142 * Return voltage in micro volt corresponding to the opp, else
143 * return 0
145 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
146 * protected pointer. This means that opp which could have been fetched by
147 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
148 * under RCU lock. The pointer returned by the opp_find_freq family must be
149 * used in the same section as the usage of this function with the pointer
150 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
151 * pointer.
153 unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
155 struct dev_pm_opp *tmp_opp;
156 unsigned long v = 0;
158 tmp_opp = rcu_dereference(opp);
159 if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
160 pr_err("%s: Invalid parameters\n", __func__);
161 else
162 v = tmp_opp->u_volt;
164 return v;
166 EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
169 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
170 * @opp: opp for which frequency has to be returned for
172 * Return frequency in hertz corresponding to the opp, else
173 * return 0
175 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
176 * protected pointer. This means that opp which could have been fetched by
177 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
178 * under RCU lock. The pointer returned by the opp_find_freq family must be
179 * used in the same section as the usage of this function with the pointer
180 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
181 * pointer.
183 unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
185 struct dev_pm_opp *tmp_opp;
186 unsigned long f = 0;
188 tmp_opp = rcu_dereference(opp);
189 if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
190 pr_err("%s: Invalid parameters\n", __func__);
191 else
192 f = tmp_opp->rate;
194 return f;
196 EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
199 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
200 * @dev: device for which we do this operation
202 * This function returns the number of available opps if there are any,
203 * else returns 0 if none or the corresponding error value.
205 * Locking: This function must be called under rcu_read_lock(). This function
206 * internally references two RCU protected structures: device_opp and opp which
207 * are safe as long as we are under a common RCU locked section.
209 int dev_pm_opp_get_opp_count(struct device *dev)
211 struct device_opp *dev_opp;
212 struct dev_pm_opp *temp_opp;
213 int count = 0;
215 dev_opp = find_device_opp(dev);
216 if (IS_ERR(dev_opp)) {
217 int r = PTR_ERR(dev_opp);
218 dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
219 return r;
222 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
223 if (temp_opp->available)
224 count++;
227 return count;
229 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
232 * dev_pm_opp_find_freq_exact() - search for an exact frequency
233 * @dev: device for which we do this operation
234 * @freq: frequency to search for
235 * @available: true/false - match for available opp
237 * Searches for exact match in the opp list and returns pointer to the matching
238 * opp if found, else returns ERR_PTR in case of error and should be handled
239 * using IS_ERR. Error return values can be:
240 * EINVAL: for bad pointer
241 * ERANGE: no match found for search
242 * ENODEV: if device not found in list of registered devices
244 * Note: available is a modifier for the search. if available=true, then the
245 * match is for exact matching frequency and is available in the stored OPP
246 * table. if false, the match is for exact frequency which is not available.
248 * This provides a mechanism to enable an opp which is not available currently
249 * or the opposite as well.
251 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
252 * protected pointer. The reason for the same is that the opp pointer which is
253 * returned will remain valid for use with opp_get_{voltage, freq} only while
254 * under the locked area. The pointer returned must be used prior to unlocking
255 * with rcu_read_unlock() to maintain the integrity of the pointer.
257 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
258 unsigned long freq,
259 bool available)
261 struct device_opp *dev_opp;
262 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
264 dev_opp = find_device_opp(dev);
265 if (IS_ERR(dev_opp)) {
266 int r = PTR_ERR(dev_opp);
267 dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
268 return ERR_PTR(r);
271 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
272 if (temp_opp->available == available &&
273 temp_opp->rate == freq) {
274 opp = temp_opp;
275 break;
279 return opp;
281 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
284 * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
285 * @dev: device for which we do this operation
286 * @freq: Start frequency
288 * Search for the matching ceil *available* OPP from a starting freq
289 * for a device.
291 * Returns matching *opp and refreshes *freq accordingly, else returns
292 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
293 * values can be:
294 * EINVAL: for bad pointer
295 * ERANGE: no match found for search
296 * ENODEV: if device not found in list of registered devices
298 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
299 * protected pointer. The reason for the same is that the opp pointer which is
300 * returned will remain valid for use with opp_get_{voltage, freq} only while
301 * under the locked area. The pointer returned must be used prior to unlocking
302 * with rcu_read_unlock() to maintain the integrity of the pointer.
304 struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
305 unsigned long *freq)
307 struct device_opp *dev_opp;
308 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
310 if (!dev || !freq) {
311 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
312 return ERR_PTR(-EINVAL);
315 dev_opp = find_device_opp(dev);
316 if (IS_ERR(dev_opp))
317 return ERR_CAST(dev_opp);
319 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
320 if (temp_opp->available && temp_opp->rate >= *freq) {
321 opp = temp_opp;
322 *freq = opp->rate;
323 break;
327 return opp;
329 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
332 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
333 * @dev: device for which we do this operation
334 * @freq: Start frequency
336 * Search for the matching floor *available* OPP from a starting freq
337 * for a device.
339 * Returns matching *opp and refreshes *freq accordingly, else returns
340 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
341 * values can be:
342 * EINVAL: for bad pointer
343 * ERANGE: no match found for search
344 * ENODEV: if device not found in list of registered devices
346 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
347 * protected pointer. The reason for the same is that the opp pointer which is
348 * returned will remain valid for use with opp_get_{voltage, freq} only while
349 * under the locked area. The pointer returned must be used prior to unlocking
350 * with rcu_read_unlock() to maintain the integrity of the pointer.
352 struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
353 unsigned long *freq)
355 struct device_opp *dev_opp;
356 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
358 if (!dev || !freq) {
359 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
360 return ERR_PTR(-EINVAL);
363 dev_opp = find_device_opp(dev);
364 if (IS_ERR(dev_opp))
365 return ERR_CAST(dev_opp);
367 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
368 if (temp_opp->available) {
369 /* go to the next node, before choosing prev */
370 if (temp_opp->rate > *freq)
371 break;
372 else
373 opp = temp_opp;
376 if (!IS_ERR(opp))
377 *freq = opp->rate;
379 return opp;
381 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
384 * dev_pm_opp_add() - Add an OPP table from a table definitions
385 * @dev: device for which we do this operation
386 * @freq: Frequency in Hz for this OPP
387 * @u_volt: Voltage in uVolts for this OPP
389 * This function adds an opp definition to the opp list and returns status.
390 * The opp is made available by default and it can be controlled using
391 * dev_pm_opp_enable/disable functions.
393 * Locking: The internal device_opp and opp structures are RCU protected.
394 * Hence this function internally uses RCU updater strategy with mutex locks
395 * to keep the integrity of the internal data structures. Callers should ensure
396 * that this function is *NOT* called under RCU protection or in contexts where
397 * mutex cannot be locked.
399 int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
401 struct device_opp *dev_opp = NULL;
402 struct dev_pm_opp *opp, *new_opp;
403 struct list_head *head;
405 /* allocate new OPP node */
406 new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
407 if (!new_opp) {
408 dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
409 return -ENOMEM;
412 /* Hold our list modification lock here */
413 mutex_lock(&dev_opp_list_lock);
415 /* Check for existing list for 'dev' */
416 dev_opp = find_device_opp(dev);
417 if (IS_ERR(dev_opp)) {
419 * Allocate a new device OPP table. In the infrequent case
420 * where a new device is needed to be added, we pay this
421 * penalty.
423 dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
424 if (!dev_opp) {
425 mutex_unlock(&dev_opp_list_lock);
426 kfree(new_opp);
427 dev_warn(dev,
428 "%s: Unable to create device OPP structure\n",
429 __func__);
430 return -ENOMEM;
433 dev_opp->dev = dev;
434 srcu_init_notifier_head(&dev_opp->head);
435 INIT_LIST_HEAD(&dev_opp->opp_list);
437 /* Secure the device list modification */
438 list_add_rcu(&dev_opp->node, &dev_opp_list);
441 /* populate the opp table */
442 new_opp->dev_opp = dev_opp;
443 new_opp->rate = freq;
444 new_opp->u_volt = u_volt;
445 new_opp->available = true;
447 /* Insert new OPP in order of increasing frequency */
448 head = &dev_opp->opp_list;
449 list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
450 if (new_opp->rate < opp->rate)
451 break;
452 else
453 head = &opp->node;
456 list_add_rcu(&new_opp->node, head);
457 mutex_unlock(&dev_opp_list_lock);
460 * Notify the changes in the availability of the operable
461 * frequency/voltage list.
463 srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
464 return 0;
466 EXPORT_SYMBOL_GPL(dev_pm_opp_add);
469 * opp_set_availability() - helper to set the availability of an opp
470 * @dev: device for which we do this operation
471 * @freq: OPP frequency to modify availability
472 * @availability_req: availability status requested for this opp
474 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
475 * share a common logic which is isolated here.
477 * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the
478 * copy operation, returns 0 if no modifcation was done OR modification was
479 * successful.
481 * Locking: The internal device_opp and opp structures are RCU protected.
482 * Hence this function internally uses RCU updater strategy with mutex locks to
483 * keep the integrity of the internal data structures. Callers should ensure
484 * that this function is *NOT* called under RCU protection or in contexts where
485 * mutex locking or synchronize_rcu() blocking calls cannot be used.
487 static int opp_set_availability(struct device *dev, unsigned long freq,
488 bool availability_req)
490 struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
491 struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
492 int r = 0;
494 /* keep the node allocated */
495 new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
496 if (!new_opp) {
497 dev_warn(dev, "%s: Unable to create OPP\n", __func__);
498 return -ENOMEM;
501 mutex_lock(&dev_opp_list_lock);
503 /* Find the device_opp */
504 list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
505 if (dev == tmp_dev_opp->dev) {
506 dev_opp = tmp_dev_opp;
507 break;
510 if (IS_ERR(dev_opp)) {
511 r = PTR_ERR(dev_opp);
512 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
513 goto unlock;
516 /* Do we have the frequency? */
517 list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
518 if (tmp_opp->rate == freq) {
519 opp = tmp_opp;
520 break;
523 if (IS_ERR(opp)) {
524 r = PTR_ERR(opp);
525 goto unlock;
528 /* Is update really needed? */
529 if (opp->available == availability_req)
530 goto unlock;
531 /* copy the old data over */
532 *new_opp = *opp;
534 /* plug in new node */
535 new_opp->available = availability_req;
537 list_replace_rcu(&opp->node, &new_opp->node);
538 mutex_unlock(&dev_opp_list_lock);
539 kfree_rcu(opp, head);
541 /* Notify the change of the OPP availability */
542 if (availability_req)
543 srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE,
544 new_opp);
545 else
546 srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
547 new_opp);
549 return 0;
551 unlock:
552 mutex_unlock(&dev_opp_list_lock);
553 kfree(new_opp);
554 return r;
558 * dev_pm_opp_enable() - Enable a specific OPP
559 * @dev: device for which we do this operation
560 * @freq: OPP frequency to enable
562 * Enables a provided opp. If the operation is valid, this returns 0, else the
563 * corresponding error value. It is meant to be used for users an OPP available
564 * after being temporarily made unavailable with dev_pm_opp_disable.
566 * Locking: The internal device_opp and opp structures are RCU protected.
567 * Hence this function indirectly uses RCU and mutex locks to keep the
568 * integrity of the internal data structures. Callers should ensure that
569 * this function is *NOT* called under RCU protection or in contexts where
570 * mutex locking or synchronize_rcu() blocking calls cannot be used.
572 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
574 return opp_set_availability(dev, freq, true);
576 EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
579 * dev_pm_opp_disable() - Disable a specific OPP
580 * @dev: device for which we do this operation
581 * @freq: OPP frequency to disable
583 * Disables a provided opp. If the operation is valid, this returns
584 * 0, else the corresponding error value. It is meant to be a temporary
585 * control by users to make this OPP not available until the circumstances are
586 * right to make it available again (with a call to dev_pm_opp_enable).
588 * Locking: The internal device_opp and opp structures are RCU protected.
589 * Hence this function indirectly uses RCU and mutex locks to keep the
590 * integrity of the internal data structures. Callers should ensure that
591 * this function is *NOT* called under RCU protection or in contexts where
592 * mutex locking or synchronize_rcu() blocking calls cannot be used.
594 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
596 return opp_set_availability(dev, freq, false);
598 EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
600 #ifdef CONFIG_CPU_FREQ
602 * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
603 * @dev: device for which we do this operation
604 * @table: Cpufreq table returned back to caller
606 * Generate a cpufreq table for a provided device- this assumes that the
607 * opp list is already initialized and ready for usage.
609 * This function allocates required memory for the cpufreq table. It is
610 * expected that the caller does the required maintenance such as freeing
611 * the table as required.
613 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
614 * if no memory available for the operation (table is not populated), returns 0
615 * if successful and table is populated.
617 * WARNING: It is important for the callers to ensure refreshing their copy of
618 * the table if any of the mentioned functions have been invoked in the interim.
620 * Locking: The internal device_opp and opp structures are RCU protected.
621 * To simplify the logic, we pretend we are updater and hold relevant mutex here
622 * Callers should ensure that this function is *NOT* called under RCU protection
623 * or in contexts where mutex locking cannot be used.
625 int dev_pm_opp_init_cpufreq_table(struct device *dev,
626 struct cpufreq_frequency_table **table)
628 struct device_opp *dev_opp;
629 struct dev_pm_opp *opp;
630 struct cpufreq_frequency_table *freq_table;
631 int i = 0;
633 /* Pretend as if I am an updater */
634 mutex_lock(&dev_opp_list_lock);
636 dev_opp = find_device_opp(dev);
637 if (IS_ERR(dev_opp)) {
638 int r = PTR_ERR(dev_opp);
639 mutex_unlock(&dev_opp_list_lock);
640 dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
641 return r;
644 freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
645 (dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL);
646 if (!freq_table) {
647 mutex_unlock(&dev_opp_list_lock);
648 dev_warn(dev, "%s: Unable to allocate frequency table\n",
649 __func__);
650 return -ENOMEM;
653 list_for_each_entry(opp, &dev_opp->opp_list, node) {
654 if (opp->available) {
655 freq_table[i].driver_data = i;
656 freq_table[i].frequency = opp->rate / 1000;
657 i++;
660 mutex_unlock(&dev_opp_list_lock);
662 freq_table[i].driver_data = i;
663 freq_table[i].frequency = CPUFREQ_TABLE_END;
665 *table = &freq_table[0];
667 return 0;
669 EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
672 * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
673 * @dev: device for which we do this operation
674 * @table: table to free
676 * Free up the table allocated by dev_pm_opp_init_cpufreq_table
678 void dev_pm_opp_free_cpufreq_table(struct device *dev,
679 struct cpufreq_frequency_table **table)
681 if (!table)
682 return;
684 kfree(*table);
685 *table = NULL;
687 EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
688 #endif /* CONFIG_CPU_FREQ */
691 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
692 * @dev: device pointer used to lookup device OPPs.
694 struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
696 struct device_opp *dev_opp = find_device_opp(dev);
698 if (IS_ERR(dev_opp))
699 return ERR_CAST(dev_opp); /* matching type */
701 return &dev_opp->head;
704 #ifdef CONFIG_OF
706 * of_init_opp_table() - Initialize opp table from device tree
707 * @dev: device pointer used to lookup device OPPs.
709 * Register the initial OPP table with the OPP library for given device.
711 int of_init_opp_table(struct device *dev)
713 const struct property *prop;
714 const __be32 *val;
715 int nr;
717 prop = of_find_property(dev->of_node, "operating-points", NULL);
718 if (!prop)
719 return -ENODEV;
720 if (!prop->value)
721 return -ENODATA;
724 * Each OPP is a set of tuples consisting of frequency and
725 * voltage like <freq-kHz vol-uV>.
727 nr = prop->length / sizeof(u32);
728 if (nr % 2) {
729 dev_err(dev, "%s: Invalid OPP list\n", __func__);
730 return -EINVAL;
733 val = prop->value;
734 while (nr) {
735 unsigned long freq = be32_to_cpup(val++) * 1000;
736 unsigned long volt = be32_to_cpup(val++);
738 if (dev_pm_opp_add(dev, freq, volt)) {
739 dev_warn(dev, "%s: Failed to add OPP %ld\n",
740 __func__, freq);
741 continue;
743 nr -= 2;
746 return 0;
748 EXPORT_SYMBOL_GPL(of_init_opp_table);
749 #endif