drivers/base/power/opp/core.c
1 /*
2 * Generic OPP Interface
4 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
5 * Nishanth Menon
6 * Romit Dasgupta
7 * Kevin Hilman
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 #include <linux/clk.h>
17 #include <linux/errno.h>
18 #include <linux/err.h>
19 #include <linux/slab.h>
20 #include <linux/device.h>
21 #include <linux/of.h>
22 #include <linux/export.h>
23 #include <linux/regulator/consumer.h>
25 #include "opp.h"
28 * The root of the list of all opp-tables. All opp_table structures branch off
29 * from here, with each opp_table containing the list of opps it supports in
30 * various states of availability.
32 static LIST_HEAD(opp_tables);
33 /* Lock to allow exclusive modification to the device and opp lists */
34 DEFINE_MUTEX(opp_table_lock);
36 #define opp_rcu_lockdep_assert() \
37 do { \
38 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
39 !lockdep_is_held(&opp_table_lock), \
40 "Missing rcu_read_lock() or " \
41 "opp_table_lock protection"); \
42 } while (0)
44 static struct opp_device *_find_opp_dev(const struct device *dev,
45 struct opp_table *opp_table)
47 struct opp_device *opp_dev;
49 list_for_each_entry(opp_dev, &opp_table->dev_list, node)
50 if (opp_dev->dev == dev)
51 return opp_dev;
53 return NULL;
56 static struct opp_table *_managed_opp(const struct device_node *np)
58 struct opp_table *opp_table;
60 list_for_each_entry_rcu(opp_table, &opp_tables, node) {
61 if (opp_table->np == np) {
63 * Multiple devices can point to the same OPP table and
64 * so will have same node-pointer, np.
66 * But the OPPs will be considered as shared only if the
67 * OPP table contains a "opp-shared" property.
69 return opp_table->shared_opp ? opp_table : NULL;
73 return NULL;
76 /**
77 * _find_opp_table() - find opp_table struct using device pointer
78 * @dev: device pointer used to lookup OPP table
80 * Search OPP table for one containing matching device. Does a RCU reader
81 * operation to grab the pointer needed.
83 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
84 * -EINVAL based on type of error.
86 * Locking: For readers, this function must be called under rcu_read_lock().
87 * opp_table is a RCU protected pointer, which means that opp_table is valid
88 * as long as we are under RCU lock.
90 * For Writers, this function must be called with opp_table_lock held.
92 struct opp_table *_find_opp_table(struct device *dev)
94 struct opp_table *opp_table;
96 opp_rcu_lockdep_assert();
98 if (IS_ERR_OR_NULL(dev)) {
99 pr_err("%s: Invalid parameters\n", __func__);
100 return ERR_PTR(-EINVAL);
103 list_for_each_entry_rcu(opp_table, &opp_tables, node)
104 if (_find_opp_dev(dev, opp_table))
105 return opp_table;
107 return ERR_PTR(-ENODEV);
111 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
112 * @opp: opp for which the voltage has to be returned
114 * Return: voltage in micro volt corresponding to the opp, else
115 * return 0
117 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
118 * protected pointer. This means that opp which could have been fetched by
119 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
120 * under RCU lock. The pointer returned by the opp_find_freq family must be
121 * used in the same section as the usage of this function with the pointer
122 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
123 * pointer.
125 unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
127 struct dev_pm_opp *tmp_opp;
128 unsigned long v = 0;
130 opp_rcu_lockdep_assert();
132 tmp_opp = rcu_dereference(opp);
133 if (IS_ERR_OR_NULL(tmp_opp))
134 pr_err("%s: Invalid parameters\n", __func__);
135 else
136 v = tmp_opp->u_volt;
138 return v;
140 EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
143 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
144 * @opp: opp for which the frequency has to be returned
146 * Return: frequency in hertz corresponding to the opp, else
147 * return 0
149 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
150 * protected pointer. This means that opp which could have been fetched by
151 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
152 * under RCU lock. The pointer returned by the opp_find_freq family must be
153 * used in the same section as the usage of this function with the pointer
154 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
155 * pointer.
157 unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
159 struct dev_pm_opp *tmp_opp;
160 unsigned long f = 0;
162 opp_rcu_lockdep_assert();
164 tmp_opp = rcu_dereference(opp);
165 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
166 pr_err("%s: Invalid parameters\n", __func__);
167 else
168 f = tmp_opp->rate;
170 return f;
172 EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
175 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
176 * @opp: opp for which turbo mode is being verified
178 * Turbo OPPs are not for normal use, and can be enabled (under certain
179 * conditions) for short duration of times to finish high throughput work
180 * quickly. Running on them for longer times may overheat the chip.
182 * Return: true if opp is turbo opp, else false.
184 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
185 * protected pointer. This means that opp which could have been fetched by
186 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
187 * under RCU lock. The pointer returned by the opp_find_freq family must be
188 * used in the same section as the usage of this function with the pointer
189 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
190 * pointer.
192 bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
194 struct dev_pm_opp *tmp_opp;
196 opp_rcu_lockdep_assert();
198 tmp_opp = rcu_dereference(opp);
199 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
200 pr_err("%s: Invalid parameters\n", __func__);
201 return false;
204 return tmp_opp->turbo;
206 EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
209 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
210 * @dev: device for which we do this operation
212 * Return: This function returns the max clock latency in nanoseconds.
214 * Locking: This function takes rcu_read_lock().
216 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
218 struct opp_table *opp_table;
219 unsigned long clock_latency_ns;
221 rcu_read_lock();
223 opp_table = _find_opp_table(dev);
224 if (IS_ERR(opp_table))
225 clock_latency_ns = 0;
226 else
227 clock_latency_ns = opp_table->clock_latency_ns_max;
229 rcu_read_unlock();
230 return clock_latency_ns;
232 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
235 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
236 * @dev: device for which we do this operation
238 * Return: This function returns the max voltage latency in nanoseconds.
240 * Locking: This function takes rcu_read_lock().
242 unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
244 struct opp_table *opp_table;
245 struct dev_pm_opp *opp;
246 struct regulator *reg;
247 unsigned long latency_ns = 0;
248 unsigned long min_uV = ~0, max_uV = 0;
249 int ret;
251 rcu_read_lock();
253 opp_table = _find_opp_table(dev);
254 if (IS_ERR(opp_table)) {
255 rcu_read_unlock();
256 return 0;
259 reg = opp_table->regulator;
260 if (IS_ERR(reg)) {
261 /* Regulator may not be required for device */
262 rcu_read_unlock();
263 return 0;
266 list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
267 if (!opp->available)
268 continue;
270 if (opp->u_volt_min < min_uV)
271 min_uV = opp->u_volt_min;
272 if (opp->u_volt_max > max_uV)
273 max_uV = opp->u_volt_max;
276 rcu_read_unlock();
279 * The caller needs to ensure that opp_table (and hence the regulator)
280 * isn't freed, while we are executing this routine.
282 ret = regulator_set_voltage_time(reg, min_uV, max_uV);
283 if (ret > 0)
284 latency_ns = ret * 1000;
286 return latency_ns;
288 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
291 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
292 * nanoseconds
293 * @dev: device for which we do this operation
295 * Return: This function returns the max transition latency, in nanoseconds, to
296 * switch from one OPP to other.
298 * Locking: This function takes rcu_read_lock().
300 unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
302 return dev_pm_opp_get_max_volt_latency(dev) +
303 dev_pm_opp_get_max_clock_latency(dev);
305 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
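/*
 * Editor's illustrative sketch, not part of the original file: one way a
 * governor might use the worst-case transition latency reported above to pick
 * a load-evaluation interval. example_pick_polling_interval_ms() and the
 * "ten times slower than a transition" heuristic are assumptions; only
 * dev_pm_opp_get_max_transition_latency() comes from this API.
 */
static unsigned int example_pick_polling_interval_ms(struct device *dev)
{
	unsigned long latency_ns = dev_pm_opp_get_max_transition_latency(dev);
	unsigned int latency_ms = latency_ns / 1000000;

	/* Re-evaluate load at least ten times slower than a single transition */
	return latency_ms ? latency_ms * 10 : 10;
}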
308 * dev_pm_opp_get_suspend_opp() - Get suspend opp
309 * @dev: device for which we do this operation
311 * Return: This function returns pointer to the suspend opp if it is
312 * defined and available, otherwise it returns NULL.
314 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
315 * protected pointer. The reason for the same is that the opp pointer which is
316 * returned will remain valid for use with opp_get_{voltage, freq} only while
317 * under the locked area. The pointer returned must be used prior to unlocking
318 * with rcu_read_unlock() to maintain the integrity of the pointer.
320 struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
322 struct opp_table *opp_table;
324 opp_rcu_lockdep_assert();
326 opp_table = _find_opp_table(dev);
327 if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
328 !opp_table->suspend_opp->available)
329 return NULL;
331 return opp_table->suspend_opp;
333 EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
336 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
337 * @dev: device for which we do this operation
339 * Return: This function returns the number of available opps if there are any,
340 * else returns 0 if none, or a negative error value on failure.
342 * Locking: This function takes rcu_read_lock().
344 int dev_pm_opp_get_opp_count(struct device *dev)
346 struct opp_table *opp_table;
347 struct dev_pm_opp *temp_opp;
348 int count = 0;
350 rcu_read_lock();
352 opp_table = _find_opp_table(dev);
353 if (IS_ERR(opp_table)) {
354 count = PTR_ERR(opp_table);
355 dev_err(dev, "%s: OPP table not found (%d)\n",
356 __func__, count);
357 goto out_unlock;
360 list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
361 if (temp_opp->available)
362 count++;
365 out_unlock:
366 rcu_read_unlock();
367 return count;
369 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
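/*
 * Editor's illustrative sketch, not part of the original file: building a flat
 * frequency table by sizing it with dev_pm_opp_get_opp_count() and then walking
 * the OPPs with dev_pm_opp_find_freq_ceil(). example_build_freq_table() is a
 * hypothetical helper; the caller would kfree() the returned table.
 */
static int example_build_freq_table(struct device *dev, unsigned long **table)
{
	struct dev_pm_opp *opp;
	unsigned long freq = 0, *t;
	int i, count;

	count = dev_pm_opp_get_opp_count(dev);
	if (count <= 0)
		return count ? count : -ENODEV;

	t = kcalloc(count, sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	rcu_read_lock();
	for (i = 0; i < count; i++, freq++) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;
		t[i] = freq;
	}
	rcu_read_unlock();

	*table = t;
	return i;	/* number of entries actually filled */
}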
372 * dev_pm_opp_find_freq_exact() - search for an exact frequency
373 * @dev: device for which we do this operation
374 * @freq: frequency to search for
375 * @available: true/false - match for available opp
377 * Return: Searches for exact match in the opp table and returns pointer to the
378 * matching opp if found, else returns ERR_PTR in case of error and should
379 * be handled using IS_ERR. Error return values can be:
380 * EINVAL: for bad pointer
381 * ERANGE: no match found for search
382 * ENODEV: if device not found in list of registered devices
384 * Note: available is a modifier for the search. If available=true, then the
385 * match is for an exact frequency which is available in the stored OPP
386 * table. If false, the match is for an exact frequency which is not available.
388 * This provides a mechanism to enable an opp which is not available currently
389 * or the opposite as well.
391 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
392 * protected pointer. The reason for the same is that the opp pointer which is
393 * returned will remain valid for use with opp_get_{voltage, freq} only while
394 * under the locked area. The pointer returned must be used prior to unlocking
395 * with rcu_read_unlock() to maintain the integrity of the pointer.
397 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
398 unsigned long freq,
399 bool available)
401 struct opp_table *opp_table;
402 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
404 opp_rcu_lockdep_assert();
406 opp_table = _find_opp_table(dev);
407 if (IS_ERR(opp_table)) {
408 int r = PTR_ERR(opp_table);
410 dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
411 return ERR_PTR(r);
414 list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
415 if (temp_opp->available == available &&
416 temp_opp->rate == freq) {
417 opp = temp_opp;
418 break;
422 return opp;
424 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
427 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
428 * @dev: device for which we do this operation
429 * @freq: Start frequency
431 * Search for the matching ceil *available* OPP from a starting freq
432 * for a device.
434 * Return: matching *opp and refreshes *freq accordingly, else returns
435 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
436 * values can be:
437 * EINVAL: for bad pointer
438 * ERANGE: no match found for search
439 * ENODEV: if device not found in list of registered devices
441 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
442 * protected pointer. The reason for the same is that the opp pointer which is
443 * returned will remain valid for use with opp_get_{voltage, freq} only while
444 * under the locked area. The pointer returned must be used prior to unlocking
445 * with rcu_read_unlock() to maintain the integrity of the pointer.
447 struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
448 unsigned long *freq)
450 struct opp_table *opp_table;
451 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
453 opp_rcu_lockdep_assert();
455 if (!dev || !freq) {
456 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
457 return ERR_PTR(-EINVAL);
460 opp_table = _find_opp_table(dev);
461 if (IS_ERR(opp_table))
462 return ERR_CAST(opp_table);
464 list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
465 if (temp_opp->available && temp_opp->rate >= *freq) {
466 opp = temp_opp;
467 *freq = opp->rate;
468 break;
472 return opp;
474 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
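/*
 * Editor's illustrative sketch, not part of the original file: an RCU-protected
 * lookup of the OPP at or above a requested rate, reading its voltage before
 * dropping the lock, as the locking notes above require. example_volt_for_rate()
 * is a hypothetical name.
 */
static unsigned long example_volt_for_rate(struct device *dev, unsigned long rate)
{
	struct dev_pm_opp *opp;
	unsigned long volt = 0;

	rcu_read_lock();

	opp = dev_pm_opp_find_freq_ceil(dev, &rate);
	if (!IS_ERR(opp))
		volt = dev_pm_opp_get_voltage(opp);

	rcu_read_unlock();

	return volt;
}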
477 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
478 * @dev: device for which we do this operation
479 * @freq: Start frequency
481 * Search for the matching floor *available* OPP from a starting freq
482 * for a device.
484 * Return: matching *opp and refreshes *freq accordingly, else returns
485 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
486 * values can be:
487 * EINVAL: for bad pointer
488 * ERANGE: no match found for search
489 * ENODEV: if device not found in list of registered devices
491 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
492 * protected pointer. The reason for the same is that the opp pointer which is
493 * returned will remain valid for use with opp_get_{voltage, freq} only while
494 * under the locked area. The pointer returned must be used prior to unlocking
495 * with rcu_read_unlock() to maintain the integrity of the pointer.
497 struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
498 unsigned long *freq)
500 struct opp_table *opp_table;
501 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
503 opp_rcu_lockdep_assert();
505 if (!dev || !freq) {
506 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
507 return ERR_PTR(-EINVAL);
510 opp_table = _find_opp_table(dev);
511 if (IS_ERR(opp_table))
512 return ERR_CAST(opp_table);
514 list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
515 if (temp_opp->available) {
516 /* go to the next node, before choosing prev */
517 if (temp_opp->rate > *freq)
518 break;
519 else
520 opp = temp_opp;
523 if (!IS_ERR(opp))
524 *freq = opp->rate;
526 return opp;
528 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
531 * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
532 * while clk returned here is used.
534 static struct clk *_get_opp_clk(struct device *dev)
536 struct opp_table *opp_table;
537 struct clk *clk;
539 rcu_read_lock();
541 opp_table = _find_opp_table(dev);
542 if (IS_ERR(opp_table)) {
543 dev_err(dev, "%s: device opp doesn't exist\n", __func__);
544 clk = ERR_CAST(opp_table);
545 goto unlock;
548 clk = opp_table->clk;
549 if (IS_ERR(clk))
550 dev_err(dev, "%s: No clock available for the device\n",
551 __func__);
553 unlock:
554 rcu_read_unlock();
555 return clk;
558 static int _set_opp_voltage(struct device *dev, struct regulator *reg,
559 unsigned long u_volt, unsigned long u_volt_min,
560 unsigned long u_volt_max)
562 int ret;
564 /* Regulator not available for device */
565 if (IS_ERR(reg)) {
566 dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
567 PTR_ERR(reg));
568 return 0;
571 dev_dbg(dev, "%s: voltages (uV): %lu %lu %lu\n", __func__, u_volt_min,
572 u_volt, u_volt_max);
574 ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
575 u_volt_max);
576 if (ret)
577 dev_err(dev, "%s: failed to set voltage (%lu %lu %lu uV): %d\n",
578 __func__, u_volt_min, u_volt, u_volt_max, ret);
580 return ret;
584 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
585 * @dev: device for which we do this operation
586 * @target_freq: frequency to achieve
588 * This configures the power-supplies and clock source to the levels specified
589 * by the OPP corresponding to the target_freq.
591 * Locking: This function takes rcu_read_lock().
593 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
595 struct opp_table *opp_table;
596 struct dev_pm_opp *old_opp, *opp;
597 struct regulator *reg;
598 struct clk *clk;
599 unsigned long freq, old_freq;
600 unsigned long u_volt, u_volt_min, u_volt_max;
601 unsigned long ou_volt, ou_volt_min, ou_volt_max;
602 int ret;
604 if (unlikely(!target_freq)) {
605 dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
606 target_freq);
607 return -EINVAL;
610 clk = _get_opp_clk(dev);
611 if (IS_ERR(clk))
612 return PTR_ERR(clk);
614 freq = clk_round_rate(clk, target_freq);
615 if ((long)freq <= 0)
616 freq = target_freq;
618 old_freq = clk_get_rate(clk);
620 /* Return early if nothing to do */
621 if (old_freq == freq) {
622 dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
623 __func__, freq);
624 return 0;
627 rcu_read_lock();
629 opp_table = _find_opp_table(dev);
630 if (IS_ERR(opp_table)) {
631 dev_err(dev, "%s: device opp doesn't exist\n", __func__);
632 rcu_read_unlock();
633 return PTR_ERR(opp_table);
636 old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
637 if (!IS_ERR(old_opp)) {
638 ou_volt = old_opp->u_volt;
639 ou_volt_min = old_opp->u_volt_min;
640 ou_volt_max = old_opp->u_volt_max;
641 } else {
642 dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
643 __func__, old_freq, PTR_ERR(old_opp));
646 opp = dev_pm_opp_find_freq_ceil(dev, &freq);
647 if (IS_ERR(opp)) {
648 ret = PTR_ERR(opp);
649 dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
650 __func__, freq, ret);
651 rcu_read_unlock();
652 return ret;
655 u_volt = opp->u_volt;
656 u_volt_min = opp->u_volt_min;
657 u_volt_max = opp->u_volt_max;
659 reg = opp_table->regulator;
661 rcu_read_unlock();
663 /* Scaling up? Scale voltage before frequency */
664 if (freq > old_freq) {
665 ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
666 u_volt_max);
667 if (ret)
668 goto restore_voltage;
671 /* Change frequency */
673 dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
674 __func__, old_freq, freq);
676 ret = clk_set_rate(clk, freq);
677 if (ret) {
678 dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
679 ret);
680 goto restore_voltage;
683 /* Scaling down? Scale voltage after frequency */
684 if (freq < old_freq) {
685 ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
686 u_volt_max);
687 if (ret)
688 goto restore_freq;
691 return 0;
693 restore_freq:
694 if (clk_set_rate(clk, old_freq))
695 dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
696 __func__, old_freq);
697 restore_voltage:
698 /* This shouldn't harm even if the voltages weren't updated earlier */
699 if (!IS_ERR(old_opp))
700 _set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max);
702 return ret;
704 EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
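/*
 * Editor's illustrative sketch, not part of the original file: a minimal
 * cpufreq/devfreq style target helper built on dev_pm_opp_set_rate(). Snapping
 * the request to a real OPP first is optional (dev_pm_opp_set_rate() already
 * rounds via the clk framework); example_set_target() is a hypothetical name.
 */
static int example_set_target(struct device *dev, unsigned long target_freq)
{
	struct dev_pm_opp *opp;
	unsigned long freq = target_freq;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	rcu_read_unlock();
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	return dev_pm_opp_set_rate(dev, freq);
}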
706 /* OPP-dev Helpers */
707 static void _kfree_opp_dev_rcu(struct rcu_head *head)
709 struct opp_device *opp_dev;
711 opp_dev = container_of(head, struct opp_device, rcu_head);
712 kfree_rcu(opp_dev, rcu_head);
715 static void _remove_opp_dev(struct opp_device *opp_dev,
716 struct opp_table *opp_table)
718 opp_debug_unregister(opp_dev, opp_table);
719 list_del(&opp_dev->node);
720 call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
721 _kfree_opp_dev_rcu);
724 struct opp_device *_add_opp_dev(const struct device *dev,
725 struct opp_table *opp_table)
727 struct opp_device *opp_dev;
728 int ret;
730 opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
731 if (!opp_dev)
732 return NULL;
734 /* Initialize opp-dev */
735 opp_dev->dev = dev;
736 list_add_rcu(&opp_dev->node, &opp_table->dev_list);
738 /* Create debugfs entries for the opp_table */
739 ret = opp_debug_register(opp_dev, opp_table);
740 if (ret)
741 dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
742 __func__, ret);
744 return opp_dev;
748 * _add_opp_table() - Find OPP table or allocate a new one
749 * @dev: device for which we do this operation
751 * It tries to find an existing table first, if it couldn't find one, it
752 * allocates a new OPP table and returns that.
754 * Return: valid opp_table pointer if success, else NULL.
756 static struct opp_table *_add_opp_table(struct device *dev)
758 struct opp_table *opp_table;
759 struct opp_device *opp_dev;
760 struct device_node *np;
761 int ret;
763 /* Check for existing table for 'dev' first */
764 opp_table = _find_opp_table(dev);
765 if (!IS_ERR(opp_table))
766 return opp_table;
769 * Allocate a new OPP table. In the infrequent case where a new
770 * device needs to be added, we pay this penalty.
772 opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
773 if (!opp_table)
774 return NULL;
776 INIT_LIST_HEAD(&opp_table->dev_list);
778 opp_dev = _add_opp_dev(dev, opp_table);
779 if (!opp_dev) {
780 kfree(opp_table);
781 return NULL;
785 * Only required for backward compatibility with v1 bindings, but isn't
786 * harmful for other cases. And so we do it unconditionally.
788 np = of_node_get(dev->of_node);
789 if (np) {
790 u32 val;
792 if (!of_property_read_u32(np, "clock-latency", &val))
793 opp_table->clock_latency_ns_max = val;
794 of_property_read_u32(np, "voltage-tolerance",
795 &opp_table->voltage_tolerance_v1);
796 of_node_put(np);
799 /* Set regulator to a non-NULL error value */
800 opp_table->regulator = ERR_PTR(-ENXIO);
802 /* Find clk for the device */
803 opp_table->clk = clk_get(dev, NULL);
804 if (IS_ERR(opp_table->clk)) {
805 ret = PTR_ERR(opp_table->clk);
806 if (ret != -EPROBE_DEFER)
807 dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
808 ret);
811 srcu_init_notifier_head(&opp_table->srcu_head);
812 INIT_LIST_HEAD(&opp_table->opp_list);
814 /* Secure the device table modification */
815 list_add_rcu(&opp_table->node, &opp_tables);
816 return opp_table;
820 * _kfree_device_rcu() - Free opp_table RCU handler
821 * @head: RCU head
823 static void _kfree_device_rcu(struct rcu_head *head)
825 struct opp_table *opp_table = container_of(head, struct opp_table,
826 rcu_head);
828 kfree_rcu(opp_table, rcu_head);
832 * _remove_opp_table() - Removes a OPP table
833 * @opp_table: OPP table to be removed.
835 * Removes/frees OPP table if it doesn't contain any OPPs.
837 static void _remove_opp_table(struct opp_table *opp_table)
839 struct opp_device *opp_dev;
841 if (!list_empty(&opp_table->opp_list))
842 return;
844 if (opp_table->supported_hw)
845 return;
847 if (opp_table->prop_name)
848 return;
850 if (!IS_ERR(opp_table->regulator))
851 return;
853 /* Release clk */
854 if (!IS_ERR(opp_table->clk))
855 clk_put(opp_table->clk);
857 opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
858 node);
860 _remove_opp_dev(opp_dev, opp_table);
862 /* dev_list must be empty now */
863 WARN_ON(!list_empty(&opp_table->dev_list));
865 list_del_rcu(&opp_table->node);
866 call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
867 _kfree_device_rcu);
871 * _kfree_opp_rcu() - Free OPP RCU handler
872 * @head: RCU head
874 static void _kfree_opp_rcu(struct rcu_head *head)
876 struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
878 kfree_rcu(opp, rcu_head);
882 * _opp_remove() - Remove an OPP from a table definition
883 * @opp_table: points back to the opp_table struct this opp belongs to
884 * @opp: pointer to the OPP to remove
885 * @notify: OPP_EVENT_REMOVE notification should be sent or not
887 * This function removes an opp definition from the opp table.
889 * Locking: The internal opp_table and opp structures are RCU protected.
890 * It is assumed that the caller holds required mutex for an RCU updater
891 * strategy.
893 static void _opp_remove(struct opp_table *opp_table,
894 struct dev_pm_opp *opp, bool notify)
897 * Notify the changes in the availability of the operable
898 * frequency/voltage list.
900 if (notify)
901 srcu_notifier_call_chain(&opp_table->srcu_head,
902 OPP_EVENT_REMOVE, opp);
903 opp_debug_remove_one(opp);
904 list_del_rcu(&opp->node);
905 call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
907 _remove_opp_table(opp_table);
911 * dev_pm_opp_remove() - Remove an OPP from OPP table
912 * @dev: device for which we do this operation
913 * @freq: OPP to remove with matching 'freq'
915 * This function removes an opp from the opp table.
917 * Locking: The internal opp_table and opp structures are RCU protected.
918 * Hence this function internally uses RCU updater strategy with mutex locks
919 * to keep the integrity of the internal data structures. Callers should ensure
920 * that this function is *NOT* called under RCU protection or in contexts where
921 * mutex cannot be locked.
923 void dev_pm_opp_remove(struct device *dev, unsigned long freq)
925 struct dev_pm_opp *opp;
926 struct opp_table *opp_table;
927 bool found = false;
929 /* Hold our table modification lock here */
930 mutex_lock(&opp_table_lock);
932 opp_table = _find_opp_table(dev);
933 if (IS_ERR(opp_table))
934 goto unlock;
936 list_for_each_entry(opp, &opp_table->opp_list, node) {
937 if (opp->rate == freq) {
938 found = true;
939 break;
943 if (!found) {
944 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
945 __func__, freq);
946 goto unlock;
949 _opp_remove(opp_table, opp, true);
950 unlock:
951 mutex_unlock(&opp_table_lock);
953 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
955 static struct dev_pm_opp *_allocate_opp(struct device *dev,
956 struct opp_table **opp_table)
958 struct dev_pm_opp *opp;
960 /* allocate new OPP node */
961 opp = kzalloc(sizeof(*opp), GFP_KERNEL);
962 if (!opp)
963 return NULL;
965 INIT_LIST_HEAD(&opp->node);
967 *opp_table = _add_opp_table(dev);
968 if (!*opp_table) {
969 kfree(opp);
970 return NULL;
973 return opp;
976 static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
977 struct opp_table *opp_table)
979 struct regulator *reg = opp_table->regulator;
981 if (!IS_ERR(reg) &&
982 !regulator_is_supported_voltage(reg, opp->u_volt_min,
983 opp->u_volt_max)) {
984 pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
985 __func__, opp->u_volt_min, opp->u_volt_max);
986 return false;
989 return true;
992 static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
993 struct opp_table *opp_table)
995 struct dev_pm_opp *opp;
996 struct list_head *head = &opp_table->opp_list;
997 int ret;
1000 * Insert new OPP in order of increasing frequency and discard if
1001 * already present.
1003 * Need to use &opp_table->opp_list in the condition part of the 'for'
1004 * loop, don't replace it with head otherwise it will become an infinite
1005 * loop.
1007 list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
1008 if (new_opp->rate > opp->rate) {
1009 head = &opp->node;
1010 continue;
1013 if (new_opp->rate < opp->rate)
1014 break;
1016 /* Duplicate OPPs */
1017 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
1018 __func__, opp->rate, opp->u_volt, opp->available,
1019 new_opp->rate, new_opp->u_volt, new_opp->available);
1021 return opp->available && new_opp->u_volt == opp->u_volt ?
1022 0 : -EEXIST;
1025 new_opp->opp_table = opp_table;
1026 list_add_rcu(&new_opp->node, head);
1028 ret = opp_debug_create_one(new_opp, opp_table);
1029 if (ret)
1030 dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
1031 __func__, ret);
1033 if (!_opp_supported_by_regulators(new_opp, opp_table)) {
1034 new_opp->available = false;
1035 dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
1036 __func__, new_opp->rate);
1039 return 0;
1043 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
1044 * @dev: device for which we do this operation
1045 * @freq: Frequency in Hz for this OPP
1046 * @u_volt: Voltage in uVolts for this OPP
1047 * @dynamic: Dynamically added OPPs.
1049 * This function adds an opp definition to the opp table and returns status.
1050 * The opp is made available by default and it can be controlled using
1051 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
1053 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
1054 * and freed by dev_pm_opp_of_remove_table.
1056 * Locking: The internal opp_table and opp structures are RCU protected.
1057 * Hence this function internally uses RCU updater strategy with mutex locks
1058 * to keep the integrity of the internal data structures. Callers should ensure
1059 * that this function is *NOT* called under RCU protection or in contexts where
1060 * mutex cannot be locked.
1062 * Return:
1063 * 0 On success OR
1064 * Duplicate OPPs (both freq and volt are same) and opp->available
1065 * -EEXIST Freq are same and volt are different OR
1066 * Duplicate OPPs (both freq and volt are same) and !opp->available
1067 * -ENOMEM Memory allocation failure
1069 static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
1070 bool dynamic)
1072 struct opp_table *opp_table;
1073 struct dev_pm_opp *new_opp;
1074 unsigned long tol;
1075 int ret;
1077 /* Hold our table modification lock here */
1078 mutex_lock(&opp_table_lock);
1080 new_opp = _allocate_opp(dev, &opp_table);
1081 if (!new_opp) {
1082 ret = -ENOMEM;
1083 goto unlock;
1086 /* populate the opp table */
1087 new_opp->rate = freq;
1088 tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
1089 new_opp->u_volt = u_volt;
1090 new_opp->u_volt_min = u_volt - tol;
1091 new_opp->u_volt_max = u_volt + tol;
1092 new_opp->available = true;
1093 new_opp->dynamic = dynamic;
1095 ret = _opp_add(dev, new_opp, opp_table);
1096 if (ret)
1097 goto free_opp;
1099 mutex_unlock(&opp_table_lock);
1102 * Notify the changes in the availability of the operable
1103 * frequency/voltage list.
1105 srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
1106 return 0;
1108 free_opp:
1109 _opp_remove(opp_table, new_opp, false);
1110 unlock:
1111 mutex_unlock(&opp_table_lock);
1112 return ret;
1115 /* TODO: Support multiple regulators */
1116 static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
1117 struct opp_table *opp_table)
1119 u32 microvolt[3] = {0};
1120 u32 val;
1121 int count, ret;
1122 struct property *prop = NULL;
1123 char name[NAME_MAX];
1125 /* Search for "opp-microvolt-<name>" */
1126 if (opp_table->prop_name) {
1127 snprintf(name, sizeof(name), "opp-microvolt-%s",
1128 opp_table->prop_name);
1129 prop = of_find_property(opp->np, name, NULL);
1132 if (!prop) {
1133 /* Search for "opp-microvolt" */
1134 sprintf(name, "opp-microvolt");
1135 prop = of_find_property(opp->np, name, NULL);
1137 /* Missing property isn't a problem, but an invalid entry is */
1138 if (!prop)
1139 return 0;
1142 count = of_property_count_u32_elems(opp->np, name);
1143 if (count < 0) {
1144 dev_err(dev, "%s: Invalid %s property (%d)\n",
1145 __func__, name, count);
1146 return count;
1149 /* There can be one or three elements here */
1150 if (count != 1 && count != 3) {
1151 dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
1152 __func__, name, count);
1153 return -EINVAL;
1156 ret = of_property_read_u32_array(opp->np, name, microvolt, count);
1157 if (ret) {
1158 dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
1159 return -EINVAL;
1162 opp->u_volt = microvolt[0];
1164 if (count == 1) {
1165 opp->u_volt_min = opp->u_volt;
1166 opp->u_volt_max = opp->u_volt;
1167 } else {
1168 opp->u_volt_min = microvolt[1];
1169 opp->u_volt_max = microvolt[2];
1172 /* Search for "opp-microamp-<name>" */
1173 prop = NULL;
1174 if (opp_table->prop_name) {
1175 snprintf(name, sizeof(name), "opp-microamp-%s",
1176 opp_table->prop_name);
1177 prop = of_find_property(opp->np, name, NULL);
1180 if (!prop) {
1181 /* Search for "opp-microamp" */
1182 sprintf(name, "opp-microamp");
1183 prop = of_find_property(opp->np, name, NULL);
1186 if (prop && !of_property_read_u32(opp->np, name, &val))
1187 opp->u_amp = val;
1189 return 0;
1193 * dev_pm_opp_set_supported_hw() - Set supported platforms
1194 * @dev: Device for which supported-hw has to be set.
1195 * @versions: Array of hierarchy of versions to match.
1196 * @count: Number of elements in the array.
1198 * This is required only for the V2 bindings, and it enables a platform to
1199 * specify the hierarchy of versions it supports. OPP layer will then enable
1200 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
1201 * property.
1203 * Locking: The internal opp_table and opp structures are RCU protected.
1204 * Hence this function internally uses RCU updater strategy with mutex locks
1205 * to keep the integrity of the internal data structures. Callers should ensure
1206 * that this function is *NOT* called under RCU protection or in contexts where
1207 * mutex cannot be locked.
1209 int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
1210 unsigned int count)
1212 struct opp_table *opp_table;
1213 int ret = 0;
1215 /* Hold our table modification lock here */
1216 mutex_lock(&opp_table_lock);
1218 opp_table = _add_opp_table(dev);
1219 if (!opp_table) {
1220 ret = -ENOMEM;
1221 goto unlock;
1224 /* Make sure there are no concurrent readers while updating opp_table */
1225 WARN_ON(!list_empty(&opp_table->opp_list));
1227 /* Do we already have a version hierarchy associated with opp_table? */
1228 if (opp_table->supported_hw) {
1229 dev_err(dev, "%s: Already have supported hardware list\n",
1230 __func__);
1231 ret = -EBUSY;
1232 goto err;
1235 opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
1236 GFP_KERNEL);
1237 if (!opp_table->supported_hw) {
1238 ret = -ENOMEM;
1239 goto err;
1242 opp_table->supported_hw_count = count;
1243 mutex_unlock(&opp_table_lock);
1244 return 0;
1246 err:
1247 _remove_opp_table(opp_table);
1248 unlock:
1249 mutex_unlock(&opp_table_lock);
1251 return ret;
1253 EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
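/*
 * Editor's illustrative sketch, not part of the original file: a platform
 * driver declaring which hardware version it runs on before parsing the DT
 * table, so that only OPP nodes with a matching "opp-supported-hw" mask get
 * enabled. The BIT(2) version mask and example_init_opps() are made up.
 */
static int example_init_opps(struct device *dev)
{
	/* One u32 per level of the version hierarchy, interpreted as bit masks */
	const u32 versions[] = { BIT(2) };
	int ret;

	ret = dev_pm_opp_set_supported_hw(dev, versions, ARRAY_SIZE(versions));
	if (ret)
		return ret;

	ret = dev_pm_opp_of_add_table(dev);
	if (ret)
		dev_pm_opp_put_supported_hw(dev);

	return ret;
}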
1256 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
1257 * @dev: Device for which supported-hw has to be put.
1259 * This is required only for the V2 bindings, and is called for a matching
1260 * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
1261 * will not be freed.
1263 * Locking: The internal opp_table and opp structures are RCU protected.
1264 * Hence this function internally uses RCU updater strategy with mutex locks
1265 * to keep the integrity of the internal data structures. Callers should ensure
1266 * that this function is *NOT* called under RCU protection or in contexts where
1267 * mutex cannot be locked.
1269 void dev_pm_opp_put_supported_hw(struct device *dev)
1271 struct opp_table *opp_table;
1273 /* Hold our table modification lock here */
1274 mutex_lock(&opp_table_lock);
1276 /* Check for existing table for 'dev' first */
1277 opp_table = _find_opp_table(dev);
1278 if (IS_ERR(opp_table)) {
1279 dev_err(dev, "Failed to find opp_table: %ld\n",
1280 PTR_ERR(opp_table));
1281 goto unlock;
1284 /* Make sure there are no concurrent readers while updating opp_table */
1285 WARN_ON(!list_empty(&opp_table->opp_list));
1287 if (!opp_table->supported_hw) {
1288 dev_err(dev, "%s: Doesn't have supported hardware list\n",
1289 __func__);
1290 goto unlock;
1293 kfree(opp_table->supported_hw);
1294 opp_table->supported_hw = NULL;
1295 opp_table->supported_hw_count = 0;
1297 /* Try freeing opp_table if this was the last blocking resource */
1298 _remove_opp_table(opp_table);
1300 unlock:
1301 mutex_unlock(&opp_table_lock);
1303 EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
1306 * dev_pm_opp_set_prop_name() - Set prop-extn name
1307 * @dev: Device for which the prop-name has to be set.
1308 * @name: name to postfix to properties.
1310 * This is required only for the V2 bindings, and it enables a platform to
1311 * specify the extn to be used for certain property names. The properties to
1312 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
1313 * should postfix the property name with -<name> while looking for them.
1315 * Locking: The internal opp_table and opp structures are RCU protected.
1316 * Hence this function internally uses RCU updater strategy with mutex locks
1317 * to keep the integrity of the internal data structures. Callers should ensure
1318 * that this function is *NOT* called under RCU protection or in contexts where
1319 * mutex cannot be locked.
1321 int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
1323 struct opp_table *opp_table;
1324 int ret = 0;
1326 /* Hold our table modification lock here */
1327 mutex_lock(&opp_table_lock);
1329 opp_table = _add_opp_table(dev);
1330 if (!opp_table) {
1331 ret = -ENOMEM;
1332 goto unlock;
1335 /* Make sure there are no concurrent readers while updating opp_table */
1336 WARN_ON(!list_empty(&opp_table->opp_list));
1338 /* Do we already have a prop-name associated with opp_table? */
1339 if (opp_table->prop_name) {
1340 dev_err(dev, "%s: Already have prop-name %s\n", __func__,
1341 opp_table->prop_name);
1342 ret = -EBUSY;
1343 goto err;
1346 opp_table->prop_name = kstrdup(name, GFP_KERNEL);
1347 if (!opp_table->prop_name) {
1348 ret = -ENOMEM;
1349 goto err;
1352 mutex_unlock(&opp_table_lock);
1353 return 0;
1355 err:
1356 _remove_opp_table(opp_table);
1357 unlock:
1358 mutex_unlock(&opp_table_lock);
1360 return ret;
1362 EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
1365 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
1366 * @dev: Device for which the prop-name has to be put.
1368 * This is required only for the V2 bindings, and is called for a matching
1369 * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
1370 * will not be freed.
1372 * Locking: The internal opp_table and opp structures are RCU protected.
1373 * Hence this function internally uses RCU updater strategy with mutex locks
1374 * to keep the integrity of the internal data structures. Callers should ensure
1375 * that this function is *NOT* called under RCU protection or in contexts where
1376 * mutex cannot be locked.
1378 void dev_pm_opp_put_prop_name(struct device *dev)
1380 struct opp_table *opp_table;
1382 /* Hold our table modification lock here */
1383 mutex_lock(&opp_table_lock);
1385 /* Check for existing table for 'dev' first */
1386 opp_table = _find_opp_table(dev);
1387 if (IS_ERR(opp_table)) {
1388 dev_err(dev, "Failed to find opp_table: %ld\n",
1389 PTR_ERR(opp_table));
1390 goto unlock;
1393 /* Make sure there are no concurrent readers while updating opp_table */
1394 WARN_ON(!list_empty(&opp_table->opp_list));
1396 if (!opp_table->prop_name) {
1397 dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
1398 goto unlock;
1401 kfree(opp_table->prop_name);
1402 opp_table->prop_name = NULL;
1404 /* Try freeing opp_table if this was the last blocking resource */
1405 _remove_opp_table(opp_table);
1407 unlock:
1408 mutex_unlock(&opp_table_lock);
1410 EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
1413 * dev_pm_opp_set_regulator() - Set regulator name for the device
1414 * @dev: Device for which regulator name is being set.
1415 * @name: Name of the regulator.
1417 * In order to support OPP switching, OPP layer needs to know the name of the
1418 * device's regulator, as the core would be required to switch voltages as well.
1420 * This must be called before any OPPs are initialized for the device.
1422 * Locking: The internal opp_table and opp structures are RCU protected.
1423 * Hence this function internally uses RCU updater strategy with mutex locks
1424 * to keep the integrity of the internal data structures. Callers should ensure
1425 * that this function is *NOT* called under RCU protection or in contexts where
1426 * mutex cannot be locked.
1428 int dev_pm_opp_set_regulator(struct device *dev, const char *name)
1430 struct opp_table *opp_table;
1431 struct regulator *reg;
1432 int ret;
1434 mutex_lock(&opp_table_lock);
1436 opp_table = _add_opp_table(dev);
1437 if (!opp_table) {
1438 ret = -ENOMEM;
1439 goto unlock;
1442 /* This should be called before OPPs are initialized */
1443 if (WARN_ON(!list_empty(&opp_table->opp_list))) {
1444 ret = -EBUSY;
1445 goto err;
1448 /* Already have a regulator set */
1449 if (WARN_ON(!IS_ERR(opp_table->regulator))) {
1450 ret = -EBUSY;
1451 goto err;
1453 /* Allocate the regulator */
1454 reg = regulator_get_optional(dev, name);
1455 if (IS_ERR(reg)) {
1456 ret = PTR_ERR(reg);
1457 if (ret != -EPROBE_DEFER)
1458 dev_err(dev, "%s: no regulator (%s) found: %d\n",
1459 __func__, name, ret);
1460 goto err;
1463 opp_table->regulator = reg;
1465 mutex_unlock(&opp_table_lock);
1466 return 0;
1468 err:
1469 _remove_opp_table(opp_table);
1470 unlock:
1471 mutex_unlock(&opp_table_lock);
1473 return ret;
1475 EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
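/*
 * Editor's illustrative sketch, not part of the original file: registering the
 * supply that dev_pm_opp_set_rate() should scale. As the comment above states,
 * this must happen before any OPPs are added for the device. The "vdd" supply
 * name and example_setup_supply() are assumptions.
 */
static int example_setup_supply(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_set_regulator(dev, "vdd");
	if (ret)
		return ret;

	ret = dev_pm_opp_of_add_table(dev);
	if (ret)
		dev_pm_opp_put_regulator(dev);

	return ret;
}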
1478 * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
1479 * @dev: Device for which regulator was set.
1481 * Locking: The internal opp_table and opp structures are RCU protected.
1482 * Hence this function internally uses RCU updater strategy with mutex locks
1483 * to keep the integrity of the internal data structures. Callers should ensure
1484 * that this function is *NOT* called under RCU protection or in contexts where
1485 * mutex cannot be locked.
1487 void dev_pm_opp_put_regulator(struct device *dev)
1489 struct opp_table *opp_table;
1491 mutex_lock(&opp_table_lock);
1493 /* Check for existing table for 'dev' first */
1494 opp_table = _find_opp_table(dev);
1495 if (IS_ERR(opp_table)) {
1496 dev_err(dev, "Failed to find opp_table: %ld\n",
1497 PTR_ERR(opp_table));
1498 goto unlock;
1501 if (IS_ERR(opp_table->regulator)) {
1502 dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
1503 goto unlock;
1506 /* Make sure there are no concurrent readers while updating opp_table */
1507 WARN_ON(!list_empty(&opp_table->opp_list));
1509 regulator_put(opp_table->regulator);
1510 opp_table->regulator = ERR_PTR(-ENXIO);
1512 /* Try freeing opp_table if this was the last blocking resource */
1513 _remove_opp_table(opp_table);
1515 unlock:
1516 mutex_unlock(&opp_table_lock);
1518 EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
1520 static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
1521 struct device_node *np)
1523 unsigned int count = opp_table->supported_hw_count;
1524 u32 version;
1525 int ret;
1527 if (!opp_table->supported_hw)
1528 return true;
1530 while (count--) {
1531 ret = of_property_read_u32_index(np, "opp-supported-hw", count,
1532 &version);
1533 if (ret) {
1534 dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
1535 __func__, count, ret);
1536 return false;
1539 /* Both of these are bitwise masks of the versions */
1540 if (!(version & opp_table->supported_hw[count]))
1541 return false;
1544 return true;
1548 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
1549 * @dev: device for which we do this operation
1550 * @np: device node
1552 * This function adds an opp definition to the opp table and returns status. The
1553 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
1554 * removed by dev_pm_opp_remove.
1556 * Locking: The internal opp_table and opp structures are RCU protected.
1557 * Hence this function internally uses RCU updater strategy with mutex locks
1558 * to keep the integrity of the internal data structures. Callers should ensure
1559 * that this function is *NOT* called under RCU protection or in contexts where
1560 * mutex cannot be locked.
1562 * Return:
1563 * 0 On success OR
1564 * Duplicate OPPs (both freq and volt are same) and opp->available
1565 * -EEXIST Freq are same and volt are different OR
1566 * Duplicate OPPs (both freq and volt are same) and !opp->available
1567 * -ENOMEM Memory allocation failure
1568 * -EINVAL Failed parsing the OPP node
1570 static int _opp_add_static_v2(struct device *dev, struct device_node *np)
1572 struct opp_table *opp_table;
1573 struct dev_pm_opp *new_opp;
1574 u64 rate;
1575 u32 val;
1576 int ret;
1578 /* Hold our table modification lock here */
1579 mutex_lock(&opp_table_lock);
1581 new_opp = _allocate_opp(dev, &opp_table);
1582 if (!new_opp) {
1583 ret = -ENOMEM;
1584 goto unlock;
1587 ret = of_property_read_u64(np, "opp-hz", &rate);
1588 if (ret < 0) {
1589 dev_err(dev, "%s: opp-hz not found\n", __func__);
1590 goto free_opp;
1593 /* Check if the OPP supports hardware's hierarchy of versions or not */
1594 if (!_opp_is_supported(dev, opp_table, np)) {
1595 dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
1596 goto free_opp;
1600 * Rate is defined as an unsigned long in clk API, and so casting
1601 * explicitly to its type. Must be fixed once rate is 64 bit
1602 * guaranteed in clk API.
1604 new_opp->rate = (unsigned long)rate;
1605 new_opp->turbo = of_property_read_bool(np, "turbo-mode");
1607 new_opp->np = np;
1608 new_opp->dynamic = false;
1609 new_opp->available = true;
1611 if (!of_property_read_u32(np, "clock-latency-ns", &val))
1612 new_opp->clock_latency_ns = val;
1614 ret = opp_parse_supplies(new_opp, dev, opp_table);
1615 if (ret)
1616 goto free_opp;
1618 ret = _opp_add(dev, new_opp, opp_table);
1619 if (ret)
1620 goto free_opp;
1622 /* OPP to select on device suspend */
1623 if (of_property_read_bool(np, "opp-suspend")) {
1624 if (opp_table->suspend_opp) {
1625 dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
1626 __func__, opp_table->suspend_opp->rate,
1627 new_opp->rate);
1628 } else {
1629 new_opp->suspend = true;
1630 opp_table->suspend_opp = new_opp;
1634 if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
1635 opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
1637 mutex_unlock(&opp_table_lock);
1639 pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
1640 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
1641 new_opp->u_volt_min, new_opp->u_volt_max,
1642 new_opp->clock_latency_ns);
1645 * Notify the changes in the availability of the operable
1646 * frequency/voltage list.
1648 srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
1649 return 0;
1651 free_opp:
1652 _opp_remove(opp_table, new_opp, false);
1653 unlock:
1654 mutex_unlock(&opp_table_lock);
1655 return ret;
1659 * dev_pm_opp_add() - Add an OPP table from a table definitions
1660 * @dev: device for which we do this operation
1661 * @freq: Frequency in Hz for this OPP
1662 * @u_volt: Voltage in uVolts for this OPP
1664 * This function adds an opp definition to the opp table and returns status.
1665 * The opp is made available by default and it can be controlled using
1666 * dev_pm_opp_enable/disable functions.
1668 * Locking: The internal opp_table and opp structures are RCU protected.
1669 * Hence this function internally uses RCU updater strategy with mutex locks
1670 * to keep the integrity of the internal data structures. Callers should ensure
1671 * that this function is *NOT* called under RCU protection or in contexts where
1672 * mutex cannot be locked.
1674 * Return:
1675 * 0 On success OR
1676 * Duplicate OPPs (both freq and volt are same) and opp->available
1677 * -EEXIST Freq are same and volt are different OR
1678 * Duplicate OPPs (both freq and volt are same) and !opp->available
1679 * -ENOMEM Memory allocation failure
1681 int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
1683 return _opp_add_v1(dev, freq, u_volt, true);
1685 EXPORT_SYMBOL_GPL(dev_pm_opp_add);
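/*
 * Editor's illustrative sketch, not part of the original file: registering a
 * couple of OPPs from driver code (e.g. values read from fuses or firmware)
 * instead of DT. The frequencies/voltages and example_register_opps() are
 * made up.
 */
static int example_register_opps(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_add(dev, 400000000, 950000);	/* 400 MHz @ 0.95 V */
	if (ret)
		return ret;

	ret = dev_pm_opp_add(dev, 800000000, 1100000);	/* 800 MHz @ 1.10 V */
	if (ret)
		dev_pm_opp_remove(dev, 400000000);

	return ret;
}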
1688 * _opp_set_availability() - helper to set the availability of an opp
1689 * @dev: device for which we do this operation
1690 * @freq: OPP frequency to modify availability
1691 * @availability_req: availability status requested for this opp
1693 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
1694 * share a common logic which is isolated here.
1696 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1697 * copy operation, returns 0 if no modification was done OR modification was
1698 * successful.
1700 * Locking: The internal opp_table and opp structures are RCU protected.
1701 * Hence this function internally uses RCU updater strategy with mutex locks to
1702 * keep the integrity of the internal data structures. Callers should ensure
1703 * that this function is *NOT* called under RCU protection or in contexts where
1704 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1706 static int _opp_set_availability(struct device *dev, unsigned long freq,
1707 bool availability_req)
1709 struct opp_table *opp_table;
1710 struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
1711 int r = 0;
1713 /* keep the node allocated */
1714 new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
1715 if (!new_opp)
1716 return -ENOMEM;
1718 mutex_lock(&opp_table_lock);
1720 /* Find the opp_table */
1721 opp_table = _find_opp_table(dev);
1722 if (IS_ERR(opp_table)) {
1723 r = PTR_ERR(opp_table);
1724 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
1725 goto unlock;
1728 /* Do we have the frequency? */
1729 list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
1730 if (tmp_opp->rate == freq) {
1731 opp = tmp_opp;
1732 break;
1735 if (IS_ERR(opp)) {
1736 r = PTR_ERR(opp);
1737 goto unlock;
1740 /* Is update really needed? */
1741 if (opp->available == availability_req)
1742 goto unlock;
1743 /* copy the old data over */
1744 *new_opp = *opp;
1746 /* plug in new node */
1747 new_opp->available = availability_req;
1749 list_replace_rcu(&opp->node, &new_opp->node);
1750 mutex_unlock(&opp_table_lock);
1751 call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
1753 /* Notify the change of the OPP availability */
1754 if (availability_req)
1755 srcu_notifier_call_chain(&opp_table->srcu_head,
1756 OPP_EVENT_ENABLE, new_opp);
1757 else
1758 srcu_notifier_call_chain(&opp_table->srcu_head,
1759 OPP_EVENT_DISABLE, new_opp);
1761 return 0;
1763 unlock:
1764 mutex_unlock(&opp_table_lock);
1765 kfree(new_opp);
1766 return r;
1770 * dev_pm_opp_enable() - Enable a specific OPP
1771 * @dev: device for which we do this operation
1772 * @freq: OPP frequency to enable
1774 * Enables a provided opp. If the operation is valid, this returns 0, else the
1775 * corresponding error value. It is meant to be used by users to make an OPP
1776 * available again after it was temporarily made unavailable with dev_pm_opp_disable.
1778 * Locking: The internal opp_table and opp structures are RCU protected.
1779 * Hence this function indirectly uses RCU and mutex locks to keep the
1780 * integrity of the internal data structures. Callers should ensure that
1781 * this function is *NOT* called under RCU protection or in contexts where
1782 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1784 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1785 * copy operation, returns 0 if no modification was done OR modification was
1786 * successful.
1788 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
1790 return _opp_set_availability(dev, freq, true);
1792 EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
1795 * dev_pm_opp_disable() - Disable a specific OPP
1796 * @dev: device for which we do this operation
1797 * @freq: OPP frequency to disable
1799 * Disables a provided opp. If the operation is valid, this returns
1800 * 0, else the corresponding error value. It is meant to be a temporary
1801 * control by users to make this OPP not available until the circumstances are
1802 * right to make it available again (with a call to dev_pm_opp_enable).
1804 * Locking: The internal opp_table and opp structures are RCU protected.
1805 * Hence this function indirectly uses RCU and mutex locks to keep the
1806 * integrity of the internal data structures. Callers should ensure that
1807 * this function is *NOT* called under RCU protection or in contexts where
1808 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1810 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1811 * copy operation, returns 0 if no modification was done OR modification was
1812 * successful.
1814 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
1816 return _opp_set_availability(dev, freq, false);
1818 EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
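/*
 * Editor's illustrative sketch, not part of the original file: temporarily
 * removing the fastest OPP from the pool when a thermal limit is hit and
 * restoring it afterwards. The 800 MHz value and example_throttle() are
 * hypothetical.
 */
static void example_throttle(struct device *dev, bool too_hot)
{
	if (too_hot)
		dev_pm_opp_disable(dev, 800000000);
	else
		dev_pm_opp_enable(dev, 800000000);
}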
1821 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
1822 * @dev: device pointer used to lookup OPP table.
1824 * Return: pointer to notifier head if found, otherwise -ENODEV or
1825 * -EINVAL based on type of error, cast as a pointer. The value must be checked
1826 * with IS_ERR() to determine whether it is a valid pointer or an error.
1828 * Locking: This function must be called under rcu_read_lock(). opp_table is a
1829 * RCU protected pointer. The reason for the same is that the opp pointer which
1830 * is returned will remain valid for use with opp_get_{voltage, freq} only while
1831 * under the locked area. The pointer returned must be used prior to unlocking
1832 * with rcu_read_unlock() to maintain the integrity of the pointer.
1834 struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
1836 struct opp_table *opp_table = _find_opp_table(dev);
1838 if (IS_ERR(opp_table))
1839 return ERR_CAST(opp_table); /* matching type */
1841 return &opp_table->srcu_head;
1843 EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
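/*
 * Editor's illustrative sketch, not part of the original file: subscribing to
 * OPP_EVENT_* notifications by fetching the notifier head under rcu_read_lock()
 * and registering outside of it (registration may sleep), a pattern used e.g.
 * by devfreq. example_opp_notifier()/example_register_notifier() are hypothetical.
 */
static int example_opp_notifier(struct notifier_block *nb, unsigned long event,
				void *data)
{
	pr_debug("OPP event %lu\n", event);
	return NOTIFY_OK;
}

static struct notifier_block example_opp_nb = {
	.notifier_call = example_opp_notifier,
};

static int example_register_notifier(struct device *dev)
{
	struct srcu_notifier_head *nh;

	rcu_read_lock();
	nh = dev_pm_opp_get_notifier(dev);
	rcu_read_unlock();
	if (IS_ERR(nh))
		return PTR_ERR(nh);

	return srcu_notifier_chain_register(nh, &example_opp_nb);
}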
1845 #ifdef CONFIG_OF
1847 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
1848 * entries
1849 * @dev: device pointer used to lookup OPP table.
1851 * Free OPPs created using static entries present in DT.
1853 * Locking: The internal opp_table and opp structures are RCU protected.
1854 * Hence this function indirectly uses RCU updater strategy with mutex locks
1855 * to keep the integrity of the internal data structures. Callers should ensure
1856 * that this function is *NOT* called under RCU protection or in contexts where
1857 * mutex cannot be locked.
1859 void dev_pm_opp_of_remove_table(struct device *dev)
1861 struct opp_table *opp_table;
1862 struct dev_pm_opp *opp, *tmp;
1864 /* Hold our table modification lock here */
1865 mutex_lock(&opp_table_lock);
1867 /* Check for existing table for 'dev' */
1868 opp_table = _find_opp_table(dev);
1869 if (IS_ERR(opp_table)) {
1870 int error = PTR_ERR(opp_table);
1872 if (error != -ENODEV)
1873 WARN(1, "%s: opp_table: %d\n",
1874 IS_ERR_OR_NULL(dev) ?
1875 "Invalid device" : dev_name(dev),
1876 error);
1877 goto unlock;
1880 /* Find if opp_table manages a single device */
1881 if (list_is_singular(&opp_table->dev_list)) {
1882 /* Free static OPPs */
1883 list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
1884 if (!opp->dynamic)
1885 _opp_remove(opp_table, opp, true);
1887 } else {
1888 _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
1891 unlock:
1892 mutex_unlock(&opp_table_lock);
1894 EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
1896 /* Returns opp descriptor node for a device, caller must do of_node_put() */
1897 struct device_node *_of_get_opp_desc_node(struct device *dev)
1900 * TODO: Support for multiple OPP tables.
1902 * There should be only ONE phandle present in "operating-points-v2"
1903 * property.
1906 return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
1909 /* Initializes OPP tables based on new bindings */
1910 static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
1912 struct device_node *np;
1913 struct opp_table *opp_table;
1914 int ret = 0, count = 0;
1916 mutex_lock(&opp_table_lock);
1918 opp_table = _managed_opp(opp_np);
1919 if (opp_table) {
1920 /* OPPs are already managed */
1921 if (!_add_opp_dev(dev, opp_table))
1922 ret = -ENOMEM;
1923 mutex_unlock(&opp_table_lock);
1924 return ret;
1926 mutex_unlock(&opp_table_lock);
1928 /* We have opp-table node now, iterate over it and add OPPs */
1929 for_each_available_child_of_node(opp_np, np) {
1930 count++;
1932 ret = _opp_add_static_v2(dev, np);
1933 if (ret) {
1934 dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
1935 ret);
1936 goto free_table;
1940 /* There should be one or more OPPs defined */
1941 if (WARN_ON(!count))
1942 return -ENOENT;
1944 mutex_lock(&opp_table_lock);
1946 opp_table = _find_opp_table(dev);
1947 if (WARN_ON(IS_ERR(opp_table))) {
1948 ret = PTR_ERR(opp_table);
1949 mutex_unlock(&opp_table_lock);
1950 goto free_table;
1953 opp_table->np = opp_np;
1954 opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
1956 mutex_unlock(&opp_table_lock);
1958 return 0;
1960 free_table:
1961 dev_pm_opp_of_remove_table(dev);
1963 return ret;
1966 /* Initializes OPP tables based on old-deprecated bindings */
1967 static int _of_add_opp_table_v1(struct device *dev)
1969 const struct property *prop;
1970 const __be32 *val;
1971 int nr;
1973 prop = of_find_property(dev->of_node, "operating-points", NULL);
1974 if (!prop)
1975 return -ENODEV;
1976 if (!prop->value)
1977 return -ENODATA;
1980 * Each OPP is a set of tuples consisting of frequency and
1981 * voltage like <freq-kHz vol-uV>.
1983 nr = prop->length / sizeof(u32);
1984 if (nr % 2) {
1985 dev_err(dev, "%s: Invalid OPP table\n", __func__);
1986 return -EINVAL;
1989 val = prop->value;
1990 while (nr) {
1991 unsigned long freq = be32_to_cpup(val++) * 1000;
1992 unsigned long volt = be32_to_cpup(val++);
1994 if (_opp_add_v1(dev, freq, volt, false))
1995 dev_warn(dev, "%s: Failed to add OPP %ld\n",
1996 __func__, freq);
1997 nr -= 2;
2000 return 0;
2004 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
2005 * @dev: device pointer used to lookup OPP table.
2007 * Register the initial OPP table with the OPP library for given device.
2009 * Locking: The internal opp_table and opp structures are RCU protected.
2010 * Hence this function indirectly uses RCU updater strategy with mutex locks
2011 * to keep the integrity of the internal data structures. Callers should ensure
2012 * that this function is *NOT* called under RCU protection or in contexts where
2013 * mutex cannot be locked.
2015 * Return:
2016 * 0 On success OR
2017 * Duplicate OPPs (both freq and volt are same) and opp->available
2018 * -EEXIST Freq are same and volt are different OR
2019 * Duplicate OPPs (both freq and volt are same) and !opp->available
2020 * -ENOMEM Memory allocation failure
2021 * -ENODEV when the 'operating-points' property is not found or contains
2022 * invalid data in the device node.
2023 * -ENODATA when empty 'operating-points' property is found
2024 * -EINVAL when invalid entries are found in opp-v2 table
2026 int dev_pm_opp_of_add_table(struct device *dev)
2028 struct device_node *opp_np;
2029 int ret;
2032 * OPPs have two version of bindings now. The older one is deprecated,
2033 * try for the new binding first.
2035 opp_np = _of_get_opp_desc_node(dev);
2036 if (!opp_np) {
2038 * Try old-deprecated bindings for backward compatibility with
2039 * older dtbs.
2041 return _of_add_opp_table_v1(dev);
2044 ret = _of_add_opp_table_v2(dev, opp_np);
2045 of_node_put(opp_np);
2047 return ret;
2049 EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
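/*
 * Editor's illustrative sketch, not part of the original file: typical pairing
 * of dev_pm_opp_of_add_table() at bind time with dev_pm_opp_of_remove_table()
 * at unbind time. Treating -ENODEV (no table in DT) as non-fatal is an
 * assumption of this sketch; both helper names are hypothetical.
 */
static int example_bind_opps(struct device *dev)
{
	int ret = dev_pm_opp_of_add_table(dev);

	if (ret == -ENODEV) {
		dev_dbg(dev, "no OPP table in DT, continuing without DVFS\n");
		return 0;
	}

	return ret;
}

static void example_unbind_opps(struct device *dev)
{
	dev_pm_opp_of_remove_table(dev);
}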
2050 #endif