/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/regulator/consumer.h>

#include "opp.h"

/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);

/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);

#define opp_rcu_lockdep_assert()				\
do {								\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&		\
			 !lockdep_is_held(&opp_table_lock),	\
			 "Missing rcu_read_lock() or "		\
			 "opp_table_lock protection");		\
} while (0)

static struct opp_device *_find_opp_dev(const struct device *dev,
					struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
		if (opp_dev->dev == dev)
			return opp_dev;

	return NULL;
}

/**
 * _find_opp_table() - find opp_table struct using device pointer
 * @dev:	device pointer used to lookup OPP table
 *
 * Search OPP table for one containing matching device. Does a RCU reader
 * operation to grab the pointer needed.
 *
 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * Locking: For readers, this function must be called under rcu_read_lock().
 * opp_table is a RCU protected pointer, which means that opp_table is valid
 * as long as we are under RCU lock.
 *
 * For Writers, this function must be called with opp_table_lock held.
 */
struct opp_table *_find_opp_table(struct device *dev)
{
	struct opp_table *opp_table;

	opp_rcu_lockdep_assert();

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry_rcu(opp_table, &opp_tables, node)
		if (_find_opp_dev(dev, opp_table))
			return opp_table;

	return ERR_PTR(-ENODEV);
}

/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
 * @opp:	opp for which the voltage has to be returned
 *
 * Return: voltage in micro volt corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with single power supply.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;
	unsigned long v = 0;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (IS_ERR_OR_NULL(tmp_opp))
		pr_err("%s: Invalid parameters\n", __func__);
	else
		v = tmp_opp->supplies[0].u_volt;

	return v;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);

/**
 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp:	opp for which the frequency has to be returned
 *
 * Return: frequency in hertz corresponding to the opp, else
 * return 0
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;
	unsigned long f = 0;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		f = tmp_opp->rate;

	return f;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
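
/*
 * Example (illustrative sketch, not part of the kernel sources): a typical
 * consumer looks an OPP up and reads its voltage/frequency inside one RCU
 * read-side critical section, as required by the locking rules documented
 * above. "my_dev" and "target" are placeholder names.
 *
 *	unsigned long target = 1200000000, u_volt, rate;
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_ceil(my_dev, &target);
 *	if (!IS_ERR(opp)) {
 *		u_volt = dev_pm_opp_get_voltage(opp);
 *		rate = dev_pm_opp_get_freq(opp);
 *	}
 *	rcu_read_unlock();
 */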
/**
 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 * @opp:	opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short durations of time to finish high throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. This means that opp which could have been fetched by
 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used in the same section as the usage of this function with the pointer
 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
 * pointer.
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
	struct dev_pm_opp *tmp_opp;

	opp_rcu_lockdep_assert();

	tmp_opp = rcu_dereference(opp);
	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return false;
	}

	return tmp_opp->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);

/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long clock_latency_ns;

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		clock_latency_ns = 0;
	else
		clock_latency_ns = opp_table->clock_latency_ns_max;

	rcu_read_unlock();
	return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);

static int _get_regulator_count(struct device *dev)
{
	struct opp_table *opp_table;
	int count;

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (!IS_ERR(opp_table))
		count = opp_table->regulator_count;
	else
		count = 0;

	rcu_read_unlock();

	return count;
}

/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the max voltage latency in nanoseconds.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;
	struct regulator *reg, **regulators;
	unsigned long latency_ns = 0;
	int ret, i, count;
	struct {
		unsigned long min;
		unsigned long max;
	} *uV;

	count = _get_regulator_count(dev);

	/* Regulator may not be required for the device */
	if (!count)
		return 0;

	regulators = kmalloc_array(count, sizeof(*regulators), GFP_KERNEL);
	if (!regulators)
		return 0;

	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
	if (!uV)
		goto free_regulators;

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		rcu_read_unlock();
		goto free_uV;
	}

	memcpy(regulators, opp_table->regulators, count * sizeof(*regulators));

	for (i = 0; i < count; i++) {
		uV[i].min = ~0;
		uV[i].max = 0;

		list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
			if (!opp->available)
				continue;

			if (opp->supplies[i].u_volt_min < uV[i].min)
				uV[i].min = opp->supplies[i].u_volt_min;
			if (opp->supplies[i].u_volt_max > uV[i].max)
				uV[i].max = opp->supplies[i].u_volt_max;
		}
	}

	rcu_read_unlock();

	/*
	 * The caller needs to ensure that opp_table (and hence the regulator)
	 * isn't freed, while we are executing this routine.
	 */
	for (i = 0; i < count; i++) {
		reg = regulators[i];
		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
		if (ret > 0)
			latency_ns += ret * 1000;
	}

free_uV:
	kfree(uV);
free_regulators:
	kfree(regulators);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);

/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to other.
 *
 * Locking: This function takes rcu_read_lock().
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	return dev_pm_opp_get_max_volt_latency(dev) +
		dev_pm_opp_get_max_clock_latency(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
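
/*
 * Example (illustrative sketch, not part of the kernel sources): a cpufreq or
 * devfreq driver typically feeds this value to its governor as the minimum
 * time to wait between frequency transitions. "policy" and "cpu_dev" are
 * placeholders for the caller's own objects.
 *
 *	policy->cpuinfo.transition_latency =
 *		dev_pm_opp_get_max_transition_latency(cpu_dev);
 */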
/**
 * dev_pm_opp_get_suspend_opp() - Get suspend opp
 * @dev:	device for which we do this operation
 *
 * Return: This function returns pointer to the suspend opp if it is
 * defined and available, otherwise it returns NULL.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
{
	struct opp_table *opp_table;

	opp_rcu_lockdep_assert();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
	    !opp_table->suspend_opp->available)
		return NULL;

	return opp_table->suspend_opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);

/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * else returns 0 if none or the corresponding error value.
 *
 * Locking: This function takes rcu_read_lock().
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp;
	int count = 0;

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		count = PTR_ERR(opp_table);
		dev_err(dev, "%s: OPP table not found (%d)\n",
			__func__, count);
		goto out_unlock;
	}

	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available)
			count++;
	}

out_unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
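
/*
 * Example (illustrative sketch, not part of the kernel sources): the count is
 * commonly used to size a frequency table before walking the OPPs. "dev" and
 * "table" are placeholders for the caller's own device and table pointer.
 *
 *	int num = dev_pm_opp_get_opp_count(dev);
 *
 *	if (num <= 0)
 *		return num ? num : -ENODATA;
 *	table = kcalloc(num + 1, sizeof(*table), GFP_KERNEL);
 */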
/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev:		device for which we do this operation
 * @freq:		frequency to search for
 * @available:		true/false - match for available opp
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. if available=true, then the
 * match is for exact matching frequency and is available in the stored OPP
 * table. if false, the match is for exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available == available &&
		    temp_opp->rate == freq) {
			opp = temp_opp;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);

static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
						   unsigned long *freq)
{
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;
			break;
		}
	}

	return opp;
}

/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	struct opp_table *opp_table;

	opp_rcu_lockdep_assert();

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	return _find_freq_ceil(opp_table, freq);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
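
/*
 * Example (illustrative sketch, not part of the kernel sources): repeatedly
 * calling the ceil helper with 'freq + 1' walks all available OPPs in
 * ascending frequency order. "dev" is a placeholder for the caller's device.
 *
 *	unsigned long freq = 0;
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(dev, &freq))) {
 *		pr_info("OPP: %lu Hz\n", freq);
 *		freq++;
 *	}
 *	rcu_read_unlock();
 */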
/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}

	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);

/*
 * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
 * while clk returned here is used.
 */
static struct clk *_get_opp_clk(struct device *dev)
{
	struct opp_table *opp_table;
	struct clk *clk;

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		clk = ERR_CAST(opp_table);
		goto unlock;
	}

	clk = opp_table->clk;
	if (IS_ERR(clk))
		dev_err(dev, "%s: No clock available for the device\n",
			__func__);

unlock:
	rcu_read_unlock();
	return clk;
}

static int _set_opp_voltage(struct device *dev, struct regulator *reg,
			    struct dev_pm_opp_supply *supply)
{
	int ret;

	/* Regulator not available for device */
	if (IS_ERR(reg)) {
		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
			PTR_ERR(reg));
		return 0;
	}

	dev_dbg(dev, "%s: voltages (uV): %lu %lu %lu\n", __func__,
		supply->u_volt_min, supply->u_volt, supply->u_volt_max);

	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
					    supply->u_volt, supply->u_volt_max);
	if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu uV): %d\n",
			__func__, supply->u_volt_min, supply->u_volt,
			supply->u_volt_max, ret);

	return ret;
}

static inline int
_generic_set_opp_clk_only(struct device *dev, struct clk *clk,
			  unsigned long old_freq, unsigned long freq)
{
	int ret;

	ret = clk_set_rate(clk, freq);
	if (ret) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);
	}

	return ret;
}

static int _generic_set_opp(struct dev_pm_set_opp_data *data)
{
	struct dev_pm_opp_supply *old_supply = data->old_opp.supplies;
	struct dev_pm_opp_supply *new_supply = data->new_opp.supplies;
	unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
	struct regulator *reg = data->regulators[0];
	struct device *dev = data->dev;
	int ret;

	/* This function only supports single regulator per device */
	if (WARN_ON(data->regulator_count > 1)) {
		dev_err(dev, "multiple regulators are not supported\n");
		return -EINVAL;
	}

	/* Scaling up? Scale voltage before frequency */
	if (freq > old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_voltage;
	}

	/* Change frequency */
	ret = _generic_set_opp_clk_only(dev, data->clk, old_freq, freq);
	if (ret)
		goto restore_voltage;

	/* Scaling down? Scale voltage after frequency */
	if (freq < old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_freq;
	}

	return 0;

restore_freq:
	if (_generic_set_opp_clk_only(dev, data->clk, freq, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_voltage:
	/* This shouldn't harm even if the voltages weren't updated earlier */
	if (old_supply->u_volt)
		_set_opp_voltage(dev, reg, old_supply);

	return ret;
}

/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev:	 device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies and clock source to the levels specified
 * by the OPP corresponding to the target_freq.
 *
 * Locking: This function takes rcu_read_lock().
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	unsigned long freq, old_freq;
	int (*set_opp)(struct dev_pm_set_opp_data *data);
	struct dev_pm_opp *old_opp, *opp;
	struct regulator **regulators;
	struct dev_pm_set_opp_data *data;
	struct clk *clk;
	int ret, size;

	if (unlikely(!target_freq)) {
		dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
			target_freq);
		return -EINVAL;
	}

	clk = _get_opp_clk(dev);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	freq = clk_round_rate(clk, target_freq);
	if ((long)freq <= 0)
		freq = target_freq;

	old_freq = clk_get_rate(clk);

	/* Return early if nothing to do */
	if (old_freq == freq) {
		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
			__func__, freq);
		return 0;
	}

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		rcu_read_unlock();
		return PTR_ERR(opp_table);
	}

	old_opp = _find_freq_ceil(opp_table, &old_freq);
	if (IS_ERR(old_opp)) {
		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
			__func__, old_freq, PTR_ERR(old_opp));
	}

	opp = _find_freq_ceil(opp_table, &freq);
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
			__func__, freq, ret);
		rcu_read_unlock();
		return ret;
	}

	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
		old_freq, freq);

	regulators = opp_table->regulators;

	/* Only frequency scaling */
	if (!regulators) {
		rcu_read_unlock();
		return _generic_set_opp_clk_only(dev, clk, old_freq, freq);
	}

	if (opp_table->set_opp)
		set_opp = opp_table->set_opp;
	else
		set_opp = _generic_set_opp;

	data = opp_table->set_opp_data;
	data->regulators = regulators;
	data->regulator_count = opp_table->regulator_count;
	data->clk = clk;
	data->dev = dev;

	data->old_opp.rate = old_freq;
	size = sizeof(*opp->supplies) * opp_table->regulator_count;
	if (IS_ERR(old_opp))
		memset(data->old_opp.supplies, 0, size);
	else
		memcpy(data->old_opp.supplies, old_opp->supplies, size);

	data->new_opp.rate = freq;
	memcpy(data->new_opp.supplies, opp->supplies, size);

	rcu_read_unlock();

	return set_opp(data);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
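
/*
 * Example (illustrative sketch, not part of the kernel sources): a cpufreq-dt
 * style ->target_index() callback can delegate the whole voltage/frequency
 * sequencing to this helper. "cpu_dev" and "freq_table" are placeholders, and
 * cpufreq table frequencies are in kHz, hence the conversion to Hz.
 *
 *	static int my_target_index(struct cpufreq_policy *policy,
 *				   unsigned int index)
 *	{
 *		return dev_pm_opp_set_rate(cpu_dev,
 *					   freq_table[index].frequency * 1000);
 *	}
 */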
/* OPP-dev Helpers */
static void _kfree_opp_dev_rcu(struct rcu_head *head)
{
	struct opp_device *opp_dev;

	opp_dev = container_of(head, struct opp_device, rcu_head);
	kfree_rcu(opp_dev, rcu_head);
}

static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
		  _kfree_opp_dev_rcu);
}

struct opp_device *_add_opp_dev(const struct device *dev,
				struct opp_table *opp_table)
{
	struct opp_device *opp_dev;
	int ret;

	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
	if (!opp_dev)
		return NULL;

	/* Initialize opp-dev */
	opp_dev->dev = dev;
	list_add_rcu(&opp_dev->node, &opp_table->dev_list);

	/* Create debugfs entries for the opp_table */
	ret = opp_debug_register(opp_dev, opp_table);
	if (ret)
		dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
			__func__, ret);

	return opp_dev;
}

/**
 * _add_opp_table() - Find OPP table or allocate a new one
 * @dev:	device for which we do this operation
 *
 * It tries to find an existing table first, if it couldn't find one, it
 * allocates a new OPP table and returns that.
 *
 * Return: valid opp_table pointer if success, else NULL.
 */
static struct opp_table *_add_opp_table(struct device *dev)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/* Check for existing table for 'dev' first */
	opp_table = _find_opp_table(dev);
	if (!IS_ERR(opp_table))
		return opp_table;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device is needed to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return NULL;

	INIT_LIST_HEAD(&opp_table->dev_list);

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		kfree(opp_table);
		return NULL;
	}

	_of_init_opp_table(opp_table, dev);

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);
		if (ret != -EPROBE_DEFER)
			dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
				ret);
	}

	srcu_init_notifier_head(&opp_table->srcu_head);
	INIT_LIST_HEAD(&opp_table->opp_list);

	/* Secure the device table modification */
	list_add_rcu(&opp_table->node, &opp_tables);
	return opp_table;
}

/**
 * _kfree_device_rcu() - Free opp_table RCU handler
 * @head:	RCU head
 */
static void _kfree_device_rcu(struct rcu_head *head)
{
	struct opp_table *opp_table = container_of(head, struct opp_table,
						   rcu_head);

	kfree_rcu(opp_table, rcu_head);
}

/**
 * _remove_opp_table() - Removes an OPP table
 * @opp_table: OPP table to be removed.
 *
 * Removes/frees OPP table if it doesn't contain any OPPs.
 */
static void _remove_opp_table(struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	if (!list_empty(&opp_table->opp_list))
		return;

	if (opp_table->supported_hw)
		return;

	if (opp_table->prop_name)
		return;

	if (opp_table->regulators)
		return;

	if (opp_table->set_opp)
		return;

	/* Release clk */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
				   node);

	_remove_opp_dev(opp_dev, opp_table);

	/* dev_list must be empty now */
	WARN_ON(!list_empty(&opp_table->dev_list));

	list_del_rcu(&opp_table->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
		  _kfree_device_rcu);
}

/**
 * _kfree_opp_rcu() - Free OPP RCU handler
 * @head:	RCU head
 */
static void _kfree_opp_rcu(struct rcu_head *head)
{
	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);

	kfree_rcu(opp, rcu_head);
}

/**
 * _opp_remove() - Remove an OPP from a table definition
 * @opp_table:	points back to the opp_table struct this opp belongs to
 * @opp:	pointer to the OPP to remove
 * @notify:	OPP_EVENT_REMOVE notification should be sent or not
 *
 * This function removes an opp definition from the opp table.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * It is assumed that the caller holds required mutex for an RCU updater
 * strategy.
 */
void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp,
		 bool notify)
{
	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	if (notify)
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_REMOVE, opp);
	opp_debug_remove_one(opp);
	list_del_rcu(&opp->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	_remove_opp_table(opp_table);
}

/**
 * dev_pm_opp_remove() - Remove an OPP from OPP table
 * @dev:	device for which we do this operation
 * @freq:	OPP to remove with matching 'freq'
 *
 * This function removes an opp from the opp table.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct opp_table *opp_table;
	bool found = false;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		goto unlock;

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->rate == freq) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
		goto unlock;
	}

	_opp_remove(opp_table, opp, true);
unlock:
	mutex_unlock(&opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);

struct dev_pm_opp *_allocate_opp(struct device *dev,
				 struct opp_table **opp_table)
{
	struct dev_pm_opp *opp;
	int count, supply_size;
	struct opp_table *table;

	table = _add_opp_table(dev);
	if (!table)
		return NULL;

	/* Allocate space for at least one supply */
	count = table->regulator_count ? table->regulator_count : 1;
	supply_size = sizeof(*opp->supplies) * count;

	/* allocate new OPP node and supplies structures */
	opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL);
	if (!opp) {
		kfree(table);
		return NULL;
	}

	/* Put the supplies at the end of the OPP structure as an empty array */
	opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
	INIT_LIST_HEAD(&opp->node);

	*opp_table = table;

	return opp;
}

static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
					 struct opp_table *opp_table)
{
	struct regulator *reg;
	int i;

	for (i = 0; i < opp_table->regulator_count; i++) {
		reg = opp_table->regulators[i];

		if (!regulator_is_supported_voltage(reg,
					opp->supplies[i].u_volt_min,
					opp->supplies[i].u_volt_max)) {
			pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
				__func__, opp->supplies[i].u_volt_min,
				opp->supplies[i].u_volt_max);
			return false;
		}
	}

	return true;
}

int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	     struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	struct list_head *head = &opp_table->opp_list;
	int ret;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
		if (new_opp->rate > opp->rate) {
			head = &opp->node;
			continue;
		}

		if (new_opp->rate < opp->rate)
			break;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->supplies[0].u_volt,
			 opp->available, new_opp->rate,
			 new_opp->supplies[0].u_volt, new_opp->available);

		/* Should we compare voltages for all regulators here ? */
		return opp->available &&
		       new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? 0 : -EEXIST;
	}

	new_opp->opp_table = opp_table;
	list_add_rcu(&new_opp->node, head);

	ret = opp_debug_create_one(new_opp, opp_table);
	if (ret)
		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
			__func__, ret);

	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rate);
	}

	return 0;
}

/**
 * _opp_add_v1() - Allocate an OPP based on v1 bindings.
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 * @dynamic:	Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
 * and freed by dev_pm_opp_of_remove_table.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
		bool dynamic)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *new_opp;
	unsigned long tol;
	int ret;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	new_opp = _allocate_opp(dev, &opp_table);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* populate the opp table */
	new_opp->rate = freq;
	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
	new_opp->supplies[0].u_volt = u_volt;
	new_opp->supplies[0].u_volt_min = u_volt - tol;
	new_opp->supplies[0].u_volt_max = u_volt + tol;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, opp_table);
	if (ret)
		goto free_opp;

	mutex_unlock(&opp_table_lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_remove(opp_table, new_opp, false);
unlock:
	mutex_unlock(&opp_table_lock);
	return ret;
}

/**
 * dev_pm_opp_set_supported_hw() - Set supported platforms
 * @dev: Device for which supported-hw has to be set.
 * @versions: Array of hierarchy of versions to match.
 * @count: Number of elements in the array.
 *
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the hierarchy of versions it supports. OPP layer will then enable
 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
 * property.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
				unsigned int count)
{
	struct opp_table *opp_table;
	int ret = 0;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	opp_table = _add_opp_table(dev);
	if (!opp_table) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	/* Do we already have a version hierarchy associated with opp_table? */
	if (opp_table->supported_hw) {
		dev_err(dev, "%s: Already have supported hardware list\n",
			__func__);
		ret = -EBUSY;
		goto err;
	}

	opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
					  GFP_KERNEL);
	if (!opp_table->supported_hw) {
		ret = -ENOMEM;
		goto err;
	}

	opp_table->supported_hw_count = count;
	mutex_unlock(&opp_table_lock);
	return 0;

err:
	_remove_opp_table(opp_table);
unlock:
	mutex_unlock(&opp_table_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
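
/*
 * Example (illustrative sketch, not part of the kernel sources): a platform
 * driver that knows its silicon revision can restrict the DT table to the
 * matching 'opp-supported-hw' entries before the OPPs are parsed. The version
 * values and "cpu_dev" are placeholders.
 *
 *	const u32 hw_version[] = { 0xf, 0x3 };
 *
 *	ret = dev_pm_opp_set_supported_hw(cpu_dev, hw_version,
 *					  ARRAY_SIZE(hw_version));
 */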
/**
 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
 * @dev: Device for which supported-hw has to be put.
 *
 * This is required only for the V2 bindings, and is called for a matching
 * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
 * will not be freed.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_put_supported_hw(struct device *dev)
{
	struct opp_table *opp_table;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	/* Check for existing table for 'dev' first */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "Failed to find opp_table: %ld\n",
			PTR_ERR(opp_table));
		goto unlock;
	}

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	if (!opp_table->supported_hw) {
		dev_err(dev, "%s: Doesn't have supported hardware list\n",
			__func__);
		goto unlock;
	}

	kfree(opp_table->supported_hw);
	opp_table->supported_hw = NULL;
	opp_table->supported_hw_count = 0;

	/* Try freeing opp_table if this was the last blocking resource */
	_remove_opp_table(opp_table);

unlock:
	mutex_unlock(&opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);

/**
 * dev_pm_opp_set_prop_name() - Set prop-extn name
 * @dev: Device for which the prop-name has to be set.
 * @name: name to postfix to properties.
 *
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the extn to be used for certain property names. The properties to
 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
 * should postfix the property name with -<name> while looking for them.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
	struct opp_table *opp_table;
	int ret = 0;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	opp_table = _add_opp_table(dev);
	if (!opp_table) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	/* Do we already have a prop-name associated with opp_table? */
	if (opp_table->prop_name) {
		dev_err(dev, "%s: Already have prop-name %s\n", __func__,
			opp_table->prop_name);
		ret = -EBUSY;
		goto err;
	}

	opp_table->prop_name = kstrdup(name, GFP_KERNEL);
	if (!opp_table->prop_name) {
		ret = -ENOMEM;
		goto err;
	}

	mutex_unlock(&opp_table_lock);
	return 0;

err:
	_remove_opp_table(opp_table);
unlock:
	mutex_unlock(&opp_table_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
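
/*
 * Example (illustrative sketch, not part of the kernel sources): after
 * registering a prop-name such as "slow", the core parses
 * "opp-microvolt-slow" and "opp-microamp-slow" instead of the base
 * properties. "cpu_dev" is a placeholder.
 *
 *	ret = dev_pm_opp_set_prop_name(cpu_dev, "slow");
 */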
/**
 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
 * @dev: Device for which the prop-name has to be put.
 *
 * This is required only for the V2 bindings, and is called for a matching
 * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
 * will not be freed.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_put_prop_name(struct device *dev)
{
	struct opp_table *opp_table;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	/* Check for existing table for 'dev' first */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "Failed to find opp_table: %ld\n",
			PTR_ERR(opp_table));
		goto unlock;
	}

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	if (!opp_table->prop_name) {
		dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
		goto unlock;
	}

	kfree(opp_table->prop_name);
	opp_table->prop_name = NULL;

	/* Try freeing opp_table if this was the last blocking resource */
	_remove_opp_table(opp_table);

unlock:
	mutex_unlock(&opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);

static int _allocate_set_opp_data(struct opp_table *opp_table)
{
	struct dev_pm_set_opp_data *data;
	int len, count = opp_table->regulator_count;

	if (WARN_ON(!count))
		return -EINVAL;

	/* space for set_opp_data */
	len = sizeof(*data);

	/* space for old_opp.supplies and new_opp.supplies */
	len += 2 * sizeof(struct dev_pm_opp_supply) * count;

	data = kzalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->old_opp.supplies = (void *)(data + 1);
	data->new_opp.supplies = data->old_opp.supplies + count;

	opp_table->set_opp_data = data;

	return 0;
}

static void _free_set_opp_data(struct opp_table *opp_table)
{
	kfree(opp_table->set_opp_data);
	opp_table->set_opp_data = NULL;
}

/**
 * dev_pm_opp_set_regulators() - Set regulator names for the device
 * @dev: Device for which regulator name is being set.
 * @names: Array of pointers to the names of the regulator.
 * @count: Number of regulators.
 *
 * In order to support OPP switching, OPP layer needs to know the name of the
 * device's regulators, as the core would be required to switch voltages as
 * well.
 *
 * This must be called before any OPPs are initialized for the device.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
					    const char * const names[],
					    unsigned int count)
{
	struct opp_table *opp_table;
	struct regulator *reg;
	int ret, i;

	mutex_lock(&opp_table_lock);

	opp_table = _add_opp_table(dev);
	if (!opp_table) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		ret = -EBUSY;
		goto err;
	}

	/* Already have regulators set */
	if (opp_table->regulators) {
		ret = -EBUSY;
		goto err;
	}

	opp_table->regulators = kmalloc_array(count,
					      sizeof(*opp_table->regulators),
					      GFP_KERNEL);
	if (!opp_table->regulators) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < count; i++) {
		reg = regulator_get_optional(dev, names[i]);
		if (IS_ERR(reg)) {
			ret = PTR_ERR(reg);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "%s: no regulator (%s) found: %d\n",
					__func__, names[i], ret);
			goto free_regulators;
		}

		opp_table->regulators[i] = reg;
	}

	opp_table->regulator_count = count;

	/* Allocate block only once to pass to set_opp() routines */
	ret = _allocate_set_opp_data(opp_table);
	if (ret)
		goto free_regulators;

	mutex_unlock(&opp_table_lock);
	return opp_table;

free_regulators:
	while (i != 0)
		regulator_put(opp_table->regulators[--i]);

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
	opp_table->regulator_count = 0;
err:
	_remove_opp_table(opp_table);
unlock:
	mutex_unlock(&opp_table_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
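
/*
 * Example (illustrative sketch, not part of the kernel sources): a driver for
 * a device supplied by a single "vdd" rail registers the regulator before the
 * OPP table is populated and releases it on the teardown path. "my_dev" is a
 * placeholder.
 *
 *	const char * const reg_names[] = { "vdd" };
 *	struct opp_table *opp_table;
 *
 *	opp_table = dev_pm_opp_set_regulators(my_dev, reg_names,
 *					      ARRAY_SIZE(reg_names));
 *	if (IS_ERR(opp_table))
 *		return PTR_ERR(opp_table);
 *	...
 *	dev_pm_opp_put_regulators(opp_table);
 */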
/**
 * dev_pm_opp_put_regulators() - Releases resources blocked for regulator
 * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_put_regulators(struct opp_table *opp_table)
{
	int i;

	mutex_lock(&opp_table_lock);

	if (!opp_table->regulators) {
		pr_err("%s: Doesn't have regulators set\n", __func__);
		goto unlock;
	}

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	for (i = opp_table->regulator_count - 1; i >= 0; i--)
		regulator_put(opp_table->regulators[i]);

	_free_set_opp_data(opp_table);

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
	opp_table->regulator_count = 0;

	/* Try freeing opp_table if this was the last blocking resource */
	_remove_opp_table(opp_table);

unlock:
	mutex_unlock(&opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);

/**
 * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
 * @dev: Device for which the helper is getting registered.
 * @set_opp: Custom set OPP helper.
 *
 * This is useful to support complex platforms (like platforms with multiple
 * regulators per device), instead of the generic OPP set rate helper.
 *
 * This must be called before any OPPs are initialized for the device.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_register_set_opp_helper(struct device *dev,
			int (*set_opp)(struct dev_pm_set_opp_data *data))
{
	struct opp_table *opp_table;
	int ret;

	if (!set_opp)
		return -EINVAL;

	mutex_lock(&opp_table_lock);

	opp_table = _add_opp_table(dev);
	if (!opp_table) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		ret = -EBUSY;
		goto err;
	}

	/* Already have custom set_opp helper */
	if (WARN_ON(opp_table->set_opp)) {
		ret = -EBUSY;
		goto err;
	}

	opp_table->set_opp = set_opp;

	mutex_unlock(&opp_table_lock);
	return 0;

err:
	_remove_opp_table(opp_table);
unlock:
	mutex_unlock(&opp_table_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
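
/*
 * Example (illustrative sketch, not part of the kernel sources): a platform
 * with a non-trivial supply sequence can provide its own helper; the core
 * hands it the old/new OPP data and the clock resolved for the device.
 * "my_sequence_supplies" is a hypothetical platform specific routine.
 *
 *	static int my_set_opp(struct dev_pm_set_opp_data *data)
 *	{
 *		int ret;
 *
 *		ret = my_sequence_supplies(data->regulators,
 *					   data->new_opp.supplies,
 *					   data->regulator_count);
 *		if (ret)
 *			return ret;
 *
 *		return clk_set_rate(data->clk, data->new_opp.rate);
 *	}
 *
 *	ret = dev_pm_opp_register_set_opp_helper(dev, my_set_opp);
 */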
/**
 * dev_pm_opp_register_put_opp_helper() - Releases resources blocked for
 *					  set_opp helper
 * @dev: Device for which custom set_opp helper has to be cleared.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_register_put_opp_helper(struct device *dev)
{
	struct opp_table *opp_table;

	mutex_lock(&opp_table_lock);

	/* Check for existing table for 'dev' first */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "Failed to find opp_table: %ld\n",
			PTR_ERR(opp_table));
		goto unlock;
	}

	if (!opp_table->set_opp) {
		dev_err(dev, "%s: Doesn't have custom set_opp helper set\n",
			__func__);
		goto unlock;
	}

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	opp_table->set_opp = NULL;

	/* Try freeing opp_table if this was the last blocking resource */
	_remove_opp_table(opp_table);

unlock:
	mutex_unlock(&opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper);

/**
 * dev_pm_opp_add() - Add an OPP table from a table definition
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	return _opp_add_v1(dev, freq, u_volt, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
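
/*
 * Example (illustrative sketch, not part of the kernel sources): boards
 * without OPP entries in DT can register a table from C. Frequencies are in
 * Hz and voltages in uV; "my_dev" and the values are placeholders.
 *
 *	dev_pm_opp_add(my_dev, 500000000, 970000);
 *	dev_pm_opp_add(my_dev, 800000000, 1100000);
 *	dev_pm_opp_add(my_dev, 1000000000, 1200000);
 */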
/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev:		device for which we do this operation
 * @freq:		OPP frequency to modify availability
 * @availability_req:	availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* keep the node allocated */
	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
	if (!new_opp)
		return -ENOMEM;

	mutex_lock(&opp_table_lock);

	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&opp_table_lock);
	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_ENABLE, new_opp);
	else
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_DISABLE, new_opp);

	return 0;

unlock:
	mutex_unlock(&opp_table_lock);
	kfree(new_opp);
	return r;
}

/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after it was temporarily made unavailable with
 * dev_pm_opp_disable.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);

/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to dev_pm_opp_enable).
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
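
/*
 * Example (illustrative sketch, not part of the kernel sources): a thermal or
 * platform driver can temporarily remove the fastest OPP from the pool and
 * restore it later. "my_dev" is a placeholder and 1000000000 is an example
 * frequency in Hz.
 *
 *	dev_pm_opp_disable(my_dev, 1000000000);	// too hot, drop this OPP
 *	...
 *	dev_pm_opp_enable(my_dev, 1000000000);	// cooled down, allow it again
 */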
/**
 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
 * @dev:	device pointer used to lookup OPP table.
 *
 * Return: pointer to notifier head if found, otherwise -ENODEV or
 * -EINVAL based on type of error casted as pointer. Value must be checked
 * with IS_ERR to determine valid pointer or error result.
 *
 * Locking: This function must be called under rcu_read_lock(). opp_table is a
 * RCU protected pointer. The reason for the same is that the opp pointer which
 * is returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
{
	struct opp_table *opp_table = _find_opp_table(dev);

	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table); /* matching type */

	return &opp_table->srcu_head;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
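
/*
 * Example (illustrative sketch, not part of the kernel sources): a user such
 * as devfreq can subscribe to OPP_EVENT_ADD/REMOVE/ENABLE/DISABLE events.
 * "my_opp_notify" and "my_dev" are placeholders.
 *
 *	struct srcu_notifier_head *nh;
 *	struct notifier_block nb = { .notifier_call = my_opp_notify };
 *
 *	rcu_read_lock();
 *	nh = dev_pm_opp_get_notifier(my_dev);
 *	rcu_read_unlock();
 *	if (!IS_ERR(nh))
 *		srcu_notifier_chain_register(nh, &nb);
 */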
/*
 * Free OPPs either created using static entries present in DT or even the
 * dynamically added entries based on remove_all param.
 */
void _dev_pm_opp_remove_table(struct device *dev, bool remove_all)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp, *tmp;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	/* Check for existing table for 'dev' */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int error = PTR_ERR(opp_table);

		if (error != -ENODEV)
			WARN(1, "%s: opp_table: %d\n",
			     IS_ERR_OR_NULL(dev) ?
					"Invalid device" : dev_name(dev),
			     error);
		goto unlock;
	}

	/* Find if opp_table manages a single device */
	if (list_is_singular(&opp_table->dev_list)) {
		/* Free static OPPs */
		list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
			if (remove_all || !opp->dynamic)
				_opp_remove(opp_table, opp, true);
		}
	} else {
		_remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
	}

unlock:
	mutex_unlock(&opp_table_lock);
}

/**
 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
 * @dev:	device pointer used to lookup OPP table.
 *
 * Free both OPPs created using static entries present in DT and the
 * dynamically added entries.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_remove_table(struct device *dev)
{
	_dev_pm_opp_remove_table(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);