drivers/base/power/opp/core.c
1 /*
2 * Generic OPP Interface
4 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
5 * Nishanth Menon
6 * Romit Dasgupta
7 * Kevin Hilman
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 #include <linux/clk.h>
17 #include <linux/errno.h>
18 #include <linux/err.h>
19 #include <linux/slab.h>
20 #include <linux/device.h>
21 #include <linux/export.h>
22 #include <linux/regulator/consumer.h>
24 #include "opp.h"
27 * The root of the list of all opp-tables. All opp_table structures branch off
28 * from here, with each opp_table containing the list of opps it supports in
29 * various states of availability.
31 LIST_HEAD(opp_tables);
32 /* Lock to allow exclusive modification to the device and opp lists */
33 DEFINE_MUTEX(opp_table_lock);
35 #define opp_rcu_lockdep_assert() \
36 do { \
37 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
38 !lockdep_is_held(&opp_table_lock), \
39 "Missing rcu_read_lock() or " \
40 "opp_table_lock protection"); \
41 } while (0)
43 static struct opp_device *_find_opp_dev(const struct device *dev,
44 struct opp_table *opp_table)
46 struct opp_device *opp_dev;
48 list_for_each_entry(opp_dev, &opp_table->dev_list, node)
49 if (opp_dev->dev == dev)
50 return opp_dev;
52 return NULL;
55 /**
56 * _find_opp_table() - find opp_table struct using device pointer
57 * @dev: device pointer used to lookup OPP table
59 * Search OPP table for one containing matching device. Does a RCU reader
60 * operation to grab the pointer needed.
62 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
63 * -EINVAL based on type of error.
65 * Locking: For readers, this function must be called under rcu_read_lock().
66 * opp_table is a RCU protected pointer, which means that opp_table is valid
67 * as long as we are under RCU lock.
69 * For Writers, this function must be called with opp_table_lock held.
71 struct opp_table *_find_opp_table(struct device *dev)
73 struct opp_table *opp_table;
75 opp_rcu_lockdep_assert();
77 if (IS_ERR_OR_NULL(dev)) {
78 pr_err("%s: Invalid parameters\n", __func__);
79 return ERR_PTR(-EINVAL);
82 list_for_each_entry_rcu(opp_table, &opp_tables, node)
83 if (_find_opp_dev(dev, opp_table))
84 return opp_table;
86 return ERR_PTR(-ENODEV);
89 /**
90 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
91 * @opp: opp for which the voltage has to be returned
93 * Return: voltage in microvolts corresponding to the opp, else
94 * return 0
96 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
97 * protected pointer. This means that opp which could have been fetched by
98 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
99 * under RCU lock. The pointer returned by the opp_find_freq family must be
100 * used in the same section as the usage of this function with the pointer
101 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
102 * pointer.
104 unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
106 struct dev_pm_opp *tmp_opp;
107 unsigned long v = 0;
109 opp_rcu_lockdep_assert();
111 tmp_opp = rcu_dereference(opp);
112 if (IS_ERR_OR_NULL(tmp_opp))
113 pr_err("%s: Invalid parameters\n", __func__);
114 else
115 v = tmp_opp->u_volt;
117 return v;
119 EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
122 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
123 * @opp: opp for which the frequency has to be returned
125 * Return: frequency in hertz corresponding to the opp, else
126 * return 0
128 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
129 * protected pointer. This means that opp which could have been fetched by
130 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
131 * under RCU lock. The pointer returned by the opp_find_freq family must be
132 * used in the same section as the usage of this function with the pointer
133 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
134 * pointer.
136 unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
138 struct dev_pm_opp *tmp_opp;
139 unsigned long f = 0;
141 opp_rcu_lockdep_assert();
143 tmp_opp = rcu_dereference(opp);
144 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
145 pr_err("%s: Invalid parameters\n", __func__);
146 else
147 f = tmp_opp->rate;
149 return f;
151 EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
154 * dev_pm_opp_is_turbo() - Returns whether opp is a turbo OPP or not
155 * @opp: opp for which turbo mode is being verified
157 * Turbo OPPs are not for normal use, and can be enabled (under certain
158 * conditions) for short durations of time to finish high-throughput work
159 * quickly. Running on them for longer times may overheat the chip.
161 * Return: true if opp is turbo opp, else false.
163 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
164 * protected pointer. This means that opp which could have been fetched by
165 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
166 * under RCU lock. The pointer returned by the opp_find_freq family must be
167 * used in the same section as the usage of this function with the pointer
168 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
169 * pointer.
171 bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
173 struct dev_pm_opp *tmp_opp;
175 opp_rcu_lockdep_assert();
177 tmp_opp = rcu_dereference(opp);
178 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
179 pr_err("%s: Invalid parameters\n", __func__);
180 return false;
183 return tmp_opp->turbo;
185 EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
188 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
189 * @dev: device for which we do this operation
191 * Return: This function returns the max clock latency in nanoseconds.
193 * Locking: This function takes rcu_read_lock().
195 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
197 struct opp_table *opp_table;
198 unsigned long clock_latency_ns;
200 rcu_read_lock();
202 opp_table = _find_opp_table(dev);
203 if (IS_ERR(opp_table))
204 clock_latency_ns = 0;
205 else
206 clock_latency_ns = opp_table->clock_latency_ns_max;
208 rcu_read_unlock();
209 return clock_latency_ns;
211 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
214 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
215 * @dev: device for which we do this operation
217 * Return: This function returns the max voltage latency in nanoseconds.
219 * Locking: This function takes rcu_read_lock().
221 unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
223 struct opp_table *opp_table;
224 struct dev_pm_opp *opp;
225 struct regulator *reg;
226 unsigned long latency_ns = 0;
227 unsigned long min_uV = ~0, max_uV = 0;
228 int ret;
230 rcu_read_lock();
232 opp_table = _find_opp_table(dev);
233 if (IS_ERR(opp_table)) {
234 rcu_read_unlock();
235 return 0;
238 reg = opp_table->regulator;
239 if (IS_ERR(reg)) {
240 /* Regulator may not be required for device */
241 rcu_read_unlock();
242 return 0;
245 list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
246 if (!opp->available)
247 continue;
249 if (opp->u_volt_min < min_uV)
250 min_uV = opp->u_volt_min;
251 if (opp->u_volt_max > max_uV)
252 max_uV = opp->u_volt_max;
255 rcu_read_unlock();
258 * The caller needs to ensure that opp_table (and hence the regulator)
259 * isn't freed, while we are executing this routine.
261 ret = regulator_set_voltage_time(reg, min_uV, max_uV);
262 if (ret > 0)
263 latency_ns = ret * 1000;
265 return latency_ns;
267 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
270 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
271 * nanoseconds
272 * @dev: device for which we do this operation
274 * Return: This function returns the max transition latency, in nanoseconds, to
275 * switch from one OPP to another.
277 * Locking: This function takes rcu_read_lock().
279 unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
281 return dev_pm_opp_get_max_volt_latency(dev) +
282 dev_pm_opp_get_max_clock_latency(dev);
284 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
287 * dev_pm_opp_get_suspend_opp() - Get suspend opp
288 * @dev: device for which we do this operation
290 * Return: This function returns pointer to the suspend opp if it is
291 * defined and available, otherwise it returns NULL.
293 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
294 * protected pointer. The reason for the same is that the opp pointer which is
295 * returned will remain valid for use with opp_get_{voltage, freq} only while
296 * under the locked area. The pointer returned must be used prior to unlocking
297 * with rcu_read_unlock() to maintain the integrity of the pointer.
299 struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
301 struct opp_table *opp_table;
303 opp_rcu_lockdep_assert();
305 opp_table = _find_opp_table(dev);
306 if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
307 !opp_table->suspend_opp->available)
308 return NULL;
310 return opp_table->suspend_opp;
312 EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
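/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): picking the frequency to use across suspend, leaving it at zero when
 * no suspend OPP is defined. "dev" is assumed to be the caller's device, and
 * the lookup happens under rcu_read_lock() as required by the kernel-doc
 * above.
 *
 *	struct dev_pm_opp *opp;
 *	unsigned long suspend_hz = 0;
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_get_suspend_opp(dev);
 *	if (opp)
 *		suspend_hz = dev_pm_opp_get_freq(opp);
 *	rcu_read_unlock();
 */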
315 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
316 * @dev: device for which we do this operation
318 * Return: This function returns the number of available opps if there are any,
319 * else returns 0 if none or the corresponding error value.
321 * Locking: This function takes rcu_read_lock().
323 int dev_pm_opp_get_opp_count(struct device *dev)
325 struct opp_table *opp_table;
326 struct dev_pm_opp *temp_opp;
327 int count = 0;
329 rcu_read_lock();
331 opp_table = _find_opp_table(dev);
332 if (IS_ERR(opp_table)) {
333 count = PTR_ERR(opp_table);
334 dev_err(dev, "%s: OPP table not found (%d)\n",
335 __func__, count);
336 goto out_unlock;
339 list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
340 if (temp_opp->available)
341 count++;
344 out_unlock:
345 rcu_read_unlock();
346 return count;
348 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
351 * dev_pm_opp_find_freq_exact() - search for an exact frequency
352 * @dev: device for which we do this operation
353 * @freq: frequency to search for
354 * @available: true/false - match for available opp
356 * Return: Searches for exact match in the opp table and returns pointer to the
357 * matching opp if found, else returns ERR_PTR in case of error and should
358 * be handled using IS_ERR. Error return values can be:
359 * EINVAL: for bad pointer
360 * ERANGE: no match found for search
361 * ENODEV: if device not found in list of registered devices
363 * Note: available is a modifier for the search. If available=true, then the
364 * match is for an exact matching frequency which is available in the stored OPP
365 * table. If false, the match is for an exact frequency which is not available.
367 * This provides a mechanism to enable an opp which is not available currently
368 * or the opposite as well.
370 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
371 * protected pointer. The reason for the same is that the opp pointer which is
372 * returned will remain valid for use with opp_get_{voltage, freq} only while
373 * under the locked area. The pointer returned must be used prior to unlocking
374 * with rcu_read_unlock() to maintain the integrity of the pointer.
376 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
377 unsigned long freq,
378 bool available)
380 struct opp_table *opp_table;
381 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
383 opp_rcu_lockdep_assert();
385 opp_table = _find_opp_table(dev);
386 if (IS_ERR(opp_table)) {
387 int r = PTR_ERR(opp_table);
389 dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
390 return ERR_PTR(r);
393 list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
394 if (temp_opp->available == available &&
395 temp_opp->rate == freq) {
396 opp = temp_opp;
397 break;
401 return opp;
403 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
406 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
407 * @dev: device for which we do this operation
408 * @freq: Start frequency
410 * Search for the matching ceil *available* OPP from a starting freq
411 * for a device.
413 * Return: matching *opp and refreshes *freq accordingly, else returns
414 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
415 * values can be:
416 * EINVAL: for bad pointer
417 * ERANGE: no match found for search
418 * ENODEV: if device not found in list of registered devices
420 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
421 * protected pointer. The reason for the same is that the opp pointer which is
422 * returned will remain valid for use with opp_get_{voltage, freq} only while
423 * under the locked area. The pointer returned must be used prior to unlocking
424 * with rcu_read_unlock() to maintain the integrity of the pointer.
426 struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
427 unsigned long *freq)
429 struct opp_table *opp_table;
430 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
432 opp_rcu_lockdep_assert();
434 if (!dev || !freq) {
435 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
436 return ERR_PTR(-EINVAL);
439 opp_table = _find_opp_table(dev);
440 if (IS_ERR(opp_table))
441 return ERR_CAST(opp_table);
443 list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
444 if (temp_opp->available && temp_opp->rate >= *freq) {
445 opp = temp_opp;
446 *freq = opp->rate;
447 break;
451 return opp;
453 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
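/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): a consumer looks up the ceil OPP for a target rate and reads its
 * voltage while still under rcu_read_lock(), as the kernel-doc above
 * requires. The function name and parameters are made up for the example;
 * "dev" is assumed to come from the calling driver.
 *
 *	static int example_get_opp_voltage(struct device *dev,
 *					   unsigned long target_hz,
 *					   unsigned long *volt_uv)
 *	{
 *		struct dev_pm_opp *opp;
 *		unsigned long rate = target_hz;
 *
 *		rcu_read_lock();
 *		opp = dev_pm_opp_find_freq_ceil(dev, &rate);
 *		if (IS_ERR(opp)) {
 *			rcu_read_unlock();
 *			return PTR_ERR(opp);
 *		}
 *		*volt_uv = dev_pm_opp_get_voltage(opp);
 *		rcu_read_unlock();
 *		return 0;
 *	}
 */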
456 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
457 * @dev: device for which we do this operation
458 * @freq: Start frequency
460 * Search for the matching floor *available* OPP from a starting freq
461 * for a device.
463 * Return: matching *opp and refreshes *freq accordingly, else returns
464 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
465 * values can be:
466 * EINVAL: for bad pointer
467 * ERANGE: no match found for search
468 * ENODEV: if device not found in list of registered devices
470 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
471 * protected pointer. The reason for the same is that the opp pointer which is
472 * returned will remain valid for use with opp_get_{voltage, freq} only while
473 * under the locked area. The pointer returned must be used prior to unlocking
474 * with rcu_read_unlock() to maintain the integrity of the pointer.
476 struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
477 unsigned long *freq)
479 struct opp_table *opp_table;
480 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
482 opp_rcu_lockdep_assert();
484 if (!dev || !freq) {
485 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
486 return ERR_PTR(-EINVAL);
489 opp_table = _find_opp_table(dev);
490 if (IS_ERR(opp_table))
491 return ERR_CAST(opp_table);
493 list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
494 if (temp_opp->available) {
495 /* go to the next node, before choosing prev */
496 if (temp_opp->rate > *freq)
497 break;
498 else
499 opp = temp_opp;
502 if (!IS_ERR(opp))
503 *freq = opp->rate;
505 return opp;
507 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
510 * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
511 * while clk returned here is used.
513 static struct clk *_get_opp_clk(struct device *dev)
515 struct opp_table *opp_table;
516 struct clk *clk;
518 rcu_read_lock();
520 opp_table = _find_opp_table(dev);
521 if (IS_ERR(opp_table)) {
522 dev_err(dev, "%s: device opp doesn't exist\n", __func__);
523 clk = ERR_CAST(opp_table);
524 goto unlock;
527 clk = opp_table->clk;
528 if (IS_ERR(clk))
529 dev_err(dev, "%s: No clock available for the device\n",
530 __func__);
532 unlock:
533 rcu_read_unlock();
534 return clk;
537 static int _set_opp_voltage(struct device *dev, struct regulator *reg,
538 unsigned long u_volt, unsigned long u_volt_min,
539 unsigned long u_volt_max)
541 int ret;
543 /* Regulator not available for device */
544 if (IS_ERR(reg)) {
545 dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
546 PTR_ERR(reg));
547 return 0;
550 dev_dbg(dev, "%s: voltages (uV): %lu %lu %lu\n", __func__, u_volt_min,
551 u_volt, u_volt_max);
553 ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
554 u_volt_max);
555 if (ret)
556 dev_err(dev, "%s: failed to set voltage (%lu %lu %lu uV): %d\n",
557 __func__, u_volt_min, u_volt, u_volt_max, ret);
559 return ret;
563 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
564 * @dev: device for which we do this operation
565 * @target_freq: frequency to achieve
567 * This configures the power-supplies and clock source to the levels specified
568 * by the OPP corresponding to the target_freq.
570 * Locking: This function takes rcu_read_lock().
572 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
574 struct opp_table *opp_table;
575 struct dev_pm_opp *old_opp, *opp;
576 struct regulator *reg;
577 struct clk *clk;
578 unsigned long freq, old_freq;
579 unsigned long u_volt, u_volt_min, u_volt_max;
580 unsigned long ou_volt, ou_volt_min, ou_volt_max;
581 int ret;
583 if (unlikely(!target_freq)) {
584 dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
585 target_freq);
586 return -EINVAL;
589 clk = _get_opp_clk(dev);
590 if (IS_ERR(clk))
591 return PTR_ERR(clk);
593 freq = clk_round_rate(clk, target_freq);
594 if ((long)freq <= 0)
595 freq = target_freq;
597 old_freq = clk_get_rate(clk);
599 /* Return early if nothing to do */
600 if (old_freq == freq) {
601 dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
602 __func__, freq);
603 return 0;
606 rcu_read_lock();
608 opp_table = _find_opp_table(dev);
609 if (IS_ERR(opp_table)) {
610 dev_err(dev, "%s: device opp doesn't exist\n", __func__);
611 rcu_read_unlock();
612 return PTR_ERR(opp_table);
615 old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
616 if (!IS_ERR(old_opp)) {
617 ou_volt = old_opp->u_volt;
618 ou_volt_min = old_opp->u_volt_min;
619 ou_volt_max = old_opp->u_volt_max;
620 } else {
621 dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
622 __func__, old_freq, PTR_ERR(old_opp));
625 opp = dev_pm_opp_find_freq_ceil(dev, &freq);
626 if (IS_ERR(opp)) {
627 ret = PTR_ERR(opp);
628 dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
629 __func__, freq, ret);
630 rcu_read_unlock();
631 return ret;
634 u_volt = opp->u_volt;
635 u_volt_min = opp->u_volt_min;
636 u_volt_max = opp->u_volt_max;
638 reg = opp_table->regulator;
640 rcu_read_unlock();
642 /* Scaling up? Scale voltage before frequency */
643 if (freq > old_freq) {
644 ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
645 u_volt_max);
646 if (ret)
647 goto restore_voltage;
650 /* Change frequency */
652 dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
653 __func__, old_freq, freq);
655 ret = clk_set_rate(clk, freq);
656 if (ret) {
657 dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
658 ret);
659 goto restore_voltage;
662 /* Scaling down? Scale voltage after frequency */
663 if (freq < old_freq) {
664 ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
665 u_volt_max);
666 if (ret)
667 goto restore_freq;
670 return 0;
672 restore_freq:
673 if (clk_set_rate(clk, old_freq))
674 dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
675 __func__, old_freq);
676 restore_voltage:
677 /* This shouldn't harm even if the voltages weren't updated earlier */
678 if (!IS_ERR(old_opp))
679 _set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max);
681 return ret;
683 EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
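/*
 * Illustrative usage sketch (editorial addition): a cpufreq/devfreq-style
 * driver typically only passes the target frequency; the OPP core picks the
 * matching OPP and orders the regulator and clock updates as implemented
 * above. The wrapper name and its parameters are made up for the example.
 *
 *	static int example_dvfs_transition(struct device *dev,
 *					   unsigned long target_hz)
 *	{
 *		return dev_pm_opp_set_rate(dev, target_hz);
 *	}
 */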
685 /* OPP-dev Helpers */
686 static void _kfree_opp_dev_rcu(struct rcu_head *head)
688 struct opp_device *opp_dev;
690 opp_dev = container_of(head, struct opp_device, rcu_head);
691 kfree_rcu(opp_dev, rcu_head);
694 static void _remove_opp_dev(struct opp_device *opp_dev,
695 struct opp_table *opp_table)
697 opp_debug_unregister(opp_dev, opp_table);
698 list_del(&opp_dev->node);
699 call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
700 _kfree_opp_dev_rcu);
703 struct opp_device *_add_opp_dev(const struct device *dev,
704 struct opp_table *opp_table)
706 struct opp_device *opp_dev;
707 int ret;
709 opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
710 if (!opp_dev)
711 return NULL;
713 /* Initialize opp-dev */
714 opp_dev->dev = dev;
715 list_add_rcu(&opp_dev->node, &opp_table->dev_list);
717 /* Create debugfs entries for the opp_table */
718 ret = opp_debug_register(opp_dev, opp_table);
719 if (ret)
720 dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
721 __func__, ret);
723 return opp_dev;
727 * _add_opp_table() - Find OPP table or allocate a new one
728 * @dev: device for which we do this operation
730 * It tries to find an existing table first; if it cannot find one, it
731 * allocates a new OPP table and returns that.
733 * Return: valid opp_table pointer if success, else NULL.
735 static struct opp_table *_add_opp_table(struct device *dev)
737 struct opp_table *opp_table;
738 struct opp_device *opp_dev;
739 int ret;
741 /* Check for existing table for 'dev' first */
742 opp_table = _find_opp_table(dev);
743 if (!IS_ERR(opp_table))
744 return opp_table;
747 * Allocate a new OPP table. In the infrequent case where a new
748 * device needs to be added, we pay this penalty.
750 opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
751 if (!opp_table)
752 return NULL;
754 INIT_LIST_HEAD(&opp_table->dev_list);
756 opp_dev = _add_opp_dev(dev, opp_table);
757 if (!opp_dev) {
758 kfree(opp_table);
759 return NULL;
762 _of_init_opp_table(opp_table, dev);
764 /* Set regulator to a non-NULL error value */
765 opp_table->regulator = ERR_PTR(-ENXIO);
767 /* Find clk for the device */
768 opp_table->clk = clk_get(dev, NULL);
769 if (IS_ERR(opp_table->clk)) {
770 ret = PTR_ERR(opp_table->clk);
771 if (ret != -EPROBE_DEFER)
772 dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
773 ret);
776 srcu_init_notifier_head(&opp_table->srcu_head);
777 INIT_LIST_HEAD(&opp_table->opp_list);
779 /* Secure the device table modification */
780 list_add_rcu(&opp_table->node, &opp_tables);
781 return opp_table;
785 * _kfree_device_rcu() - Free opp_table RCU handler
786 * @head: RCU head
788 static void _kfree_device_rcu(struct rcu_head *head)
790 struct opp_table *opp_table = container_of(head, struct opp_table,
791 rcu_head);
793 kfree_rcu(opp_table, rcu_head);
797 * _remove_opp_table() - Removes an OPP table
798 * @opp_table: OPP table to be removed.
800 * Removes/frees OPP table if it doesn't contain any OPPs.
802 static void _remove_opp_table(struct opp_table *opp_table)
804 struct opp_device *opp_dev;
806 if (!list_empty(&opp_table->opp_list))
807 return;
809 if (opp_table->supported_hw)
810 return;
812 if (opp_table->prop_name)
813 return;
815 if (!IS_ERR(opp_table->regulator))
816 return;
818 /* Release clk */
819 if (!IS_ERR(opp_table->clk))
820 clk_put(opp_table->clk);
822 opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
823 node);
825 _remove_opp_dev(opp_dev, opp_table);
827 /* dev_list must be empty now */
828 WARN_ON(!list_empty(&opp_table->dev_list));
830 list_del_rcu(&opp_table->node);
831 call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
832 _kfree_device_rcu);
836 * _kfree_opp_rcu() - Free OPP RCU handler
837 * @head: RCU head
839 static void _kfree_opp_rcu(struct rcu_head *head)
841 struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
843 kfree_rcu(opp, rcu_head);
847 * _opp_remove() - Remove an OPP from a table definition
848 * @opp_table: points back to the opp_table struct this opp belongs to
849 * @opp: pointer to the OPP to remove
850 * @notify: OPP_EVENT_REMOVE notification should be sent or not
852 * This function removes an opp definition from the opp table.
854 * Locking: The internal opp_table and opp structures are RCU protected.
855 * It is assumed that the caller holds required mutex for an RCU updater
856 * strategy.
858 void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp,
859 bool notify)
862 * Notify the changes in the availability of the operable
863 * frequency/voltage list.
865 if (notify)
866 srcu_notifier_call_chain(&opp_table->srcu_head,
867 OPP_EVENT_REMOVE, opp);
868 opp_debug_remove_one(opp);
869 list_del_rcu(&opp->node);
870 call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
872 _remove_opp_table(opp_table);
876 * dev_pm_opp_remove() - Remove an OPP from OPP table
877 * @dev: device for which we do this operation
878 * @freq: OPP to remove with matching 'freq'
880 * This function removes an opp from the opp table.
882 * Locking: The internal opp_table and opp structures are RCU protected.
883 * Hence this function internally uses RCU updater strategy with mutex locks
884 * to keep the integrity of the internal data structures. Callers should ensure
885 * that this function is *NOT* called under RCU protection or in contexts where
886 * mutex cannot be locked.
888 void dev_pm_opp_remove(struct device *dev, unsigned long freq)
890 struct dev_pm_opp *opp;
891 struct opp_table *opp_table;
892 bool found = false;
894 /* Hold our table modification lock here */
895 mutex_lock(&opp_table_lock);
897 opp_table = _find_opp_table(dev);
898 if (IS_ERR(opp_table))
899 goto unlock;
901 list_for_each_entry(opp, &opp_table->opp_list, node) {
902 if (opp->rate == freq) {
903 found = true;
904 break;
908 if (!found) {
909 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
910 __func__, freq);
911 goto unlock;
914 _opp_remove(opp_table, opp, true);
915 unlock:
916 mutex_unlock(&opp_table_lock);
918 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
920 struct dev_pm_opp *_allocate_opp(struct device *dev,
921 struct opp_table **opp_table)
923 struct dev_pm_opp *opp;
925 /* allocate new OPP node */
926 opp = kzalloc(sizeof(*opp), GFP_KERNEL);
927 if (!opp)
928 return NULL;
930 INIT_LIST_HEAD(&opp->node);
932 *opp_table = _add_opp_table(dev);
933 if (!*opp_table) {
934 kfree(opp);
935 return NULL;
938 return opp;
941 static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
942 struct opp_table *opp_table)
944 struct regulator *reg = opp_table->regulator;
946 if (!IS_ERR(reg) &&
947 !regulator_is_supported_voltage(reg, opp->u_volt_min,
948 opp->u_volt_max)) {
949 pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
950 __func__, opp->u_volt_min, opp->u_volt_max);
951 return false;
954 return true;
957 int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
958 struct opp_table *opp_table)
960 struct dev_pm_opp *opp;
961 struct list_head *head = &opp_table->opp_list;
962 int ret;
965 * Insert new OPP in order of increasing frequency and discard if
966 * already present.
968 * Need to use &opp_table->opp_list in the condition part of the 'for'
969 * loop; don't replace it with head, otherwise it will become an infinite
970 * loop.
972 list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
973 if (new_opp->rate > opp->rate) {
974 head = &opp->node;
975 continue;
978 if (new_opp->rate < opp->rate)
979 break;
981 /* Duplicate OPPs */
982 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
983 __func__, opp->rate, opp->u_volt, opp->available,
984 new_opp->rate, new_opp->u_volt, new_opp->available);
986 return opp->available && new_opp->u_volt == opp->u_volt ?
987 0 : -EEXIST;
990 new_opp->opp_table = opp_table;
991 list_add_rcu(&new_opp->node, head);
993 ret = opp_debug_create_one(new_opp, opp_table);
994 if (ret)
995 dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
996 __func__, ret);
998 if (!_opp_supported_by_regulators(new_opp, opp_table)) {
999 new_opp->available = false;
1000 dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
1001 __func__, new_opp->rate);
1004 return 0;
1008 * _opp_add_v1() - Allocate an OPP based on v1 bindings.
1009 * @dev: device for which we do this operation
1010 * @freq: Frequency in Hz for this OPP
1011 * @u_volt: Voltage in uVolts for this OPP
1012 * @dynamic: Dynamically added OPPs.
1014 * This function adds an opp definition to the opp table and returns status.
1015 * The opp is made available by default and it can be controlled using
1016 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
1018 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
1019 * and freed by dev_pm_opp_of_remove_table.
1021 * Locking: The internal opp_table and opp structures are RCU protected.
1022 * Hence this function internally uses RCU updater strategy with mutex locks
1023 * to keep the integrity of the internal data structures. Callers should ensure
1024 * that this function is *NOT* called under RCU protection or in contexts where
1025 * mutex cannot be locked.
1027 * Return:
1028 * 0 On success OR
1029 * Duplicate OPPs (both freq and volt are same) and opp->available
1030 * -EEXIST Freq is the same but volt is different OR
1031 * Duplicate OPPs (both freq and volt are same) and !opp->available
1032 * -ENOMEM Memory allocation failure
1034 int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
1035 bool dynamic)
1037 struct opp_table *opp_table;
1038 struct dev_pm_opp *new_opp;
1039 unsigned long tol;
1040 int ret;
1042 /* Hold our table modification lock here */
1043 mutex_lock(&opp_table_lock);
1045 new_opp = _allocate_opp(dev, &opp_table);
1046 if (!new_opp) {
1047 ret = -ENOMEM;
1048 goto unlock;
1051 /* populate the opp table */
1052 new_opp->rate = freq;
1053 tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
1054 new_opp->u_volt = u_volt;
1055 new_opp->u_volt_min = u_volt - tol;
1056 new_opp->u_volt_max = u_volt + tol;
1057 new_opp->available = true;
1058 new_opp->dynamic = dynamic;
1060 ret = _opp_add(dev, new_opp, opp_table);
1061 if (ret)
1062 goto free_opp;
1064 mutex_unlock(&opp_table_lock);
1067 * Notify the changes in the availability of the operable
1068 * frequency/voltage list.
1070 srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
1071 return 0;
1073 free_opp:
1074 _opp_remove(opp_table, new_opp, false);
1075 unlock:
1076 mutex_unlock(&opp_table_lock);
1077 return ret;
1081 * dev_pm_opp_set_supported_hw() - Set supported platforms
1082 * @dev: Device for which supported-hw has to be set.
1083 * @versions: Array of hierarchy of versions to match.
1084 * @count: Number of elements in the array.
1086 * This is required only for the V2 bindings, and it enables a platform to
1087 * specify the hierarchy of versions it supports. OPP layer will then enable
1088 * OPPs that are available for those versions, based on their 'opp-supported-hw'
1089 * property.
1091 * Locking: The internal opp_table and opp structures are RCU protected.
1092 * Hence this function internally uses RCU updater strategy with mutex locks
1093 * to keep the integrity of the internal data structures. Callers should ensure
1094 * that this function is *NOT* called under RCU protection or in contexts where
1095 * mutex cannot be locked.
1097 int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
1098 unsigned int count)
1100 struct opp_table *opp_table;
1101 int ret = 0;
1103 /* Hold our table modification lock here */
1104 mutex_lock(&opp_table_lock);
1106 opp_table = _add_opp_table(dev);
1107 if (!opp_table) {
1108 ret = -ENOMEM;
1109 goto unlock;
1112 /* Make sure there are no concurrent readers while updating opp_table */
1113 WARN_ON(!list_empty(&opp_table->opp_list));
1115 /* Do we already have a version hierarchy associated with opp_table? */
1116 if (opp_table->supported_hw) {
1117 dev_err(dev, "%s: Already have supported hardware list\n",
1118 __func__);
1119 ret = -EBUSY;
1120 goto err;
1123 opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
1124 GFP_KERNEL);
1125 if (!opp_table->supported_hw) {
1126 ret = -ENOMEM;
1127 goto err;
1130 opp_table->supported_hw_count = count;
1131 mutex_unlock(&opp_table_lock);
1132 return 0;
1134 err:
1135 _remove_opp_table(opp_table);
1136 unlock:
1137 mutex_unlock(&opp_table_lock);
1139 return ret;
1141 EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
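/*
 * Illustrative usage sketch (editorial addition): a platform driver with two
 * hardware version levels restricts which DT OPPs get enabled. The version
 * values below are made up for the example; the call must happen before the
 * OPP table is populated, as checked above.
 *
 *	static const u32 example_versions[] = { 0x2, 0x1 };
 *
 *	ret = dev_pm_opp_set_supported_hw(dev, example_versions,
 *					  ARRAY_SIZE(example_versions));
 */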
1144 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
1145 * @dev: Device for which supported-hw has to be put.
1147 * This is required only for the V2 bindings, and is called for a matching
1148 * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
1149 * will not be freed.
1151 * Locking: The internal opp_table and opp structures are RCU protected.
1152 * Hence this function internally uses RCU updater strategy with mutex locks
1153 * to keep the integrity of the internal data structures. Callers should ensure
1154 * that this function is *NOT* called under RCU protection or in contexts where
1155 * mutex cannot be locked.
1157 void dev_pm_opp_put_supported_hw(struct device *dev)
1159 struct opp_table *opp_table;
1161 /* Hold our table modification lock here */
1162 mutex_lock(&opp_table_lock);
1164 /* Check for existing table for 'dev' first */
1165 opp_table = _find_opp_table(dev);
1166 if (IS_ERR(opp_table)) {
1167 dev_err(dev, "Failed to find opp_table: %ld\n",
1168 PTR_ERR(opp_table));
1169 goto unlock;
1172 /* Make sure there are no concurrent readers while updating opp_table */
1173 WARN_ON(!list_empty(&opp_table->opp_list));
1175 if (!opp_table->supported_hw) {
1176 dev_err(dev, "%s: Doesn't have supported hardware list\n",
1177 __func__);
1178 goto unlock;
1181 kfree(opp_table->supported_hw);
1182 opp_table->supported_hw = NULL;
1183 opp_table->supported_hw_count = 0;
1185 /* Try freeing opp_table if this was the last blocking resource */
1186 _remove_opp_table(opp_table);
1188 unlock:
1189 mutex_unlock(&opp_table_lock);
1191 EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
1194 * dev_pm_opp_set_prop_name() - Set prop-extn name
1195 * @dev: Device for which the prop-name has to be set.
1196 * @name: name to postfix to properties.
1198 * This is required only for the V2 bindings, and it enables a platform to
1199 * specify the extn to be used for certain property names. The properties to
1200 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
1201 * should postfix the property name with -<name> while looking for them.
1203 * Locking: The internal opp_table and opp structures are RCU protected.
1204 * Hence this function internally uses RCU updater strategy with mutex locks
1205 * to keep the integrity of the internal data structures. Callers should ensure
1206 * that this function is *NOT* called under RCU protection or in contexts where
1207 * mutex cannot be locked.
1209 int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
1211 struct opp_table *opp_table;
1212 int ret = 0;
1214 /* Hold our table modification lock here */
1215 mutex_lock(&opp_table_lock);
1217 opp_table = _add_opp_table(dev);
1218 if (!opp_table) {
1219 ret = -ENOMEM;
1220 goto unlock;
1223 /* Make sure there are no concurrent readers while updating opp_table */
1224 WARN_ON(!list_empty(&opp_table->opp_list));
1226 /* Do we already have a prop-name associated with opp_table? */
1227 if (opp_table->prop_name) {
1228 dev_err(dev, "%s: Already have prop-name %s\n", __func__,
1229 opp_table->prop_name);
1230 ret = -EBUSY;
1231 goto err;
1234 opp_table->prop_name = kstrdup(name, GFP_KERNEL);
1235 if (!opp_table->prop_name) {
1236 ret = -ENOMEM;
1237 goto err;
1240 mutex_unlock(&opp_table_lock);
1241 return 0;
1243 err:
1244 _remove_opp_table(opp_table);
1245 unlock:
1246 mutex_unlock(&opp_table_lock);
1248 return ret;
1250 EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
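/*
 * Illustrative usage sketch (editorial addition): selecting speed-bin
 * specific properties, so the core looks up "opp-microvolt-<name>" instead of
 * plain "opp-microvolt". The name "speed0" is a made-up example.
 *
 *	ret = dev_pm_opp_set_prop_name(dev, "speed0");
 */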
1253 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
1254 * @dev: Device for which the prop-name has to be put.
1256 * This is required only for the V2 bindings, and is called for a matching
1257 * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
1258 * will not be freed.
1260 * Locking: The internal opp_table and opp structures are RCU protected.
1261 * Hence this function internally uses RCU updater strategy with mutex locks
1262 * to keep the integrity of the internal data structures. Callers should ensure
1263 * that this function is *NOT* called under RCU protection or in contexts where
1264 * mutex cannot be locked.
1266 void dev_pm_opp_put_prop_name(struct device *dev)
1268 struct opp_table *opp_table;
1270 /* Hold our table modification lock here */
1271 mutex_lock(&opp_table_lock);
1273 /* Check for existing table for 'dev' first */
1274 opp_table = _find_opp_table(dev);
1275 if (IS_ERR(opp_table)) {
1276 dev_err(dev, "Failed to find opp_table: %ld\n",
1277 PTR_ERR(opp_table));
1278 goto unlock;
1281 /* Make sure there are no concurrent readers while updating opp_table */
1282 WARN_ON(!list_empty(&opp_table->opp_list));
1284 if (!opp_table->prop_name) {
1285 dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
1286 goto unlock;
1289 kfree(opp_table->prop_name);
1290 opp_table->prop_name = NULL;
1292 /* Try freeing opp_table if this was the last blocking resource */
1293 _remove_opp_table(opp_table);
1295 unlock:
1296 mutex_unlock(&opp_table_lock);
1298 EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
1301 * dev_pm_opp_set_regulator() - Set regulator name for the device
1302 * @dev: Device for which regulator name is being set.
1303 * @name: Name of the regulator.
1305 * In order to support OPP switching, OPP layer needs to know the name of the
1306 * device's regulator, as the core would be required to switch voltages as well.
1308 * This must be called before any OPPs are initialized for the device.
1310 * Locking: The internal opp_table and opp structures are RCU protected.
1311 * Hence this function internally uses RCU updater strategy with mutex locks
1312 * to keep the integrity of the internal data structures. Callers should ensure
1313 * that this function is *NOT* called under RCU protection or in contexts where
1314 * mutex cannot be locked.
1316 int dev_pm_opp_set_regulator(struct device *dev, const char *name)
1318 struct opp_table *opp_table;
1319 struct regulator *reg;
1320 int ret;
1322 mutex_lock(&opp_table_lock);
1324 opp_table = _add_opp_table(dev);
1325 if (!opp_table) {
1326 ret = -ENOMEM;
1327 goto unlock;
1330 /* This should be called before OPPs are initialized */
1331 if (WARN_ON(!list_empty(&opp_table->opp_list))) {
1332 ret = -EBUSY;
1333 goto err;
1336 /* Already have a regulator set */
1337 if (WARN_ON(!IS_ERR(opp_table->regulator))) {
1338 ret = -EBUSY;
1339 goto err;
1341 /* Allocate the regulator */
1342 reg = regulator_get_optional(dev, name);
1343 if (IS_ERR(reg)) {
1344 ret = PTR_ERR(reg);
1345 if (ret != -EPROBE_DEFER)
1346 dev_err(dev, "%s: no regulator (%s) found: %d\n",
1347 __func__, name, ret);
1348 goto err;
1351 opp_table->regulator = reg;
1353 mutex_unlock(&opp_table_lock);
1354 return 0;
1356 err:
1357 _remove_opp_table(opp_table);
1358 unlock:
1359 mutex_unlock(&opp_table_lock);
1361 return ret;
1363 EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
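/*
 * Illustrative ordering sketch (editorial addition): the regulator must be
 * registered with the OPP core before any OPPs are added for the device, as
 * checked above. The supply name "vdd-example" is hypothetical.
 *
 *	ret = dev_pm_opp_set_regulator(dev, "vdd-example");
 *	if (ret)
 *		return ret;
 *	... only add OPPs (e.g. via dev_pm_opp_add()) after this point ...
 */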
1366 * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
1367 * @dev: Device for which regulator was set.
1369 * Locking: The internal opp_table and opp structures are RCU protected.
1370 * Hence this function internally uses RCU updater strategy with mutex locks
1371 * to keep the integrity of the internal data structures. Callers should ensure
1372 * that this function is *NOT* called under RCU protection or in contexts where
1373 * mutex cannot be locked.
1375 void dev_pm_opp_put_regulator(struct device *dev)
1377 struct opp_table *opp_table;
1379 mutex_lock(&opp_table_lock);
1381 /* Check for existing table for 'dev' first */
1382 opp_table = _find_opp_table(dev);
1383 if (IS_ERR(opp_table)) {
1384 dev_err(dev, "Failed to find opp_table: %ld\n",
1385 PTR_ERR(opp_table));
1386 goto unlock;
1389 if (IS_ERR(opp_table->regulator)) {
1390 dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
1391 goto unlock;
1394 /* Make sure there are no concurrent readers while updating opp_table */
1395 WARN_ON(!list_empty(&opp_table->opp_list));
1397 regulator_put(opp_table->regulator);
1398 opp_table->regulator = ERR_PTR(-ENXIO);
1400 /* Try freeing opp_table if this was the last blocking resource */
1401 _remove_opp_table(opp_table);
1403 unlock:
1404 mutex_unlock(&opp_table_lock);
1406 EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
1409 * dev_pm_opp_add() - Add an OPP table from a table definition
1410 * @dev: device for which we do this operation
1411 * @freq: Frequency in Hz for this OPP
1412 * @u_volt: Voltage in uVolts for this OPP
1414 * This function adds an opp definition to the opp table and returns status.
1415 * The opp is made available by default and it can be controlled using
1416 * dev_pm_opp_enable/disable functions.
1418 * Locking: The internal opp_table and opp structures are RCU protected.
1419 * Hence this function internally uses RCU updater strategy with mutex locks
1420 * to keep the integrity of the internal data structures. Callers should ensure
1421 * that this function is *NOT* called under RCU protection or in contexts where
1422 * mutex cannot be locked.
1424 * Return:
1425 * 0 On success OR
1426 * Duplicate OPPs (both freq and volt are same) and opp->available
1427 * -EEXIST Freq is the same but volt is different OR
1428 * Duplicate OPPs (both freq and volt are same) and !opp->available
1429 * -ENOMEM Memory allocation failure
1431 int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
1433 return _opp_add_v1(dev, freq, u_volt, true);
1435 EXPORT_SYMBOL_GPL(dev_pm_opp_add);
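/*
 * Illustrative usage sketch (editorial addition): registering a small,
 * hand-rolled OPP table from platform code. Frequencies are in Hz and
 * voltages in microvolts; the values are made up for the example.
 *
 *	ret = dev_pm_opp_add(dev, 500000000, 950000);
 *	if (!ret)
 *		ret = dev_pm_opp_add(dev, 1000000000, 1100000);
 */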
1438 * _opp_set_availability() - helper to set the availability of an opp
1439 * @dev: device for which we do this operation
1440 * @freq: OPP frequency to modify availability
1441 * @availability_req: availability status requested for this opp
1443 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
1444 * share a common logic which is isolated here.
1446 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1447 * copy operation, returns 0 if no modification was done OR modification was
1448 * successful.
1450 * Locking: The internal opp_table and opp structures are RCU protected.
1451 * Hence this function internally uses RCU updater strategy with mutex locks to
1452 * keep the integrity of the internal data structures. Callers should ensure
1453 * that this function is *NOT* called under RCU protection or in contexts where
1454 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1456 static int _opp_set_availability(struct device *dev, unsigned long freq,
1457 bool availability_req)
1459 struct opp_table *opp_table;
1460 struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
1461 int r = 0;
1463 /* keep the node allocated */
1464 new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
1465 if (!new_opp)
1466 return -ENOMEM;
1468 mutex_lock(&opp_table_lock);
1470 /* Find the opp_table */
1471 opp_table = _find_opp_table(dev);
1472 if (IS_ERR(opp_table)) {
1473 r = PTR_ERR(opp_table);
1474 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
1475 goto unlock;
1478 /* Do we have the frequency? */
1479 list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
1480 if (tmp_opp->rate == freq) {
1481 opp = tmp_opp;
1482 break;
1485 if (IS_ERR(opp)) {
1486 r = PTR_ERR(opp);
1487 goto unlock;
1490 /* Is update really needed? */
1491 if (opp->available == availability_req)
1492 goto unlock;
1493 /* copy the old data over */
1494 *new_opp = *opp;
1496 /* plug in new node */
1497 new_opp->available = availability_req;
1499 list_replace_rcu(&opp->node, &new_opp->node);
1500 mutex_unlock(&opp_table_lock);
1501 call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
1503 /* Notify the change of the OPP availability */
1504 if (availability_req)
1505 srcu_notifier_call_chain(&opp_table->srcu_head,
1506 OPP_EVENT_ENABLE, new_opp);
1507 else
1508 srcu_notifier_call_chain(&opp_table->srcu_head,
1509 OPP_EVENT_DISABLE, new_opp);
1511 return 0;
1513 unlock:
1514 mutex_unlock(&opp_table_lock);
1515 kfree(new_opp);
1516 return r;
1520 * dev_pm_opp_enable() - Enable a specific OPP
1521 * @dev: device for which we do this operation
1522 * @freq: OPP frequency to enable
1524 * Enables a provided opp. If the operation is valid, this returns 0, else the
1525 * corresponding error value. It is meant to be used by users to make an OPP
1526 * available again after it was temporarily made unavailable with dev_pm_opp_disable.
1528 * Locking: The internal opp_table and opp structures are RCU protected.
1529 * Hence this function indirectly uses RCU and mutex locks to keep the
1530 * integrity of the internal data structures. Callers should ensure that
1531 * this function is *NOT* called under RCU protection or in contexts where
1532 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1534 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1535 * copy operation, returns 0 if no modification was done OR modification was
1536 * successful.
1538 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
1540 return _opp_set_availability(dev, freq, true);
1542 EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
1545 * dev_pm_opp_disable() - Disable a specific OPP
1546 * @dev: device for which we do this operation
1547 * @freq: OPP frequency to disable
1549 * Disables a provided opp. If the operation is valid, this returns
1550 * 0, else the corresponding error value. It is meant to be a temporary
1551 * control by users to make this OPP not available until the circumstances are
1552 * right to make it available again (with a call to dev_pm_opp_enable).
1554 * Locking: The internal opp_table and opp structures are RCU protected.
1555 * Hence this function indirectly uses RCU and mutex locks to keep the
1556 * integrity of the internal data structures. Callers should ensure that
1557 * this function is *NOT* called under RCU protection or in contexts where
1558 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1560 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1561 * copy operation, returns 0 if no modification was done OR modification was
1562 * successful.
1564 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
1566 return _opp_set_availability(dev, freq, false);
1568 EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
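/*
 * Illustrative usage sketch (editorial addition): temporarily masking an OPP
 * (e.g. under a thermal constraint) and restoring it later. The 1 GHz
 * frequency is an assumed example value.
 *
 *	dev_pm_opp_disable(dev, 1000000000);
 *	...
 *	dev_pm_opp_enable(dev, 1000000000);
 */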
1571 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
1572 * @dev: device pointer used to lookup OPP table.
1574 * Return: pointer to notifier head if found, otherwise -ENODEV or
1575 * -EINVAL based on type of error cast as a pointer. The value must be checked
1576 * with IS_ERR to determine valid pointer or error result.
1578 * Locking: This function must be called under rcu_read_lock(). opp_table is a
1579 * RCU protected pointer. The reason for the same is that the opp pointer which
1580 * is returned will remain valid for use with opp_get_{voltage, freq} only while
1581 * under the locked area. The pointer returned must be used prior to unlocking
1582 * with rcu_read_unlock() to maintain the integrity of the pointer.
1584 struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
1586 struct opp_table *opp_table = _find_opp_table(dev);
1588 if (IS_ERR(opp_table))
1589 return ERR_CAST(opp_table); /* matching type */
1591 return &opp_table->srcu_head;
1593 EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
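/*
 * Illustrative usage sketch (editorial addition): registering for OPP
 * add/remove/enable/disable events. The notifier callback
 * example_opp_notifier and the block example_nb are hypothetical; the head
 * is looked up under rcu_read_lock() per the kernel-doc above, then used for
 * registration once the read-side lock has been dropped.
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_opp_notifier,
 *	};
 *	struct srcu_notifier_head *nh;
 *
 *	rcu_read_lock();
 *	nh = dev_pm_opp_get_notifier(dev);
 *	rcu_read_unlock();
 *	if (!IS_ERR(nh))
 *		ret = srcu_notifier_chain_register(nh, &example_nb);
 */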
1596 * Free OPPs either created using static entries present in DT or even the
1597 * dynamically added entries based on remove_all param.
1599 void _dev_pm_opp_remove_table(struct device *dev, bool remove_all)
1601 struct opp_table *opp_table;
1602 struct dev_pm_opp *opp, *tmp;
1604 /* Hold our table modification lock here */
1605 mutex_lock(&opp_table_lock);
1607 /* Check for existing table for 'dev' */
1608 opp_table = _find_opp_table(dev);
1609 if (IS_ERR(opp_table)) {
1610 int error = PTR_ERR(opp_table);
1612 if (error != -ENODEV)
1613 WARN(1, "%s: opp_table: %d\n",
1614 IS_ERR_OR_NULL(dev) ?
1615 "Invalid device" : dev_name(dev),
1616 error);
1617 goto unlock;
1620 /* Find if opp_table manages a single device */
1621 if (list_is_singular(&opp_table->dev_list)) {
1622 /* Free static OPPs */
1623 list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
1624 if (remove_all || !opp->dynamic)
1625 _opp_remove(opp_table, opp, true);
1627 } else {
1628 _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
1631 unlock:
1632 mutex_unlock(&opp_table_lock);
1636 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
1637 * @dev: device pointer used to lookup OPP table.
1639 * Free both OPPs created using static entries present in DT and the
1640 * dynamically added entries.
1642 * Locking: The internal opp_table and opp structures are RCU protected.
1643 * Hence this function indirectly uses RCU updater strategy with mutex locks
1644 * to keep the integrity of the internal data structures. Callers should ensure
1645 * that this function is *NOT* called under RCU protection or in contexts where
1646 * mutex cannot be locked.
1648 void dev_pm_opp_remove_table(struct device *dev)
1650 _dev_pm_opp_remove_table(dev, true);
1652 EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);