/*
 * Generic OPP OF helpers
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/export.h>

#include "opp.h"
static struct opp_table *_managed_opp(const struct device_node *np)
{
	struct opp_table *opp_table;

	list_for_each_entry_rcu(opp_table, &opp_tables, node) {
		if (opp_table->np == np) {
			/*
			 * Multiple devices can point to the same OPP table and
			 * so will have same node-pointer, np.
			 *
			 * But the OPPs will be considered as shared only if the
			 * OPP table contains a "opp-shared" property.
			 */
			if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED)
				return opp_table;

			return NULL;
		}
	}

	return NULL;
}
void _of_init_opp_table(struct opp_table *opp_table, struct device *dev)
{
	struct device_node *np;

	/*
	 * Only required for backward compatibility with v1 bindings, but isn't
	 * harmful for other cases. And so we do it unconditionally.
	 */
	np = of_node_get(dev->of_node);
	if (np) {
		u32 val;

		if (!of_property_read_u32(np, "clock-latency", &val))
			opp_table->clock_latency_ns_max = val;
		of_property_read_u32(np, "voltage-tolerance",
				     &opp_table->voltage_tolerance_v1);
		of_node_put(np);
	}
}
static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
			      struct device_node *np)
{
	unsigned int count = opp_table->supported_hw_count;
	u32 version;
	int ret;

	if (!opp_table->supported_hw) {
		/*
		 * In the case that no supported_hw has been set by the
		 * platform but there is an opp-supported-hw value set for
		 * an OPP then the OPP should not be enabled as there is
		 * no way to see if the hardware supports it.
		 */
		if (of_find_property(np, "opp-supported-hw", NULL))
			return false;

		return true;
	}

	while (count--) {
		ret = of_property_read_u32_index(np, "opp-supported-hw", count,
						 &version);
		if (ret) {
			dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
				 __func__, count, ret);
			return false;
		}

		/* Both of these are bitwise masks of the versions */
		if (!(version & opp_table->supported_hw[count]))
			return false;
	}

	return true;
}
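/*
 * Example (illustrative sketch only): a platform driver can restrict which
 * "opp-supported-hw" entries are accepted by registering its hardware
 * versions before the OPP table is parsed. The device pointer and version
 * values below are hypothetical, and the return convention of
 * dev_pm_opp_set_supported_hw() differs between kernel versions, so error
 * handling is omitted here.
 */
static void __maybe_unused example_register_supported_hw(struct device *dev)
{
	/* Two-level version hierarchy, e.g. <cut> and <substrate> codes */
	const u32 versions[] = { 0x2, 0x1 };

	dev_pm_opp_set_supported_hw(dev, versions, ARRAY_SIZE(versions));

	/*
	 * OPP nodes whose opp-supported-hw masks don't intersect these
	 * values will now be skipped by _opp_is_supported().
	 */
	dev_pm_opp_of_add_table(dev);
}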
static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
			      struct opp_table *opp_table)
{
	u32 *microvolt, *microamp = NULL;
	int supplies, vcount, icount, ret, i, j;
	struct property *prop = NULL;
	char name[NAME_MAX];

	supplies = opp_table->regulator_count ? opp_table->regulator_count : 1;

	/* Search for "opp-microvolt-<name>" */
	if (opp_table->prop_name) {
		snprintf(name, sizeof(name), "opp-microvolt-%s",
			 opp_table->prop_name);
		prop = of_find_property(opp->np, name, NULL);
	}

	if (!prop) {
		/* Search for "opp-microvolt" */
		sprintf(name, "opp-microvolt");
		prop = of_find_property(opp->np, name, NULL);

		/* Missing property isn't a problem, but an invalid entry is */
		if (!prop)
			return 0;
	}

	vcount = of_property_count_u32_elems(opp->np, name);
	if (vcount < 0) {
		dev_err(dev, "%s: Invalid %s property (%d)\n",
			__func__, name, vcount);
		return vcount;
	}

	/* There can be one or three elements per supply */
	if (vcount != supplies && vcount != supplies * 3) {
		dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
			__func__, name, vcount, supplies);
		return -EINVAL;
	}

	microvolt = kmalloc_array(vcount, sizeof(*microvolt), GFP_KERNEL);
	if (!microvolt)
		return -ENOMEM;

	ret = of_property_read_u32_array(opp->np, name, microvolt, vcount);
	if (ret) {
		dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
		ret = -EINVAL;
		goto free_microvolt;
	}
	/* Search for "opp-microamp-<name>" */
	prop = NULL;
	if (opp_table->prop_name) {
		snprintf(name, sizeof(name), "opp-microamp-%s",
			 opp_table->prop_name);
		prop = of_find_property(opp->np, name, NULL);
	}

	if (!prop) {
		/* Search for "opp-microamp" */
		sprintf(name, "opp-microamp");
		prop = of_find_property(opp->np, name, NULL);
	}

	if (prop) {
		icount = of_property_count_u32_elems(opp->np, name);
		if (icount < 0) {
			dev_err(dev, "%s: Invalid %s property (%d)\n", __func__,
				name, icount);
			ret = icount;
			goto free_microvolt;
		}

		if (icount != supplies) {
			dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
				__func__, name, icount, supplies);
			ret = -EINVAL;
			goto free_microvolt;
		}

		microamp = kmalloc_array(icount, sizeof(*microamp), GFP_KERNEL);
		if (!microamp) {
			ret = -EINVAL;
			goto free_microvolt;
		}

		ret = of_property_read_u32_array(opp->np, name, microamp,
						 icount);
		if (ret) {
			dev_err(dev, "%s: error parsing %s: %d\n", __func__,
				name, ret);
			ret = -EINVAL;
			goto free_microamp;
		}
	}
	for (i = 0, j = 0; i < supplies; i++) {
		opp->supplies[i].u_volt = microvolt[j++];

		if (vcount == supplies) {
			opp->supplies[i].u_volt_min = opp->supplies[i].u_volt;
			opp->supplies[i].u_volt_max = opp->supplies[i].u_volt;
		} else {
			opp->supplies[i].u_volt_min = microvolt[j++];
			opp->supplies[i].u_volt_max = microvolt[j++];
		}

		if (microamp)
			opp->supplies[i].u_amp = microamp[i];
	}

free_microamp:
	kfree(microamp);
free_microvolt:
	kfree(microvolt);

	return ret;
}
/**
 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
 *				  entries
 * @dev:	device pointer used to lookup OPP table.
 *
 * Free OPPs created using static entries present in DT.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_of_remove_table(struct device *dev)
{
	_dev_pm_opp_remove_table(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
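/*
 * Example (illustrative sketch only): a driver that registered its OPPs from
 * DT during probe typically drops them again in its remove path. The device
 * pointer below is hypothetical.
 */
static void __maybe_unused example_driver_teardown(struct device *dev)
{
	/* Drops only the static, DT-created entries for this device */
	dev_pm_opp_of_remove_table(dev);
}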
/* Returns opp descriptor node for a device, caller must do of_node_put() */
static struct device_node *_of_get_opp_desc_node(struct device *dev)
{
	/*
	 * TODO: Support for multiple OPP tables.
	 *
	 * There should be only ONE phandle present in "operating-points-v2"
	 * property.
	 */

	return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
}
/**
 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
 * @dev:	device for which we do this operation
 * @np:		device node
 *
 * This function adds an opp definition to the opp table and returns status. The
 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
 * removed by dev_pm_opp_remove.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -EINVAL	Failed parsing the OPP node
 */
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *new_opp;
	u64 rate;
	u32 val;
	int ret;
	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	new_opp = _allocate_opp(dev, &opp_table);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = of_property_read_u64(np, "opp-hz", &rate);
	if (ret < 0) {
		dev_err(dev, "%s: opp-hz not found\n", __func__);
		goto free_opp;
	}

	/* Check if the OPP supports hardware's hierarchy of versions or not */
	if (!_opp_is_supported(dev, opp_table, np)) {
		dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
		goto free_opp;
	}

	/*
	 * Rate is defined as an unsigned long in clk API, and so casting
	 * explicitly to its type. Must be fixed once rate is 64 bit
	 * guaranteed in clk API.
	 */
	new_opp->rate = (unsigned long)rate;
	new_opp->turbo = of_property_read_bool(np, "turbo-mode");

	new_opp->np = np;
	new_opp->dynamic = false;
	new_opp->available = true;

	if (!of_property_read_u32(np, "clock-latency-ns", &val))
		new_opp->clock_latency_ns = val;

	ret = opp_parse_supplies(new_opp, dev, opp_table);
	if (ret)
		goto free_opp;

	ret = _opp_add(dev, new_opp, opp_table);
	if (ret)
		goto free_opp;
	/* OPP to select on device suspend */
	if (of_property_read_bool(np, "opp-suspend")) {
		if (opp_table->suspend_opp) {
			dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
				 __func__, opp_table->suspend_opp->rate,
				 new_opp->rate);
		} else {
			new_opp->suspend = true;
			opp_table->suspend_opp = new_opp;
		}
	}

	if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
		opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;

	mutex_unlock(&opp_table_lock);

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
		 __func__, new_opp->turbo, new_opp->rate,
		 new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
		 new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_remove(opp_table, new_opp, false);
unlock:
	mutex_unlock(&opp_table_lock);

	return ret;
}
/* Initializes OPP tables based on new bindings */
static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
{
	struct device_node *np;
	struct opp_table *opp_table;
	int ret = 0, count = 0;

	mutex_lock(&opp_table_lock);

	opp_table = _managed_opp(opp_np);
	if (opp_table) {
		/* OPPs are already managed */
		if (!_add_opp_dev(dev, opp_table))
			ret = -ENOMEM;
		mutex_unlock(&opp_table_lock);
		return ret;
	}
	mutex_unlock(&opp_table_lock);

	/* We have opp-table node now, iterate over it and add OPPs */
	for_each_available_child_of_node(opp_np, np) {
		count++;

		ret = _opp_add_static_v2(dev, np);
		if (ret) {
			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
				ret);
			goto free_table;
		}
	}

	/* There should be one or more OPPs defined */
	if (WARN_ON(!count))
		return -ENOENT;

	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table(dev);
	if (WARN_ON(IS_ERR(opp_table))) {
		ret = PTR_ERR(opp_table);
		mutex_unlock(&opp_table_lock);
		goto free_table;
	}

	opp_table->np = opp_np;
	if (of_property_read_bool(opp_np, "opp-shared"))
		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
	else
		opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;

	mutex_unlock(&opp_table_lock);

	return 0;

free_table:
	dev_pm_opp_of_remove_table(dev);

	return ret;
}
/* Initializes OPP tables based on old-deprecated bindings */
static int _of_add_opp_table_v1(struct device *dev)
{
	const struct property *prop;
	const __be32 *val;
	int nr;

	prop = of_find_property(dev->of_node, "operating-points", NULL);
	if (!prop)
		return -ENODEV;
	if (!prop->value)
		return -ENODATA;

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz vol-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2) {
		dev_err(dev, "%s: Invalid OPP table\n", __func__);
		return -EINVAL;
	}

	val = prop->value;
	while (nr) {
		unsigned long freq = be32_to_cpup(val++) * 1000;
		unsigned long volt = be32_to_cpup(val++);

		if (_opp_add_v1(dev, freq, volt, false))
			dev_warn(dev, "%s: Failed to add OPP %ld\n",
				 __func__, freq);
		nr -= 2;
	}

	return 0;
}
/**
 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
 * @dev:	device pointer used to lookup OPP table.
 *
 * Register the initial OPP table with the OPP library for given device.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when 'operating-points' property is not found or is invalid data
 *		in device node.
 * -ENODATA	when empty 'operating-points' property is found
 * -EINVAL	when invalid entries are found in opp-v2 table
 */
int dev_pm_opp_of_add_table(struct device *dev)
{
	struct device_node *opp_np;
	int ret;

	/*
	 * OPPs have two versions of bindings now. The older one is deprecated,
	 * try for the new binding first.
	 */
	opp_np = _of_get_opp_desc_node(dev);
	if (!opp_np) {
		/*
		 * Try old-deprecated bindings for backward compatibility with
		 * older dtbs.
		 */
		return _of_add_opp_table_v1(dev);
	}

	ret = _of_add_opp_table_v2(dev, opp_np);
	of_node_put(opp_np);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
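/*
 * Example (illustrative sketch only): typical probe-time usage by an OPP
 * consumer such as a cpufreq or devfreq driver. The function name is
 * hypothetical; only the dev_pm_opp_* calls are part of the real API.
 */
static int __maybe_unused example_driver_probe_opps(struct device *dev)
{
	int ret, count;

	/* Parses "operating-points-v2" first, falls back to v1 bindings */
	ret = dev_pm_opp_of_add_table(dev);
	if (ret)
		return ret;

	count = dev_pm_opp_get_opp_count(dev);
	if (count <= 0) {
		/* Unwind the static entries we just created */
		dev_pm_opp_of_remove_table(dev);
		return count ? count : -ENODEV;
	}

	return 0;
}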
/* CPU device specific helpers */

/**
 * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be removed
 *
 * This removes the OPP tables for CPUs present in the @cpumask.
 * This should be used only to remove static entries created from DT.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
{
	_dev_pm_opp_cpumask_remove_table(cpumask, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
/**
 * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be added.
 *
 * This adds the OPP tables for CPUs present in the @cpumask.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
{
	struct device *cpu_dev;
	int cpu, ret = 0;

	WARN_ON(cpumask_empty(cpumask));

	for_each_cpu(cpu, cpumask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n", __func__,
			       cpu);
			continue;
		}

		ret = dev_pm_opp_of_add_table(cpu_dev);
		if (ret) {
			pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
			       __func__, cpu, ret);

			/* Free all other OPPs */
			dev_pm_opp_of_cpumask_remove_table(cpumask);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
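/*
 * Example (illustrative sketch only): a CPU frequency driver can populate the
 * OPP tables for every CPU it manages in one call. The function name and the
 * cpumask passed in are hypothetical.
 */
static int __maybe_unused example_add_cpu_opps(const struct cpumask *cpus)
{
	int ret;

	ret = dev_pm_opp_of_cpumask_add_table(cpus);
	if (ret)
		/* Nothing to free here: the helper removed partial tables */
		pr_err("Failed to add OPP tables for CPUs: %d\n", ret);

	return ret;
}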
/*
 * Works only for OPP v2 bindings.
 *
 * Returns -ENOENT if operating-points-v2 bindings aren't supported.
 */
/**
 * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
 *				      @cpu_dev using operating-points-v2
 *				      bindings.
 *
 * @cpu_dev:	CPU device for which we do this operation
 * @cpumask:	cpumask to update with information of sharing CPUs
 *
 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
 *
 * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
				   struct cpumask *cpumask)
{
	struct device_node *np, *tmp_np;
	struct device *tcpu_dev;
	int cpu, ret = 0;

	/* Get OPP descriptor node */
	np = _of_get_opp_desc_node(cpu_dev);
	if (!np) {
		dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__);
		return -ENOENT;
	}

	cpumask_set_cpu(cpu_dev->id, cpumask);

	/* OPPs are shared ? */
	if (!of_property_read_bool(np, "opp-shared"))
		goto put_cpu_node;

	for_each_possible_cpu(cpu) {
		if (cpu == cpu_dev->id)
			continue;

		tcpu_dev = get_cpu_device(cpu);
		if (!tcpu_dev) {
			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
				__func__, cpu);
			ret = -ENODEV;
			goto put_cpu_node;
		}

		/* Get OPP descriptor node */
		tmp_np = _of_get_opp_desc_node(tcpu_dev);
		if (!tmp_np) {
			dev_err(tcpu_dev, "%s: Couldn't find opp node.\n",
				__func__);
			ret = -ENOENT;
			goto put_cpu_node;
		}

		/* CPUs are sharing opp node */
		if (np == tmp_np)
			cpumask_set_cpu(cpu, cpumask);

		of_node_put(tmp_np);
	}

put_cpu_node:
	of_node_put(np);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
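/*
 * Example (illustrative sketch only): discovering which CPUs share a table
 * with CPU0 and then marking them as sharing OPPs. The function and variable
 * names are hypothetical; error handling is kept minimal.
 */
static int __maybe_unused example_mark_sharing_cpus(void)
{
	struct device *cpu_dev = get_cpu_device(0);
	cpumask_var_t shared_cpus;
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	if (!zalloc_cpumask_var(&shared_cpus, GFP_KERNEL))
		return -ENOMEM;

	/* Fills shared_cpus with CPUs listed as sharing OPPs with cpu_dev */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, shared_cpus);
	if (!ret)
		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, shared_cpus);

	free_cpumask_var(shared_cpus);
	return ret;
}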