/*
 * Generic OPP helper interface for CPU device
 *
 * Copyright (C) 2009-2014 Texas Instruments Incorporated.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "opp.h"
#ifdef CONFIG_CPU_FREQ
/**
 * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
 * @dev:	device for which we do this operation
 * @table:	cpufreq table returned back to caller
 *
 * Generate a cpufreq table for a provided device - this assumes that the
 * opp table is already initialized and ready for usage.
 *
 * This function allocates required memory for the cpufreq table. It is
 * expected that the caller does the required maintenance such as freeing
 * the table as required.
 *
 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found,
 * -ENOMEM if no memory is available for the operation (table is not
 * populated), and 0 if successful (table is populated).
 *
 * WARNING: It is important for the callers to ensure refreshing their copy of
 * the table if any of the mentioned functions have been invoked in the interim.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Since we just use the regular accessor functions to access the internal data
 * structures, we use RCU read lock inside this function. As a result, users of
 * this function do not need to use explicit locks for invoking.
 */
int dev_pm_opp_init_cpufreq_table(struct device *dev,
				  struct cpufreq_frequency_table **table)
{
	struct dev_pm_opp *opp;
	struct cpufreq_frequency_table *freq_table = NULL;
	int i, max_opps, ret = 0;
	unsigned long rate;

	rcu_read_lock();

	max_opps = dev_pm_opp_get_opp_count(dev);
	if (max_opps <= 0) {
		ret = max_opps ? max_opps : -ENODATA;
		goto out;
	}

	freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
	if (!freq_table) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0, rate = 0; i < max_opps; i++, rate++) {
		/* find next rate */
		opp = dev_pm_opp_find_freq_ceil(dev, &rate);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			goto out;
		}
		freq_table[i].driver_data = i;
		freq_table[i].frequency = rate / 1000;

		/* Is Boost/turbo opp ? */
		if (dev_pm_opp_is_turbo(opp))
			freq_table[i].flags = CPUFREQ_BOOST_FREQ;
	}

	/* Terminate the table */
	freq_table[i].driver_data = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;

	*table = &freq_table[0];

out:
	rcu_read_unlock();
	if (ret)
		kfree(freq_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
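
/*
 * Example (illustrative only, not part of this file): a cpufreq driver could
 * build its frequency table from the OPPs at probe time and free it again on
 * removal. The "foo" names below are hypothetical.
 *
 *	static struct cpufreq_frequency_table *foo_freq_table;
 *
 *	static int foo_cpufreq_probe(struct device *cpu_dev)
 *	{
 *		int ret;
 *
 *		ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &foo_freq_table);
 *		if (ret)
 *			dev_err(cpu_dev, "failed to init cpufreq table: %d\n",
 *				ret);
 *		return ret;
 *	}
 *
 *	static void foo_cpufreq_remove(struct device *cpu_dev)
 *	{
 *		dev_pm_opp_free_cpufreq_table(cpu_dev, &foo_freq_table);
 *	}
 */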
/**
 * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
 * @dev:	device for which we do this operation
 * @table:	table to free
 *
 * Free up the table allocated by dev_pm_opp_init_cpufreq_table
 */
void dev_pm_opp_free_cpufreq_table(struct device *dev,
				   struct cpufreq_frequency_table **table)
{
	if (!table)
		return;

	kfree(*table);
	*table = NULL;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
#endif	/* CONFIG_CPU_FREQ */
/* Required only for V1 bindings, as v2 can manage it from DT itself */
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
{
	struct opp_device *opp_dev;
	struct opp_table *opp_table;
	struct device *dev;
	int cpu, ret = 0;

	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table(cpu_dev);
	if (IS_ERR(opp_table)) {
		ret = PTR_ERR(opp_table);
		goto unlock;
	}

	for_each_cpu(cpu, cpumask) {
		if (cpu == cpu_dev->id)
			continue;

		dev = get_cpu_device(cpu);
		if (!dev) {
			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
				__func__, cpu);
			continue;
		}

		opp_dev = _add_opp_dev(dev, opp_table);
		if (!opp_dev) {
			dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n",
				__func__, cpu);
			continue;
		}
	}

unlock:
	mutex_unlock(&opp_table_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
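
/*
 * Example (illustrative only, not part of this file): with OPP v1 bindings a
 * cpufreq driver, after registering OPPs for the policy CPU, can mark the
 * remaining CPUs of the policy as sharing that OPP table. The "policy"
 * variable and the error handling below are hypothetical.
 *
 *	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
 *	if (ret)
 *		dev_warn(cpu_dev, "failed to mark OPPs as shared: %d\n", ret);
 */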
void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
{
	struct device *cpu_dev;
	int cpu;

	WARN_ON(cpumask_empty(cpumask));

	for_each_cpu(cpu, cpumask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n", __func__,
			       cpu);
			continue;
		}

		dev_pm_opp_of_remove_table(cpu_dev);
	}
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
{
	struct device *cpu_dev;
	int cpu, ret = 0;

	WARN_ON(cpumask_empty(cpumask));

	for_each_cpu(cpu, cpumask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n", __func__,
			       cpu);
			continue;
		}

		ret = dev_pm_opp_of_add_table(cpu_dev);
		if (ret) {
			pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
			       __func__, cpu, ret);

			/* Free all other OPPs */
			dev_pm_opp_of_cpumask_remove_table(cpumask);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
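
/*
 * Example (illustrative only, not part of this file): a driver that knows
 * which CPUs share a policy can populate all of their OPP tables from DT in
 * one call and tear them down again later. The "policy" variable below is
 * hypothetical.
 *
 *	ret = dev_pm_opp_of_cpumask_add_table(policy->cpus);
 *	if (ret)
 *		return ret;
 *
 * and, on driver exit or a later error path:
 *
 *	dev_pm_opp_of_cpumask_remove_table(policy->cpus);
 */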
/*
 * Works only for OPP v2 bindings.
 *
 * Returns -ENOENT if operating-points-v2 bindings aren't supported.
 */
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
{
	struct device_node *np, *tmp_np;
	struct device *tcpu_dev;
	int cpu, ret = 0;

	/* Get OPP descriptor node */
	np = _of_get_opp_desc_node(cpu_dev);
	if (!np) {
		dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
		return -ENOENT;
	}

	cpumask_set_cpu(cpu_dev->id, cpumask);

	/* OPPs are shared ? */
	if (!of_property_read_bool(np, "opp-shared"))
		goto put_cpu_node;

	for_each_possible_cpu(cpu) {
		if (cpu == cpu_dev->id)
			continue;

		tcpu_dev = get_cpu_device(cpu);
		if (!tcpu_dev) {
			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
				__func__, cpu);
			ret = -ENODEV;
			goto put_cpu_node;
		}

		/* Get OPP descriptor node */
		tmp_np = _of_get_opp_desc_node(tcpu_dev);
		if (!tmp_np) {
			dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
				__func__);
			ret = -ENOENT;
			goto put_cpu_node;
		}

		/* CPUs are sharing opp node */
		if (np == tmp_np)
			cpumask_set_cpu(cpu, cpumask);

		of_node_put(tmp_np);
	}

put_cpu_node:
	of_node_put(np);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
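
/*
 * Example (illustrative only, not part of this file): with OPP v2 bindings a
 * cpufreq driver can ask which CPUs share the policy CPU's OPP table and use
 * the result to fill the policy cpumask. The "policy" variable is
 * hypothetical.
 *
 *	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
 *	if (ret == -ENOENT)
 *		dev_dbg(cpu_dev, "no operating-points-v2 bindings\n");
 *	else if (ret)
 *		return ret;
 */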