// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 *
 * Copyright (C) 2014 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/thermal.h>

#include "cpufreq-dt.h"
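
/*
 * Driver-private data, one instance per cpufreq policy, i.e. per group of
 * CPUs that share an OPP table (and hence a clock and supply).
 */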
struct private_data {
        struct list_head node;

        cpumask_var_t cpus;
        struct device *cpu_dev;
        struct cpufreq_frequency_table *freq_table;
        bool have_static_opps;
        int opp_token;
};

static LIST_HEAD(priv_list);

static struct freq_attr *cpufreq_dt_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,   /* Extra space for boost-attr if required */
        NULL,
};
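
/*
 * Find the private data whose CPU mask covers @cpu. Each entry on priv_list
 * was added by dt_cpufreq_early_init() and describes one policy.
 */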
static struct private_data *cpufreq_dt_find_data(int cpu)
{
        struct private_data *priv;

        list_for_each_entry(priv, &priv_list, node) {
                if (cpumask_test_cpu(cpu, priv->cpus))
                        return priv;
        }

        return NULL;
}
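
/*
 * cpufreq frequency-table entries are in kHz, while the OPP layer works in
 * Hz, hence the multiplication by 1000 below.
 */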
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
        struct private_data *priv = policy->driver_data;
        unsigned long freq = policy->freq_table[index].frequency;

        return dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
}

/*
 * An earlier version of opp-v1 bindings used to name the regulator
 * "cpu0-supply", we still need to handle that for backwards compatibility.
 */
static const char *find_supply_name(struct device *dev)
{
        struct device_node *np __free(device_node) = of_node_get(dev->of_node);
        int cpu = dev->id;

        /* This must be valid for sure */
        if (WARN_ON(!np))
                return NULL;

        /* Try "cpu0" for older DTs */
        if (!cpu && of_property_present(np, "cpu0-supply"))
                return "cpu0";

        if (of_property_present(np, "cpu-supply"))
                return "cpu";

        dev_dbg(dev, "no regulator for cpu%d\n", cpu);
        return NULL;
}
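
/*
 * Illustrative (not from any real board) device-tree snippet for the supply
 * lookup above; the property names are the only part the driver relies on:
 *
 *      cpu@0 {
 *              ...
 *              cpu-supply = <&cpu_reg>;   (or "cpu0-supply" on old opp-v1 DTs)
 *      };
 */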

static int cpufreq_init(struct cpufreq_policy *policy)
{
        struct private_data *priv;
        struct device *cpu_dev;
        struct clk *cpu_clk;
        unsigned int transition_latency;
        int ret;

        priv = cpufreq_dt_find_data(policy->cpu);
        if (!priv) {
                pr_err("failed to find data for cpu%d\n", policy->cpu);
                return -ENODEV;
        }
        cpu_dev = priv->cpu_dev;

        cpu_clk = clk_get(cpu_dev, NULL);
        if (IS_ERR(cpu_clk)) {
                ret = PTR_ERR(cpu_clk);
                dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret);
                return ret;
        }

        transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
        if (!transition_latency)
                transition_latency = CPUFREQ_ETERNAL;

        cpumask_copy(policy->cpus, priv->cpus);
        policy->driver_data = priv;
        policy->clk = cpu_clk;
        policy->freq_table = priv->freq_table;
        policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000;
        policy->cpuinfo.transition_latency = transition_latency;
        policy->dvfs_possible_from_any_cpu = true;

        /* Support turbo/boost mode */
        if (policy_has_boost_freq(policy)) {
                /* This gets disabled by core on driver unregister */
                ret = cpufreq_enable_boost_support();
                if (ret)
                        goto out_clk_put;
                cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
        }

        return 0;

out_clk_put:
        clk_put(cpu_clk);

        return ret;
}

static int cpufreq_online(struct cpufreq_policy *policy)
{
        /* We did light-weight tear down earlier, nothing to do here */
        return 0;
}

static int cpufreq_offline(struct cpufreq_policy *policy)
{
        /*
         * Preserve policy->driver_data and don't free resources on
         * light-weight tear down.
         */
        return 0;
}

static void cpufreq_exit(struct cpufreq_policy *policy)
{
        clk_put(policy->clk);
}

static struct cpufreq_driver dt_cpufreq_driver = {
        .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
                 CPUFREQ_IS_COOLING_DEV,
        .verify = cpufreq_generic_frequency_table_verify,
        .target_index = set_target,
        .get = cpufreq_generic_get,
        .init = cpufreq_init,
        .exit = cpufreq_exit,
        .online = cpufreq_online,
        .offline = cpufreq_offline,
        .register_em = cpufreq_register_em_with_opp,
        .name = "cpufreq-dt",
        .attr = cpufreq_dt_attr,
        .suspend = cpufreq_generic_suspend,
};
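
/*
 * Set up everything needed for one policy covering @cpu: allocate the
 * private data, hand the supply regulator to the OPP core, build the OPP
 * table and the cpufreq frequency table, and record which CPUs share them.
 * Called from probe for every possible CPU, before the driver registers.
 */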
static int dt_cpufreq_early_init(struct device *dev, int cpu)
{
        struct private_data *priv;
        struct device *cpu_dev;
        bool fallback = false;
        const char *reg_name[] = { NULL, NULL };
        int ret;

        /* Check if this CPU is already covered by some other policy */
        if (cpufreq_dt_find_data(cpu))
                return 0;

        cpu_dev = get_cpu_device(cpu);
        if (!cpu_dev)
                return -EPROBE_DEFER;

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL))
                return -ENOMEM;

        cpumask_set_cpu(cpu, priv->cpus);
        priv->cpu_dev = cpu_dev;

        /*
         * OPP layer will be taking care of regulators now, but it needs to know
         * the name of the regulator first.
         */
        reg_name[0] = find_supply_name(cpu_dev);
        if (reg_name[0]) {
                priv->opp_token = dev_pm_opp_set_regulators(cpu_dev, reg_name);
                if (priv->opp_token < 0) {
                        ret = dev_err_probe(cpu_dev, priv->opp_token,
                                            "failed to set regulators\n");
                        goto free_cpumask;
                }
        }

        /* Get OPP-sharing information from "operating-points-v2" bindings */
        ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus);
        if (ret) {
                if (ret != -ENOENT)
                        goto out;

                /*
                 * operating-points-v2 not supported, fallback to all CPUs share
                 * OPP for backward compatibility if the platform hasn't set
                 * sharing CPUs.
                 */
                if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus))
                        fallback = true;
        }

        /*
         * Initialize OPP tables for all priv->cpus. They will be shared by
         * all CPUs which have marked their CPUs shared with OPP bindings.
         *
         * For platforms not using operating-points-v2 bindings, we do this
         * before updating priv->cpus. Otherwise, we will end up creating
         * duplicate OPPs for the CPUs.
         *
         * OPPs might be populated at runtime, don't fail for error here unless
         * it is -EPROBE_DEFER.
         */
        ret = dev_pm_opp_of_cpumask_add_table(priv->cpus);
        if (!ret) {
                priv->have_static_opps = true;
        } else if (ret == -EPROBE_DEFER) {
                goto out;
        }

        /*
         * The OPP table must be initialized, statically or dynamically, by this
         * point.
         */
        ret = dev_pm_opp_get_opp_count(cpu_dev);
        if (ret <= 0) {
                dev_err(cpu_dev, "OPP table can't be empty\n");
                ret = -ENODEV;
                goto out;
        }

        if (fallback) {
                cpumask_setall(priv->cpus);
                ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus);
                if (ret)
                        dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
                                __func__, ret);
        }

        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &priv->freq_table);
        if (ret) {
                dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
                goto out;
        }

        list_add(&priv->node, &priv_list);
        return 0;

out:
        if (priv->have_static_opps)
                dev_pm_opp_of_cpumask_remove_table(priv->cpus);
        dev_pm_opp_put_regulators(priv->opp_token);
free_cpumask:
        free_cpumask_var(priv->cpus);
        return ret;
}
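
/*
 * Undo dt_cpufreq_early_init() for every policy on the list; used both on
 * probe failure and on driver removal.
 */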
static void dt_cpufreq_release(void)
{
        struct private_data *priv, *tmp;

        list_for_each_entry_safe(priv, tmp, &priv_list, node) {
                dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &priv->freq_table);
                if (priv->have_static_opps)
                        dev_pm_opp_of_cpumask_remove_table(priv->cpus);
                dev_pm_opp_put_regulators(priv->opp_token);
                free_cpumask_var(priv->cpus);
                list_del(&priv->node);
        }
}
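
/*
 * Probe runs the per-CPU early init first so any -EPROBE_DEFER is returned
 * before the driver is registered, then applies optional platform-data
 * overrides and finally registers the cpufreq driver.
 */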
static int dt_cpufreq_probe(struct platform_device *pdev)
{
        struct cpufreq_dt_platform_data *data = dev_get_platdata(&pdev->dev);
        int ret, cpu;

        /* Request resources early so we can return in case of -EPROBE_DEFER */
        for_each_possible_cpu(cpu) {
                ret = dt_cpufreq_early_init(&pdev->dev, cpu);
                if (ret)
                        goto err;
        }

        if (data) {
                if (data->have_governor_per_policy)
                        dt_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;

                dt_cpufreq_driver.resume = data->resume;
                if (data->suspend)
                        dt_cpufreq_driver.suspend = data->suspend;
                if (data->get_intermediate) {
                        dt_cpufreq_driver.target_intermediate = data->target_intermediate;
                        dt_cpufreq_driver.get_intermediate = data->get_intermediate;
                }
        }

        ret = cpufreq_register_driver(&dt_cpufreq_driver);
        if (ret) {
                dev_err(&pdev->dev, "failed register driver: %d\n", ret);
                goto err;
        }

        return 0;
err:
        dt_cpufreq_release();
        return ret;
}

static void dt_cpufreq_remove(struct platform_device *pdev)
{
        cpufreq_unregister_driver(&dt_cpufreq_driver);
        dt_cpufreq_release();
}

static struct platform_driver dt_cpufreq_platdrv = {
        .driver = {
                .name = "cpufreq-dt",
        },
        .probe = dt_cpufreq_probe,
        .remove_new = dt_cpufreq_remove,
};
module_platform_driver(dt_cpufreq_platdrv);

MODULE_ALIAS("platform:cpufreq-dt");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic cpufreq driver");
MODULE_LICENSE("GPL");