// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Power Interface (SCMI) based CPUFreq Interface driver
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 * Sudeep Holla <sudeep.holla@arm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>
#include <linux/units.h>

struct scmi_data {
	int domain_id;
	int nr_opp;
	struct device *cpu_dev;
	cpumask_var_t opp_shared_cpus;
};

static struct scmi_protocol_handle *ph;
static const struct scmi_perf_proto_ops *perf_ops;
static struct cpufreq_driver scmi_cpufreq_driver;

static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
	struct scmi_data *priv = policy->driver_data;
	unsigned long rate;
	int ret;

	ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false);
	if (ret)
		return 0;

	return rate / 1000;
}
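
/*
 * Note on units: the SCMI perf protocol reports and accepts rates in Hz,
 * while cpufreq frequency tables hold kHz, hence the rate / 1000 above and
 * the freq * 1000 conversions on the set paths below.
 */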

/*
 * perf_ops->freq_set is not synchronous: the actual OPP change happens
 * asynchronously and can be notified if the corresponding events are
 * subscribed for from the SCMI firmware.
 */
static int
scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
	struct scmi_data *priv = policy->driver_data;
	u64 freq = policy->freq_table[index].frequency;

	return perf_ops->freq_set(ph, priv->domain_id, freq * 1000, false);
}
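
/*
 * The fast-switch path below may run from scheduler context where sleeping
 * is not allowed, so the request is issued with the final 'poll' argument
 * set to true to use a polling transfer. Per the cpufreq fast_switch
 * contract, the frequency actually requested is returned on success and 0
 * on failure.
 */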
static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	struct scmi_data *priv = policy->driver_data;
	unsigned long freq = target_freq;

	if (!perf_ops->freq_set(ph, priv->domain_id, freq * 1000, true))
		return target_freq;

	return 0;
}
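
/*
 * scmi_cpu_domain_id() below resolves the SCMI performance domain for a CPU
 * from the device tree, either via a "clocks" phandle or via a
 * "power-domains" entry named "perf". An illustrative sketch of the two
 * bindings (node and label names are hypothetical):
 *
 *	cpu@0 {
 *		clocks = <&scmi_dvfs 0>;
 *	};
 *
 *	cpu@0 {
 *		power-domains = <&scmi_perf 0>;
 *		power-domain-names = "perf";
 *	};
 */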
static int scmi_cpu_domain_id(struct device *cpu_dev)
{
	struct device_node *np = cpu_dev->of_node;
	struct of_phandle_args domain_id;
	int index;

	if (of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
				       &domain_id)) {
		/* Find the corresponding index for power-domain "perf". */
		index = of_property_match_string(np, "power-domain-names",
						 "perf");
		if (index < 0)
			return -EINVAL;

		if (of_parse_phandle_with_args(np, "power-domains",
					       "#power-domain-cells", index,
					       &domain_id))
			return -EINVAL;
	}

	return domain_id.args[0];
}
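
/*
 * CPUs that resolve to the same perf domain id share SCMI performance
 * controls; the helper below collects them so the caller can group them
 * into a single cpufreq policy.
 */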
static int
scmi_get_sharing_cpus(struct device *cpu_dev, int domain,
		      struct cpumask *cpumask)
{
	int cpu, tdomain;
	struct device *tcpu_dev;

	for_each_possible_cpu(cpu) {
		if (cpu == cpu_dev->id)
			continue;

		tcpu_dev = get_cpu_device(cpu);
		if (!tcpu_dev)
			continue;

		tdomain = scmi_cpu_domain_id(tcpu_dev);
		if (tdomain == domain)
			cpumask_set_cpu(cpu, cpumask);
	}

	return 0;
}
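
/*
 * Energy Model callback: the EM core passes a target frequency in kHz and
 * expects the power cost back. SCMI works in Hz and may report power in
 * milliwatts, microwatts or an abstract (bogoWatt) scale, so the helper
 * below converts in both directions.
 */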
static int __maybe_unused
scmi_get_cpu_power(struct device *cpu_dev, unsigned long *power,
		   unsigned long *KHz)
{
	enum scmi_power_scale power_scale = perf_ops->power_scale_get(ph);
	unsigned long Hz;
	int ret, domain;

	domain = scmi_cpu_domain_id(cpu_dev);
	if (domain < 0)
		return domain;

	/* Get the power cost of the performance domain. */
	Hz = *KHz * 1000;
	ret = perf_ops->est_power_get(ph, domain, &Hz, power);
	if (ret)
		return ret;

	/* Convert the power to uW if it is mW (ignore bogoW) */
	if (power_scale == SCMI_POWER_MILLIWATTS)
		*power *= MICROWATT_PER_MILLIWATT;

	/* The EM framework specifies the frequency in KHz. */
	*KHz = Hz / 1000;

	return 0;
}
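
/*
 * The rate limit reported by the firmware bounds how often DVFS requests
 * may be issued; the result feeds policy->transition_delay_us in
 * scmi_cpufreq_init(), so it is assumed to be in microseconds here.
 */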
static int
scmi_get_rate_limit(u32 domain, bool has_fast_switch)
{
	u32 rate_limit;
	int ret;

	if (has_fast_switch) {
		/*
		 * Fast channels are used whenever available,
		 * so use their rate_limit value if populated.
		 */
		ret = perf_ops->fast_switch_rate_limit(ph, domain,
						       &rate_limit);
		if (!ret && rate_limit)
			return rate_limit;
	}

	ret = perf_ops->rate_limit_get(ph, domain, &rate_limit);
	if (ret)
		return 0;

	return rate_limit;
}

static struct freq_attr *scmi_cpufreq_hw_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
	NULL,
};

static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
	int ret, nr_opp, domain;
	unsigned int latency;
	struct device *cpu_dev;
	struct scmi_data *priv;
	struct cpufreq_frequency_table *freq_table;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", policy->cpu);
		return -ENODEV;
	}

	domain = scmi_cpu_domain_id(cpu_dev);
	if (domain < 0)
		return domain;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&priv->opp_shared_cpus, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_free_priv;
	}

	/* Obtain CPUs that share SCMI performance controls */
	ret = scmi_get_sharing_cpus(cpu_dev, domain, policy->cpus);
	if (ret) {
		dev_warn(cpu_dev, "failed to get sharing cpumask\n");
		goto out_free_cpumask;
	}

	/*
	 * Obtain CPUs that share performance levels.
	 * The OPP 'sharing cpus' info may come from DT through an empty opp
	 * table and opp-shared.
	 */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
	if (ret || cpumask_empty(priv->opp_shared_cpus)) {
		/*
		 * Either the opp-table is not set or no opp-shared was found.
		 * Use the CPU mask from SCMI to designate the CPUs sharing an
		 * OPP table.
		 */
		cpumask_copy(priv->opp_shared_cpus, policy->cpus);
	}

	/*
	 * A previous CPU may have marked OPPs as shared for a few CPUs, based
	 * on what the OPP core provided. If the current CPU is part of those
	 * few, then there is no need to add OPPs again.
	 */
	nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
	if (nr_opp <= 0) {
		ret = perf_ops->device_opps_add(ph, cpu_dev, domain);
		if (ret) {
			dev_warn(cpu_dev, "failed to add opps to the device\n");
			goto out_free_cpumask;
		}

		nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
		if (nr_opp <= 0) {
			dev_err(cpu_dev, "%s: No OPPs for this device: %d\n",
				__func__, nr_opp);

			ret = -ENODEV;
			goto out_free_opp;
		}

		ret = dev_pm_opp_set_sharing_cpus(cpu_dev,
						  priv->opp_shared_cpus);
		if (ret) {
			dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
				__func__, ret);
			goto out_free_opp;
		}

		priv->nr_opp = nr_opp;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto out_free_opp;
	}

	priv->cpu_dev = cpu_dev;
	priv->domain_id = domain;

	policy->driver_data = priv;
	policy->freq_table = freq_table;

	/* SCMI allows DVFS request for any domain from any CPU */
	policy->dvfs_possible_from_any_cpu = true;

	latency = perf_ops->transition_latency_get(ph, domain);
	if (!latency)
		latency = CPUFREQ_ETERNAL;

	policy->cpuinfo.transition_latency = latency;

	policy->fast_switch_possible =
		perf_ops->fast_switch_possible(ph, domain);

	policy->transition_delay_us =
		scmi_get_rate_limit(domain, policy->fast_switch_possible);

	if (policy_has_boost_freq(policy)) {
		ret = cpufreq_enable_boost_support();
		if (ret) {
			dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
			goto out_free_opp;
		} else {
			scmi_cpufreq_hw_attr[1] =
				&cpufreq_freq_attr_scaling_boost_freqs;
			scmi_cpufreq_driver.boost_enabled = true;
		}
	}

	return 0;

out_free_opp:
	dev_pm_opp_remove_all_dynamic(cpu_dev);

out_free_cpumask:
	free_cpumask_var(priv->opp_shared_cpus);

out_free_priv:
	kfree(priv);

	return ret;
}

static void scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct scmi_data *priv = policy->driver_data;

	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
	dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
	free_cpumask_var(priv->opp_shared_cpus);
	kfree(priv);
}

static void scmi_cpufreq_register_em(struct cpufreq_policy *policy)
{
	struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
	enum scmi_power_scale power_scale = perf_ops->power_scale_get(ph);
	struct scmi_data *priv = policy->driver_data;
	bool em_power_scale = false;

	/*
	 * This callback will be called for each policy, but we don't need to
	 * register with EM every time. Despite not being part of the same
	 * policy, some CPUs may still share their perf-domains, and a CPU from
	 * another policy may already have registered with EM on behalf of CPUs
	 * of this policy.
	 */
	if (!priv->nr_opp)
		return;

	if (power_scale == SCMI_POWER_MILLIWATTS
	    || power_scale == SCMI_POWER_MICROWATTS)
		em_power_scale = true;

	em_dev_register_perf_domain(get_cpu_device(policy->cpu), priv->nr_opp,
				    &em_cb, priv->opp_shared_cpus,
				    em_power_scale);
}

static struct cpufreq_driver scmi_cpufreq_driver = {
	.name	= "scmi",
	.flags	= CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
		  CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		  CPUFREQ_IS_COOLING_DEV,
	.verify	= cpufreq_generic_frequency_table_verify,
	.attr	= scmi_cpufreq_hw_attr,
	.target_index	= scmi_cpufreq_set_target,
	.fast_switch	= scmi_cpufreq_fast_switch,
	.get	= scmi_cpufreq_get_rate,
	.init	= scmi_cpufreq_init,
	.exit	= scmi_cpufreq_exit,
	.register_em	= scmi_cpufreq_register_em,
};

static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
	int ret;
	struct device *dev = &sdev->dev;
	const struct scmi_handle *handle;

	handle = sdev->handle;
	if (!handle)
		return -ENODEV;

	perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph);
	if (IS_ERR(perf_ops))
		return PTR_ERR(perf_ops);

#ifdef CONFIG_COMMON_CLK
	/* dummy clock provider as needed by OPP if clocks property is used */
	if (of_property_present(dev->of_node, "#clock-cells")) {
		ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
		if (ret)
			return dev_err_probe(dev, ret,
					     "%s: registering clock provider failed\n",
					     __func__);
	}
#endif

	ret = cpufreq_register_driver(&scmi_cpufreq_driver);
	if (ret)
		dev_err(dev, "%s: registering cpufreq failed, err: %d\n",
			__func__, ret);

	return ret;
}

static void scmi_cpufreq_remove(struct scmi_device *sdev)
{
	cpufreq_unregister_driver(&scmi_cpufreq_driver);
}

static const struct scmi_device_id scmi_id_table[] = {
	{ SCMI_PROTOCOL_PERF, "cpufreq" },
	{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);

static struct scmi_driver scmi_cpufreq_drv = {
	.name		= "scmi-cpufreq",
	.probe		= scmi_cpufreq_probe,
	.remove		= scmi_cpufreq_remove,
	.id_table	= scmi_id_table,
};
module_scmi_driver(scmi_cpufreq_drv);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI CPUFreq interface driver");
MODULE_LICENSE("GPL v2");