// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) driver for
 * interfacing with the CPUfreq layer and governors. See
 * cppc_acpi.c for CPPC specific methods.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 */

#define pr_fmt(fmt)	"CPPC Cpufreq:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/time.h>
#include <linux/vmalloc.h>

#include <asm/unaligned.h>

#include <acpi/cppc_acpi.h>

/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48

/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED		0x14

/*
 * This list contains information parsed from per CPU ACPI _CPC and _PSD
 * structures: e.g. the highest and lowest supported performance, capabilities,
 * desired performance, level requested etc. Depending on the share_type, not
 * all CPUs will have an entry in the list.
 */
static LIST_HEAD(cpu_data_list);

static bool boost_supported;

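/*
 * OEM info used to match firmware that needs the HiSilicon rate-read
 * workaround; see cppc_check_hisi_workaround() further down.
 */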
struct cppc_workaround_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

static struct cppc_workaround_oem_info wa_info[] = {
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	}, {
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP08   ",
		.oem_revision	= 0,
	}
};

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
	const u8 *dmi_data = (const u8 *)dm;
	u16 *mhz = (u16 *)private;

	if (dm->type == DMI_ENTRY_PROCESSOR &&
	    dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
		u16 val = (u16)get_unaligned((const u16 *)
				(dmi_data + DMI_PROCESSOR_MAX_SPEED));
		*mhz = val > *mhz ? val : *mhz;
	}
}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
	u16 mhz = 0;

	dmi_walk(cppc_find_dmi_mhz, &mhz);

	/*
	 * Real stupid fallback value, just in case there is no
	 * actual value set.
	 */
	mhz = mhz ? mhz : 1;

	return (1000 * mhz);
}

/*
 * If CPPC lowest_freq and nominal_freq registers are exposed then we can
 * use them to convert perf to freq and vice versa.
 *
 * If the perf/freq point lies between Nominal and Lowest, we can treat
 * (Low perf, Low freq) and (Nom perf, Nom freq) as 2D co-ordinates of a line
 * and extrapolate the rest.
 * For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion.
 */
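/*
 * Worked example with made-up numbers: if lowest = (perf 10, 500000 kHz)
 * and nominal = (perf 30, 1500000 kHz), then a request for perf 20 maps to
 * 20 * (1500000 - 500000) / (30 - 10) = 1000000 kHz, while perf 36 (above
 * nominal) maps to 36 * 1500000 / 30 = 1800000 kHz.
 */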
static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
					     unsigned int perf)
{
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		if (perf >= caps->nominal_perf) {
			mul = caps->nominal_freq;
			div = caps->nominal_perf;
		} else {
			mul = caps->nominal_freq - caps->lowest_freq;
			div = caps->nominal_perf - caps->lowest_perf;
		}
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = max_khz;
		div = caps->highest_perf;
	}

	return (u64)perf * mul / div;
}

static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
					     unsigned int freq)
{
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		if (freq >= caps->nominal_freq) {
			mul = caps->nominal_perf;
			div = caps->nominal_freq;
		} else {
			mul = caps->lowest_perf;
			div = caps->lowest_freq;
		}
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = caps->highest_perf;
		div = max_khz;
	}

	return (u64)freq * mul / div;
}

static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
				   unsigned int target_freq,
				   unsigned int relation)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	unsigned int cpu = policy->cpu;
	struct cpufreq_freqs freqs;
	u32 desired_perf;
	int ret = 0;

	desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
	/* Return if it is exactly the same perf */
	if (desired_perf == cpu_data->perf_ctrls.desired_perf)
		return ret;

	cpu_data->perf_ctrls.desired_perf = desired_perf;
	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	cpufreq_freq_transition_end(policy, &freqs, ret != 0);

	if (ret)
		pr_debug("Failed to set target on CPU:%d. ret:%d\n",
			 cpu, ret);

	return ret;
}

static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}

static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	unsigned int cpu = policy->cpu;
	int ret;

	cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;

	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	if (ret)
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 caps->lowest_perf, cpu, ret);

	/* Remove CPU node from list and free driver data for policy */
	free_cpumask_var(cpu_data->shared_cpu_map);
	list_del(&cpu_data->node);
	kfree(policy->driver_data);
	policy->driver_data = NULL;
}

/*
 * The PCC subspace describes the rate at which the platform can accept
 * commands on the shared PCC channel (including READs which do not count
 * towards freq transition requests), so ideally we need to use the PCC
 * values as a fallback if we don't have a platform specific
 * transition_delay_us.
 */
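/*
 * cppc_get_transition_latency() reports the latency in nanoseconds, hence
 * the NSEC_PER_USEC scaling in both variants below.
 */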
#ifdef CONFIG_ARM64
#include <asm/cputype.h>

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_num = read_cpuid_part_number();
	unsigned int delay_us = 0;

	switch (implementor) {
	case ARM_CPU_IMP_QCOM:
		switch (part_num) {
		case QCOM_CPU_PART_FALKOR_V1:
		case QCOM_CPU_PART_FALKOR:
			delay_us = 10000;
			break;
		default:
			delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
			break;
		}
		break;
	default:
		delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
		break;
	}

	return delay_us;
}

#else

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
#endif

static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
{
	struct cppc_cpudata *cpu_data;
	int ret;

	cpu_data = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
	if (!cpu_data)
		goto out;

	if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
		goto free_cpu;

	ret = acpi_get_psd_map(cpu, cpu_data);
	if (ret) {
		pr_debug("Err parsing CPU%d PSD data: ret:%d\n", cpu, ret);
		goto free_mask;
	}

	ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps);
	if (ret) {
		pr_debug("Err reading CPU%d perf caps: ret:%d\n", cpu, ret);
		goto free_mask;
	}

	/* Convert the lowest and nominal freq from MHz to KHz */
	cpu_data->perf_caps.lowest_freq *= 1000;
	cpu_data->perf_caps.nominal_freq *= 1000;

	list_add(&cpu_data->node, &cpu_data_list);

	return cpu_data;

free_mask:
	free_cpumask_var(cpu_data->shared_cpu_map);
free_cpu:
	kfree(cpu_data);
out:
	return NULL;
}

static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	struct cppc_cpudata *cpu_data;
	struct cppc_perf_caps *caps;
	int ret;

	cpu_data = cppc_cpufreq_get_cpu_data(cpu);
	if (!cpu_data) {
		pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu);
		return -ENODEV;
	}

	caps = &cpu_data->perf_caps;
	policy->driver_data = cpu_data;

	/*
	 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
	 * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
	 */
	policy->min = cppc_cpufreq_perf_to_khz(cpu_data,
					       caps->lowest_nonlinear_perf);
	policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
					       caps->nominal_perf);

	/*
	 * Set cpuinfo.min_freq to Lowest to make the full range of performance
	 * available if userspace wants to use any perf between lowest & lowest
	 * nonlinear perf
	 */
	policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data,
							    caps->lowest_perf);
	policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data,
							    caps->nominal_perf);

	policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
	policy->shared_type = cpu_data->shared_type;

	switch (policy->shared_type) {
	case CPUFREQ_SHARED_TYPE_HW:
	case CPUFREQ_SHARED_TYPE_NONE:
		/* Nothing to be done - we'll have a policy for each CPU */
		break;
	case CPUFREQ_SHARED_TYPE_ANY:
		/*
		 * All CPUs in the domain will share a policy and all cpufreq
		 * operations will use a single cppc_cpudata structure stored
		 * in policy->driver_data.
		 */
		cpumask_copy(policy->cpus, cpu_data->shared_cpu_map);
		break;
	default:
		pr_debug("Unsupported CPU co-ord type: %d\n",
			 policy->shared_type);
		return -EFAULT;
	}

	/*
	 * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
	 * is supported.
	 */
	if (caps->highest_perf > caps->nominal_perf)
		boost_supported = true;

	/* Set policy->cur to max now. The governors will adjust later. */
	policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf);
	cpu_data->perf_ctrls.desired_perf = caps->highest_perf;

	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	if (ret)
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 caps->highest_perf, cpu, ret);

	return ret;
}

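/*
 * Feedback counters may be 32 or 64 bits wide. If no wraparound occurred
 * (t1 > t0), or if t0 already exceeds 32 bits (so the counter must be
 * 64 bits wide and plain u64 arithmetic handles any wrap), subtract
 * directly; otherwise assume a wrapped 32-bit counter and compute the
 * delta modulo 2^32.
 */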
static inline u64 get_delta(u64 t1, u64 t0)
{
	if (t1 > t0 || t0 > ~(u32)0)
		return t1 - t0;

	return (u32)t1 - (u32)t0;
}

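/*
 * Delivered performance is inferred from the counter deltas: the delivered
 * counter is scaled relative to the reference counter, i.e.
 * delivered_perf = reference_perf * delta_delivered / delta_reference.
 */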
static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
				     struct cppc_perf_fb_ctrs fb_ctrs_t0,
				     struct cppc_perf_fb_ctrs fb_ctrs_t1)
{
	u64 delta_reference, delta_delivered;
	u64 reference_perf, delivered_perf;

	reference_perf = fb_ctrs_t0.reference_perf;

	delta_reference = get_delta(fb_ctrs_t1.reference,
				    fb_ctrs_t0.reference);
	delta_delivered = get_delta(fb_ctrs_t1.delivered,
				    fb_ctrs_t0.delivered);

	/* Check to avoid divide-by-zero */
	if (delta_reference || delta_delivered)
		delivered_perf = (reference_perf * delta_delivered) /
					delta_reference;
	else
		delivered_perf = cpu_data->perf_ctrls.desired_perf;

	return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
}

static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
	struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cppc_cpudata *cpu_data = policy->driver_data;
	int ret;

	cpufreq_cpu_put(policy);

	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
	if (ret)
		return ret;

	udelay(2); /* 2usec delay between sampling */

	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
	if (ret)
		return ret;

	return cppc_get_rate_from_fbctrs(cpu_data, fb_ctrs_t0, fb_ctrs_t1);
}

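/*
 * Boost toggles the policy maximum between nominal perf (boost off) and
 * the highest boost perf (boost on) exposed by the platform.
 */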
static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	int ret;

	if (!boost_supported) {
		pr_err("BOOST not supported by CPU or firmware\n");
		return -EINVAL;
	}

	if (state)
		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
						       caps->highest_perf);
	else
		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
						       caps->nominal_perf);
	policy->cpuinfo.max_freq = policy->max;

	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
	if (ret < 0)
		return ret;

	return 0;
}

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;

	return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
}
cpufreq_freq_attr_ro(freqdomain_cpus);

static struct freq_attr *cppc_cpufreq_attr[] = {
	&freqdomain_cpus,
	NULL,
};

static struct cpufreq_driver cppc_cpufreq_driver = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = cppc_verify_policy,
	.target = cppc_cpufreq_set_target,
	.get = cppc_cpufreq_get_rate,
	.init = cppc_cpufreq_cpu_init,
	.stop_cpu = cppc_cpufreq_stop_cpu,
	.set_boost = cppc_cpufreq_set_boost,
	.attr = cppc_cpufreq_attr,
	.name = "cppc_cpufreq",
};

/*
 * HISI platform does not support delivered performance counter and
 * reference performance counter. It can calculate the performance using the
 * platform specific mechanism. We reuse the desired performance register to
 * store the real performance calculated by the platform.
 */
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cppc_cpudata *cpu_data = policy->driver_data;
	u64 desired_perf;
	int ret;

	cpufreq_cpu_put(policy);

	ret = cppc_get_desired_perf(cpu, &desired_perf);
	if (ret < 0)
		return -EIO;

	return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf);
}

static void cppc_check_hisi_workaround(void)
{
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	int i;

	status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
	if (ACPI_FAILURE(status) || !tbl)
		return;

	for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
		if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    wa_info[i].oem_revision == tbl->oem_revision) {
			/* Overwrite the get() callback */
			cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
			break;
		}
	}

	acpi_put_table(tbl);
}

static int __init cppc_cpufreq_init(void)
{
	if ((acpi_disabled) || !acpi_cpc_valid())
		return -ENODEV;

	INIT_LIST_HEAD(&cpu_data_list);

	cppc_check_hisi_workaround();

	return cpufreq_register_driver(&cppc_cpufreq_driver);
}

static inline void free_cpu_data(void)
{
	struct cppc_cpudata *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
		free_cpumask_var(iter->shared_cpu_map);
		list_del(&iter->node);
		kfree(iter);
	}
}

static void __exit cppc_cpufreq_exit(void)
{
	cpufreq_unregister_driver(&cppc_cpufreq_driver);

	free_cpu_data();
}

module_exit(cppc_cpufreq_exit);
MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");

late_initcall(cppc_cpufreq_init);

static const struct acpi_device_id cppc_acpi_ids[] __used = {
	{ACPI_PROCESSOR_DEVICE_HID, },
	{}
};

MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);