/*
 * CPPC (Collaborative Processor Performance Control) driver for
 * interfacing with the CPUfreq layer and governors. See
 * cppc_acpi.c for CPPC specific methods.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
15 #define pr_fmt(fmt) "CPPC Cpufreq:" fmt
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/delay.h>
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/dmi.h>
23 #include <linux/time.h>
24 #include <linux/vmalloc.h>
26 #include <asm/unaligned.h>
28 #include <acpi/cppc_acpi.h>
30 /* Minimum struct length needed for the DMI processor entry we want */
31 #define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48
/* Offset in the DMI processor structure for the max frequency */
34 #define DMI_PROCESSOR_MAX_SPEED 0x14
/*
 * These structs contain information parsed from per CPU
 * ACPI _CPC structures.
 * e.g. For each CPU the highest, lowest supported
 * performance capabilities, desired performance level
 * requested etc.
 */
/* Per-CPU CPPC data, indexed by CPU number; allocated in cppc_cpufreq_init(). */
static struct cppc_cpudata **all_cpu_data;
45 /* Callback function used to retrieve the max frequency from DMI */
46 static void cppc_find_dmi_mhz(const struct dmi_header
*dm
, void *private)
48 const u8
*dmi_data
= (const u8
*)dm
;
49 u16
*mhz
= (u16
*)private;
51 if (dm
->type
== DMI_ENTRY_PROCESSOR
&&
52 dm
->length
>= DMI_ENTRY_PROCESSOR_MIN_LENGTH
) {
53 u16 val
= (u16
)get_unaligned((const u16
*)
54 (dmi_data
+ DMI_PROCESSOR_MAX_SPEED
));
55 *mhz
= val
> *mhz
? val
: *mhz
;
59 /* Look up the max frequency in DMI */
60 static u64
cppc_get_dmi_max_khz(void)
64 dmi_walk(cppc_find_dmi_mhz
, &mhz
);
67 * Real stupid fallback value, just in case there is no
76 * If CPPC lowest_freq and nominal_freq registers are exposed then we can
77 * use them to convert perf to freq and vice versa
79 * If the perf/freq point lies between Nominal and Lowest, we can treat
80 * (Low perf, Low freq) and (Nom Perf, Nom freq) as 2D co-ordinates of a line
81 * and extrapolate the rest
82 * For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion
84 static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata
*cpu
,
88 struct cppc_perf_caps
*caps
= &cpu
->perf_caps
;
91 if (caps
->lowest_freq
&& caps
->nominal_freq
) {
92 if (perf
>= caps
->nominal_perf
) {
93 mul
= caps
->nominal_freq
;
94 div
= caps
->nominal_perf
;
96 mul
= caps
->nominal_freq
- caps
->lowest_freq
;
97 div
= caps
->nominal_perf
- caps
->lowest_perf
;
101 max_khz
= cppc_get_dmi_max_khz();
103 div
= cpu
->perf_caps
.highest_perf
;
105 return (u64
)perf
* mul
/ div
;
108 static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata
*cpu
,
112 struct cppc_perf_caps
*caps
= &cpu
->perf_caps
;
115 if (caps
->lowest_freq
&& caps
->nominal_freq
) {
116 if (freq
>= caps
->nominal_freq
) {
117 mul
= caps
->nominal_perf
;
118 div
= caps
->nominal_freq
;
120 mul
= caps
->lowest_perf
;
121 div
= caps
->lowest_freq
;
125 max_khz
= cppc_get_dmi_max_khz();
126 mul
= cpu
->perf_caps
.highest_perf
;
130 return (u64
)freq
* mul
/ div
;
133 static int cppc_cpufreq_set_target(struct cpufreq_policy
*policy
,
134 unsigned int target_freq
,
135 unsigned int relation
)
137 struct cppc_cpudata
*cpu
;
138 struct cpufreq_freqs freqs
;
142 cpu
= all_cpu_data
[policy
->cpu
];
144 desired_perf
= cppc_cpufreq_khz_to_perf(cpu
, target_freq
);
145 /* Return if it is exactly the same perf */
146 if (desired_perf
== cpu
->perf_ctrls
.desired_perf
)
149 cpu
->perf_ctrls
.desired_perf
= desired_perf
;
150 freqs
.old
= policy
->cur
;
151 freqs
.new = target_freq
;
153 cpufreq_freq_transition_begin(policy
, &freqs
);
154 ret
= cppc_set_perf(cpu
->cpu
, &cpu
->perf_ctrls
);
155 cpufreq_freq_transition_end(policy
, &freqs
, ret
!= 0);
158 pr_debug("Failed to set target on CPU:%d. ret:%d\n",
/*
 * cpufreq ->verify() hook: clamp the requested policy limits to this CPU's
 * cpuinfo range. Must return 0 (a non-void function with no return is UB).
 */
static int cppc_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}
170 static void cppc_cpufreq_stop_cpu(struct cpufreq_policy
*policy
)
172 int cpu_num
= policy
->cpu
;
173 struct cppc_cpudata
*cpu
= all_cpu_data
[cpu_num
];
176 cpu
->perf_ctrls
.desired_perf
= cpu
->perf_caps
.lowest_perf
;
178 ret
= cppc_set_perf(cpu_num
, &cpu
->perf_ctrls
);
180 pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
181 cpu
->perf_caps
.lowest_perf
, cpu_num
, ret
);
/*
 * The PCC subspace describes the rate at which platform can accept commands
 * on the shared PCC channel (including READs which do not count towards freq
 * transition requests), so ideally we need to use the PCC values as a fallback
 * if we don't have a platform specific transition_delay_us
 */
191 #include <asm/cputype.h>
193 static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu
)
195 unsigned long implementor
= read_cpuid_implementor();
196 unsigned long part_num
= read_cpuid_part_number();
197 unsigned int delay_us
= 0;
199 switch (implementor
) {
200 case ARM_CPU_IMP_QCOM
:
202 case QCOM_CPU_PART_FALKOR_V1
:
203 case QCOM_CPU_PART_FALKOR
:
207 delay_us
= cppc_get_transition_latency(cpu
) / NSEC_PER_USEC
;
212 delay_us
= cppc_get_transition_latency(cpu
) / NSEC_PER_USEC
;
221 static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu
)
223 return cppc_get_transition_latency(cpu
) / NSEC_PER_USEC
;
227 static int cppc_cpufreq_cpu_init(struct cpufreq_policy
*policy
)
229 struct cppc_cpudata
*cpu
;
230 unsigned int cpu_num
= policy
->cpu
;
233 cpu
= all_cpu_data
[policy
->cpu
];
236 ret
= cppc_get_perf_caps(policy
->cpu
, &cpu
->perf_caps
);
239 pr_debug("Err reading CPU%d perf capabilities. ret:%d\n",
244 /* Convert the lowest and nominal freq from MHz to KHz */
245 cpu
->perf_caps
.lowest_freq
*= 1000;
246 cpu
->perf_caps
.nominal_freq
*= 1000;
249 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
250 * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
252 policy
->min
= cppc_cpufreq_perf_to_khz(cpu
, cpu
->perf_caps
.lowest_nonlinear_perf
);
253 policy
->max
= cppc_cpufreq_perf_to_khz(cpu
, cpu
->perf_caps
.highest_perf
);
256 * Set cpuinfo.min_freq to Lowest to make the full range of performance
257 * available if userspace wants to use any perf between lowest & lowest
260 policy
->cpuinfo
.min_freq
= cppc_cpufreq_perf_to_khz(cpu
, cpu
->perf_caps
.lowest_perf
);
261 policy
->cpuinfo
.max_freq
= cppc_cpufreq_perf_to_khz(cpu
, cpu
->perf_caps
.highest_perf
);
263 policy
->transition_delay_us
= cppc_cpufreq_get_transition_delay_us(cpu_num
);
264 policy
->shared_type
= cpu
->shared_type
;
266 if (policy
->shared_type
== CPUFREQ_SHARED_TYPE_ANY
) {
269 cpumask_copy(policy
->cpus
, cpu
->shared_cpu_map
);
271 for_each_cpu(i
, policy
->cpus
) {
272 if (unlikely(i
== policy
->cpu
))
275 memcpy(&all_cpu_data
[i
]->perf_caps
, &cpu
->perf_caps
,
276 sizeof(cpu
->perf_caps
));
278 } else if (policy
->shared_type
== CPUFREQ_SHARED_TYPE_ALL
) {
279 /* Support only SW_ANY for now. */
280 pr_debug("Unsupported CPU co-ord type\n");
284 cpu
->cur_policy
= policy
;
286 /* Set policy->cur to max now. The governors will adjust later. */
287 policy
->cur
= cppc_cpufreq_perf_to_khz(cpu
,
288 cpu
->perf_caps
.highest_perf
);
289 cpu
->perf_ctrls
.desired_perf
= cpu
->perf_caps
.highest_perf
;
291 ret
= cppc_set_perf(cpu_num
, &cpu
->perf_ctrls
);
293 pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
294 cpu
->perf_caps
.highest_perf
, cpu_num
, ret
);
299 static struct cpufreq_driver cppc_cpufreq_driver
= {
300 .flags
= CPUFREQ_CONST_LOOPS
,
301 .verify
= cppc_verify_policy
,
302 .target
= cppc_cpufreq_set_target
,
303 .init
= cppc_cpufreq_cpu_init
,
304 .stop_cpu
= cppc_cpufreq_stop_cpu
,
305 .name
= "cppc_cpufreq",
308 static int __init
cppc_cpufreq_init(void)
311 struct cppc_cpudata
*cpu
;
316 all_cpu_data
= kcalloc(num_possible_cpus(), sizeof(void *),
321 for_each_possible_cpu(i
) {
322 all_cpu_data
[i
] = kzalloc(sizeof(struct cppc_cpudata
), GFP_KERNEL
);
323 if (!all_cpu_data
[i
])
326 cpu
= all_cpu_data
[i
];
327 if (!zalloc_cpumask_var(&cpu
->shared_cpu_map
, GFP_KERNEL
))
331 ret
= acpi_get_psd_map(all_cpu_data
);
333 pr_debug("Error parsing PSD data. Aborting cpufreq registration.\n");
337 ret
= cpufreq_register_driver(&cppc_cpufreq_driver
);
344 for_each_possible_cpu(i
) {
345 cpu
= all_cpu_data
[i
];
348 free_cpumask_var(cpu
->shared_cpu_map
);
356 static void __exit
cppc_cpufreq_exit(void)
358 struct cppc_cpudata
*cpu
;
361 cpufreq_unregister_driver(&cppc_cpufreq_driver
);
363 for_each_possible_cpu(i
) {
364 cpu
= all_cpu_data
[i
];
365 free_cpumask_var(cpu
->shared_cpu_map
);
372 module_exit(cppc_cpufreq_exit
);
373 MODULE_AUTHOR("Ashwin Chaugule");
374 MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
375 MODULE_LICENSE("GPL");
377 late_initcall(cppc_cpufreq_init
);
379 static const struct acpi_device_id cppc_acpi_ids
[] = {
380 {ACPI_PROCESSOR_DEVICE_HID
, },
384 MODULE_DEVICE_TABLE(acpi
, cppc_acpi_ids
);