/*
 * CPPC (Collaborative Processor Performance Control) driver for
 * interfacing with the CPUfreq layer and governors. See
 * cppc_acpi.c for CPPC specific methods.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#define pr_fmt(fmt) "CPPC Cpufreq:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/vmalloc.h>

#include <asm/unaligned.h>

#include <acpi/cppc_acpi.h>

/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48

/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED		0x14
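/* The DMI processor entry is SMBIOS Type 4; its "Max Speed" field is in MHz. */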

/*
 * These structs contain information parsed from per CPU
 * ACPI _CPC structures.
 * e.g. For each CPU the highest, lowest supported
 * performance capabilities, desired performance level
 * requested etc.
 */
static struct cppc_cpudata **all_cpu_data;

/* Capture the max KHz from DMI */
static u64 cppc_dmi_max_khz;

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
	const u8 *dmi_data = (const u8 *)dm;
	u16 *mhz = (u16 *)private;

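	/*
	 * Track the largest "Max Speed" value (in MHz) found across all
	 * processor entries that are long enough to contain the field.
	 */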
	if (dm->type == DMI_ENTRY_PROCESSOR &&
	    dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
		u16 val = (u16)get_unaligned((const u16 *)
				(dmi_data + DMI_PROCESSOR_MAX_SPEED));
		*mhz = val > *mhz ? val : *mhz;
	}
}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
	u16 mhz = 0;

	dmi_walk(cppc_find_dmi_mhz, &mhz);

	/*
	 * Real stupid fallback value, just in case there is no
	 * actual value set.
	 */
	mhz = mhz ? mhz : 1;

	return (1000 * mhz);
}

static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
				   unsigned int target_freq,
				   unsigned int relation)
{
	struct cppc_cpudata *cpu;
	struct cpufreq_freqs freqs;
	int ret = 0;

	cpu = all_cpu_data[policy->cpu];
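
	/*
	 * Scale the requested kHz value by policy->max / cppc_dmi_max_khz to
	 * obtain the desired performance value programmed through the CPPC
	 * "desired performance" control below.
	 */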
	cpu->perf_ctrls.desired_perf = (u64)target_freq * policy->max / cppc_dmi_max_khz;
	freqs.old = policy->cur;
	freqs.new = target_freq;
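
	/*
	 * Notify the cpufreq core that a transition is starting, program the
	 * new desired performance via the CPPC controls, then signal
	 * completion (marking the transition failed if cppc_set_perf() errors).
	 */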
	cpufreq_freq_transition_begin(policy, &freqs);
	ret = cppc_set_perf(cpu->cpu, &cpu->perf_ctrls);
	cpufreq_freq_transition_end(policy, &freqs, ret != 0);

	if (ret)
		pr_debug("Failed to set target on CPU:%d. ret:%d\n",
			 cpu->cpu, ret);

	return ret;
}

static int cppc_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}
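
/*
 * The ->stop_cpu() callback runs when cpufreq stops managing a CPU (e.g. on
 * hot-unplug); drop that CPU's desired performance to the lowest level
 * advertised in its CPPC capabilities.
 */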
static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cppc_cpudata *cpu = all_cpu_data[cpu_num];
	int ret;

	cpu->perf_ctrls.desired_perf = cpu->perf_caps.lowest_perf;

	ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
	if (ret)
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 cpu->perf_caps.lowest_perf, cpu_num, ret);
}

static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu;
	unsigned int cpu_num = policy->cpu;
	int ret = 0;

	cpu = all_cpu_data[policy->cpu];

	cpu->cpu = cpu_num;
	ret = cppc_get_perf_caps(policy->cpu, &cpu->perf_caps);
	if (ret) {
		pr_debug("Err reading CPU%d perf capabilities. ret:%d\n",
			 cpu_num, ret);
		return ret;
	}
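
	/*
	 * CPPC capabilities are on an abstract performance scale; expose
	 * cpufreq limits in kHz by treating the DMI max frequency as the
	 * highest performance level and scaling lowest_perf against it.
	 */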
	cppc_dmi_max_khz = cppc_get_dmi_max_khz();

	policy->min = cpu->perf_caps.lowest_perf * cppc_dmi_max_khz /
		      cpu->perf_caps.highest_perf;
	policy->max = cppc_dmi_max_khz;
	policy->cpuinfo.min_freq = policy->min;
	policy->cpuinfo.max_freq = policy->max;
	policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num);
	policy->shared_type = cpu->shared_type;
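
	/*
	 * With SW_ANY coordination all CPUs in the _PSD domain are placed
	 * under this policy; SW_ALL coordination is not supported.
	 */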
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
		cpumask_copy(policy->cpus, cpu->shared_cpu_map);
	else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
		/* Support only SW_ANY for now. */
		pr_debug("Unsupported CPU co-ord type\n");
		return -EFAULT;
	}

	cpumask_set_cpu(policy->cpu, policy->cpus);
	cpu->cur_policy = policy;

	/* Set policy->cur to max now. The governors will adjust later. */
	policy->cur = cppc_dmi_max_khz;
	cpu->perf_ctrls.desired_perf = cpu->perf_caps.highest_perf;

	ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
	if (ret)
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 cpu->perf_caps.highest_perf, cpu_num, ret);

	return ret;
}
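
/*
 * CPPC does not provide a discrete frequency table, so this driver uses the
 * ->target() interface rather than ->target_index(). CPUFREQ_CONST_LOOPS
 * tells the cpufreq core that loops_per_jiffy does not scale with frequency.
 */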
static struct cpufreq_driver cppc_cpufreq_driver = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = cppc_verify_policy,
	.target = cppc_cpufreq_set_target,
	.init = cppc_cpufreq_cpu_init,
	.stop_cpu = cppc_cpufreq_stop_cpu,
	.name = "cppc_cpufreq",
};

static int __init cppc_cpufreq_init(void)
{
	int i, ret = 0;
	struct cppc_cpudata *cpu;

	all_cpu_data = kzalloc(sizeof(void *) * num_possible_cpus(), GFP_KERNEL);
	if (!all_cpu_data)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		all_cpu_data[i] = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
		if (!all_cpu_data[i])
			goto out;

		cpu = all_cpu_data[i];
		if (!zalloc_cpumask_var(&cpu->shared_cpu_map, GFP_KERNEL))
			goto out;
	}
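
	/*
	 * acpi_get_psd_map() parses the ACPI _PSD objects to fill in each
	 * CPU's shared_cpu_map and coordination type (shared_type) before
	 * the driver is registered.
	 */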
	ret = acpi_get_psd_map(all_cpu_data);
	if (ret) {
		pr_debug("Error parsing PSD data. Aborting cpufreq registration.\n");
		goto out;
	}

	ret = cpufreq_register_driver(&cppc_cpufreq_driver);
	if (ret)
		goto out;

	return ret;

out:
	for_each_possible_cpu(i)
		kfree(all_cpu_data[i]);

	kfree(all_cpu_data);
	return -ENODEV;
}
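
/* Module teardown: unregister the cpufreq driver and free the per-CPU CPPC data. */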
static void __exit cppc_cpufreq_exit(void)
{
	struct cppc_cpudata *cpu;
	int i;

	cpufreq_unregister_driver(&cppc_cpufreq_driver);

	for_each_possible_cpu(i) {
		cpu = all_cpu_data[i];
		free_cpumask_var(cpu->shared_cpu_map);
		kfree(cpu);
	}

	kfree(all_cpu_data);
}

module_exit(cppc_cpufreq_exit);
MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");

late_initcall(cppc_cpufreq_init);