// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) driver for
 * interfacing with the CPUfreq layer and governors. See
 * cppc_acpi.c for CPPC specific methods.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 */

#define pr_fmt(fmt)	"CPPC Cpufreq:" fmt

#include <linux/arch_topology.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <uapi/linux/sched/types.h>

#include <linux/unaligned.h>

#include <acpi/cppc_acpi.h>

/*
 * This list contains information parsed from per CPU ACPI _CPC and _PSD
 * structures: e.g. the highest and lowest supported performance, capabilities,
 * desired performance, level requested etc. Depending on the share_type, not
 * all CPUs will have an entry in the list.
 */
static LIST_HEAD(cpu_data_list);

static bool boost_supported;

static struct cpufreq_driver cppc_cpufreq_driver;

#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
static enum {
        FIE_UNSET = -1,
        FIE_ENABLED,
        FIE_DISABLED
} fie_disabled = FIE_UNSET;

module_param(fie_disabled, int, 0444);
MODULE_PARM_DESC(fie_disabled, "Disable Frequency Invariance Engine (FIE)");

/* Frequency invariance support */
struct cppc_freq_invariance {
        int cpu;
        struct irq_work irq_work;
        struct kthread_work work;
        struct cppc_perf_fb_ctrs prev_perf_fb_ctrs;
        struct cppc_cpudata *cpu_data;
};

static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
static struct kthread_worker *kworker_fie;

static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
                                 struct cppc_perf_fb_ctrs *fb_ctrs_t0,
                                 struct cppc_perf_fb_ctrs *fb_ctrs_t1);

/**
 * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
 * @work: The work item.
 *
 * The CPPC driver registers itself with the topology core to provide its own
 * implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which
 * gets called by the scheduler on every tick.
 *
 * Note that the arch specific counters have higher priority than CPPC counters,
 * if available, though the CPPC driver doesn't need to have any special
 * handling for that.
 *
 * On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
 * reach here from hard-irq context), which then schedules a normal work item
 * and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable
 * based on the counter updates since the last tick.
 */
static void cppc_scale_freq_workfn(struct kthread_work *work)
{
        struct cppc_freq_invariance *cppc_fi;
        struct cppc_perf_fb_ctrs fb_ctrs = {0};
        struct cppc_cpudata *cpu_data;
        unsigned long local_freq_scale;
        u64 perf;

        cppc_fi = container_of(work, struct cppc_freq_invariance, work);
        cpu_data = cppc_fi->cpu_data;

        if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
                pr_warn("%s: failed to read perf counters\n", __func__);
                return;
        }

        perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
                                     &fb_ctrs);
        if (!perf)
                return;

        cppc_fi->prev_perf_fb_ctrs = fb_ctrs;

        perf <<= SCHED_CAPACITY_SHIFT;
        local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);

        /* This can happen due to counter's overflow */
        if (unlikely(local_freq_scale > 1024))
                local_freq_scale = 1024;

        per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
}

static void cppc_irq_work(struct irq_work *irq_work)
{
        struct cppc_freq_invariance *cppc_fi;

        cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work);
        kthread_queue_work(kworker_fie, &cppc_fi->work);
}

static void cppc_scale_freq_tick(void)
{
        struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());

        /*
         * cppc_get_perf_ctrs() can potentially sleep, call that from the right
         * context.
         */
        irq_work_queue(&cppc_fi->irq_work);
}

static struct scale_freq_data cppc_sftd = {
        .source = SCALE_FREQ_SOURCE_CPPC,
        .set_freq_scale = cppc_scale_freq_tick,
};

static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
{
        struct cppc_freq_invariance *cppc_fi;
        int cpu, ret;

        if (fie_disabled)
                return;

        for_each_cpu(cpu, policy->cpus) {
                cppc_fi = &per_cpu(cppc_freq_inv, cpu);
                cppc_fi->cpu = cpu;
                cppc_fi->cpu_data = policy->driver_data;
                kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
                init_irq_work(&cppc_fi->irq_work, cppc_irq_work);

                ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs);
                if (ret) {
                        pr_warn("%s: failed to read perf counters for cpu:%d: %d\n",
                                __func__, cpu, ret);

                        /*
                         * Don't abort if the CPU was offline while the driver
                         * was getting registered.
                         */
                        if (cpu_online(cpu))
                                return;
                }
        }

        /* Register for freq-invariance */
        topology_set_scale_freq_source(&cppc_sftd, policy->cpus);
}

/*
 * We free all the resources on policy's removal and not on CPU removal as the
 * irq-works are per-cpu and the hotplug core takes care of flushing the pending
 * irq-works (hint: smpcfd_dying_cpu()) on CPU hotplug. Even if the kthread-work
 * fires on another CPU after the concerned CPU is removed, it won't harm.
 *
 * We just need to make sure to remove them all on policy->exit().
 */
static void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
{
        struct cppc_freq_invariance *cppc_fi;
        int cpu;

        if (fie_disabled)
                return;

        /* policy->cpus will be empty here, use related_cpus instead */
        topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, policy->related_cpus);

        for_each_cpu(cpu, policy->related_cpus) {
                cppc_fi = &per_cpu(cppc_freq_inv, cpu);
                irq_work_sync(&cppc_fi->irq_work);
                kthread_cancel_work_sync(&cppc_fi->work);
        }
}

static void __init cppc_freq_invariance_init(void)
{
        struct sched_attr attr = {
                .size           = sizeof(struct sched_attr),
                .sched_policy   = SCHED_DEADLINE,
                .sched_nice     = 0,
                .sched_priority = 0,
                /*
                 * Fake (unused) bandwidth; workaround to "fix"
                 * priority inheritance.
                 */
                .sched_runtime  = NSEC_PER_MSEC,
                .sched_deadline = 10 * NSEC_PER_MSEC,
                .sched_period   = 10 * NSEC_PER_MSEC,
        };
        int ret;

        if (fie_disabled != FIE_ENABLED && fie_disabled != FIE_DISABLED) {
                fie_disabled = FIE_ENABLED;
                if (cppc_perf_ctrs_in_pcc()) {
                        pr_info("FIE not enabled on systems with registers in PCC\n");
                        fie_disabled = FIE_DISABLED;
                }
        }

        if (fie_disabled)
                return;

        kworker_fie = kthread_create_worker(0, "cppc_fie");
        if (IS_ERR(kworker_fie)) {
                pr_warn("%s: failed to create kworker_fie: %ld\n", __func__,
                        PTR_ERR(kworker_fie));
                fie_disabled = FIE_DISABLED;
                return;
        }

        ret = sched_setattr_nocheck(kworker_fie->task, &attr);
        if (ret) {
                pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
                        ret);
                kthread_destroy_worker(kworker_fie);
                fie_disabled = FIE_DISABLED;
        }
}

static void cppc_freq_invariance_exit(void)
{
        if (fie_disabled)
                return;

        kthread_destroy_worker(kworker_fie);
}

#else
static inline void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
{
}

static inline void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
{
}

static inline void cppc_freq_invariance_init(void)
{
}

static inline void cppc_freq_invariance_exit(void)
{
}
#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */

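/*
 * Convert the requested frequency to a CPPC desired performance value and
 * program it via cppc_set_perf(), wrapped in cpufreq transition notifications.
 */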
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
                                   unsigned int target_freq,
                                   unsigned int relation)
{
        struct cppc_cpudata *cpu_data = policy->driver_data;
        unsigned int cpu = policy->cpu;
        struct cpufreq_freqs freqs;
        int ret = 0;

        cpu_data->perf_ctrls.desired_perf =
                        cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
        freqs.old = policy->cur;
        freqs.new = target_freq;

        cpufreq_freq_transition_begin(policy, &freqs);
        ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
        cpufreq_freq_transition_end(policy, &freqs, ret != 0);

        if (ret)
                pr_debug("Failed to set target on CPU:%d. ret:%d\n",
                         cpu, ret);

        return ret;
}

static unsigned int cppc_cpufreq_fast_switch(struct cpufreq_policy *policy,
                                             unsigned int target_freq)
{
        struct cppc_cpudata *cpu_data = policy->driver_data;
        unsigned int cpu = policy->cpu;
        u32 desired_perf;
        int ret;

        desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
        cpu_data->perf_ctrls.desired_perf = desired_perf;
        ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);

        if (ret) {
                pr_debug("Failed to set target on CPU:%d. ret:%d\n",
                         cpu, ret);
                return 0;
        }

        return target_freq;
}

static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
        cpufreq_verify_within_cpu_limits(policy);
        return 0;
}

/*
 * The PCC subspace describes the rate at which the platform can accept commands
 * on the shared PCC channel (including READs which do not count towards freq
 * transition requests), so ideally we need to use the PCC values as a fallback
 * if we don't have a platform specific transition_delay_us.
 */
#ifdef CONFIG_ARM64
#include <asm/cputype.h>

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
        unsigned long implementor = read_cpuid_implementor();
        unsigned long part_num = read_cpuid_part_number();

        switch (implementor) {
        case ARM_CPU_IMP_QCOM:
                switch (part_num) {
                case QCOM_CPU_PART_FALKOR_V1:
                case QCOM_CPU_PART_FALKOR:
                        return 10000;
                }
        }

        return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
#else

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
        return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
#endif

#if defined(CONFIG_ARM64) && defined(CONFIG_ENERGY_MODEL)

static DEFINE_PER_CPU(unsigned int, efficiency_class);
static void cppc_cpufreq_register_em(struct cpufreq_policy *policy);

/* Create an artificial performance state every CPPC_EM_CAP_STEP capacity unit. */
#define CPPC_EM_CAP_STEP	(20)
/* Increase the cost value by CPPC_EM_COST_STEP every performance state. */
#define CPPC_EM_COST_STEP	(1)
/* Add a cost gap corresponding to the energy of 4 CPUs. */
#define CPPC_EM_COST_GAP	(4 * SCHED_CAPACITY_SCALE * CPPC_EM_COST_STEP \
				/ CPPC_EM_CAP_STEP)

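/*
 * Number of artificial EM performance states for this policy: one state per
 * CPPC_EM_CAP_STEP capacity units between the lowest and highest capacity.
 * For example, with max_cap = 1024 and min_cap = 512 this yields
 * 1 + 1024/20 - 512/20 = 27 states.
 */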
static unsigned int get_perf_level_count(struct cpufreq_policy *policy)
{
        struct cppc_perf_caps *perf_caps;
        unsigned int min_cap, max_cap;
        struct cppc_cpudata *cpu_data;
        int cpu = policy->cpu;

        cpu_data = policy->driver_data;
        perf_caps = &cpu_data->perf_caps;
        max_cap = arch_scale_cpu_capacity(cpu);
        min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
                          perf_caps->highest_perf);
        if ((min_cap == 0) || (max_cap < min_cap))
                return 0;
        return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP;
}

/*
 * The cost is defined as:
 *   cost = power * max_frequency / frequency
 */
static inline unsigned long compute_cost(int cpu, int step)
{
        return CPPC_EM_COST_GAP * per_cpu(efficiency_class, cpu) +
                        step * CPPC_EM_COST_STEP;
}

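/*
 * Energy-model callback: round *KHz up to the next artificial performance
 * state and report the matching (artificial) power value for it.
 */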
static int cppc_get_cpu_power(struct device *cpu_dev,
                              unsigned long *power, unsigned long *KHz)
{
        unsigned long perf_step, perf_prev, perf, perf_check;
        unsigned int min_step, max_step, step, step_check;
        unsigned long prev_freq = *KHz;
        unsigned int min_cap, max_cap;
        struct cpufreq_policy *policy;
        struct cppc_perf_caps *perf_caps;
        struct cppc_cpudata *cpu_data;

        policy = cpufreq_cpu_get_raw(cpu_dev->id);
        if (!policy)
                return -EINVAL;

        cpu_data = policy->driver_data;
        perf_caps = &cpu_data->perf_caps;
        max_cap = arch_scale_cpu_capacity(cpu_dev->id);
        min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
                          perf_caps->highest_perf);
        perf_step = div_u64((u64)CPPC_EM_CAP_STEP * perf_caps->highest_perf,
                            max_cap);
        min_step = min_cap / CPPC_EM_CAP_STEP;
        max_step = max_cap / CPPC_EM_CAP_STEP;

        perf_prev = cppc_khz_to_perf(perf_caps, *KHz);
        step = perf_prev / perf_step;

        if (step > max_step)
                return -EINVAL;

        if (min_step == max_step) {
                step = max_step;
                perf = perf_caps->highest_perf;
        } else if (step < min_step) {
                step = min_step;
                perf = perf_caps->lowest_perf;
        } else {
                step++;
                if (step == max_step)
                        perf = perf_caps->highest_perf;
                else
                        perf = step * perf_step;
        }

        *KHz = cppc_perf_to_khz(perf_caps, perf);
        perf_check = cppc_khz_to_perf(perf_caps, *KHz);
        step_check = perf_check / perf_step;

        /*
         * To avoid bad integer approximation, check that the new frequency
         * value increased and that the new frequency will be converted to the
         * desired step value.
         */
        while ((*KHz == prev_freq) || (step_check != step)) {
                perf++;
                *KHz = cppc_perf_to_khz(perf_caps, perf);
                perf_check = cppc_khz_to_perf(perf_caps, *KHz);
                step_check = perf_check / perf_step;
        }

        /*
         * With an artificial EM, only the cost value is used. Still, the power
         * is populated such that 0 < power < EM_MAX_POWER. This gives more
         * sense to the artificial performance states.
         */
        *power = compute_cost(cpu_dev->id, step);

        return 0;
}

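/*
 * Energy-model cost callback: map KHz back to its artificial performance
 * state (step) and return its cost, offset by the CPU's efficiency class.
 */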
static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
                             unsigned long *cost)
{
        unsigned long perf_step, perf_prev;
        struct cppc_perf_caps *perf_caps;
        struct cpufreq_policy *policy;
        struct cppc_cpudata *cpu_data;
        unsigned int max_cap;
        int step;

        policy = cpufreq_cpu_get_raw(cpu_dev->id);
        if (!policy)
                return -EINVAL;

        cpu_data = policy->driver_data;
        perf_caps = &cpu_data->perf_caps;
        max_cap = arch_scale_cpu_capacity(cpu_dev->id);

        perf_prev = cppc_khz_to_perf(perf_caps, KHz);
        perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
        step = perf_prev / perf_step;

        *cost = compute_cost(cpu_dev->id, step);

        return 0;
}

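/*
 * Read the GICC efficiency class of every possible CPU from the MADT. If more
 * than one class is in use, remap the values to a dense [0..N-1] range and
 * enable energy-model registration for the driver.
 */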
static int populate_efficiency_class(void)
{
        struct acpi_madt_generic_interrupt *gicc;
        DECLARE_BITMAP(used_classes, 256) = {};
        int class, cpu, index;

        for_each_possible_cpu(cpu) {
                gicc = acpi_cpu_get_madt_gicc(cpu);
                class = gicc->efficiency_class;
                bitmap_set(used_classes, class, 1);
        }

        if (bitmap_weight(used_classes, 256) <= 1) {
                pr_debug("Efficiency classes are all equal (=%d). "
                        "No EM registered", class);
                return -EINVAL;
        }

        /*
         * Squeeze efficiency class values on [0:#efficiency_class-1].
         * Values are per spec in [0:255].
         */
        index = 0;
        for_each_set_bit(class, used_classes, 256) {
                for_each_possible_cpu(cpu) {
                        gicc = acpi_cpu_get_madt_gicc(cpu);
                        if (gicc->efficiency_class == class)
                                per_cpu(efficiency_class, cpu) = index;
                }
                index++;
        }
        cppc_cpufreq_driver.register_em = cppc_cpufreq_register_em;

        return 0;
}

static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
{
        struct cppc_cpudata *cpu_data;
        struct em_data_callback em_cb =
                EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);

        cpu_data = policy->driver_data;
        em_dev_register_perf_domain(get_cpu_device(policy->cpu),
                        get_perf_level_count(policy), &em_cb,
                        cpu_data->shared_cpu_map, 0);
}

#else
static int populate_efficiency_class(void)
{
        return 0;
}
#endif

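/*
 * Allocate and fill a cppc_cpudata structure for @cpu: parse its _PSD
 * coordination map and _CPC performance capabilities, then add it to
 * cpu_data_list. Returns NULL on any failure.
 */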
static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
{
        struct cppc_cpudata *cpu_data;
        int ret;

        cpu_data = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
        if (!cpu_data)
                goto out;

        if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
                goto free_cpu;

        ret = acpi_get_psd_map(cpu, cpu_data);
        if (ret) {
                pr_debug("Err parsing CPU%d PSD data: ret:%d\n", cpu, ret);
                goto free_mask;
        }

        ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps);
        if (ret) {
                pr_debug("Err reading CPU%d perf caps: ret:%d\n", cpu, ret);
                goto free_mask;
        }

        list_add(&cpu_data->node, &cpu_data_list);

        return cpu_data;

free_mask:
        free_cpumask_var(cpu_data->shared_cpu_map);
free_cpu:
        kfree(cpu_data);
out:
        return NULL;
}

static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy)
{
        struct cppc_cpudata *cpu_data = policy->driver_data;

        list_del(&cpu_data->node);
        free_cpumask_var(cpu_data->shared_cpu_map);
        kfree(cpu_data);
        policy->driver_data = NULL;
}

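/*
 * Policy initialization: fetch the per-CPU CPPC data, derive the frequency
 * limits from the performance capabilities and start the CPU at its highest
 * performance level.
 */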
static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
        unsigned int cpu = policy->cpu;
        struct cppc_cpudata *cpu_data;
        struct cppc_perf_caps *caps;
        int ret;

        cpu_data = cppc_cpufreq_get_cpu_data(cpu);
        if (!cpu_data) {
                pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu);
                return -ENODEV;
        }
        caps = &cpu_data->perf_caps;
        policy->driver_data = cpu_data;

        /*
         * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
         * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
         */
        policy->min = cppc_perf_to_khz(caps, caps->lowest_nonlinear_perf);
        policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);

        /*
         * Set cpuinfo.min_freq to Lowest to make the full range of performance
         * available if userspace wants to use any perf between lowest & lowest
         * nonlinear perf
         */
        policy->cpuinfo.min_freq = cppc_perf_to_khz(caps, caps->lowest_perf);
        policy->cpuinfo.max_freq = cppc_perf_to_khz(caps, caps->nominal_perf);

        policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
        policy->shared_type = cpu_data->shared_type;

        switch (policy->shared_type) {
        case CPUFREQ_SHARED_TYPE_HW:
        case CPUFREQ_SHARED_TYPE_NONE:
                /* Nothing to be done - we'll have a policy for each CPU */
                break;
        case CPUFREQ_SHARED_TYPE_ANY:
                /*
                 * All CPUs in the domain will share a policy and all cpufreq
                 * operations will use a single cppc_cpudata structure stored
                 * in policy->driver_data.
                 */
                cpumask_copy(policy->cpus, cpu_data->shared_cpu_map);
                break;
        default:
                pr_debug("Unsupported CPU co-ord type: %d\n",
                         policy->shared_type);
                ret = -EFAULT;
                goto out;
        }

        policy->fast_switch_possible = cppc_allow_fast_switch();
        policy->dvfs_possible_from_any_cpu = true;

        /*
         * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
         * is supported.
         */
        if (caps->highest_perf > caps->nominal_perf)
                boost_supported = true;

        /* Set policy->cur to max now. The governors will adjust later. */
        policy->cur = cppc_perf_to_khz(caps, caps->highest_perf);
        cpu_data->perf_ctrls.desired_perf = caps->highest_perf;

        ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
        if (ret) {
                pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
                         caps->highest_perf, cpu, ret);
                goto out;
        }

        cppc_cpufreq_cpu_fie_init(policy);
        return 0;

out:
        cppc_cpufreq_put_cpu_data(policy);
        return ret;
}

static void cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
        struct cppc_cpudata *cpu_data = policy->driver_data;
        struct cppc_perf_caps *caps = &cpu_data->perf_caps;
        unsigned int cpu = policy->cpu;
        int ret;

        cppc_cpufreq_cpu_fie_exit(policy);

        cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;

        ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
        if (ret)
                pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
                         caps->lowest_perf, cpu, ret);

        cppc_cpufreq_put_cpu_data(policy);
}

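/*
 * Compute t1 - t0 for the feedback counters. If t1 <= t0 and t0 still fits
 * in 32 bits, the counter is assumed to be 32 bits wide and to have wrapped
 * once, so the delta is computed in 32-bit arithmetic instead.
 */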
static inline u64 get_delta(u64 t1, u64 t0)
{
        if (t1 > t0 || t0 > ~(u32)0)
                return t1 - t0;

        return (u32)t1 - (u32)t0;
}

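/*
 * Delivered performance over the sampling window:
 *   perf = reference_perf * delta_delivered / delta_reference
 * Returns 0 when either delta is zero, so callers can fall back gracefully.
 */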
static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
                                 struct cppc_perf_fb_ctrs *fb_ctrs_t0,
                                 struct cppc_perf_fb_ctrs *fb_ctrs_t1)
{
        u64 delta_reference, delta_delivered;
        u64 reference_perf;

        reference_perf = fb_ctrs_t0->reference_perf;

        delta_reference = get_delta(fb_ctrs_t1->reference,
                                    fb_ctrs_t0->reference);
        delta_delivered = get_delta(fb_ctrs_t1->delivered,
                                    fb_ctrs_t0->delivered);

        /*
         * Avoid divide-by zero and unchanged feedback counters.
         * Leave it for callers to handle.
         */
        if (!delta_reference || !delta_delivered)
                return 0;

        return (reference_perf * delta_delivered) / delta_reference;
}

static int cppc_get_perf_ctrs_sample(int cpu,
                                     struct cppc_perf_fb_ctrs *fb_ctrs_t0,
                                     struct cppc_perf_fb_ctrs *fb_ctrs_t1)
{
        int ret;

        ret = cppc_get_perf_ctrs(cpu, fb_ctrs_t0);
        if (ret)
                return ret;

        udelay(2); /* 2usec delay between sampling */

        return cppc_get_perf_ctrs(cpu, fb_ctrs_t1);
}

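/*
 * Estimate the current frequency from two feedback-counter samples taken
 * 2us apart; if the counters are unavailable or unchanged, fall back to the
 * desired performance value instead.
 */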
static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
        struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        struct cppc_cpudata *cpu_data;
        u64 delivered_perf;
        int ret;

        if (!policy)
                return 0;

        cpu_data = policy->driver_data;

        cpufreq_cpu_put(policy);

        ret = cppc_get_perf_ctrs_sample(cpu, &fb_ctrs_t0, &fb_ctrs_t1);
        if (ret) {
                if (ret == -EFAULT)
                        /* Any of the associated CPPC regs is 0. */
                        goto out_invalid_counters;
                else
                        return 0;
        }

        delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
                                               &fb_ctrs_t1);
        if (!delivered_perf)
                goto out_invalid_counters;

        return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);

out_invalid_counters:
        /*
         * Feedback counters could be unchanged or 0 when a cpu enters a
         * low-power idle state, e.g. clock-gated or power-gated.
         * Use desired perf for reflecting frequency. Get the latest register
         * value first as some platforms may update the actual delivered perf
         * there; if failed, resort to the cached desired perf.
         */
        if (cppc_get_desired_perf(cpu, &delivered_perf))
                delivered_perf = cpu_data->perf_ctrls.desired_perf;

        return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);
}

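/*
 * Toggle boost by raising policy->max to highest_perf (boost on) or capping
 * it at nominal_perf (boost off), then propagate the new limit via the
 * frequency QoS request.
 */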
static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
{
        struct cppc_cpudata *cpu_data = policy->driver_data;
        struct cppc_perf_caps *caps = &cpu_data->perf_caps;
        int ret;

        if (!boost_supported) {
                pr_err("BOOST not supported by CPU or firmware\n");
                return -EINVAL;
        }

        if (state)
                policy->max = cppc_perf_to_khz(caps, caps->highest_perf);
        else
                policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);
        policy->cpuinfo.max_freq = policy->max;

        ret = freq_qos_update_request(policy->max_freq_req, policy->max);
        if (ret < 0)
                return ret;

        return 0;
}

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
        struct cppc_cpudata *cpu_data = policy->driver_data;

        return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
}
cpufreq_freq_attr_ro(freqdomain_cpus);

static struct freq_attr *cppc_cpufreq_attr[] = {
        &freqdomain_cpus,
        NULL,
};

static struct cpufreq_driver cppc_cpufreq_driver = {
        .flags = CPUFREQ_CONST_LOOPS,
        .verify = cppc_verify_policy,
        .target = cppc_cpufreq_set_target,
        .get = cppc_cpufreq_get_rate,
        .fast_switch = cppc_cpufreq_fast_switch,
        .init = cppc_cpufreq_cpu_init,
        .exit = cppc_cpufreq_cpu_exit,
        .set_boost = cppc_cpufreq_set_boost,
        .attr = cppc_cpufreq_attr,
        .name = "cppc_cpufreq",
};

static int __init cppc_cpufreq_init(void)
{
        int ret;

        if (!acpi_cpc_valid())
                return -ENODEV;

        cppc_freq_invariance_init();
        populate_efficiency_class();

        ret = cpufreq_register_driver(&cppc_cpufreq_driver);
        if (ret)
                cppc_freq_invariance_exit();

        return ret;
}

static inline void free_cpu_data(void)
{
        struct cppc_cpudata *iter, *tmp;

        list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
                free_cpumask_var(iter->shared_cpu_map);
                list_del(&iter->node);
                kfree(iter);
        }
}

static void __exit cppc_cpufreq_exit(void)
{
        cpufreq_unregister_driver(&cppc_cpufreq_driver);
        cppc_freq_invariance_exit();

        free_cpu_data();
}

module_exit(cppc_cpufreq_exit);
MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");

late_initcall(cppc_cpufreq_init);

static const struct acpi_device_id cppc_acpi_ids[] __used = {
        {ACPI_PROCESSOR_DEVICE_HID, },
        {}
};

MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);