// SPDX-License-Identifier: GPL-2.0-only
/*
 * acpi_pad.c ACPI Processor Aggregator Driver
 *
 * Copyright (c) 2009, Intel Corporation.
 */
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <asm/mwait.h>
#include <xen/xen.h>
#define ACPI_PROCESSOR_AGGREGATOR_CLASS	"acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
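/*
 * 0x80 is the device-specific Notify value the platform sends on the
 * Processor Aggregator Device when the number of CPUs that should be
 * forced idle changes, i.e. when _PUR needs to be re-evaluated.
 */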
static DEFINE_MUTEX(isolated_cpus_lock);
static DEFINE_MUTEX(round_robin_lock);

static unsigned long power_saving_mwait_eax;

static unsigned char tsc_detected_unstable;
static unsigned char tsc_marked_unstable;
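/*
 * Probe CPUID for the deepest MWAIT C-state hint usable by the power-saving
 * threads.  The resulting EAX hint encodes the C-state in the upper nibble
 * and the sub-state in the lower nibble; if it ends up 0, MWAIT-based idle
 * injection is unavailable and module init fails.
 */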
static void power_saving_mwait_init(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	int i;

	if (!boot_cpu_has(X86_FEATURE_MWAIT))
		return;
	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
		return;

	edx >>= MWAIT_SUBSTATE_SIZE;
	for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
		if (edx & MWAIT_SUBSTATE_MASK) {
			highest_cstate = i;
			highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
		}
	}
	power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
		(highest_subcstate - 1);

#if defined(CONFIG_X86)
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
	case X86_VENDOR_ZHAOXIN:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			tsc_detected_unstable = 1;
		break;
	default:
		/* TSC could halt in idle */
		tsc_detected_unstable = 1;
	}
#endif
}
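/*
 * Per-thread bookkeeping: cpu_weight[] counts how often each CPU has been
 * chosen, so the round-robin below can pick the least-used CPU;
 * tsk_in_cpu[] maps each power-saving task to the CPU it currently occupies
 * (-1 when unbound); pad_busy_cpus_bits marks CPUs already hosting a task.
 */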
static unsigned long cpu_weight[NR_CPUS];
static int tsk_in_cpu[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS);
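/*
 * Migrate the calling power-saving thread to a new CPU: prefer online CPUs
 * whose hyperthread siblings are not already running a power-saving task,
 * then break ties by picking the CPU with the smallest historical weight.
 */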
static void round_robin_cpu(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
	cpumask_var_t tmp;
	int cpu;
	unsigned long min_weight = -1;
	unsigned long preferred_cpu;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return;

	mutex_lock(&round_robin_lock);
	cpumask_clear(tmp);
	for_each_cpu(cpu, pad_busy_cpus)
		cpumask_or(tmp, tmp, topology_sibling_cpumask(cpu));
	cpumask_andnot(tmp, cpu_online_mask, tmp);
	/* avoid HT siblings if possible */
	if (cpumask_empty(tmp))
		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
	if (cpumask_empty(tmp)) {
		mutex_unlock(&round_robin_lock);
		free_cpumask_var(tmp);
		return;
	}
	for_each_cpu(cpu, tmp) {
		if (cpu_weight[cpu] < min_weight) {
			min_weight = cpu_weight[cpu];
			preferred_cpu = cpu;
		}
	}

	if (tsk_in_cpu[tsk_index] != -1)
		cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = preferred_cpu;
	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
	cpu_weight[preferred_cpu]++;
	mutex_unlock(&round_robin_lock);

	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));

	free_cpumask_var(tmp);
}
static void exit_round_robin(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);

	cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = -1;
}
static unsigned int idle_pct = 5; /* percentage */
static unsigned int round_robin_time = 1; /* second */
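/*
 * Main body of each "acpi_pad/N" kthread: run at low FIFO realtime
 * priority, periodically rotate to a new CPU, and spend (100 - idle_pct)%
 * of each window forcing the CPU into a deep MWAIT C-state with the timer
 * tick in broadcast mode, so the CPU looks idle to the platform.
 */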
static int power_saving_thread(void *data)
{
	int do_sleep;
	unsigned int tsk_index = (unsigned long)data;
	u64 last_jiffies = 0;

	sched_set_fifo_low(current);

	while (!kthread_should_stop()) {
		unsigned long expire_time;

		/* round robin to cpus */
		expire_time = last_jiffies + round_robin_time * HZ;
		if (time_before(expire_time, jiffies)) {
			last_jiffies = jiffies;
			round_robin_cpu(tsk_index);
		}

		do_sleep = 0;

		expire_time = jiffies + HZ * (100 - idle_pct) / 100;

		while (!need_resched()) {
			if (tsc_detected_unstable && !tsc_marked_unstable) {
				/* TSC could halt in idle, so notify users */
				mark_tsc_unstable("TSC halts in idle");
				tsc_marked_unstable = 1;
			}
			local_irq_disable();
			tick_broadcast_enable();
			tick_broadcast_enter();
			stop_critical_timings();

			mwait_idle_with_hints(power_saving_mwait_eax, 1);

			start_critical_timings();
			tick_broadcast_exit();
			local_irq_enable();

			if (time_before(expire_time, jiffies)) {
				do_sleep = 1;
				break;
			}
		}

		/*
		 * current sched_rt has threshold for rt task running time.
		 * When a rt task uses 95% CPU time, the rt thread will be
		 * scheduled out for 5% CPU time to not starve other tasks. But
		 * the mechanism only works when all CPUs have RT task running,
		 * as if one CPU hasn't RT task, RT task from other CPUs will
		 * borrow CPU time from this CPU and cause RT task use > 95%
		 * CPU time. To make 'avoid starvation' work, takes a nap here.
		 */
		if (unlikely(do_sleep))
			schedule_timeout_killable(HZ * idle_pct / 100);

		/* If an external event has set the need_resched flag, then
		 * we need to deal with it, or this loop will continue to
		 * spin without calling __mwait().
		 */
		if (unlikely(need_resched()))
			schedule();
	}

	exit_round_robin(tsk_index);
	return 0;
}
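/*
 * One power-saving kthread is created per CPU that firmware asks us to
 * idle; ps_tsk_num tracks how many are currently running.
 */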
static struct task_struct *ps_tsks[NR_CPUS];
static unsigned int ps_tsk_num;
static int create_power_saving_task(void)
{
	int rc;

	ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
		(void *)(unsigned long)ps_tsk_num,
		"acpi_pad/%d", ps_tsk_num);

	if (IS_ERR(ps_tsks[ps_tsk_num])) {
		rc = PTR_ERR(ps_tsks[ps_tsk_num]);
		ps_tsks[ps_tsk_num] = NULL;
	} else {
		rc = 0;
		ps_tsk_num++;
	}

	return rc;
}
static void destroy_power_saving_task(void)
{
	if (ps_tsk_num > 0) {
		ps_tsk_num--;
		kthread_stop(ps_tsks[ps_tsk_num]);
		ps_tsks[ps_tsk_num] = NULL;
	}
}
static void set_power_saving_task_num(unsigned int num)
{
	if (num > ps_tsk_num) {
		while (ps_tsk_num < num) {
			if (create_power_saving_task())
				return;
		}
	} else if (num < ps_tsk_num) {
		while (ps_tsk_num > num)
			destroy_power_saving_task();
	}
}
static void acpi_pad_idle_cpus(unsigned int num_cpus)
{
	cpus_read_lock();

	num_cpus = min_t(unsigned int, num_cpus, num_online_cpus());
	set_power_saving_task_num(num_cpus);

	cpus_read_unlock();
}
static uint32_t acpi_pad_idle_cpus_num(void)
{
	return ps_tsk_num;
}
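/*
 * sysfs interface: "idlecpus" shows the mask of forced-idle CPUs and sets
 * their number, "idlepct" sets the percentage of time each such CPU is
 * yielded back to the scheduler, and "rrtime" sets the round-robin rotation
 * period in seconds.  Writes are serialized by isolated_cpus_lock.
 */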
static ssize_t acpi_pad_rrtime_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))
		return -EINVAL;
	if (num < 1 || num >= 100)
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	round_robin_time = num;
	mutex_unlock(&isolated_cpus_lock);
	return count;
}
static ssize_t acpi_pad_rrtime_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", round_robin_time);
}
static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR,
	acpi_pad_rrtime_show,
	acpi_pad_rrtime_store);
static ssize_t acpi_pad_idlepct_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))
		return -EINVAL;
	if (num < 1 || num >= 100)
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	idle_pct = num;
	mutex_unlock(&isolated_cpus_lock);
	return count;
}
static ssize_t acpi_pad_idlepct_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", idle_pct);
}
static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR,
	acpi_pad_idlepct_show,
	acpi_pad_idlepct_store);
static ssize_t acpi_pad_idlecpus_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	acpi_pad_idle_cpus(num);
	mutex_unlock(&isolated_cpus_lock);
	return count;
}
static ssize_t acpi_pad_idlecpus_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(false, buf,
				       to_cpumask(pad_busy_cpus_bits));
}
static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR,
	acpi_pad_idlecpus_show,
	acpi_pad_idlecpus_store);
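/*
 * Example usage (illustrative only; the "ACPI000C:00" instance name and
 * exact sysfs path depend on the platform's ACPI namespace):
 *
 *   # force two CPUs idle, giving 10% of each back to the scheduler
 *   echo 2  > /sys/bus/acpi/devices/ACPI000C:00/idlecpus
 *   echo 10 > /sys/bus/acpi/devices/ACPI000C:00/idlepct
 */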
static int acpi_pad_add_sysfs(struct acpi_device *device)
{
	int result;

	result = device_create_file(&device->dev, &dev_attr_idlecpus);
	if (result)
		return -ENODEV;
	result = device_create_file(&device->dev, &dev_attr_idlepct);
	if (result) {
		device_remove_file(&device->dev, &dev_attr_idlecpus);
		return -ENODEV;
	}
	result = device_create_file(&device->dev, &dev_attr_rrtime);
	if (result) {
		device_remove_file(&device->dev, &dev_attr_idlecpus);
		device_remove_file(&device->dev, &dev_attr_idlepct);
		return -ENODEV;
	}
	return 0;
}
static void acpi_pad_remove_sysfs(struct acpi_device *device)
{
	device_remove_file(&device->dev, &dev_attr_idlecpus);
	device_remove_file(&device->dev, &dev_attr_idlepct);
	device_remove_file(&device->dev, &dev_attr_rrtime);
}
/*
 * Query firmware how many CPUs should be idle
 * return -1 on failure
 */
static int acpi_pad_pur(acpi_handle handle)
{
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *package;
	int num = -1;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
		return num;

	if (!buffer.length || !buffer.pointer)
		return num;

	package = buffer.pointer;

	if (package->type == ACPI_TYPE_PACKAGE &&
		package->package.count == 2 &&
		package->package.elements[0].integer.value == 1) /* rev 1 */

		num = package->package.elements[1].integer.value;

	kfree(buffer.pointer);
	return num;
}
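/*
 * For reference, a conforming _PUR object returns a two-element package of
 * {revision, number of processors to idle}.  A hypothetical ASL sketch of
 * what the check above accepts:
 *
 *   Name (_PUR, Package () { 1, 2 })	// rev 1, request 2 idle CPUs
 */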
static void acpi_pad_handle_notify(acpi_handle handle)
{
	int num_cpus;
	uint32_t idle_cpus;
	struct acpi_buffer param = {
		.length = 4,
		.pointer = (void *)&idle_cpus,
	};

	mutex_lock(&isolated_cpus_lock);
	num_cpus = acpi_pad_pur(handle);
	if (num_cpus < 0) {
		mutex_unlock(&isolated_cpus_lock);
		return;
	}
	acpi_pad_idle_cpus(num_cpus);
	idle_cpus = acpi_pad_idle_cpus_num();
	acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, 0, &param);
	mutex_unlock(&isolated_cpus_lock);
}
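/*
 * Device notify handler: on the 0x80 notification, re-evaluate _PUR, adjust
 * the number of idle-injection threads, report the result back to firmware
 * via _OST, and emit a netlink event for user space.
 */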
static void acpi_pad_notify(acpi_handle handle, u32 event,
	void *data)
{
	struct acpi_device *device = data;

	switch (event) {
	case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
		acpi_pad_handle_notify(handle);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
			dev_name(&device->dev), event, 0);
		break;
	default:
		pr_warn("Unsupported event [0x%x]\n", event);
		break;
	}
}
static int acpi_pad_add(struct acpi_device *device)
{
	acpi_status status;

	strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS);

	if (acpi_pad_add_sysfs(device))
		return -ENODEV;

	status = acpi_install_notify_handler(device->handle,
		ACPI_DEVICE_NOTIFY, acpi_pad_notify, device);
	if (ACPI_FAILURE(status)) {
		acpi_pad_remove_sysfs(device);
		return -ENODEV;
	}

	return 0;
}
static int acpi_pad_remove(struct acpi_device *device)
{
	mutex_lock(&isolated_cpus_lock);
	acpi_pad_idle_cpus(0);
	mutex_unlock(&isolated_cpus_lock);

	acpi_remove_notify_handler(device->handle,
		ACPI_DEVICE_NOTIFY, acpi_pad_notify);
	acpi_pad_remove_sysfs(device);
	return 0;
}
static const struct acpi_device_id pad_device_ids[] = {
	{"ACPI000C", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, pad_device_ids);
static struct acpi_driver acpi_pad_driver = {
	.name = "processor_aggregator",
	.class = ACPI_PROCESSOR_AGGREGATOR_CLASS,
	.ids = pad_device_ids,
	.ops = {
		.add = acpi_pad_add,
		.remove = acpi_pad_remove,
	},
};
static int __init acpi_pad_init(void)
{
	/* Xen ACPI PAD is used when running as Xen Dom0. */
	if (xen_initial_domain())
		return -ENODEV;

	power_saving_mwait_init();
	if (power_saving_mwait_eax == 0)
		return -EINVAL;

	return acpi_bus_register_driver(&acpi_pad_driver);
}
static void __exit acpi_pad_exit(void)
{
	acpi_bus_unregister_driver(&acpi_pad_driver);
}

module_init(acpi_pad_init);
module_exit(acpi_pad_exit);
MODULE_AUTHOR("Shaohua Li <shaohua.li@intel.com>");
MODULE_DESCRIPTION("ACPI Processor Aggregator Driver");
MODULE_LICENSE("GPL");