/*
 * Copyright 2012 by Oracle Inc
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code borrows ideas from https://lkml.org/lkml/2011/11/30/249
 * so many thanks go to Kevin Tian <kevin.tian@intel.com>
 * and Yu Ke <ke.yu@intel.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/syscore_ops.h>
#include <linux/acpi.h>
#include <acpi/processor.h>

#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
static int no_hypercall;
MODULE_PARM_DESC(off, "Inhibit the hypercall.");
module_param_named(off, no_hypercall, int, 0400);
/*
 * Note: Do not convert the acpi_id* below to cpumask_var_t or use cpumask_bit
 * - as those shrink to nr_cpu_bits (which is dependent on possible_cpu), which
 * can be less than what we want to put in. Instead use the 'nr_acpi_bits'
 * which is dynamically computed based on the MADT or x2APIC table.
 */
static unsigned int nr_acpi_bits;
/* Mutex to protect the acpi_ids_done - for CPU hotplug use. */
static DEFINE_MUTEX(acpi_ids_mutex);
/* Which ACPI IDs we have processed from 'struct acpi_processor'. */
static unsigned long *acpi_ids_done;
/* Which ACPI IDs exist in the SSDT/DSDT processor definitions. */
static unsigned long *acpi_id_present;
/* And if there is an _CST definition (or a PBLK) for the ACPI IDs. */
static unsigned long *acpi_id_cst_present;
/* Which ACPI P-state dependency exists for an enumerated processor. */
static struct acpi_psd_package *acpi_psd;
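
/*
 * Package the ACPI C-state data for one processor into a
 * XENPF_set_processor_pminfo platform hypercall (XEN_PM_CX) and hand it
 * to the hypervisor.
 */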
static int push_cxx_to_hypervisor(struct acpi_processor *_pr)
{
	struct xen_platform_op op = {
		.cmd			= XENPF_set_processor_pminfo,
		.interface_version	= XENPF_INTERFACE_VERSION,
		.u.set_pminfo.id	= _pr->acpi_id,
		.u.set_pminfo.type	= XEN_PM_CX,
	};
	struct xen_processor_cx *dst_cx, *dst_cx_states = NULL;
	struct acpi_processor_cx *cx;
	unsigned int i, ok;
	int ret = 0;

	dst_cx_states = kcalloc(_pr->power.count,
				sizeof(struct xen_processor_cx), GFP_KERNEL);
	if (!dst_cx_states)
		return -ENOMEM;
	for (ok = 0, i = 1; i <= _pr->power.count; i++) {
		cx = &_pr->power.states[i];
		if (!cx->valid)
			continue;

		dst_cx = &(dst_cx_states[ok++]);

		dst_cx->reg.space_id = ACPI_ADR_SPACE_SYSTEM_IO;
		if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			dst_cx->reg.bit_width = 8;
			dst_cx->reg.bit_offset = 0;
			dst_cx->reg.access_size = 1;
		} else {
			dst_cx->reg.space_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
			if (cx->entry_method == ACPI_CSTATE_FFH) {
				/* NATIVE_CSTATE_BEYOND_HALT */
				dst_cx->reg.bit_offset = 2;
				dst_cx->reg.bit_width = 1; /* VENDOR_INTEL */
			}
			dst_cx->reg.access_size = 0;
		}
		dst_cx->reg.address = cx->address;

		dst_cx->type = cx->type;
		dst_cx->latency = cx->latency;

		set_xen_guest_handle(dst_cx->dp, NULL);
	}
	if (!ok) {
		pr_debug("No _Cx for ACPI CPU %u\n", _pr->acpi_id);
		kfree(dst_cx_states);
		return -EINVAL;
	}
	op.u.set_pminfo.power.count = ok;
	op.u.set_pminfo.power.flags.bm_control = _pr->flags.bm_control;
	op.u.set_pminfo.power.flags.bm_check = _pr->flags.bm_check;
	op.u.set_pminfo.power.flags.has_cst = _pr->flags.has_cst;
	op.u.set_pminfo.power.flags.power_setup_done =
		_pr->flags.power_setup_done;

	set_xen_guest_handle(op.u.set_pminfo.power.states, dst_cx_states);

	if (!no_hypercall)
		ret = HYPERVISOR_platform_op(&op);

	if (!ret) {
		pr_debug("ACPI CPU%u - C-states uploaded.\n", _pr->acpi_id);
		for (i = 1; i <= _pr->power.count; i++) {
			cx = &_pr->power.states[i];
			if (!cx->valid)
				continue;
			pr_debug(" C%d: %s %d uS\n",
				 cx->type, cx->desc, (u32)cx->latency);
		}
	} else if ((ret != -EINVAL) && (ret != -ENOSYS))
		/* EINVAL means the ACPI ID is incorrect - meaning the ACPI
		 * table is referencing a non-existing CPU - which can happen
		 * with broken ACPI tables. */
		pr_err("(CX): Hypervisor error (%d) for ACPI CPU%u\n",
		       ret, _pr->acpi_id);

	kfree(dst_cx_states);

	return ret;
}
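
/*
 * Copy the ACPI _PSS (performance states) table into a freshly allocated
 * array of xen_processor_px entries and record the state count in the
 * destination xen_processor_performance structure.
 */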
static struct xen_processor_px *
xen_copy_pss_data(struct acpi_processor *_pr,
		  struct xen_processor_performance *dst_perf)
{
	struct xen_processor_px *dst_states = NULL;
	unsigned int i;

	BUILD_BUG_ON(sizeof(struct xen_processor_px) !=
		     sizeof(struct acpi_processor_px));

	dst_states = kcalloc(_pr->performance->state_count,
			     sizeof(struct xen_processor_px), GFP_KERNEL);
	if (!dst_states)
		return ERR_PTR(-ENOMEM);

	dst_perf->state_count = _pr->performance->state_count;
	for (i = 0; i < _pr->performance->state_count; i++) {
		/* Fortunately for us, they are both the same size */
		memcpy(&(dst_states[i]), &(_pr->performance->states[i]),
		       sizeof(struct acpi_processor_px));
	}
	return dst_states;
}
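
/*
 * Copy the _PSD (P-state dependency) information into the destination
 * xen_processor_performance structure, fixing up the shared_type for
 * single-processor domains which the ACPI core does not parse.
 */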
static int xen_copy_psd_data(struct acpi_processor *_pr,
			     struct xen_processor_performance *dst)
{
	struct acpi_psd_package *pdomain;

	BUILD_BUG_ON(sizeof(struct xen_psd_package) !=
		     sizeof(struct acpi_psd_package));

	/* This information is enumerated only if
	 * acpi_processor_preregister_performance() has been called.
	 */
	dst->shared_type = _pr->performance->shared_type;

	pdomain = &(_pr->performance->domain_info);

	/* 'acpi_processor_preregister_performance' does not parse if the
	 * num_processors <= 1, but Xen still requires it. Do it manually here.
	 */
	if (pdomain->num_processors <= 1) {
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			dst->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			dst->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			dst->shared_type = CPUFREQ_SHARED_TYPE_ANY;
	}
	memcpy(&(dst->domain_info), pdomain, sizeof(struct acpi_psd_package));
	return 0;
}
static int xen_copy_pct_data(struct acpi_pct_register *pct,
			     struct xen_pct_register *dst_pct)
{
	/* It would be nice if you could just do 'memcpy(pct, dst_pct') but
	 * sadly the Xen structure did not have the proper padding so the
	 * descriptor field takes two (dst_pct) bytes instead of one (pct).
	 */
	dst_pct->descriptor = pct->descriptor;
	dst_pct->length = pct->length;
	dst_pct->space_id = pct->space_id;
	dst_pct->bit_width = pct->bit_width;
	dst_pct->bit_offset = pct->bit_offset;
	dst_pct->reserved = pct->reserved;
	dst_pct->address = pct->address;

	return 0;
}
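
/*
 * Gather the P-state data for one processor (_PPC limit, _PCT control and
 * status registers, _PSS states and _PSD dependency information) and upload
 * it via a XENPF_set_processor_pminfo (XEN_PM_PX) hypercall.
 */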
static int push_pxx_to_hypervisor(struct acpi_processor *_pr)
{
	int ret = 0;
	struct xen_platform_op op = {
		.cmd			= XENPF_set_processor_pminfo,
		.interface_version	= XENPF_INTERFACE_VERSION,
		.u.set_pminfo.id	= _pr->acpi_id,
		.u.set_pminfo.type	= XEN_PM_PX,
	};
	struct xen_processor_performance *dst_perf;
	struct xen_processor_px *dst_states = NULL;

	dst_perf = &op.u.set_pminfo.perf;

	dst_perf->platform_limit = _pr->performance_platform_limit;
	dst_perf->flags |= XEN_PX_PPC;
	xen_copy_pct_data(&(_pr->performance->control_register),
			  &dst_perf->control_register);
	xen_copy_pct_data(&(_pr->performance->status_register),
			  &dst_perf->status_register);
	dst_perf->flags |= XEN_PX_PCT;
	dst_states = xen_copy_pss_data(_pr, dst_perf);
	if (!IS_ERR_OR_NULL(dst_states)) {
		set_xen_guest_handle(dst_perf->states, dst_states);
		dst_perf->flags |= XEN_PX_PSS;
	}
	if (!xen_copy_psd_data(_pr, dst_perf))
		dst_perf->flags |= XEN_PX_PSD;
	if (dst_perf->flags != (XEN_PX_PSD | XEN_PX_PSS | XEN_PX_PCT | XEN_PX_PPC)) {
		pr_warn("ACPI CPU%u missing some P-state data (%x), skipping\n",
			_pr->acpi_id, dst_perf->flags);
		ret = -ENODEV;
		goto err_free;
	}

	if (!no_hypercall)
		ret = HYPERVISOR_platform_op(&op);

	if (!ret) {
		struct acpi_processor_performance *perf;
		unsigned int i;

		perf = _pr->performance;
		pr_debug("ACPI CPU%u - P-states uploaded.\n", _pr->acpi_id);
		for (i = 0; i < perf->state_count; i++) {
			pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n",
				 (i == perf->state ? '*' : ' '), i,
				 (u32) perf->states[i].core_frequency,
				 (u32) perf->states[i].power,
				 (u32) perf->states[i].transition_latency);
		}
	} else if ((ret != -EINVAL) && (ret != -ENOSYS))
		/* EINVAL means the ACPI ID is incorrect - meaning the ACPI
		 * table is referencing a non-existing CPU - which can happen
		 * with broken ACPI tables. */
		pr_warn("(_PXX): Hypervisor error (%d) for ACPI CPU%u\n",
			ret, _pr->acpi_id);

err_free:
	if (!IS_ERR_OR_NULL(dst_states))
		kfree(dst_states);

	return ret;
}
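
/*
 * Upload both the C-state and P-state data for one processor, using
 * acpi_ids_done (under acpi_ids_mutex) to make sure each ACPI ID is only
 * pushed to the hypervisor once.
 */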
static int upload_pm_data(struct acpi_processor *_pr)
{
	int err = 0;

	mutex_lock(&acpi_ids_mutex);
	if (__test_and_set_bit(_pr->acpi_id, acpi_ids_done)) {
		mutex_unlock(&acpi_ids_mutex);
		return -EBUSY;
	}
	if (_pr->flags.power)
		err = push_cxx_to_hypervisor(_pr);

	if (_pr->performance && _pr->performance->states)
		err |= push_pxx_to_hypervisor(_pr);

	mutex_unlock(&acpi_ids_mutex);
	return err;
}
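
/*
 * Ask the hypervisor (XENPF_get_cpuinfo) for the ACPI ID of every physical
 * CPU and return the largest one found, doubled to leave slack for CPU
 * hotplug.
 */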
static unsigned int __init get_max_acpi_id(void)
{
	struct xenpf_pcpuinfo *info;
	struct xen_platform_op op = {
		.cmd = XENPF_get_cpuinfo,
		.interface_version = XENPF_INTERFACE_VERSION,
	};
	int ret = 0;
	unsigned int i, last_cpu, max_acpi_id = 0;

	info = &op.u.pcpu_info;
	info->xen_cpuid = 0;

	ret = HYPERVISOR_platform_op(&op);
	if (ret)
		return NR_CPUS;

	/* The max_present is the same regardless of the xen_cpuid */
	last_cpu = op.u.pcpu_info.max_present;
	for (i = 0; i <= last_cpu; i++) {
		info->xen_cpuid = i;
		ret = HYPERVISOR_platform_op(&op);
		if (ret)
			continue;
		max_acpi_id = max(info->acpi_id, max_acpi_id);
	}
	max_acpi_id *= 2; /* Slack for CPU hotplug support. */
	pr_debug("Max ACPI ID: %u\n", max_acpi_id);
	return max_acpi_id;
}
/*
 * The read_acpi_id and check_acpi_ids are there to support the Xen
 * oddity of virtual CPUs != physical CPUs in the initial domain.
 * The user can supply 'xen_max_vcpus=X' on the Xen hypervisor line
 * which will limit the number of CPUs the initial domain can see.
 * In general that is OK, except it plays havoc with any of the
 * for_each_[present|online]_cpu macros which are bound to the virtual
 * CPU count.
 */
static acpi_status
read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
{
	u32 acpi_id;
	acpi_status status;
	acpi_object_type acpi_type;
	unsigned long long tmp;
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
	acpi_io_address pblk = 0;

	status = acpi_get_type(handle, &acpi_type);
	if (ACPI_FAILURE(status))
		return AE_OK;

	switch (acpi_type) {
	case ACPI_TYPE_PROCESSOR:
		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status))
			return AE_OK;
		acpi_id = object.processor.proc_id;
		pblk = object.processor.pblk_address;
		break;
	case ACPI_TYPE_DEVICE:
		status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
		if (ACPI_FAILURE(status))
			return AE_OK;
		acpi_id = tmp;
		break;
	default:
		return AE_OK;
	}
	/* There are more ACPI Processor objects than in x2APIC or MADT.
	 * This can happen with incorrect ACPI SSDT declarations. */
	if (acpi_id >= nr_acpi_bits) {
		pr_debug("max acpi id %u, trying to set %u\n",
			 nr_acpi_bits - 1, acpi_id);
		return AE_OK;
	}
	/* OK, there is an ACPI Processor object */
	__set_bit(acpi_id, acpi_id_present);

	pr_debug("ACPI CPU%u w/ PBLK:0x%lx\n", acpi_id, (unsigned long)pblk);

	/* It has P-state dependencies */
	if (!acpi_processor_get_psd(handle, &acpi_psd[acpi_id])) {
		pr_debug("ACPI CPU%u w/ PST:coord_type = %llu domain = %llu\n",
			 acpi_id, acpi_psd[acpi_id].coord_type,
			 acpi_psd[acpi_id].domain);
	}

	status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (!pblk)
			return AE_OK;
	}
	/* .. and it has a C-state */
	__set_bit(acpi_id, acpi_id_cst_present);

	return AE_OK;
}
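
/*
 * Walk the ACPI namespace (and the processor device objects) once to find
 * every processor the platform declares, then upload PM data - using the
 * backup acpi_processor as a template - for any ACPI IDs that the (possibly
 * vCPU-limited) initial domain did not cover.
 */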
static int check_acpi_ids(struct acpi_processor *pr_backup)
{
	if (!pr_backup)
		return -ENODEV;

	if (acpi_id_present && acpi_id_cst_present)
		/* OK, done this once .. skip to uploading */
		goto upload;

	/* All online CPUs have been processed at this stage. Now verify
	 * whether in fact "online CPUs" == physical CPUs.
	 */
	acpi_id_present = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
	if (!acpi_id_present)
		return -ENOMEM;

	acpi_id_cst_present = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
	if (!acpi_id_cst_present) {
		kfree(acpi_id_present);
		return -ENOMEM;
	}

	acpi_psd = kcalloc(nr_acpi_bits, sizeof(struct acpi_psd_package),
			   GFP_KERNEL);
	if (!acpi_psd) {
		kfree(acpi_id_present);
		kfree(acpi_id_cst_present);
		return -ENOMEM;
	}

	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    read_acpi_id, NULL, NULL, NULL);
	acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, read_acpi_id, NULL, NULL);

upload:
	if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) {
		unsigned int i;

		for_each_set_bit(i, acpi_id_present, nr_acpi_bits) {
			pr_backup->acpi_id = i;
			/* Mask out C-states if there are no _CST or PBLK */
			pr_backup->flags.power = test_bit(i, acpi_id_cst_present);
			/* num_entries is non-zero if we evaluated _PSD */
			if (acpi_psd[i].num_entries) {
				memcpy(&pr_backup->performance->domain_info,
				       &acpi_psd[i],
				       sizeof(struct acpi_psd_package));
			}
			(void)upload_pm_data(pr_backup);
		}
	}

	return 0;
}
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}
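
/*
 * Upload the PM data for every possible CPU the initial domain knows about,
 * keeping a copy of the first acpi_processor found so check_acpi_ids() can
 * use it as a template for processors dom0 cannot see.
 */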
static int xen_upload_processor_pm_data(void)
{
	struct acpi_processor *pr_backup = NULL;
	unsigned int i;
	int rc = 0;

	pr_info("Uploading Xen processor PM info\n");

	for_each_possible_cpu(i) {
		struct acpi_processor *_pr;

		_pr = per_cpu(processors, i /* APIC ID */);
		if (!_pr)
			continue;

		if (!pr_backup) {
			pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
			if (pr_backup)
				memcpy(pr_backup, _pr, sizeof(struct acpi_processor));
		}
		(void)upload_pm_data(_pr);
	}

	rc = check_acpi_ids(pr_backup);

	kfree(pr_backup);

	return rc;
}
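
/*
 * On resume, clear the 'already uploaded' bitmap and upload the PM data to
 * the hypervisor again from process context.
 */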
static void xen_acpi_processor_resume_worker(struct work_struct *dummy)
{
	int rc;

	bitmap_zero(acpi_ids_done, nr_acpi_bits);

	rc = xen_upload_processor_pm_data();
	if (rc != 0)
		pr_info("ACPI data upload failed, error = %d\n", rc);
}

static void xen_acpi_processor_resume(void)
{
	static DECLARE_WORK(wq, xen_acpi_processor_resume_worker);

	/*
	 * xen_upload_processor_pm_data() calls non-atomic code.
	 * However, the context for xen_acpi_processor_resume is syscore
	 * with only the boot CPU online and in an atomic context.
	 *
	 * So defer the upload for some point safer.
	 */
	schedule_work(&wq);
}

static struct syscore_ops xap_syscore_ops = {
	.resume	= xen_acpi_processor_resume,
};
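
/*
 * Module initialization: size the ACPI ID bitmaps from the hypervisor's view
 * of the platform, gather the P-state data via the ACPI core, upload
 * everything, and register the syscore resume hook.
 */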
static int __init xen_acpi_processor_init(void)
{
	unsigned int i;
	int rc;

	if (!xen_initial_domain())
		return -ENODEV;

	nr_acpi_bits = get_max_acpi_id() + 1;
	acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
	if (!acpi_ids_done)
		return -ENOMEM;

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data\n");
		kfree(acpi_ids_done);
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {
			rc = -ENOMEM;
			goto err_out;
		}
	}

	/* Do initialization in ACPI core. It is OK to fail here. */
	(void)acpi_processor_preregister_performance(acpi_perf_data);

	for_each_possible_cpu(i) {
		struct acpi_processor *pr;
		struct acpi_processor_performance *perf;

		pr = per_cpu(processors, i);
		perf = per_cpu_ptr(acpi_perf_data, i);
		if (!pr)
			continue;

		pr->performance = perf;
		rc = acpi_processor_get_performance_info(pr);
		if (rc)
			goto err_out;
	}

	rc = xen_upload_processor_pm_data();
	if (rc)
		goto err_unregister;

	register_syscore_ops(&xap_syscore_ops);

	return 0;

err_unregister:
	for_each_possible_cpu(i)
		acpi_processor_unregister_performance(i);

err_out:
	/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
	free_acpi_perf_data();
	kfree(acpi_ids_done);
	return rc;
}
static void __exit xen_acpi_processor_exit(void)
{
	int i;

	unregister_syscore_ops(&xap_syscore_ops);
	kfree(acpi_ids_done);
	kfree(acpi_id_present);
	kfree(acpi_id_cst_present);
	kfree(acpi_psd);
	for_each_possible_cpu(i)
		acpi_processor_unregister_performance(i);

	free_acpi_perf_data();
}

MODULE_AUTHOR("Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>");
MODULE_DESCRIPTION("Xen ACPI Processor P-states (and Cx) driver which uploads PM data to Xen hypervisor");
MODULE_LICENSE("GPL");

/* We want to be loaded before the CPU freq scaling drivers are loaded.
 * They are loaded in late_initcall. */
device_initcall(xen_acpi_processor_init);
module_exit(xen_acpi_processor_exit);