#include "qemu/osdep.h"
#include "migration/vmstate.h"
#include "hw/acpi/cpu.h"
#include "hw/core/cpu.h"
#include "qapi/error.h"
#include "qapi/qapi-events-acpi.h"
#include "trace.h"
#include "sysemu/numa.h"

#define ACPI_CPU_SELECTOR_OFFSET_WR 0
#define ACPI_CPU_FLAGS_OFFSET_RW 4
#define ACPI_CPU_CMD_OFFSET_WR 5
#define ACPI_CPU_CMD_DATA_OFFSET_RW 8
#define ACPI_CPU_CMD_DATA2_OFFSET_R 0

#define OVMF_CPUHP_SMI_CMD 4
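
/*
 * Commands a guest writes to ACPI_CPU_CMD_OFFSET_WR; the result of the last
 * command is read back through the CMD_DATA/CMD_DATA2 registers of the
 * currently selected CPU.
 */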
enum {
    CPHP_GET_NEXT_CPU_WITH_EVENT_CMD = 0,
    CPHP_OST_EVENT_CMD = 1,
    CPHP_OST_STATUS_CMD = 2,
    CPHP_GET_CPU_ID_CMD = 3,
    CPHP_CMD_MAX
};

static ACPIOSTInfo *acpi_cpu_device_status(int idx, AcpiCpuStatus *cdev)
{
    ACPIOSTInfo *info = g_new0(ACPIOSTInfo, 1);

    info->slot_type = ACPI_SLOT_TYPE_CPU;
    info->slot = g_strdup_printf("%d", idx);
    info->source = cdev->ost_event;
    info->status = cdev->ost_status;
    if (cdev->cpu) {
        DeviceState *dev = DEVICE(cdev->cpu);
        if (dev->id) {
            info->device = g_strdup(dev->id);
        }
    }
    return info;
}

void acpi_cpu_ospm_status(CPUHotplugState *cpu_st, ACPIOSTInfoList ***list)
{
    ACPIOSTInfoList ***tail = list;
    int i;

    for (i = 0; i < cpu_st->dev_count; i++) {
        QAPI_LIST_APPEND(*tail, acpi_cpu_device_status(i, &cpu_st->devs[i]));
    }
}
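
/*
 * Guest reads of the hotplug register block: return the packed status flags
 * of the currently selected CPU, or command-specific data for the last
 * command written (next CPU with an event, architecture-specific CPU ID).
 */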
static uint64_t cpu_hotplug_rd(void *opaque, hwaddr addr, unsigned size)
{
    uint64_t val = 0;
    CPUHotplugState *cpu_st = opaque;
    AcpiCpuStatus *cdev;

    if (cpu_st->selector >= cpu_st->dev_count) {
        return val;
    }

    cdev = &cpu_st->devs[cpu_st->selector];
    switch (addr) {
    case ACPI_CPU_FLAGS_OFFSET_RW: /* pack and return is_* fields */
        val |= cdev->cpu ? 1 : 0;
        val |= cdev->is_inserting ? 2 : 0;
        val |= cdev->is_removing ? 4 : 0;
        val |= cdev->fw_remove ? 16 : 0;
        trace_cpuhp_acpi_read_flags(cpu_st->selector, val);
        break;
    case ACPI_CPU_CMD_DATA_OFFSET_RW:
        switch (cpu_st->command) {
        case CPHP_GET_NEXT_CPU_WITH_EVENT_CMD:
            val = cpu_st->selector;
            break;
        case CPHP_GET_CPU_ID_CMD:
            val = cdev->arch_id & 0xFFFFFFFF;
            break;
        default:
            break;
        }
        trace_cpuhp_acpi_read_cmd_data(cpu_st->selector, val);
        break;
    case ACPI_CPU_CMD_DATA2_OFFSET_R:
        switch (cpu_st->command) {
        case CPHP_GET_NEXT_CPU_WITH_EVENT_CMD:
            val = 0;
            break;
        case CPHP_GET_CPU_ID_CMD:
            val = cdev->arch_id >> 32;
            break;
        default:
            break;
        }
        trace_cpuhp_acpi_read_cmd_data2(cpu_st->selector, val);
        break;
    default:
        break;
    }
    return val;
}
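
/*
 * Guest writes: select a CPU, clear its insert/remove events, trigger an
 * OS-initiated or firmware-assisted eject, issue a command, or pass _OST
 * event/status data for the selected CPU.
 */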
static void cpu_hotplug_wr(void *opaque, hwaddr addr, uint64_t data,
                           unsigned int size)
{
    CPUHotplugState *cpu_st = opaque;
    AcpiCpuStatus *cdev;
    ACPIOSTInfo *info;

    assert(cpu_st->dev_count);

    if (addr) {
        if (cpu_st->selector >= cpu_st->dev_count) {
            trace_cpuhp_acpi_invalid_idx_selected(cpu_st->selector);
            return;
        }
    }

    switch (addr) {
    case ACPI_CPU_SELECTOR_OFFSET_WR: /* current CPU selector */
        cpu_st->selector = data;
        trace_cpuhp_acpi_write_idx(cpu_st->selector);
        break;
    case ACPI_CPU_FLAGS_OFFSET_RW: /* set is_* fields */
        cdev = &cpu_st->devs[cpu_st->selector];
        if (data & 2) { /* clear insert event */
            cdev->is_inserting = false;
            trace_cpuhp_acpi_clear_inserting_evt(cpu_st->selector);
        } else if (data & 4) { /* clear remove event */
            cdev->is_removing = false;
            trace_cpuhp_acpi_clear_remove_evt(cpu_st->selector);
        } else if (data & 8) {
            DeviceState *dev = NULL;
            HotplugHandler *hotplug_ctrl = NULL;

            if (!cdev->cpu || cdev->cpu == first_cpu) {
                trace_cpuhp_acpi_ejecting_invalid_cpu(cpu_st->selector);
                break;
            }

            trace_cpuhp_acpi_ejecting_cpu(cpu_st->selector);
            dev = DEVICE(cdev->cpu);
            hotplug_ctrl = qdev_get_hotplug_handler(dev);
            hotplug_handler_unplug(hotplug_ctrl, dev, NULL);
            object_unparent(OBJECT(dev));
            cdev->fw_remove = false;
        } else if (data & 16) {
            if (!cdev->cpu || cdev->cpu == first_cpu) {
                trace_cpuhp_acpi_fw_remove_invalid_cpu(cpu_st->selector);
                break;
            }
            trace_cpuhp_acpi_fw_remove_cpu(cpu_st->selector);
            cdev->fw_remove = true;
        }
        break;
    case ACPI_CPU_CMD_OFFSET_WR:
        trace_cpuhp_acpi_write_cmd(cpu_st->selector, data);
        if (data < CPHP_CMD_MAX) {
            cpu_st->command = data;
            if (cpu_st->command == CPHP_GET_NEXT_CPU_WITH_EVENT_CMD) {
                uint32_t iter = cpu_st->selector;

                do {
                    cdev = &cpu_st->devs[iter];
                    if (cdev->is_inserting || cdev->is_removing ||
                        cdev->fw_remove) {
                        cpu_st->selector = iter;
                        trace_cpuhp_acpi_cpu_has_events(cpu_st->selector,
                            cdev->is_inserting, cdev->is_removing);
                        break;
                    }
                    iter = iter + 1 < cpu_st->dev_count ? iter + 1 : 0;
                } while (iter != cpu_st->selector);
            }
        }
        break;
    case ACPI_CPU_CMD_DATA_OFFSET_RW:
        switch (cpu_st->command) {
        case CPHP_OST_EVENT_CMD: {
            cdev = &cpu_st->devs[cpu_st->selector];
            cdev->ost_event = data;
            trace_cpuhp_acpi_write_ost_ev(cpu_st->selector, cdev->ost_event);
            break;
        }
        case CPHP_OST_STATUS_CMD: {
            cdev = &cpu_st->devs[cpu_st->selector];
            cdev->ost_status = data;
            info = acpi_cpu_device_status(cpu_st->selector, cdev);
            qapi_event_send_acpi_device_ost(info);
            qapi_free_ACPIOSTInfo(info);
            trace_cpuhp_acpi_write_ost_status(cpu_st->selector,
                                              cdev->ost_status);
            break;
        }
        default:
            break;
        }
        break;
    default:
        break;
    }
}

static const MemoryRegionOps cpu_hotplug_ops = {
    .read = cpu_hotplug_rd,
    .write = cpu_hotplug_wr,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};
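
/*
 * Map the hotplug register block at @base_addr and populate one
 * AcpiCpuStatus entry per possible CPU reported by the machine.
 */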
void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
                         CPUHotplugState *state, hwaddr base_addr)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    const CPUArchIdList *id_list;
    int i;

    assert(mc->possible_cpu_arch_ids);
    id_list = mc->possible_cpu_arch_ids(machine);
    state->dev_count = id_list->len;
    state->devs = g_new0(typeof(*state->devs), state->dev_count);
    for (i = 0; i < id_list->len; i++) {
        state->devs[i].cpu = CPU(id_list->cpus[i].cpu);
        state->devs[i].arch_id = id_list->cpus[i].arch_id;
    }
    memory_region_init_io(&state->ctrl_reg, owner, &cpu_hotplug_ops, state,
                          "acpi-cpu-hotplug", ACPI_CPU_HOTPLUG_REG_LEN);
    memory_region_add_subregion(as, base_addr, &state->ctrl_reg);
}
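
/*
 * Look up the AcpiCpuStatus slot tracking @dev by its architecture-specific
 * CPU ID; returns NULL if the device is not among the possible CPUs.
 */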
static AcpiCpuStatus *get_cpu_status(CPUHotplugState *cpu_st, DeviceState *dev)
{
    CPUClass *k = CPU_GET_CLASS(dev);
    uint64_t cpu_arch_id = k->get_arch_id(CPU(dev));
    int i;

    for (i = 0; i < cpu_st->dev_count; i++) {
        if (cpu_arch_id == cpu_st->devs[i].arch_id) {
            return &cpu_st->devs[i];
        }
    }
    return NULL;
}

void acpi_cpu_plug_cb(HotplugHandler *hotplug_dev,
                      CPUHotplugState *cpu_st, DeviceState *dev, Error **errp)
{
    AcpiCpuStatus *cdev;

    cdev = get_cpu_status(cpu_st, dev);
    if (!cdev) {
        return;
    }

    cdev->cpu = CPU(dev);
    if (dev->hotplugged) {
        cdev->is_inserting = true;
        acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS);
    }
}

void acpi_cpu_unplug_request_cb(HotplugHandler *hotplug_dev,
                                CPUHotplugState *cpu_st,
                                DeviceState *dev, Error **errp)
{
    AcpiCpuStatus *cdev;

    cdev = get_cpu_status(cpu_st, dev);
    if (!cdev) {
        return;
    }

    cdev->is_removing = true;
    acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS);
}

void acpi_cpu_unplug_cb(CPUHotplugState *cpu_st,
                        DeviceState *dev, Error **errp)
{
    AcpiCpuStatus *cdev;

    cdev = get_cpu_status(cpu_st, dev);
    if (!cdev) {
        return;
    }

    cdev->cpu = NULL;
}

static const VMStateDescription vmstate_cpuhp_sts = {
    .name = "CPU hotplug device state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_BOOL(is_inserting, AcpiCpuStatus),
        VMSTATE_BOOL(is_removing, AcpiCpuStatus),
        VMSTATE_UINT32(ost_event, AcpiCpuStatus),
        VMSTATE_UINT32(ost_status, AcpiCpuStatus),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_hotplug = {
    .name = "CPU hotplug state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(selector, CPUHotplugState),
        VMSTATE_UINT8(command, CPUHotplugState),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(devs, CPUHotplugState, dev_count,
                                             vmstate_cpuhp_sts, AcpiCpuStatus),
        VMSTATE_END_OF_LIST()
    }
};

#define CPU_NAME_FMT      "C%.03X"
#define CPUHP_RES_DEVICE  "PRES"
#define CPU_LOCK          "CPLK"
#define CPU_STS_METHOD    "CSTA"
#define CPU_SCAN_METHOD   "CSCN"
#define CPU_NOTIFY_METHOD "CTFY"
#define CPU_EJECT_METHOD  "CEJ0"
#define CPU_OST_METHOD    "COST"
#define CPU_ADDED_LIST    "CNEW"

#define CPU_ENABLED       "CPEN"
#define CPU_SELECTOR      "CSEL"
#define CPU_COMMAND       "CCMD"
#define CPU_DATA          "CDAT"
#define CPU_INSERT_EVENT  "CINS"
#define CPU_REMOVE_EVENT  "CRMV"
#define CPU_EJECT_EVENT   "CEJ0"
#define CPU_FW_EJECT_EVENT "CEJF"
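
/*
 * Generate the CPU hotplug AML: the \_SB.PRES control device backed by the
 * register block above, the \_SB.CPUS container with one object per possible
 * CPU, and the CSCN/CTFY/CEJ0/COST helper methods invoked from the event
 * handler method.
 */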
void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
                    build_madt_cpu_fn build_madt_cpu, hwaddr base_addr,
                    const char *res_root,
                    const char *event_handler_method,
                    AmlRegionSpace rs)
{
    Aml *ifctx;
    Aml *field;
    Aml *method;
    Aml *cpu_ctrl_dev;
    Aml *cpus_dev;
    Aml *zero = aml_int(0);
    Aml *one = aml_int(1);
    Aml *sb_scope = aml_scope("_SB");
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(machine);
    char *cphp_res_path = g_strdup_printf("%s." CPUHP_RES_DEVICE, res_root);

    cpu_ctrl_dev = aml_device("%s", cphp_res_path);
    {
        Aml *crs;

        aml_append(cpu_ctrl_dev,
            aml_name_decl("_HID", aml_eisaid("PNP0A06")));
        aml_append(cpu_ctrl_dev,
            aml_name_decl("_UID", aml_string("CPU Hotplug resources")));
        aml_append(cpu_ctrl_dev, aml_mutex(CPU_LOCK, 0));

        assert((rs == AML_SYSTEM_IO) || (rs == AML_SYSTEM_MEMORY));

        crs = aml_resource_template();
        if (rs == AML_SYSTEM_IO) {
            aml_append(crs, aml_io(AML_DECODE16, base_addr, base_addr, 1,
                                   ACPI_CPU_HOTPLUG_REG_LEN));
        } else if (rs == AML_SYSTEM_MEMORY) {
            aml_append(crs, aml_memory32_fixed(base_addr,
                                   ACPI_CPU_HOTPLUG_REG_LEN, AML_READ_WRITE));
        }

        aml_append(cpu_ctrl_dev, aml_name_decl("_CRS", crs));

        /* declare CPU hotplug MMIO region with related access fields */
        aml_append(cpu_ctrl_dev,
            aml_operation_region("PRST", rs, aml_int(base_addr),
                                 ACPI_CPU_HOTPLUG_REG_LEN));

        field = aml_field("PRST", AML_BYTE_ACC, AML_NOLOCK,
                          AML_WRITE_AS_ZEROS);
        aml_append(field, aml_reserved_field(ACPI_CPU_FLAGS_OFFSET_RW * 8));
        /* 1 if enabled, read only */
        aml_append(field, aml_named_field(CPU_ENABLED, 1));
        /* (read) 1 if it has an insert event. (write) 1 to clear event */
        aml_append(field, aml_named_field(CPU_INSERT_EVENT, 1));
        /* (read) 1 if it has a remove event. (write) 1 to clear event */
        aml_append(field, aml_named_field(CPU_REMOVE_EVENT, 1));
        /* initiates device eject, write only */
        aml_append(field, aml_named_field(CPU_EJECT_EVENT, 1));
        /* tell firmware to do device eject, write only */
        aml_append(field, aml_named_field(CPU_FW_EJECT_EVENT, 1));
        aml_append(field, aml_reserved_field(3));
        aml_append(field, aml_named_field(CPU_COMMAND, 8));
        aml_append(cpu_ctrl_dev, field);

        field = aml_field("PRST", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
        /* CPU selector, write only */
        aml_append(field, aml_named_field(CPU_SELECTOR, 32));
        /* flags + cmd + 2byte align */
        aml_append(field, aml_reserved_field(4 * 8));
        aml_append(field, aml_named_field(CPU_DATA, 32));
        aml_append(cpu_ctrl_dev, field);

        if (opts.has_legacy_cphp) {
            method = aml_method("_INI", 0, AML_SERIALIZED);
            /* Switch off the legacy CPU hotplug HW and use the new one;
             * after reboot the system is in the new mode, and writing 0
             * to CPU_SELECTOR selects the BSP, which is a NOP at the
             * time _INI is called */
            aml_append(method, aml_store(zero, aml_name(CPU_SELECTOR)));
            aml_append(cpu_ctrl_dev, method);
        }
    }
    aml_append(sb_scope, cpu_ctrl_dev);

    cpus_dev = aml_device("\\_SB.CPUS");
    {
        int i;
        Aml *ctrl_lock = aml_name("%s.%s", cphp_res_path, CPU_LOCK);
        Aml *cpu_selector = aml_name("%s.%s", cphp_res_path, CPU_SELECTOR);
        Aml *is_enabled = aml_name("%s.%s", cphp_res_path, CPU_ENABLED);
        Aml *cpu_cmd = aml_name("%s.%s", cphp_res_path, CPU_COMMAND);
        Aml *cpu_data = aml_name("%s.%s", cphp_res_path, CPU_DATA);
        Aml *ins_evt = aml_name("%s.%s", cphp_res_path, CPU_INSERT_EVENT);
        Aml *rm_evt = aml_name("%s.%s", cphp_res_path, CPU_REMOVE_EVENT);
        Aml *ej_evt = aml_name("%s.%s", cphp_res_path, CPU_EJECT_EVENT);
        Aml *fw_ej_evt = aml_name("%s.%s", cphp_res_path, CPU_FW_EJECT_EVENT);

        aml_append(cpus_dev, aml_name_decl("_HID", aml_string("ACPI0010")));
        aml_append(cpus_dev, aml_name_decl("_CID", aml_eisaid("PNP0A05")));

        method = aml_method(CPU_NOTIFY_METHOD, 2, AML_NOTSERIALIZED);
        for (i = 0; i < arch_ids->len; i++) {
            Aml *cpu = aml_name(CPU_NAME_FMT, i);
            Aml *uid = aml_arg(0);
            Aml *event = aml_arg(1);

            ifctx = aml_if(aml_equal(uid, aml_int(i)));
            {
                aml_append(ifctx, aml_notify(cpu, event));
            }
            aml_append(method, ifctx);
        }
        aml_append(cpus_dev, method);

        method = aml_method(CPU_STS_METHOD, 1, AML_SERIALIZED);
        {
            Aml *idx = aml_arg(0);
            Aml *sta = aml_local(0);

            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
            aml_append(method, aml_store(idx, cpu_selector));
            aml_append(method, aml_store(zero, sta));
            ifctx = aml_if(aml_equal(is_enabled, one));
            {
                aml_append(ifctx, aml_store(aml_int(0xF), sta));
            }
            aml_append(method, ifctx);
            aml_append(method, aml_release(ctrl_lock));
            aml_append(method, aml_return(sta));
        }
        aml_append(cpus_dev, method);

        method = aml_method(CPU_EJECT_METHOD, 1, AML_SERIALIZED);
        {
            Aml *idx = aml_arg(0);

            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
            aml_append(method, aml_store(idx, cpu_selector));
            if (opts.fw_unplugs_cpu) {
                aml_append(method, aml_store(one, fw_ej_evt));
                aml_append(method, aml_store(aml_int(OVMF_CPUHP_SMI_CMD),
                           aml_name("%s", opts.smi_path)));
            } else {
                aml_append(method, aml_store(one, ej_evt));
            }
            aml_append(method, aml_release(ctrl_lock));
        }
        aml_append(cpus_dev, method);
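
        /*
         * CSCN: called from the event handler method; scans all CPUs for
         * pending insert/remove events in batches, notifies OSPM, and clears
         * the insert events it has reported.
         */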
        method = aml_method(CPU_SCAN_METHOD, 0, AML_SERIALIZED);
        {
            const uint8_t max_cpus_per_pass = 255;
            Aml *else_ctx;
            Aml *while_ctx, *while_ctx2;
            Aml *has_event = aml_local(0);
            Aml *dev_chk = aml_int(1);
            Aml *eject_req = aml_int(3);
            Aml *next_cpu_cmd = aml_int(CPHP_GET_NEXT_CPU_WITH_EVENT_CMD);
            Aml *num_added_cpus = aml_local(1);
            Aml *cpu_idx = aml_local(2);
            Aml *uid = aml_local(3);
            Aml *has_job = aml_local(4);
            Aml *new_cpus = aml_name(CPU_ADDED_LIST);

            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));

            /*
             * Windows versions newer than XP (including Windows 10/Windows
             * Server 2019) do support VarPackageOp, but it is crippled to hold
             * the same number of elements as the old PackageOp.
             * For compatibility with Windows XP (so it won't crash), use the
             * ACPI 1.0 PackageOp, which can hold at most 255 elements.
             *
             * Use a named package, as old Windows versions don't support
             * packages in a local variable.
             */
            aml_append(method, aml_name_decl(CPU_ADDED_LIST,
                                             aml_package(max_cpus_per_pass)));

            aml_append(method, aml_store(zero, uid));
            aml_append(method, aml_store(one, has_job));
            /*
             * CPU_ADDED_LIST can hold a limited number of elements; the outer
             * loop processes CPUs in batches, which lets us handle more CPUs
             * than CPU_ADDED_LIST can hold.
             */
            while_ctx2 = aml_while(aml_equal(has_job, one));
            {
                aml_append(while_ctx2, aml_store(zero, has_job));

                aml_append(while_ctx2, aml_store(one, has_event));
                aml_append(while_ctx2, aml_store(zero, num_added_cpus));

                /*
                 * Scan CPUs until there are no more CPUs with events or
                 * CPU_ADDED_LIST capacity is exhausted.
                 */
                while_ctx = aml_while(aml_land(aml_equal(has_event, one),
                                      aml_lless(uid, aml_int(arch_ids->len))));
                {
                    /*
                     * Clear the loop exit condition; the ins_evt/rm_evt checks
                     * set it back to 1 while next_cpu_cmd returns a CPU with
                     * events.
                     */
                    aml_append(while_ctx, aml_store(zero, has_event));

                    aml_append(while_ctx, aml_store(uid, cpu_selector));
                    aml_append(while_ctx, aml_store(next_cpu_cmd, cpu_cmd));

                    /*
                     * Wrap-around case: the scan is complete, exit the loop.
                     * It happens because events are not cleared in the scan
                     * loop, so next_cpu_cmd keeps finding already processed
                     * CPUs.
                     */
                    ifctx = aml_if(aml_lless(cpu_data, uid));
                    {
                        aml_append(ifctx, aml_break());
                    }
                    aml_append(while_ctx, ifctx);

                    /*
                     * If CPU_ADDED_LIST is full, exit the inner loop and
                     * process the collected CPUs.
                     */
                    ifctx = aml_if(
                        aml_equal(num_added_cpus, aml_int(max_cpus_per_pass)));
                    {
                        aml_append(ifctx, aml_store(one, has_job));
                        aml_append(ifctx, aml_break());
                    }
                    aml_append(while_ctx, ifctx);

                    aml_append(while_ctx, aml_store(cpu_data, uid));
                    ifctx = aml_if(aml_equal(ins_evt, one));
                    {
                        /* cache added CPUs to Notify/Wakeup later */
                        aml_append(ifctx, aml_store(uid,
                            aml_index(new_cpus, num_added_cpus)));
                        aml_append(ifctx, aml_increment(num_added_cpus));
                        aml_append(ifctx, aml_store(one, has_event));
                    }
                    aml_append(while_ctx, ifctx);
                    else_ctx = aml_else();
                    ifctx = aml_if(aml_equal(rm_evt, one));
                    {
                        aml_append(ifctx,
                            aml_call2(CPU_NOTIFY_METHOD, uid, eject_req));
                        aml_append(ifctx, aml_store(one, rm_evt));
                        aml_append(ifctx, aml_store(one, has_event));
                    }
                    aml_append(else_ctx, ifctx);
                    aml_append(while_ctx, else_ctx);
                    aml_append(while_ctx, aml_increment(uid));
                }
                aml_append(while_ctx2, while_ctx);

                /*
                 * If the firmware negotiated ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT,
                 * make an upcall to the firmware so it can pull in the new
                 * CPUs before the OS is notified and wakes them up.
                 */
                if (opts.smi_path) {
                    ifctx = aml_if(aml_lgreater(num_added_cpus, zero));
                    {
                        aml_append(ifctx, aml_store(aml_int(OVMF_CPUHP_SMI_CMD),
                            aml_name("%s", opts.smi_path)));
                    }
                    aml_append(while_ctx2, ifctx);
                }

                /* Notify OSPM about new CPUs and clear insert events */
                aml_append(while_ctx2, aml_store(zero, cpu_idx));
                while_ctx = aml_while(aml_lless(cpu_idx, num_added_cpus));
                {
                    aml_append(while_ctx,
                        aml_store(aml_derefof(aml_index(new_cpus, cpu_idx)),
                                  uid));
                    aml_append(while_ctx,
                        aml_call2(CPU_NOTIFY_METHOD, uid, dev_chk));
                    aml_append(while_ctx, aml_store(uid, aml_debug()));
                    aml_append(while_ctx, aml_store(uid, cpu_selector));
                    aml_append(while_ctx, aml_store(one, ins_evt));
                    aml_append(while_ctx, aml_increment(cpu_idx));
                }
                aml_append(while_ctx2, while_ctx);
                /*
                 * If another batch is needed, then it will resume scanning
                 * exactly at -- and not after -- the last CPU that's currently
                 * in CPU_ADDED_LIST. In other words, the last CPU in
                 * CPU_ADDED_LIST is going to be re-checked. That's OK: we've
                 * just cleared the insert event for *all* CPUs in
                 * CPU_ADDED_LIST, including the last one. So the scan will
                 * simply seek past it.
                 */
            }
            aml_append(method, while_ctx2);
            aml_append(method, aml_release(ctrl_lock));
        }
        aml_append(cpus_dev, method);
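
        /*
         * COST: forwards _OST event/status data from the per-CPU objects to
         * the hotplug registers so QEMU can emit the corresponding QAPI
         * device-OST event.
         */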
        method = aml_method(CPU_OST_METHOD, 4, AML_SERIALIZED);
        {
            Aml *uid = aml_arg(0);
            Aml *ev_cmd = aml_int(CPHP_OST_EVENT_CMD);
            Aml *st_cmd = aml_int(CPHP_OST_STATUS_CMD);

            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
            aml_append(method, aml_store(uid, cpu_selector));
            aml_append(method, aml_store(ev_cmd, cpu_cmd));
            aml_append(method, aml_store(aml_arg(1), cpu_data));
            aml_append(method, aml_store(st_cmd, cpu_cmd));
            aml_append(method, aml_store(aml_arg(2), cpu_data));
            aml_append(method, aml_release(ctrl_lock));
        }
        aml_append(cpus_dev, method);

        /* build Processor object for each processor */
        for (i = 0; i < arch_ids->len; i++) {
            Aml *dev;
            Aml *uid = aml_int(i);
            GArray *madt_buf = g_array_new(0, 1, 1);
            int arch_id = arch_ids->cpus[i].arch_id;

            if (opts.acpi_1_compatible && arch_id < 255) {
                dev = aml_processor(i, 0, 0, CPU_NAME_FMT, i);
            } else {
                dev = aml_device(CPU_NAME_FMT, i);
                aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
                aml_append(dev, aml_name_decl("_UID", uid));
            }

            method = aml_method("_STA", 0, AML_SERIALIZED);
            aml_append(method, aml_return(aml_call1(CPU_STS_METHOD, uid)));
            aml_append(dev, method);

            /* build _MAT object */
            build_madt_cpu(i, arch_ids, madt_buf, true); /* set enabled flag */
            aml_append(dev, aml_name_decl("_MAT",
                aml_buffer(madt_buf->len, (uint8_t *)madt_buf->data)));
            g_array_free(madt_buf, true);

            if (CPU(arch_ids->cpus[i].cpu) != first_cpu) {
                method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
                aml_append(method, aml_call1(CPU_EJECT_METHOD, uid));
                aml_append(dev, method);
            }

            method = aml_method("_OST", 3, AML_SERIALIZED);
            aml_append(method,
                aml_call4(CPU_OST_METHOD, uid, aml_arg(0),
                          aml_arg(1), aml_arg(2))
            );
            aml_append(dev, method);

            /* Linux guests discard SRAT info for non-present CPUs;
             * as a result, _PXM is required for all CPUs that might
             * be hot-plugged. For simplicity, add it for all CPUs.
             */
            if (arch_ids->cpus[i].props.has_node_id) {
                aml_append(dev, aml_name_decl("_PXM",
                           aml_int(arch_ids->cpus[i].props.node_id)));
            }

            aml_append(cpus_dev, dev);
        }
    }
    aml_append(sb_scope, cpus_dev);
    aml_append(table, sb_scope);

    method = aml_method(event_handler_method, 0, AML_NOTSERIALIZED);
    aml_append(method, aml_call0("\\_SB.CPUS." CPU_SCAN_METHOD));
    aml_append(table, method);

    g_free(cphp_res_path);
}