/* Support for generating ACPI tables and passing them to Guests
 *
 * ARM virt ACPI generation
 *
 * Copyright (C) 2008-2010  Kevin O'Connor <kevin@koconnor.net>
 * Copyright (C) 2006 Fabrice Bellard
 * Copyright (C) 2013 Red Hat Inc
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (c) 2015 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Author: Shannon Zhao <zhaoshenglong@huawei.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

29 #include "qemu/osdep.h"
30 #include "qapi/error.h"
31 #include "qemu/bitmap.h"
33 #include "hw/core/cpu.h"
34 #include "target/arm/cpu.h"
35 #include "hw/acpi/acpi-defs.h"
36 #include "hw/acpi/acpi.h"
37 #include "hw/nvram/fw_cfg.h"
38 #include "hw/acpi/bios-linker-loader.h"
39 #include "hw/acpi/aml-build.h"
40 #include "hw/acpi/utils.h"
41 #include "hw/acpi/pci.h"
42 #include "hw/acpi/memory_hotplug.h"
43 #include "hw/acpi/generic_event_device.h"
44 #include "hw/acpi/tpm.h"
45 #include "hw/pci/pcie_host.h"
46 #include "hw/pci/pci.h"
47 #include "hw/pci/pci_bus.h"
48 #include "hw/pci-host/gpex.h"
49 #include "hw/arm/virt.h"
50 #include "hw/mem/nvdimm.h"
51 #include "hw/platform-bus.h"
52 #include "sysemu/numa.h"
53 #include "sysemu/reset.h"
54 #include "sysemu/tpm.h"
56 #include "migration/vmstate.h"
57 #include "hw/acpi/ghes.h"
#define ARM_SPI_BASE 32

#define ACPI_BUILD_TABLE_SIZE             0x20000

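/*
 * Add a Processor Device object per CPU to the DSDT; for CPU 0 the
 * generated AML is roughly equivalent to this ASL:
 *
 *     Device (C000) {
 *         Name (_HID, "ACPI0007")
 *         Name (_UID, Zero)
 *     }
 *
 * "ACPI0007" is the standard _HID for a processor device.
 */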
static void acpi_dsdt_add_cpus(Aml *scope, VirtMachineState *vms)
{
    MachineState *ms = MACHINE(vms);
    uint16_t i;

    for (i = 0; i < ms->smp.cpus; i++) {
        Aml *dev = aml_device("C%.03X", i);
        aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
        aml_append(dev, aml_name_decl("_UID", aml_int(i)));
        aml_append(scope, dev);
    }
}

static void acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap,
                                           uint32_t uart_irq)
{
    Aml *dev = aml_device("COM0");
    aml_append(dev, aml_name_decl("_HID", aml_string("ARMH0011")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(uart_memmap->base,
                                       uart_memmap->size, AML_READ_WRITE));
    aml_append(crs,
               aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                             AML_EXCLUSIVE, &uart_irq, 1));
    aml_append(dev, aml_name_decl("_CRS", crs));

    aml_append(scope, dev);
}

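/*
 * The fw_cfg MMIO region is advertised with QEMU's own "QEMU0002" _HID;
 * _STA = 0xB (present | enabled | functioning, but not shown in UI) and
 * _CCA = 1 mark it as a cache-coherent, hidden platform device.
 */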
static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap)
{
    Aml *dev = aml_device("FWCF");
    aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002")));
    /* device present, functioning, decoding, not shown in UI */
    aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
    aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base,
                                       fw_cfg_memmap->size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}

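/*
 * The virt machine's flash area is split evenly into two banks, exposed
 * here as two "LNRO0015" devices (FLS0/FLS1) with adjacent MMIO windows.
 */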
static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap)
{
    Aml *dev, *crs;
    hwaddr base = flash_memmap->base;
    hwaddr size = flash_memmap->size / 2;

    dev = aml_device("FLS0");
    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);

    dev = aml_device("FLS1");
    aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015")));
    aml_append(dev, aml_name_decl("_UID", aml_int(1)));
    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(base + size, size, AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}

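/*
 * One "LNRO0005" device per virtio-mmio transport.  The transports sit
 * back to back from the VIRT_MMIO base with consecutive SPIs, so both
 * base and irq advance once per loop iteration.
 */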
static void acpi_dsdt_add_virtio(Aml *scope,
                                 const MemMapEntry *virtio_mmio_memmap,
                                 uint32_t mmio_irq, int num)
{
    hwaddr base = virtio_mmio_memmap->base;
    hwaddr size = virtio_mmio_memmap->size;
    int i;

    for (i = 0; i < num; i++) {
        uint32_t irq = mmio_irq + i;
        Aml *dev = aml_device("VR%02u", i);
        aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005")));
        aml_append(dev, aml_name_decl("_UID", aml_int(i)));
        aml_append(dev, aml_name_decl("_CCA", aml_int(1)));

        Aml *crs = aml_resource_template();
        aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
        aml_append(crs,
                   aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, &irq, 1));
        aml_append(dev, aml_name_decl("_CRS", crs));
        aml_append(scope, dev);
        base += size;
    }
}

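/*
 * The PCIe host bridge AML itself comes from the shared GPEX helper;
 * this wrapper only collects the virt memmap windows (32-bit MMIO, PIO,
 * ECAM and, when enabled, the high MMIO window) into a GPEXConfig.
 */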
static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
                              uint32_t irq, bool use_highmem, bool highmem_ecam,
                              VirtMachineState *vms)
{
    int ecam_id = VIRT_ECAM_ID(highmem_ecam);
    struct GPEXConfig cfg = {
        .mmio32 = memmap[VIRT_PCIE_MMIO],
        .pio    = memmap[VIRT_PCIE_PIO],
        .ecam   = memmap[ecam_id],
        .irq    = irq,
        .bus    = vms->bus,
    };

    if (use_highmem) {
        cfg.mmio64 = memmap[VIRT_HIGH_PCIE_MMIO];
    }

    acpi_dsdt_add_gpex(scope, &cfg);
}

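/*
 * PL061 GPIO controller ("ARMH0061") acting as the ACPI power button:
 * pin 3 is declared as an ACPI Event Interrupt via _AEI, and the edge
 * handler _E03 issues Notify(PWRB, 0x80) to signal a button press.
 */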
static void acpi_dsdt_add_gpio(Aml *scope, const MemMapEntry *gpio_memmap,
                                           uint32_t gpio_irq)
{
    Aml *dev = aml_device("GPO0");
    aml_append(dev, aml_name_decl("_HID", aml_string("ARMH0061")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(gpio_memmap->base, gpio_memmap->size,
                                       AML_READ_WRITE));
    aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                  AML_EXCLUSIVE, &gpio_irq, 1));
    aml_append(dev, aml_name_decl("_CRS", crs));

    Aml *aei = aml_resource_template();
    /* Pin 3 for power button */
    const uint32_t pin_list[1] = {3};
    aml_append(aei, aml_gpio_int(AML_CONSUMER, AML_EDGE, AML_ACTIVE_HIGH,
                                 AML_EXCLUSIVE, AML_PULL_UP, 0, pin_list, 1,
                                 "GPO0", NULL, 0));
    aml_append(dev, aml_name_decl("_AEI", aei));

    /* _E03 is handle for power button */
    Aml *method = aml_method("_E03", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_notify(aml_name(ACPI_POWER_BUTTON_DEVICE),
                                  aml_int(0x80)));
    aml_append(dev, method);
    aml_append(scope, dev);
}

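/*
 * Describe a sysbus TPM ("MSFT0101") if one exists.  The device is
 * mapped on the platform bus, so its guest-physical address is the
 * platform bus base plus the per-device MMIO offset.
 */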
#ifdef CONFIG_TPM
static void acpi_dsdt_add_tpm(Aml *scope, VirtMachineState *vms)
{
    PlatformBusDevice *pbus = PLATFORM_BUS_DEVICE(vms->platform_bus_dev);
    hwaddr pbus_base = vms->memmap[VIRT_PLATFORM_BUS].base;
    SysBusDevice *sbdev = SYS_BUS_DEVICE(tpm_find());
    MemoryRegion *sbdev_mr;
    hwaddr tpm_base;

    if (!sbdev) {
        return;
    }

    tpm_base = platform_bus_get_mmio_addr(pbus, sbdev, 0);
    assert(tpm_base != -1);

    tpm_base += pbus_base;

    sbdev_mr = sysbus_mmio_get_region(sbdev, 0);

    Aml *dev = aml_device("TPM0");
    aml_append(dev, aml_name_decl("_HID", aml_string("MSFT0101")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs,
               aml_memory32_fixed(tpm_base,
                                  (uint32_t)memory_region_size(sbdev_mr),
                                  AML_READ_WRITE));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}
#endif

/* Build the iort ID mapping to SMMUv3 for a given PCI host bridge */
static int
iort_host_bridges(Object *obj, void *opaque)
{
    GArray *idmap_blob = opaque;

    if (object_dynamic_cast(obj, TYPE_PCI_HOST_BRIDGE)) {
        PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus;

        if (bus && !pci_bus_bypass_iommu(bus)) {
            int min_bus, max_bus;

            pci_bus_range(bus, &min_bus, &max_bus);

            AcpiIortIdMapping idmap = {
                .input_base = min_bus << 8,
                .id_count = (max_bus - min_bus + 1) << 8,
            };
            g_array_append_val(idmap_blob, idmap);
        }
    }

    return 0;
}

static int iort_idmap_compare(gconstpointer a, gconstpointer b)
{
    AcpiIortIdMapping *idmap_a = (AcpiIortIdMapping *)a;
    AcpiIortIdMapping *idmap_b = (AcpiIortIdMapping *)b;

    return idmap_a->input_base - idmap_b->input_base;
}

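/*
 * IORT: describes how PCI requester IDs (RIDs) reach the GICv3 ITS,
 * optionally through an SMMUv3.  Without an SMMU there is a single
 * identity RC -> ITS mapping.  With an SMMU, RID ranges behind the IOMMU
 * map RC -> SMMUv3 -> ITS while the bypassed ranges still map RC -> ITS
 * directly; smmu_idmaps and its_idmaps below collect those two sets of
 * ranges before the nodes are emitted.
 */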
static void
build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    int i, nb_nodes, rc_mapping_count, iort_start = table_data->len;
    AcpiIortIdMapping *idmap;
    AcpiIortItsGroup *its;
    AcpiIortTable *iort;
    AcpiIortSmmu3 *smmu;
    size_t node_size, iort_node_offset, iort_length, smmu_offset = 0;
    AcpiIortRC *rc;
    GArray *smmu_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping));
    GArray *its_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping));

    iort = acpi_data_push(table_data, sizeof(*iort));

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        AcpiIortIdMapping next_range = {0};

        object_child_foreach_recursive(object_get_root(),
                                       iort_host_bridges, smmu_idmaps);

        /* Sort the smmu idmap by input_base */
        g_array_sort(smmu_idmaps, iort_idmap_compare);

        /*
         * Split the whole RIDs by mapping from RC to SMMU,
         * build the ID mapping from RC to ITS directly.
         */
        for (i = 0; i < smmu_idmaps->len; i++) {
            idmap = &g_array_index(smmu_idmaps, AcpiIortIdMapping, i);

            if (next_range.input_base < idmap->input_base) {
                next_range.id_count = idmap->input_base -
                    next_range.input_base;
                g_array_append_val(its_idmaps, next_range);
            }

            next_range.input_base = idmap->input_base + idmap->id_count;
        }

        /* Append the last RC -> ITS ID mapping */
        if (next_range.input_base < 0xFFFF) {
            next_range.id_count = 0xFFFF - next_range.input_base;
            g_array_append_val(its_idmaps, next_range);
        }

        nb_nodes = 3; /* RC, ITS, SMMUv3 */
        rc_mapping_count = smmu_idmaps->len + its_idmaps->len;
    } else {
        nb_nodes = 2; /* RC, ITS */
        rc_mapping_count = 1;
    }

    iort_length = sizeof(*iort);
    iort->node_count = cpu_to_le32(nb_nodes);
    /*
     * Use a copy in case table_data->data moves during acpi_data_push
     * operations.
     */
    iort_node_offset = sizeof(*iort);
    iort->node_offset = cpu_to_le32(iort_node_offset);

    /* ITS group node */
    node_size = sizeof(*its) + sizeof(uint32_t);
    iort_length += node_size;
    its = acpi_data_push(table_data, node_size);

    its->type = ACPI_IORT_NODE_ITS_GROUP;
    its->length = cpu_to_le16(node_size);
    its->its_count = cpu_to_le32(1);
    its->identifiers[0] = 0; /* MADT translation_id */

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        int irq = vms->irqmap[VIRT_SMMU] + ARM_SPI_BASE;

        /* SMMUv3 node */
        smmu_offset = iort_node_offset + node_size;
        node_size = sizeof(*smmu) + sizeof(*idmap);
        iort_length += node_size;
        smmu = acpi_data_push(table_data, node_size);

        smmu->type = ACPI_IORT_NODE_SMMU_V3;
        smmu->length = cpu_to_le16(node_size);
        smmu->mapping_count = cpu_to_le32(1);
        smmu->mapping_offset = cpu_to_le32(sizeof(*smmu));
        smmu->base_address = cpu_to_le64(vms->memmap[VIRT_SMMU].base);
        smmu->flags = cpu_to_le32(ACPI_IORT_SMMU_V3_COHACC_OVERRIDE);
        smmu->event_gsiv = cpu_to_le32(irq);
        smmu->pri_gsiv = cpu_to_le32(irq + 1);
        smmu->sync_gsiv = cpu_to_le32(irq + 2);
        smmu->gerr_gsiv = cpu_to_le32(irq + 3);

        /* Identity RID mapping covering the whole input RID range */
        idmap = &smmu->id_mapping_array[0];
        idmap->input_base = 0;
        idmap->id_count = cpu_to_le32(0xFFFF);
        idmap->output_base = 0;
        /* output IORT node is the ITS group node (the first node) */
        idmap->output_reference = cpu_to_le32(iort_node_offset);
    }

    /* Root Complex Node */
    node_size = sizeof(*rc) + sizeof(*idmap) * rc_mapping_count;
    iort_length += node_size;
    rc = acpi_data_push(table_data, node_size);

    rc->type = ACPI_IORT_NODE_PCI_ROOT_COMPLEX;
    rc->length = cpu_to_le16(node_size);
    rc->mapping_count = cpu_to_le32(rc_mapping_count);
    rc->mapping_offset = cpu_to_le32(sizeof(*rc));

    /* fully coherent device */
    rc->memory_properties.cache_coherency = cpu_to_le32(1);
    rc->memory_properties.memory_flags = 0x3; /* CCA = CPM = DCAS = 1 */
    rc->pci_segment_number = 0; /* MCFG pci_segment */

    if (vms->iommu == VIRT_IOMMU_SMMUV3) {
        AcpiIortIdMapping *range;

        /* translated RIDs connect to SMMUv3 node: RC -> SMMUv3 -> ITS */
        for (i = 0; i < smmu_idmaps->len; i++) {
            idmap = &rc->id_mapping_array[i];
            range = &g_array_index(smmu_idmaps, AcpiIortIdMapping, i);

            idmap->input_base = cpu_to_le32(range->input_base);
            idmap->id_count = cpu_to_le32(range->id_count);
            idmap->output_base = cpu_to_le32(range->input_base);
            /* output IORT node is the smmuv3 node */
            idmap->output_reference = cpu_to_le32(smmu_offset);
        }

        /* bypassed RIDs connect to ITS group node directly: RC -> ITS */
        for (i = 0; i < its_idmaps->len; i++) {
            idmap = &rc->id_mapping_array[smmu_idmaps->len + i];
            range = &g_array_index(its_idmaps, AcpiIortIdMapping, i);

            idmap->input_base = cpu_to_le32(range->input_base);
            idmap->id_count = cpu_to_le32(range->id_count);
            idmap->output_base = cpu_to_le32(range->input_base);
            /* output IORT node is the ITS group node (the first node) */
            idmap->output_reference = cpu_to_le32(iort_node_offset);
        }
    } else {
        /* Identity RID mapping covering the whole input RID range */
        idmap = &rc->id_mapping_array[0];
        idmap->input_base = cpu_to_le32(0);
        idmap->id_count = cpu_to_le32(0xFFFF);
        idmap->output_base = cpu_to_le32(0);
        /* output IORT node is the ITS group node (the first node) */
        idmap->output_reference = cpu_to_le32(iort_node_offset);
    }

    g_array_free(smmu_idmaps, true);
    g_array_free(its_idmaps, true);

    /*
     * Update the pointer address in case table_data->data moves during above
     * acpi_data_push operations.
     */
    iort = (AcpiIortTable *)(table_data->data + iort_start);
    iort->length = cpu_to_le32(iort_length);

    build_header(linker, table_data, (void *)(table_data->data + iort_start),
                 "IORT", table_data->len - iort_start, 0, vms->oem_id,
                 vms->oem_table_id);
}

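/*
 * SPCR: points the OS at the PL011 as its default serial console, using
 * the same MMIO window and SPI as the COM0 device in the DSDT.
 */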
static void
build_spcr(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    AcpiSerialPortConsoleRedirection *spcr;
    const MemMapEntry *uart_memmap = &vms->memmap[VIRT_UART];
    int irq = vms->irqmap[VIRT_UART] + ARM_SPI_BASE;
    int spcr_start = table_data->len;

    spcr = acpi_data_push(table_data, sizeof(*spcr));

    spcr->interface_type = 0x3;    /* ARM PL011 UART */

    spcr->base_address.space_id = AML_SYSTEM_MEMORY;
    spcr->base_address.bit_width = 8;
    spcr->base_address.bit_offset = 0;
    spcr->base_address.access_width = 1;
    spcr->base_address.address = cpu_to_le64(uart_memmap->base);

    spcr->interrupt_types = (1 << 3); /* Bit[3] ARMH GIC interrupt */
    spcr->gsi = cpu_to_le32(irq);  /* Global System Interrupt */

    spcr->baud = 3;                /* Baud Rate: 3 = 9600 */
    spcr->parity = 0;              /* No Parity */
    spcr->stopbits = 1;            /* 1 Stop bit */
    spcr->flowctrl = (1 << 1);     /* Bit[1] = RTS/CTS hardware flow control */
    spcr->term_type = 0;           /* Terminal Type: 0 = VT100 */

    spcr->pci_device_id = 0xffff;  /* PCI Device ID: not a PCI device */
    spcr->pci_vendor_id = 0xffff;  /* PCI Vendor ID: not a PCI device */

    build_header(linker, table_data, (void *)(table_data->data + spcr_start),
                 "SPCR", table_data->len - spcr_start, 2, vms->oem_id,
                 vms->oem_table_id);
}

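/*
 * SRAT: one GICC affinity structure per possible CPU and one memory
 * affinity structure per NUMA node with RAM, plus a hotpluggable entry
 * covering the device memory region when it exists.
 */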
static void
build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    AcpiSystemResourceAffinityTable *srat;
    AcpiSratProcessorGiccAffinity *core;
    AcpiSratMemoryAffinity *numamem;
    int i, srat_start;
    uint64_t mem_base;
    MachineClass *mc = MACHINE_GET_CLASS(vms);
    MachineState *ms = MACHINE(vms);
    const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(ms);

    srat_start = table_data->len;
    srat = acpi_data_push(table_data, sizeof(*srat));
    srat->reserved1 = cpu_to_le32(1);

    for (i = 0; i < cpu_list->len; ++i) {
        core = acpi_data_push(table_data, sizeof(*core));
        core->type = ACPI_SRAT_PROCESSOR_GICC;
        core->length = sizeof(*core);
        core->proximity = cpu_to_le32(cpu_list->cpus[i].props.node_id);
        core->acpi_processor_uid = cpu_to_le32(i);
        core->flags = cpu_to_le32(1);
    }

    mem_base = vms->memmap[VIRT_MEM].base;
    for (i = 0; i < ms->numa_state->num_nodes; ++i) {
        if (ms->numa_state->nodes[i].node_mem > 0) {
            numamem = acpi_data_push(table_data, sizeof(*numamem));
            build_srat_memory(numamem, mem_base,
                              ms->numa_state->nodes[i].node_mem, i,
                              MEM_AFFINITY_ENABLED);
            mem_base += ms->numa_state->nodes[i].node_mem;
        }
    }

    if (ms->nvdimms_state->is_enabled) {
        nvdimm_build_srat(table_data);
    }

    if (ms->device_memory) {
        numamem = acpi_data_push(table_data, sizeof *numamem);
        build_srat_memory(numamem, ms->device_memory->base,
                          memory_region_size(&ms->device_memory->mr),
                          ms->numa_state->num_nodes - 1,
                          MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED);
    }

    build_header(linker, table_data, (void *)(table_data->data + srat_start),
                 "SRAT", table_data->len - srat_start, 3, vms->oem_id,
                 vms->oem_table_id);
}

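/*
 * GTDT: advertises the architected timer interrupts.  These are PPIs,
 * and a PPI's GSIV is its interrupt number plus 16, hence the "+ 16"
 * matching the values used in the device tree.
 */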
static void
build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    int gtdt_start = table_data->len;
    AcpiGenericTimerTable *gtdt;
    uint32_t irqflags;

    if (vmc->claim_edge_triggered_timers) {
        irqflags = ACPI_GTDT_INTERRUPT_MODE_EDGE;
    } else {
        irqflags = ACPI_GTDT_INTERRUPT_MODE_LEVEL;
    }

    gtdt = acpi_data_push(table_data, sizeof *gtdt);
    /* The interrupt values are the same as in the device tree, plus 16 */
    gtdt->secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_S_EL1_IRQ + 16);
    gtdt->secure_el1_flags = cpu_to_le32(irqflags);

    gtdt->non_secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL1_IRQ + 16);
    gtdt->non_secure_el1_flags = cpu_to_le32(irqflags |
                                             ACPI_GTDT_CAP_ALWAYS_ON);

    gtdt->virtual_timer_interrupt = cpu_to_le32(ARCH_TIMER_VIRT_IRQ + 16);
    gtdt->virtual_timer_flags = cpu_to_le32(irqflags);

    gtdt->non_secure_el2_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL2_IRQ + 16);
    gtdt->non_secure_el2_flags = cpu_to_le32(irqflags);

    build_header(linker, table_data,
                 (void *)(table_data->data + gtdt_start), "GTDT",
                 table_data->len - gtdt_start, 2, vms->oem_id,
                 vms->oem_table_id);
}

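/*
 * MADT (signature "APIC"): GIC distributor, one GICC per CPU, then
 * either GICv3 redistributor regions plus an optional ITS, or a GICv2m
 * MSI frame, depending on the GIC version.
 */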
static void
build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    int madt_start = table_data->len;
    const MemMapEntry *memmap = vms->memmap;
    const int *irqmap = vms->irqmap;
    AcpiMadtGenericDistributor *gicd;
    AcpiMadtGenericMsiFrame *gic_msi;
    int i;

    acpi_data_push(table_data, sizeof(AcpiMultipleApicTable));

    gicd = acpi_data_push(table_data, sizeof *gicd);
    gicd->type = ACPI_APIC_GENERIC_DISTRIBUTOR;
    gicd->length = sizeof(*gicd);
    gicd->base_address = cpu_to_le64(memmap[VIRT_GIC_DIST].base);
    gicd->version = vms->gic_version;

    for (i = 0; i < MACHINE(vms)->smp.cpus; i++) {
        AcpiMadtGenericCpuInterface *gicc = acpi_data_push(table_data,
                                                           sizeof(*gicc));
        ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i));

        gicc->type = ACPI_APIC_GENERIC_CPU_INTERFACE;
        gicc->length = sizeof(*gicc);
        if (vms->gic_version == 2) {
            gicc->base_address = cpu_to_le64(memmap[VIRT_GIC_CPU].base);
            gicc->gich_base_address = cpu_to_le64(memmap[VIRT_GIC_HYP].base);
            gicc->gicv_base_address = cpu_to_le64(memmap[VIRT_GIC_VCPU].base);
        }
        gicc->cpu_interface_number = cpu_to_le32(i);
        gicc->arm_mpidr = cpu_to_le64(armcpu->mp_affinity);
        gicc->uid = cpu_to_le32(i);
        gicc->flags = cpu_to_le32(ACPI_MADT_GICC_ENABLED);

        if (arm_feature(&armcpu->env, ARM_FEATURE_PMU)) {
            gicc->performance_interrupt = cpu_to_le32(PPI(VIRTUAL_PMU_IRQ));
        }
        if (vms->virt) {
            gicc->vgic_interrupt = cpu_to_le32(PPI(ARCH_GIC_MAINT_IRQ));
        }
    }

    if (vms->gic_version == 3) {
        AcpiMadtGenericTranslator *gic_its;
        int nb_redist_regions = virt_gicv3_redist_region_count(vms);
        AcpiMadtGenericRedistributor *gicr = acpi_data_push(table_data,
                                                            sizeof *gicr);

        gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR;
        gicr->length = sizeof(*gicr);
        gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST].base);
        gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST].size);

        if (nb_redist_regions == 2) {
            gicr = acpi_data_push(table_data, sizeof(*gicr));
            gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR;
            gicr->length = sizeof(*gicr);
            gicr->base_address =
                cpu_to_le64(memmap[VIRT_HIGH_GIC_REDIST2].base);
            gicr->range_length =
                cpu_to_le32(memmap[VIRT_HIGH_GIC_REDIST2].size);
        }

        if (its_class_name() && !vmc->no_its) {
            gic_its = acpi_data_push(table_data, sizeof *gic_its);
            gic_its->type = ACPI_APIC_GENERIC_TRANSLATOR;
            gic_its->length = sizeof(*gic_its);
            gic_its->translation_id = 0;
            gic_its->base_address = cpu_to_le64(memmap[VIRT_GIC_ITS].base);
        }
    } else {
        gic_msi = acpi_data_push(table_data, sizeof *gic_msi);
        gic_msi->type = ACPI_APIC_GENERIC_MSI_FRAME;
        gic_msi->length = sizeof(*gic_msi);
        gic_msi->gic_msi_frame_id = 0;
        gic_msi->base_address = cpu_to_le64(memmap[VIRT_GIC_V2M].base);
        gic_msi->flags = cpu_to_le32(1);
        gic_msi->spi_count = cpu_to_le16(NUM_GICV2M_SPIS);
        gic_msi->spi_base = cpu_to_le16(irqmap[VIRT_GIC_V2M] + ARM_SPI_BASE);
    }

    build_header(linker, table_data,
                 (void *)(table_data->data + madt_start), "APIC",
                 table_data->len - madt_start, 3, vms->oem_id,
                 vms->oem_table_id);
}

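/*
 * FADT for hardware-reduced ACPI: no legacy fixed-hardware register
 * blocks, just the X_DSDT pointer and the PSCI boot protocol flags.
 */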
static void build_fadt_rev5(GArray *table_data, BIOSLinker *linker,
                            VirtMachineState *vms, unsigned dsdt_tbl_offset)
{
    /* ACPI v5.1 */
    AcpiFadtData fadt = {
        .rev = 5,
        .minor_ver = 1,
        .flags = 1 << ACPI_FADT_F_HW_REDUCED_ACPI,
        .xdsdt_tbl_offset = &dsdt_tbl_offset,
    };

    switch (vms->psci_conduit) {
    case QEMU_PSCI_CONDUIT_DISABLED:
        fadt.arm_boot_arch = 0;
        break;
    case QEMU_PSCI_CONDUIT_HVC:
        fadt.arm_boot_arch = ACPI_FADT_ARM_PSCI_COMPLIANT |
                             ACPI_FADT_ARM_PSCI_USE_HVC;
        break;
    case QEMU_PSCI_CONDUIT_SMC:
        fadt.arm_boot_arch = ACPI_FADT_ARM_PSCI_COMPLIANT;
        break;
    default:
        g_assert_not_reached();
    }

    build_fadt(table_data, linker, &fadt, vms->oem_id, vms->oem_table_id);
}

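/*
 * DSDT: assemble the \_SB scope from the helpers above.  The GED device
 * and the legacy GPIO power button are mutually exclusive: machines with
 * an ACPI GED deliver power-down (and memory hotplug) events through it
 * instead of the PL061 pin.
 */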
static void
build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    Aml *scope, *dsdt;
    MachineState *ms = MACHINE(vms);
    const MemMapEntry *memmap = vms->memmap;
    const int *irqmap = vms->irqmap;

    dsdt = init_aml_allocator();
    /* Reserve space for header */
    acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader));

    /* When booting the VM with UEFI, UEFI takes ownership of the RTC hardware.
     * While UEFI can use libfdt to disable the RTC device node in the DTB that
     * it passes to the OS, it cannot modify AML. Therefore, we won't generate
     * the RTC ACPI device at all when using UEFI.
     */
    scope = aml_scope("\\_SB");
    acpi_dsdt_add_cpus(scope, vms);
    acpi_dsdt_add_uart(scope, &memmap[VIRT_UART],
                       (irqmap[VIRT_UART] + ARM_SPI_BASE));
    if (vmc->acpi_expose_flash) {
        acpi_dsdt_add_flash(scope, &memmap[VIRT_FLASH]);
    }
    acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]);
    acpi_dsdt_add_virtio(scope, &memmap[VIRT_MMIO],
                    (irqmap[VIRT_MMIO] + ARM_SPI_BASE), NUM_VIRTIO_TRANSPORTS);
    acpi_dsdt_add_pci(scope, memmap, (irqmap[VIRT_PCIE] + ARM_SPI_BASE),
                      vms->highmem, vms->highmem_ecam, vms);
    if (vms->acpi_dev) {
        build_ged_aml(scope, "\\_SB."GED_DEVICE,
                      HOTPLUG_HANDLER(vms->acpi_dev),
                      irqmap[VIRT_ACPI_GED] + ARM_SPI_BASE, AML_SYSTEM_MEMORY,
                      memmap[VIRT_ACPI_GED].base);
    } else {
        acpi_dsdt_add_gpio(scope, &memmap[VIRT_GPIO],
                           (irqmap[VIRT_GPIO] + ARM_SPI_BASE));
    }

    if (vms->acpi_dev) {
        uint32_t event = object_property_get_uint(OBJECT(vms->acpi_dev),
                                                  "ged-event", &error_abort);

        if (event & ACPI_GED_MEM_HOTPLUG_EVT) {
            build_memory_hotplug_aml(scope, ms->ram_slots, "\\_SB", NULL,
                                     AML_SYSTEM_MEMORY,
                                     memmap[VIRT_PCDIMM_ACPI].base);
        }
    }

    acpi_dsdt_add_power_button(scope);
#ifdef CONFIG_TPM
    acpi_dsdt_add_tpm(scope, vms);
#endif

    aml_append(dsdt, scope);

    /* copy AML table into ACPI tables blob and patch header there */
    g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
    build_header(linker, table_data,
                 (void *)(table_data->data + table_data->len - dsdt->buf->len),
                 "DSDT", dsdt->buf->len, 2, vms->oem_id,
                 vms->oem_table_id);
    free_aml_allocator();
}

typedef
struct AcpiBuildState {
    /* Copy of table in RAM (for patching). */
    MemoryRegion *table_mr;
    MemoryRegion *rsdp_mr;
    MemoryRegion *linker_mr;
    /* Is table patched? */
    bool patched;
} AcpiBuildState;

static void acpi_align_size(GArray *blob, unsigned align)
{
    /*
     * Align size to multiple of given size. This reduces the chance
     * we need to change size in the future (breaking cross version migration).
     */
    g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align));
}

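/*
 * Build all tables into tables->table_data in dependency order: the DSDT
 * first (the FADT needs its offset), then the fixed tables, then an XSDT
 * pointing at all of them, and finally the RSDP in its own FSEG blob.
 */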
static
void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
{
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    GArray *table_offsets;
    unsigned dsdt, xsdt;
    GArray *tables_blob = tables->table_data;
    MachineState *ms = MACHINE(vms);

    table_offsets = g_array_new(false, true /* clear */,
                                sizeof(uint32_t));

    bios_linker_loader_alloc(tables->linker,
                             ACPI_BUILD_TABLE_FILE, tables_blob,
                             64, false /* high memory */);

    /* DSDT is pointed to by FADT */
    dsdt = tables_blob->len;
    build_dsdt(tables_blob, tables->linker, vms);

    /* FADT MADT GTDT MCFG SPCR pointed to by RSDT */
    acpi_add_table(table_offsets, tables_blob);
    build_fadt_rev5(tables_blob, tables->linker, vms, dsdt);

    acpi_add_table(table_offsets, tables_blob);
    build_madt(tables_blob, tables->linker, vms);

    acpi_add_table(table_offsets, tables_blob);
    build_gtdt(tables_blob, tables->linker, vms);

    acpi_add_table(table_offsets, tables_blob);
    {
        AcpiMcfgInfo mcfg = {
           .base = vms->memmap[VIRT_ECAM_ID(vms->highmem_ecam)].base,
           .size = vms->memmap[VIRT_ECAM_ID(vms->highmem_ecam)].size,
        };
        build_mcfg(tables_blob, tables->linker, &mcfg, vms->oem_id,
                   vms->oem_table_id);
    }

    acpi_add_table(table_offsets, tables_blob);
    build_spcr(tables_blob, tables->linker, vms);

    if (vms->ras) {
        build_ghes_error_table(tables->hardware_errors, tables->linker);
        acpi_add_table(table_offsets, tables_blob);
        acpi_build_hest(tables_blob, tables->linker, vms->oem_id,
                        vms->oem_table_id);
    }

    if (ms->numa_state->num_nodes > 0) {
        acpi_add_table(table_offsets, tables_blob);
        build_srat(tables_blob, tables->linker, vms);
        if (ms->numa_state->have_numa_distance) {
            acpi_add_table(table_offsets, tables_blob);
            build_slit(tables_blob, tables->linker, ms, vms->oem_id,
                       vms->oem_table_id);
        }
    }

    if (ms->nvdimms_state->is_enabled) {
        nvdimm_build_acpi(table_offsets, tables_blob, tables->linker,
                          ms->nvdimms_state, ms->ram_slots, vms->oem_id,
                          vms->oem_table_id);
    }

    if (its_class_name() && !vmc->no_its) {
        acpi_add_table(table_offsets, tables_blob);
        build_iort(tables_blob, tables->linker, vms);
    }

#ifdef CONFIG_TPM
    if (tpm_get_version(tpm_find()) == TPM_VERSION_2_0) {
        acpi_add_table(table_offsets, tables_blob);
        build_tpm2(tables_blob, tables->linker, tables->tcpalog, vms->oem_id,
                   vms->oem_table_id);
    }
#endif

    /* XSDT is pointed to by RSDP */
    xsdt = tables_blob->len;
    build_xsdt(tables_blob, tables->linker, table_offsets, vms->oem_id,
               vms->oem_table_id);

    /* RSDP is in FSEG memory, so allocate it separately */
    {
        AcpiRsdpData rsdp_data = {
            .revision = 2,
            .oem_id = vms->oem_id,
            .xsdt_tbl_offset = &xsdt,
            .rsdt_tbl_offset = NULL,
        };
        build_rsdp(tables->rsdp, tables->linker, &rsdp_data);
    }

    /*
     * The blob is aligned to 128KiB (ACPI_BUILD_TABLE_SIZE); warn once the
     * tables grow past half of that (64KiB) so the alignment can be enlarged
     * before cross-version migration breaks.
     */
    if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) {
        warn_report("ACPI table size %u exceeds %d bytes,"
                    " migration may not work",
                    tables_blob->len, ACPI_BUILD_TABLE_SIZE / 2);
        error_printf("Try removing CPUs, NUMA nodes, memory slots"
                     " or PCI bridges.");
    }
    acpi_align_size(tables_blob, ACPI_BUILD_TABLE_SIZE);

    /* Cleanup memory that's no longer used. */
    g_array_free(table_offsets, true);
}

static void acpi_ram_update(MemoryRegion *mr, GArray *data)
{
    uint32_t size = acpi_data_len(data);

    /* Make sure RAM size is correct - in case it got changed
     * e.g. by migration */
    memory_region_ram_resize(mr, size, &error_abort);

    memcpy(memory_region_get_ram_ptr(mr), data->data, size);
    memory_region_set_dirty(mr, 0, size);
}

static void virt_acpi_build_update(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    AcpiBuildTables tables;

    /* No state to update or already patched? Nothing to do. */
    if (!build_state || build_state->patched) {
        return;
    }
    build_state->patched = true;

    acpi_build_tables_init(&tables);

    virt_acpi_build(VIRT_MACHINE(qdev_get_machine()), &tables);

    acpi_ram_update(build_state->table_mr, tables.table_data);
    acpi_ram_update(build_state->rsdp_mr, tables.rsdp);
    acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob);

    acpi_build_tables_cleanup(&tables, true);
}

static void virt_acpi_build_reset(void *build_opaque)
{
    AcpiBuildState *build_state = build_opaque;
    build_state->patched = false;
}

static const VMStateDescription vmstate_virt_acpi_build = {
    .name = "virt_acpi_build",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(patched, AcpiBuildState),
        VMSTATE_END_OF_LIST()
    },
};

*vms
)
938 AcpiBuildTables tables
;
939 AcpiBuildState
*build_state
;
940 AcpiGedState
*acpi_ged_state
;
943 trace_virt_acpi_setup();
947 if (!virt_is_acpi_enabled(vms
)) {
948 trace_virt_acpi_setup();
952 build_state
= g_malloc0(sizeof *build_state
);
954 acpi_build_tables_init(&tables
);
955 virt_acpi_build(vms
, &tables
);
957 /* Now expose it all to Guest */
958 build_state
->table_mr
= acpi_add_rom_blob(virt_acpi_build_update
,
959 build_state
, tables
.table_data
,
960 ACPI_BUILD_TABLE_FILE
);
961 assert(build_state
->table_mr
!= NULL
);
963 build_state
->linker_mr
= acpi_add_rom_blob(virt_acpi_build_update
,
965 tables
.linker
->cmd_blob
,
966 ACPI_BUILD_LOADER_FILE
);
968 fw_cfg_add_file(vms
->fw_cfg
, ACPI_BUILD_TPMLOG_FILE
, tables
.tcpalog
->data
,
969 acpi_data_len(tables
.tcpalog
));
972 assert(vms
->acpi_dev
);
973 acpi_ged_state
= ACPI_GED(vms
->acpi_dev
);
974 acpi_ghes_add_fw_cfg(&acpi_ged_state
->ghes_state
,
975 vms
->fw_cfg
, tables
.hardware_errors
);
978 build_state
->rsdp_mr
= acpi_add_rom_blob(virt_acpi_build_update
,
979 build_state
, tables
.rsdp
,
980 ACPI_BUILD_RSDP_FILE
);
982 qemu_register_reset(virt_acpi_build_reset
, build_state
);
983 virt_acpi_build_reset(build_state
);
984 vmstate_register(NULL
, 0, &vmstate_virt_acpi_build
, build_state
);
986 /* Cleanup tables but don't free the memory: we track it
989 acpi_build_tables_cleanup(&tables
, false);