/*
 * ARM mach-virt emulation
 *
 * Copyright (c) 2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Emulate a virtual board which works by passing Linux all the information
 * it needs about what devices are present via the device tree.
 * There are some restrictions about what we can do here:
 *  + we can only present devices whose Linux drivers will work based
 *    purely on the device tree with no platform data at all
 *  + we want to present a very stripped-down minimalist platform,
 *    both because this reduces the security attack surface from the guest
 *    and also because it reduces our exposure to being broken when
 *    the kernel updates its device tree bindings and requires further
 *    information in a device binding that we aren't providing.
 * This is essentially the same approach kvmtool uses.
 */

#include "qemu/osdep.h"
#include "qemu/datadir.h"
#include "qemu/units.h"
#include "qemu/option.h"
#include "monitor/qdev.h"
#include "hw/sysbus.h"
#include "hw/arm/boot.h"
#include "hw/arm/primecell.h"
#include "hw/arm/virt.h"
#include "hw/block/flash.h"
#include "hw/vfio/vfio-calxeda-xgmac.h"
#include "hw/vfio/vfio-amd-xgbe.h"
#include "hw/display/ramfb.h"
#include "sysemu/device_tree.h"
#include "sysemu/numa.h"
#include "sysemu/runstate.h"
#include "sysemu/tpm.h"
#include "sysemu/tcg.h"
#include "sysemu/kvm.h"
#include "sysemu/hvf.h"
#include "sysemu/qtest.h"
#include "hw/loader.h"
#include "qapi/error.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/pci-host/gpex.h"
#include "hw/virtio/virtio-pci.h"
#include "hw/core/sysbus-fdt.h"
#include "hw/platform-bus.h"
#include "hw/qdev-properties.h"
#include "hw/arm/fdt.h"
#include "hw/intc/arm_gic.h"
#include "hw/intc/arm_gicv3_common.h"
#include "hw/firmware/smbios.h"
#include "qapi/visitor.h"
#include "qapi/qapi-visit-common.h"
#include "standard-headers/linux/input.h"
#include "hw/arm/smmuv3.h"
#include "hw/acpi/acpi.h"
#include "target/arm/internals.h"
#include "hw/mem/memory-device.h"
#include "hw/mem/pc-dimm.h"
#include "hw/mem/nvdimm.h"
#include "hw/acpi/generic_event_device.h"
#include "hw/virtio/virtio-mem-pci.h"
#include "hw/virtio/virtio-iommu.h"
#include "hw/char/pl011.h"
#include "qemu/guest-random.h"

#define DEFINE_VIRT_MACHINE_LATEST(major, minor, latest) \
    static void virt_##major##_##minor##_class_init(ObjectClass *oc, \
                                                    void *data) \
    { \
        MachineClass *mc = MACHINE_CLASS(oc); \
        virt_machine_##major##_##minor##_options(mc); \
        mc->desc = "QEMU " # major "." # minor " ARM Virtual Machine"; \
        if (latest) { \
            mc->alias = "virt"; \
        } \
    } \
    static const TypeInfo machvirt_##major##_##minor##_info = { \
        .name = MACHINE_TYPE_NAME("virt-" # major "." # minor), \
        .parent = TYPE_VIRT_MACHINE, \
        .class_init = virt_##major##_##minor##_class_init, \
    }; \
    static void machvirt_machine_##major##_##minor##_init(void) \
    { \
        type_register_static(&machvirt_##major##_##minor##_info); \
    } \
    type_init(machvirt_machine_##major##_##minor##_init);

#define DEFINE_VIRT_MACHINE_AS_LATEST(major, minor) \
    DEFINE_VIRT_MACHINE_LATEST(major, minor, true)
#define DEFINE_VIRT_MACHINE(major, minor) \
    DEFINE_VIRT_MACHINE_LATEST(major, minor, false)
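
/*
 * Illustrative usage of the macros above (hypothetical version numbers):
 *
 *     static void virt_machine_9_9_options(MachineClass *mc) { ... }
 *     DEFINE_VIRT_MACHINE_AS_LATEST(9, 9)
 *
 * would register a machine type named "virt-9.9"; because "latest" is true
 * it is also expected to be selectable under the plain "virt" alias.
 */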

/* Number of external interrupt lines to configure the GIC with */
#define NUM_IRQS 256

#define PLATFORM_BUS_NUM_IRQS 64

/* Legacy RAM limit in GB (< version 4.0) */
#define LEGACY_RAMLIMIT_GB 255
#define LEGACY_RAMLIMIT_BYTES (LEGACY_RAMLIMIT_GB * GiB)

/* Addresses and sizes of our components.
 * 0..128MB is space for a flash device so we can run bootrom code such as UEFI.
 * 128MB..256MB is used for miscellaneous device I/O.
 * 256MB..1GB is reserved for possible future PCI support (ie where the
 * PCI memory window will go if we add a PCI host controller).
 * 1GB and up is RAM (which may happily spill over into the
 * high memory region beyond 4GB).
 * This represents a compromise between how much RAM can be given to
 * a 32 bit VM and leaving space for expansion and in particular for PCI.
 * Note that devices should generally be placed at multiples of 0x10000,
 * to accommodate guests using 64K pages.
 */
static const MemMapEntry base_memmap[] = {
    /* Space up to 0x8000000 is reserved for a boot ROM */
    [VIRT_FLASH] =              {          0, 0x08000000 },
    [VIRT_CPUPERIPHS] =         { 0x08000000, 0x00020000 },
    /* GIC distributor and CPU interfaces sit inside the CPU peripheral space */
    [VIRT_GIC_DIST] =           { 0x08000000, 0x00010000 },
    [VIRT_GIC_CPU] =            { 0x08010000, 0x00010000 },
    [VIRT_GIC_V2M] =            { 0x08020000, 0x00001000 },
    [VIRT_GIC_HYP] =            { 0x08030000, 0x00010000 },
    [VIRT_GIC_VCPU] =           { 0x08040000, 0x00010000 },
    /* The space in between here is reserved for GICv3 CPU/vCPU/HYP */
    [VIRT_GIC_ITS] =            { 0x08080000, 0x00020000 },
    /* This redistributor space allows up to 2*64kB*123 CPUs */
    [VIRT_GIC_REDIST] =         { 0x080A0000, 0x00F60000 },
    [VIRT_UART] =               { 0x09000000, 0x00001000 },
    [VIRT_RTC] =                { 0x09010000, 0x00001000 },
    [VIRT_FW_CFG] =             { 0x09020000, 0x00000018 },
    [VIRT_GPIO] =               { 0x09030000, 0x00001000 },
    [VIRT_SECURE_UART] =        { 0x09040000, 0x00001000 },
    [VIRT_SMMU] =               { 0x09050000, 0x00020000 },
    [VIRT_PCDIMM_ACPI] =        { 0x09070000, MEMORY_HOTPLUG_IO_LEN },
    [VIRT_ACPI_GED] =           { 0x09080000, ACPI_GED_EVT_SEL_LEN },
    [VIRT_NVDIMM_ACPI] =        { 0x09090000, NVDIMM_ACPI_IO_LEN },
    [VIRT_PVTIME] =             { 0x090a0000, 0x00010000 },
    [VIRT_SECURE_GPIO] =        { 0x090b0000, 0x00001000 },
    [VIRT_MMIO] =               { 0x0a000000, 0x00000200 },
    /* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */
    [VIRT_PLATFORM_BUS] =       { 0x0c000000, 0x02000000 },
    [VIRT_SECURE_MEM] =         { 0x0e000000, 0x01000000 },
    [VIRT_PCIE_MMIO] =          { 0x10000000, 0x2eff0000 },
    [VIRT_PCIE_PIO] =           { 0x3eff0000, 0x00010000 },
    [VIRT_PCIE_ECAM] =          { 0x3f000000, 0x01000000 },
    /* Actual RAM size depends on initial RAM and device memory settings */
    [VIRT_MEM] =                { GiB, LEGACY_RAMLIMIT_BYTES },
};

/*
 * Highmem IO Regions: This memory map is floating, located after the RAM.
 * Each MemMapEntry base (GPA) will be dynamically computed, depending on the
 * top of the RAM, so that its base get the same alignment as the size,
 * ie. a 512GiB entry will be aligned on a 512GiB boundary. If there is
 * less than 256GiB of RAM, the floating area starts at the 256GiB mark.
 * Note the extended_memmap is sized so that it eventually also includes the
 * base_memmap entries (VIRT_HIGH_GIC_REDIST2 index is greater than the last
 * index of base_memmap).
 *
 * The memory map for these Highmem IO Regions can be in legacy or compact
 * layout, depending on 'compact-highmem' property. With legacy layout, the
 * PA space for one specific region is always reserved, even if the region
 * has been disabled or doesn't fit into the PA space. However, the PA space
 * for the region won't be reserved in these circumstances with compact layout.
 */
static MemMapEntry extended_memmap[] = {
    /* Additional 64 MB redist region (can contain up to 512 redistributors) */
    [VIRT_HIGH_GIC_REDIST2] =   { 0x0, 64 * MiB },
    [VIRT_HIGH_PCIE_ECAM] =     { 0x0, 256 * MiB },
    /* Second PCIe window */
    [VIRT_HIGH_PCIE_MMIO] =     { 0x0, 512 * GiB },
};
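
/*
 * Worked example of the floating placement described above: if the high IO
 * base starts at the 256GiB mark, VIRT_HIGH_GIC_REDIST2 (64MiB) lands at
 * 256GiB, VIRT_HIGH_PCIE_ECAM (256MiB) is rounded up to 256GiB + 256MiB,
 * and VIRT_HIGH_PCIE_MMIO (512GiB) is rounded up to the next 512GiB
 * boundary, i.e. 512GiB (see virt_set_high_memmap() further down).
 */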

static const int a15irqmap[] = {
    [VIRT_UART] = 1,
    [VIRT_RTC] = 2,
    [VIRT_PCIE] = 3, /* ... to 6 */
    [VIRT_GPIO] = 7,
    [VIRT_SECURE_UART] = 8,
    [VIRT_ACPI_GED] = 9,
    [VIRT_MMIO] = 16, /* ...to 16 + NUM_VIRTIO_TRANSPORTS - 1 */
    [VIRT_GIC_V2M] = 48, /* ...to 48 + NUM_GICV2M_SPIS - 1 */
    [VIRT_SMMU] = 74,    /* ...to 74 + NUM_SMMU_IRQS - 1 */
    [VIRT_PLATFORM_BUS] = 112, /* ...to 112 + PLATFORM_BUS_NUM_IRQS -1 */
};

static const char *valid_cpus[] = {
    ARM_CPU_TYPE_NAME("cortex-a7"),
    ARM_CPU_TYPE_NAME("cortex-a15"),
    ARM_CPU_TYPE_NAME("cortex-a35"),
    ARM_CPU_TYPE_NAME("cortex-a53"),
    ARM_CPU_TYPE_NAME("cortex-a55"),
    ARM_CPU_TYPE_NAME("cortex-a57"),
    ARM_CPU_TYPE_NAME("cortex-a72"),
    ARM_CPU_TYPE_NAME("cortex-a76"),
    ARM_CPU_TYPE_NAME("a64fx"),
    ARM_CPU_TYPE_NAME("neoverse-n1"),
    ARM_CPU_TYPE_NAME("host"),
    ARM_CPU_TYPE_NAME("max"),
};

static bool cpu_type_valid(const char *cpu)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(valid_cpus); i++) {
        if (strcmp(cpu, valid_cpus[i]) == 0) {
            return true;
        }
    }
    return false;
}

static void create_randomness(MachineState *ms, const char *node)
{
    struct {
        uint64_t kaslr;
        uint8_t rng[32];
    } seed;

    if (qemu_guest_getrandom(&seed, sizeof(seed), NULL)) {
        return;
    }
    qemu_fdt_setprop_u64(ms->fdt, node, "kaslr-seed", seed.kaslr);
    qemu_fdt_setprop(ms->fdt, node, "rng-seed", seed.rng, sizeof(seed.rng));
}

static void create_fdt(VirtMachineState *vms)
{
    MachineState *ms = MACHINE(vms);
    int nb_numa_nodes = ms->numa_state->num_nodes;
    void *fdt = create_device_tree(&vms->fdt_size);

    if (!fdt) {
        error_report("create_device_tree() failed");
        exit(1);
    }

    ms->fdt = fdt;

    /* Header */
    qemu_fdt_setprop_string(fdt, "/", "compatible", "linux,dummy-virt");
    qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x2);
    qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x2);
    qemu_fdt_setprop_string(fdt, "/", "model", "linux,dummy-virt");

    /* /chosen must exist for load_dtb to fill in necessary properties later */
    qemu_fdt_add_subnode(fdt, "/chosen");
    if (vms->dtb_randomness) {
        create_randomness(ms, "/chosen");
    }

    if (vms->secure) {
        qemu_fdt_add_subnode(fdt, "/secure-chosen");
        if (vms->dtb_randomness) {
            create_randomness(ms, "/secure-chosen");
        }
    }

    /* Clock node, for the benefit of the UART. The kernel device tree
     * binding documentation claims the PL011 node clock properties are
     * optional but in practice if you omit them the kernel refuses to
     * probe for the device.
     */
    vms->clock_phandle = qemu_fdt_alloc_phandle(fdt);
    qemu_fdt_add_subnode(fdt, "/apb-pclk");
    qemu_fdt_setprop_string(fdt, "/apb-pclk", "compatible", "fixed-clock");
    qemu_fdt_setprop_cell(fdt, "/apb-pclk", "#clock-cells", 0x0);
    qemu_fdt_setprop_cell(fdt, "/apb-pclk", "clock-frequency", 24000000);
    qemu_fdt_setprop_string(fdt, "/apb-pclk", "clock-output-names",
                            "clk24mhz");
    qemu_fdt_setprop_cell(fdt, "/apb-pclk", "phandle", vms->clock_phandle);

    if (nb_numa_nodes > 0 && ms->numa_state->have_numa_distance) {
        int size = nb_numa_nodes * nb_numa_nodes * 3 * sizeof(uint32_t);
        uint32_t *matrix = g_malloc0(size);
        int idx, i, j;

        for (i = 0; i < nb_numa_nodes; i++) {
            for (j = 0; j < nb_numa_nodes; j++) {
                idx = (i * nb_numa_nodes + j) * 3;
                matrix[idx + 0] = cpu_to_be32(i);
                matrix[idx + 1] = cpu_to_be32(j);
                matrix[idx + 2] =
                    cpu_to_be32(ms->numa_state->nodes[i].distance[j]);
            }
        }

        qemu_fdt_add_subnode(fdt, "/distance-map");
        qemu_fdt_setprop_string(fdt, "/distance-map", "compatible",
                                "numa-distance-map-v1");
        qemu_fdt_setprop(fdt, "/distance-map", "distance-matrix",
                         matrix, size);
        g_free(matrix);
    }
}

static void fdt_add_timer_nodes(const VirtMachineState *vms)
{
    /* On real hardware these interrupts are level-triggered.
     * On KVM they were edge-triggered before host kernel version 4.4,
     * and level-triggered afterwards.
     * On emulated QEMU they are level-triggered.
     *
     * Getting the DTB info about them wrong is awkward for some
     * guest kernels:
     *  pre-4.8 ignore the DT and leave the interrupt configured
     *   with whatever the GIC reset value (or the bootloader) left it at
     *  4.8 before rc6 honour the incorrect data by programming it back
     *   into the GIC, causing problems
     *  4.8rc6 and later ignore the DT and always write "level triggered"
     *   into the GIC
     *
     * For backwards-compatibility, virt-2.8 and earlier will continue
     * to say these are edge-triggered, but later machines will report
     * the correct information.
     */
    ARMCPU *armcpu;
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    uint32_t irqflags = GIC_FDT_IRQ_FLAGS_LEVEL_HI;
    MachineState *ms = MACHINE(vms);

    if (vmc->claim_edge_triggered_timers) {
        irqflags = GIC_FDT_IRQ_FLAGS_EDGE_LO_HI;
    }

    if (vms->gic_version == VIRT_GIC_VERSION_2) {
        irqflags = deposit32(irqflags, GIC_FDT_IRQ_PPI_CPU_START,
                             GIC_FDT_IRQ_PPI_CPU_WIDTH,
                             (1 << MACHINE(vms)->smp.cpus) - 1);
    }

    qemu_fdt_add_subnode(ms->fdt, "/timer");

    armcpu = ARM_CPU(qemu_get_cpu(0));
    if (arm_feature(&armcpu->env, ARM_FEATURE_V8)) {
        const char compat[] = "arm,armv8-timer\0arm,armv7-timer";
        qemu_fdt_setprop(ms->fdt, "/timer", "compatible",
                         compat, sizeof(compat));
    } else {
        qemu_fdt_setprop_string(ms->fdt, "/timer", "compatible",
                                "arm,armv7-timer");
    }
    qemu_fdt_setprop(ms->fdt, "/timer", "always-on", NULL, 0);
    qemu_fdt_setprop_cells(ms->fdt, "/timer", "interrupts",
                           GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_S_EL1_IRQ, irqflags,
                           GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_NS_EL1_IRQ, irqflags,
                           GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_VIRT_IRQ, irqflags,
                           GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_NS_EL2_IRQ, irqflags);
}

static void fdt_add_cpu_nodes(const VirtMachineState *vms)
{
    int cpu;
    int addr_cells = 1;
    const MachineState *ms = MACHINE(vms);
    const VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    int smp_cpus = ms->smp.cpus;

    /*
     * See Linux Documentation/devicetree/bindings/arm/cpus.yaml
     * On ARM v8 64-bit systems value should be set to 2,
     * that corresponds to the MPIDR_EL1 register size.
     * If MPIDR_EL1[63:32] value is equal to 0 on all CPUs
     * in the system, #address-cells can be set to 1, since
     * MPIDR_EL1[63:32] bits are not used for CPUs
     * identification.
     *
     * Here we actually don't know whether our system is 32- or 64-bit one.
     * The simplest way to go is to examine affinity IDs of all our CPUs. If
     * at least one of them has Aff3 populated, we set #address-cells to 2.
     */
    for (cpu = 0; cpu < smp_cpus; cpu++) {
        ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));

        if (armcpu->mp_affinity & ARM_AFF3_MASK) {
            addr_cells = 2;
            break;
        }
    }

    qemu_fdt_add_subnode(ms->fdt, "/cpus");
    qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#address-cells", addr_cells);
    qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0);

    for (cpu = smp_cpus - 1; cpu >= 0; cpu--) {
        char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
        ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
        CPUState *cs = CPU(armcpu);

        qemu_fdt_add_subnode(ms->fdt, nodename);
        qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "cpu");
        qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
                                armcpu->dtb_compatible);

        if (vms->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED && smp_cpus > 1) {
            qemu_fdt_setprop_string(ms->fdt, nodename,
                                    "enable-method", "psci");
        }

        if (addr_cells == 2) {
            qemu_fdt_setprop_u64(ms->fdt, nodename, "reg",
                                 armcpu->mp_affinity);
        } else {
            qemu_fdt_setprop_cell(ms->fdt, nodename, "reg",
                                  armcpu->mp_affinity);
        }

        if (ms->possible_cpus->cpus[cs->cpu_index].props.has_node_id) {
            qemu_fdt_setprop_cell(ms->fdt, nodename, "numa-node-id",
                ms->possible_cpus->cpus[cs->cpu_index].props.node_id);
        }

        if (!vmc->no_cpu_topology) {
            qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle",
                                  qemu_fdt_alloc_phandle(ms->fdt));
        }

        g_free(nodename);
    }

    if (!vmc->no_cpu_topology) {
        /*
         * Add vCPU topology description through fdt node cpu-map.
         *
         * See Linux Documentation/devicetree/bindings/cpu/cpu-topology.txt
         * In a SMP system, the hierarchy of CPUs can be defined through
         * four entities that are used to describe the layout of CPUs in
         * the system: socket/cluster/core/thread.
         *
         * A socket node represents the boundary of system physical package
         * and its child nodes must be one or more cluster nodes. A system
         * can contain several layers of clustering within a single physical
         * package and cluster nodes can be contained in parent cluster nodes.
         *
         * Note: currently we only support one layer of clustering within
         * each physical package.
         */
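        /*
         * Worked example (hypothetical -smp settings): with
         * -smp 8,sockets=2,clusters=1,cores=2,threads=2 the computation
         * below places cpu 5 at
         * /cpus/cpu-map/socket1/cluster0/core0/thread1.
         */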
        qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map");

        for (cpu = smp_cpus - 1; cpu >= 0; cpu--) {
            char *cpu_path = g_strdup_printf("/cpus/cpu@%d", cpu);
            char *map_path;

            if (ms->smp.threads > 1) {
                map_path = g_strdup_printf(
                    "/cpus/cpu-map/socket%d/cluster%d/core%d/thread%d",
                    cpu / (ms->smp.clusters * ms->smp.cores * ms->smp.threads),
                    (cpu / (ms->smp.cores * ms->smp.threads)) % ms->smp.clusters,
                    (cpu / ms->smp.threads) % ms->smp.cores,
                    cpu % ms->smp.threads);
            } else {
                map_path = g_strdup_printf(
                    "/cpus/cpu-map/socket%d/cluster%d/core%d",
                    cpu / (ms->smp.clusters * ms->smp.cores),
                    (cpu / ms->smp.cores) % ms->smp.clusters,
                    cpu % ms->smp.cores);
            }
            qemu_fdt_add_path(ms->fdt, map_path);
            qemu_fdt_setprop_phandle(ms->fdt, map_path, "cpu", cpu_path);

            g_free(map_path);
            g_free(cpu_path);
        }
    }
}

static void fdt_add_its_gic_node(VirtMachineState *vms)
{
    char *nodename;
    MachineState *ms = MACHINE(vms);

    vms->msi_phandle = qemu_fdt_alloc_phandle(ms->fdt);
    nodename = g_strdup_printf("/intc/its@%" PRIx64,
                               vms->memmap[VIRT_GIC_ITS].base);
    qemu_fdt_add_subnode(ms->fdt, nodename);
    qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
                            "arm,gic-v3-its");
    qemu_fdt_setprop(ms->fdt, nodename, "msi-controller", NULL, 0);
    qemu_fdt_setprop_cell(ms->fdt, nodename, "#msi-cells", 1);
    qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                 2, vms->memmap[VIRT_GIC_ITS].base,
                                 2, vms->memmap[VIRT_GIC_ITS].size);
    qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", vms->msi_phandle);
    g_free(nodename);
}

static void fdt_add_v2m_gic_node(VirtMachineState *vms)
{
    MachineState *ms = MACHINE(vms);
    char *nodename;

    nodename = g_strdup_printf("/intc/v2m@%" PRIx64,
                               vms->memmap[VIRT_GIC_V2M].base);
    vms->msi_phandle = qemu_fdt_alloc_phandle(ms->fdt);
    qemu_fdt_add_subnode(ms->fdt, nodename);
    qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
                            "arm,gic-v2m-frame");
    qemu_fdt_setprop(ms->fdt, nodename, "msi-controller", NULL, 0);
    qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                 2, vms->memmap[VIRT_GIC_V2M].base,
                                 2, vms->memmap[VIRT_GIC_V2M].size);
    qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", vms->msi_phandle);
    g_free(nodename);
}

static void fdt_add_gic_node(VirtMachineState *vms)
{
    MachineState *ms = MACHINE(vms);
    char *nodename;

    vms->gic_phandle = qemu_fdt_alloc_phandle(ms->fdt);
    qemu_fdt_setprop_cell(ms->fdt, "/", "interrupt-parent", vms->gic_phandle);

    nodename = g_strdup_printf("/intc@%" PRIx64,
                               vms->memmap[VIRT_GIC_DIST].base);
    qemu_fdt_add_subnode(ms->fdt, nodename);
    qemu_fdt_setprop_cell(ms->fdt, nodename, "#interrupt-cells", 3);
    qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0);
    qemu_fdt_setprop_cell(ms->fdt, nodename, "#address-cells", 0x2);
    qemu_fdt_setprop_cell(ms->fdt, nodename, "#size-cells", 0x2);
    qemu_fdt_setprop(ms->fdt, nodename, "ranges", NULL, 0);
    if (vms->gic_version != VIRT_GIC_VERSION_2) {
        int nb_redist_regions = virt_gicv3_redist_region_count(vms);

        qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
                                "arm,gic-v3");
        qemu_fdt_setprop_cell(ms->fdt, nodename,
                              "#redistributor-regions", nb_redist_regions);

        if (nb_redist_regions == 1) {
            qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                         2, vms->memmap[VIRT_GIC_DIST].base,
                                         2, vms->memmap[VIRT_GIC_DIST].size,
                                         2, vms->memmap[VIRT_GIC_REDIST].base,
                                         2, vms->memmap[VIRT_GIC_REDIST].size);
        } else {
            qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                 2, vms->memmap[VIRT_GIC_DIST].base,
                                 2, vms->memmap[VIRT_GIC_DIST].size,
                                 2, vms->memmap[VIRT_GIC_REDIST].base,
                                 2, vms->memmap[VIRT_GIC_REDIST].size,
                                 2, vms->memmap[VIRT_HIGH_GIC_REDIST2].base,
                                 2, vms->memmap[VIRT_HIGH_GIC_REDIST2].size);
        }

        if (vms->virt) {
            qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
                                   GIC_FDT_IRQ_TYPE_PPI, ARCH_GIC_MAINT_IRQ,
                                   GIC_FDT_IRQ_FLAGS_LEVEL_HI);
        }
    } else {
        /* 'cortex-a15-gic' means 'GIC v2' */
        qemu_fdt_setprop_string(ms->fdt, nodename, "compatible",
                                "arm,cortex-a15-gic");
        if (!vms->virt) {
            qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                         2, vms->memmap[VIRT_GIC_DIST].base,
                                         2, vms->memmap[VIRT_GIC_DIST].size,
                                         2, vms->memmap[VIRT_GIC_CPU].base,
                                         2, vms->memmap[VIRT_GIC_CPU].size);
        } else {
            qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                         2, vms->memmap[VIRT_GIC_DIST].base,
                                         2, vms->memmap[VIRT_GIC_DIST].size,
                                         2, vms->memmap[VIRT_GIC_CPU].base,
                                         2, vms->memmap[VIRT_GIC_CPU].size,
                                         2, vms->memmap[VIRT_GIC_HYP].base,
                                         2, vms->memmap[VIRT_GIC_HYP].size,
                                         2, vms->memmap[VIRT_GIC_VCPU].base,
                                         2, vms->memmap[VIRT_GIC_VCPU].size);
            qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
                                   GIC_FDT_IRQ_TYPE_PPI, ARCH_GIC_MAINT_IRQ,
                                   GIC_FDT_IRQ_FLAGS_LEVEL_HI);
        }
    }

    qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", vms->gic_phandle);
    g_free(nodename);
}

static void fdt_add_pmu_nodes(const VirtMachineState *vms)
{
    ARMCPU *armcpu = ARM_CPU(first_cpu);
    uint32_t irqflags = GIC_FDT_IRQ_FLAGS_LEVEL_HI;
    MachineState *ms = MACHINE(vms);

    if (!arm_feature(&armcpu->env, ARM_FEATURE_PMU)) {
        assert(!object_property_get_bool(OBJECT(armcpu), "pmu", NULL));
        return;
    }

    if (vms->gic_version == VIRT_GIC_VERSION_2) {
        irqflags = deposit32(irqflags, GIC_FDT_IRQ_PPI_CPU_START,
                             GIC_FDT_IRQ_PPI_CPU_WIDTH,
                             (1 << MACHINE(vms)->smp.cpus) - 1);
    }

    qemu_fdt_add_subnode(ms->fdt, "/pmu");
    if (arm_feature(&armcpu->env, ARM_FEATURE_V8)) {
        const char compat[] = "arm,armv8-pmuv3";
        qemu_fdt_setprop(ms->fdt, "/pmu", "compatible",
                         compat, sizeof(compat));
        qemu_fdt_setprop_cells(ms->fdt, "/pmu", "interrupts",
                               GIC_FDT_IRQ_TYPE_PPI, VIRTUAL_PMU_IRQ, irqflags);
    }
}

static inline DeviceState *create_acpi_ged(VirtMachineState *vms)
{
    DeviceState *dev;
    MachineState *ms = MACHINE(vms);
    int irq = vms->irqmap[VIRT_ACPI_GED];
    uint32_t event = ACPI_GED_PWR_DOWN_EVT;

    if (ms->ram_slots) {
        event |= ACPI_GED_MEM_HOTPLUG_EVT;
    }

    if (ms->nvdimms_state->is_enabled) {
        event |= ACPI_GED_NVDIMM_HOTPLUG_EVT;
    }

    dev = qdev_new(TYPE_ACPI_GED);
    qdev_prop_set_uint32(dev, "ged-event", event);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vms->memmap[VIRT_ACPI_GED].base);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, vms->memmap[VIRT_PCDIMM_ACPI].base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(vms->gic, irq));

    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    return dev;
}

static void create_its(VirtMachineState *vms)
{
    const char *itsclass = its_class_name();
    DeviceState *dev;

    if (!strcmp(itsclass, "arm-gicv3-its")) {
        if (!vms->tcg_its) {
            itsclass = NULL;
        }
    }

    if (!itsclass) {
        /* Do nothing if not supported */
        return;
    }

    dev = qdev_new(itsclass);

    object_property_set_link(OBJECT(dev), "parent-gicv3", OBJECT(vms->gic),
                             &error_abort);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vms->memmap[VIRT_GIC_ITS].base);

    fdt_add_its_gic_node(vms);
    vms->msi_controller = VIRT_MSI_CTRL_ITS;
}

static void create_v2m(VirtMachineState *vms)
{
    int i;
    int irq = vms->irqmap[VIRT_GIC_V2M];
    DeviceState *dev;

    dev = qdev_new("arm-gicv2m");
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vms->memmap[VIRT_GIC_V2M].base);
    qdev_prop_set_uint32(dev, "base-spi", irq);
    qdev_prop_set_uint32(dev, "num-spi", NUM_GICV2M_SPIS);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    for (i = 0; i < NUM_GICV2M_SPIS; i++) {
        sysbus_connect_irq(SYS_BUS_DEVICE(dev), i,
                           qdev_get_gpio_in(vms->gic, irq + i));
    }

    fdt_add_v2m_gic_node(vms);
    vms->msi_controller = VIRT_MSI_CTRL_GICV2M;
}

static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
{
    MachineState *ms = MACHINE(vms);
    /* We create a standalone GIC */
    SysBusDevice *gicbusdev;
    const char *gictype;
    int i;
    unsigned int smp_cpus = ms->smp.cpus;
    uint32_t nb_redist_regions = 0;
    int revision;

    if (vms->gic_version == VIRT_GIC_VERSION_2) {
        gictype = gic_class_name();
    } else {
        gictype = gicv3_class_name();
    }

    switch (vms->gic_version) {
    case VIRT_GIC_VERSION_2:
        revision = 2;
        break;
    case VIRT_GIC_VERSION_3:
        revision = 3;
        break;
    case VIRT_GIC_VERSION_4:
        revision = 4;
        break;
    default:
        g_assert_not_reached();
    }
    vms->gic = qdev_new(gictype);
    qdev_prop_set_uint32(vms->gic, "revision", revision);
    qdev_prop_set_uint32(vms->gic, "num-cpu", smp_cpus);
    /* Note that the num-irq property counts both internal and external
     * interrupts; there are always 32 of the former (mandated by GIC spec).
     */
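    /*
     * In other words, with NUM_IRQS external SPIs the GIC ends up with
     * NUM_IRQS + 32 interrupt IDs in total; the extra 32 are the per-CPU
     * SGIs and PPIs. The per-CPU wiring further down derives its PPI input
     * numbers from the same constants
     * (ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS).
     */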
    qdev_prop_set_uint32(vms->gic, "num-irq", NUM_IRQS + 32);
    if (!kvm_irqchip_in_kernel()) {
        qdev_prop_set_bit(vms->gic, "has-security-extensions", vms->secure);
    }

    if (vms->gic_version != VIRT_GIC_VERSION_2) {
        uint32_t redist0_capacity = virt_redist_capacity(vms, VIRT_GIC_REDIST);
        uint32_t redist0_count = MIN(smp_cpus, redist0_capacity);

        nb_redist_regions = virt_gicv3_redist_region_count(vms);

        qdev_prop_set_uint32(vms->gic, "len-redist-region-count",
                             nb_redist_regions);
        qdev_prop_set_uint32(vms->gic, "redist-region-count[0]", redist0_count);

        if (!kvm_irqchip_in_kernel()) {
            if (vms->tcg_its) {
                object_property_set_link(OBJECT(vms->gic), "sysmem",
                                         OBJECT(mem), &error_fatal);
                qdev_prop_set_bit(vms->gic, "has-lpi", true);
            }
        }

        if (nb_redist_regions == 2) {
            uint32_t redist1_capacity =
                virt_redist_capacity(vms, VIRT_HIGH_GIC_REDIST2);

            qdev_prop_set_uint32(vms->gic, "redist-region-count[1]",
                MIN(smp_cpus - redist0_count, redist1_capacity));
        }
    } else {
        if (!kvm_irqchip_in_kernel()) {
            qdev_prop_set_bit(vms->gic, "has-virtualization-extensions",
                              vms->virt);
        }
    }
    gicbusdev = SYS_BUS_DEVICE(vms->gic);
    sysbus_realize_and_unref(gicbusdev, &error_fatal);
    sysbus_mmio_map(gicbusdev, 0, vms->memmap[VIRT_GIC_DIST].base);
    if (vms->gic_version != VIRT_GIC_VERSION_2) {
        sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_REDIST].base);
        if (nb_redist_regions == 2) {
            sysbus_mmio_map(gicbusdev, 2,
                            vms->memmap[VIRT_HIGH_GIC_REDIST2].base);
        }
    } else {
        sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_CPU].base);
        if (vms->virt) {
            sysbus_mmio_map(gicbusdev, 2, vms->memmap[VIRT_GIC_HYP].base);
            sysbus_mmio_map(gicbusdev, 3, vms->memmap[VIRT_GIC_VCPU].base);
        }
    }

    /* Wire the outputs from each CPU's generic timer and the GICv3
     * maintenance interrupt signal to the appropriate GIC PPI inputs,
     * and the GIC's IRQ/FIQ/VIRQ/VFIQ interrupt outputs to the CPU's inputs.
     */
    for (i = 0; i < smp_cpus; i++) {
        DeviceState *cpudev = DEVICE(qemu_get_cpu(i));
        int ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS;
        int irq;
        /* Mapping from the output timer irq lines from the CPU to the
         * GIC PPI inputs we use for the virt board.
         */
        const int timer_irq[] = {
            [GTIMER_PHYS] = ARCH_TIMER_NS_EL1_IRQ,
            [GTIMER_VIRT] = ARCH_TIMER_VIRT_IRQ,
            [GTIMER_HYP]  = ARCH_TIMER_NS_EL2_IRQ,
            [GTIMER_SEC]  = ARCH_TIMER_S_EL1_IRQ,
        };

        for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) {
            qdev_connect_gpio_out(cpudev, irq,
                                  qdev_get_gpio_in(vms->gic,
                                                   ppibase + timer_irq[irq]));
        }

        if (vms->gic_version != VIRT_GIC_VERSION_2) {
            qemu_irq irq = qdev_get_gpio_in(vms->gic,
                                            ppibase + ARCH_GIC_MAINT_IRQ);
            qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt",
                                        0, irq);
        } else if (vms->virt) {
            qemu_irq irq = qdev_get_gpio_in(vms->gic,
                                            ppibase + ARCH_GIC_MAINT_IRQ);
            sysbus_connect_irq(gicbusdev, i + 4 * smp_cpus, irq);
        }

        qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0,
                                    qdev_get_gpio_in(vms->gic, ppibase
                                                     + VIRTUAL_PMU_IRQ));

        sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
        sysbus_connect_irq(gicbusdev, i + smp_cpus,
                           qdev_get_gpio_in(cpudev, ARM_CPU_FIQ));
        sysbus_connect_irq(gicbusdev, i + 2 * smp_cpus,
                           qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ));
        sysbus_connect_irq(gicbusdev, i + 3 * smp_cpus,
                           qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ));
    }

    fdt_add_gic_node(vms);

    if (vms->gic_version != VIRT_GIC_VERSION_2 && vms->its) {
        create_its(vms);
    } else if (vms->gic_version == VIRT_GIC_VERSION_2) {
        create_v2m(vms);
    }
}

static void create_uart(const VirtMachineState *vms, int uart,
                        MemoryRegion *mem, Chardev *chr)
{
    char *nodename;
    hwaddr base = vms->memmap[uart].base;
    hwaddr size = vms->memmap[uart].size;
    int irq = vms->irqmap[uart];
    const char compat[] = "arm,pl011\0arm,primecell";
    const char clocknames[] = "uartclk\0apb_pclk";
    DeviceState *dev = qdev_new(TYPE_PL011);
    SysBusDevice *s = SYS_BUS_DEVICE(dev);
    MachineState *ms = MACHINE(vms);

    qdev_prop_set_chr(dev, "chardev", chr);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    memory_region_add_subregion(mem, base,
                                sysbus_mmio_get_region(s, 0));
    sysbus_connect_irq(s, 0, qdev_get_gpio_in(vms->gic, irq));

    nodename = g_strdup_printf("/pl011@%" PRIx64, base);
    qemu_fdt_add_subnode(ms->fdt, nodename);
    /* Note that we can't use setprop_string because of the embedded NUL */
    qemu_fdt_setprop(ms->fdt, nodename, "compatible",
                     compat, sizeof(compat));
    qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                 2, base, 2, size);
    qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
                           GIC_FDT_IRQ_TYPE_SPI, irq,
                           GIC_FDT_IRQ_FLAGS_LEVEL_HI);
    qemu_fdt_setprop_cells(ms->fdt, nodename, "clocks",
                           vms->clock_phandle, vms->clock_phandle);
    qemu_fdt_setprop(ms->fdt, nodename, "clock-names",
                     clocknames, sizeof(clocknames));

    if (uart == VIRT_UART) {
        qemu_fdt_setprop_string(ms->fdt, "/chosen", "stdout-path", nodename);
    } else {
        /* Mark as not usable by the normal world */
        qemu_fdt_setprop_string(ms->fdt, nodename, "status", "disabled");
        qemu_fdt_setprop_string(ms->fdt, nodename, "secure-status", "okay");

        qemu_fdt_setprop_string(ms->fdt, "/secure-chosen", "stdout-path",
                                nodename);
    }

    g_free(nodename);
}

static void create_rtc(const VirtMachineState *vms)
{
    char *nodename;
    hwaddr base = vms->memmap[VIRT_RTC].base;
    hwaddr size = vms->memmap[VIRT_RTC].size;
    int irq = vms->irqmap[VIRT_RTC];
    const char compat[] = "arm,pl031\0arm,primecell";
    MachineState *ms = MACHINE(vms);

    sysbus_create_simple("pl031", base, qdev_get_gpio_in(vms->gic, irq));

    nodename = g_strdup_printf("/pl031@%" PRIx64, base);
    qemu_fdt_add_subnode(ms->fdt, nodename);
    qemu_fdt_setprop(ms->fdt, nodename, "compatible", compat, sizeof(compat));
    qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                 2, base, 2, size);
    qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
                           GIC_FDT_IRQ_TYPE_SPI, irq,
                           GIC_FDT_IRQ_FLAGS_LEVEL_HI);
    qemu_fdt_setprop_cell(ms->fdt, nodename, "clocks", vms->clock_phandle);
    qemu_fdt_setprop_string(ms->fdt, nodename, "clock-names", "apb_pclk");

    g_free(nodename);
}

static DeviceState *gpio_key_dev;
static void virt_powerdown_req(Notifier *n, void *opaque)
{
    VirtMachineState *s = container_of(n, VirtMachineState, powerdown_notifier);

    if (s->acpi_dev) {
        acpi_send_event(s->acpi_dev, ACPI_POWER_DOWN_STATUS);
    } else {
        /* use gpio Pin 3 for power button event */
        qemu_set_irq(qdev_get_gpio_in(gpio_key_dev, 0), 1);
    }
}

static void create_gpio_keys(char *fdt, DeviceState *pl061_dev,
                             uint32_t phandle)
{
    gpio_key_dev = sysbus_create_simple("gpio-key", -1,
                                        qdev_get_gpio_in(pl061_dev, 3));

    qemu_fdt_add_subnode(fdt, "/gpio-keys");
    qemu_fdt_setprop_string(fdt, "/gpio-keys", "compatible", "gpio-keys");

    qemu_fdt_add_subnode(fdt, "/gpio-keys/poweroff");
    qemu_fdt_setprop_string(fdt, "/gpio-keys/poweroff",
                            "label", "GPIO Key Poweroff");
    qemu_fdt_setprop_cell(fdt, "/gpio-keys/poweroff", "linux,code",
                          KEY_POWER);
    qemu_fdt_setprop_cells(fdt, "/gpio-keys/poweroff",
                           "gpios", phandle, 3, 0);
}

#define SECURE_GPIO_POWEROFF 0
#define SECURE_GPIO_RESET    1

static void create_secure_gpio_pwr(char *fdt, DeviceState *pl061_dev,
                                   uint32_t phandle)
{
    DeviceState *gpio_pwr_dev;

    /* gpio-pwr */
    gpio_pwr_dev = sysbus_create_simple("gpio-pwr", -1, NULL);

    /* connect secure pl061 to gpio-pwr */
    qdev_connect_gpio_out(pl061_dev, SECURE_GPIO_RESET,
                          qdev_get_gpio_in_named(gpio_pwr_dev, "reset", 0));
    qdev_connect_gpio_out(pl061_dev, SECURE_GPIO_POWEROFF,
                          qdev_get_gpio_in_named(gpio_pwr_dev, "shutdown", 0));

    qemu_fdt_add_subnode(fdt, "/gpio-poweroff");
    qemu_fdt_setprop_string(fdt, "/gpio-poweroff", "compatible",
                            "gpio-poweroff");
    qemu_fdt_setprop_cells(fdt, "/gpio-poweroff",
                           "gpios", phandle, SECURE_GPIO_POWEROFF, 0);
    qemu_fdt_setprop_string(fdt, "/gpio-poweroff", "status", "disabled");
    qemu_fdt_setprop_string(fdt, "/gpio-poweroff", "secure-status",
                            "okay");

    qemu_fdt_add_subnode(fdt, "/gpio-restart");
    qemu_fdt_setprop_string(fdt, "/gpio-restart", "compatible",
                            "gpio-restart");
    qemu_fdt_setprop_cells(fdt, "/gpio-restart",
                           "gpios", phandle, SECURE_GPIO_RESET, 0);
    qemu_fdt_setprop_string(fdt, "/gpio-restart", "status", "disabled");
    qemu_fdt_setprop_string(fdt, "/gpio-restart", "secure-status",
                            "okay");
}

static void create_gpio_devices(const VirtMachineState *vms, int gpio,
                                MemoryRegion *mem)
{
    char *nodename;
    DeviceState *pl061_dev;
    hwaddr base = vms->memmap[gpio].base;
    hwaddr size = vms->memmap[gpio].size;
    int irq = vms->irqmap[gpio];
    const char compat[] = "arm,pl061\0arm,primecell";
    SysBusDevice *s;
    MachineState *ms = MACHINE(vms);

    pl061_dev = qdev_new("pl061");
    /* Pull lines down to 0 if not driven by the PL061 */
    qdev_prop_set_uint32(pl061_dev, "pullups", 0);
    qdev_prop_set_uint32(pl061_dev, "pulldowns", 0xff);
    s = SYS_BUS_DEVICE(pl061_dev);
    sysbus_realize_and_unref(s, &error_fatal);
    memory_region_add_subregion(mem, base, sysbus_mmio_get_region(s, 0));
    sysbus_connect_irq(s, 0, qdev_get_gpio_in(vms->gic, irq));

    uint32_t phandle = qemu_fdt_alloc_phandle(ms->fdt);
    nodename = g_strdup_printf("/pl061@%" PRIx64, base);
    qemu_fdt_add_subnode(ms->fdt, nodename);
    qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                 2, base, 2, size);
    qemu_fdt_setprop(ms->fdt, nodename, "compatible", compat, sizeof(compat));
    qemu_fdt_setprop_cell(ms->fdt, nodename, "#gpio-cells", 2);
    qemu_fdt_setprop(ms->fdt, nodename, "gpio-controller", NULL, 0);
    qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
                           GIC_FDT_IRQ_TYPE_SPI, irq,
                           GIC_FDT_IRQ_FLAGS_LEVEL_HI);
    qemu_fdt_setprop_cell(ms->fdt, nodename, "clocks", vms->clock_phandle);
    qemu_fdt_setprop_string(ms->fdt, nodename, "clock-names", "apb_pclk");
    qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", phandle);

    if (gpio != VIRT_GPIO) {
        /* Mark as not usable by the normal world */
        qemu_fdt_setprop_string(ms->fdt, nodename, "status", "disabled");
        qemu_fdt_setprop_string(ms->fdt, nodename, "secure-status", "okay");
    }
    g_free(nodename);

    /* Child gpio devices */
    if (gpio == VIRT_GPIO) {
        create_gpio_keys(ms->fdt, pl061_dev, phandle);
    } else {
        create_secure_gpio_pwr(ms->fdt, pl061_dev, phandle);
    }
}

static void create_virtio_devices(const VirtMachineState *vms)
{
    int i;
    hwaddr size = vms->memmap[VIRT_MMIO].size;
    MachineState *ms = MACHINE(vms);

    /* We create the transports in forwards order. Since qbus_realize()
     * prepends (not appends) new child buses, the incrementing loop below will
     * create a list of virtio-mmio buses with decreasing base addresses.
     *
     * When a -device option is processed from the command line,
     * qbus_find_recursive() picks the next free virtio-mmio bus in forwards
     * order. The upshot is that -device options in increasing command line
     * order are mapped to virtio-mmio buses with decreasing base addresses.
     *
     * When this code was originally written, that arrangement ensured that the
     * guest Linux kernel would give the lowest "name" (/dev/vda, eth0, etc) to
     * the first -device on the command line. (The end-to-end order is a
     * function of this loop, qbus_realize(), qbus_find_recursive(), and the
     * guest kernel's name-to-address assignment strategy.)
     *
     * Meanwhile, the kernel's traversal seems to have been reversed; see eg.
     * the message, if not necessarily the code, of commit 70161ff336.
     * Therefore the loop now establishes the inverse of the original intent.
     *
     * Unfortunately, we can't counteract the kernel change by reversing the
     * loop; it would break existing command lines.
     *
     * In any case, the kernel makes no guarantee about the stability of
     * enumeration order of virtio devices (as demonstrated by it changing
     * between kernel versions). For reliable and stable identification
     * of disks users must use UUIDs or similar mechanisms.
     */
    for (i = 0; i < NUM_VIRTIO_TRANSPORTS; i++) {
        int irq = vms->irqmap[VIRT_MMIO] + i;
        hwaddr base = vms->memmap[VIRT_MMIO].base + i * size;

        sysbus_create_simple("virtio-mmio", base,
                             qdev_get_gpio_in(vms->gic, irq));
    }

    /* We add dtb nodes in reverse order so that they appear in the finished
     * device tree lowest address first.
     *
     * Note that this mapping is independent of the loop above. The previous
     * loop influences virtio device to virtio transport assignment, whereas
     * this loop controls how virtio transports are laid out in the dtb.
     */
    for (i = NUM_VIRTIO_TRANSPORTS - 1; i >= 0; i--) {
        char *nodename;
        int irq = vms->irqmap[VIRT_MMIO] + i;
        hwaddr base = vms->memmap[VIRT_MMIO].base + i * size;

        nodename = g_strdup_printf("/virtio_mmio@%" PRIx64, base);
        qemu_fdt_add_subnode(ms->fdt, nodename);
        qemu_fdt_setprop_string(ms->fdt, nodename,
                                "compatible", "virtio,mmio");
        qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                     2, base, 2, size);
        qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
                               GIC_FDT_IRQ_TYPE_SPI, irq,
                               GIC_FDT_IRQ_FLAGS_EDGE_LO_HI);
        qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0);
        g_free(nodename);
    }
}

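/*
 * Sizing note (derived from base_memmap above): the 128MiB VIRT_FLASH area
 * is split in two by virt_flash_map() below, so each pflash device covers
 * 64MiB, i.e. 256 sectors of VIRT_FLASH_SECTOR_SIZE (256KiB) each.
 */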
#define VIRT_FLASH_SECTOR_SIZE (256 * KiB)

static PFlashCFI01 *virt_flash_create1(VirtMachineState *vms,
                                       const char *name,
                                       const char *alias_prop_name)
{
    /*
     * Create a single flash device.  We use the same parameters as
     * the flash devices on the Versatile Express board.
     */
    DeviceState *dev = qdev_new(TYPE_PFLASH_CFI01);

    qdev_prop_set_uint64(dev, "sector-length", VIRT_FLASH_SECTOR_SIZE);
    qdev_prop_set_uint8(dev, "width", 4);
    qdev_prop_set_uint8(dev, "device-width", 2);
    qdev_prop_set_bit(dev, "big-endian", false);
    qdev_prop_set_uint16(dev, "id0", 0x89);
    qdev_prop_set_uint16(dev, "id1", 0x18);
    qdev_prop_set_uint16(dev, "id2", 0x00);
    qdev_prop_set_uint16(dev, "id3", 0x00);
    qdev_prop_set_string(dev, "name", name);
    object_property_add_child(OBJECT(vms), name, OBJECT(dev));
    object_property_add_alias(OBJECT(vms), alias_prop_name,
                              OBJECT(dev), "drive");
    return PFLASH_CFI01(dev);
}

static void virt_flash_create(VirtMachineState *vms)
{
    vms->flash[0] = virt_flash_create1(vms, "virt.flash0", "pflash0");
    vms->flash[1] = virt_flash_create1(vms, "virt.flash1", "pflash1");
}

static void virt_flash_map1(PFlashCFI01 *flash,
                            hwaddr base, hwaddr size,
                            MemoryRegion *sysmem)
{
    DeviceState *dev = DEVICE(flash);

    assert(QEMU_IS_ALIGNED(size, VIRT_FLASH_SECTOR_SIZE));
    assert(size / VIRT_FLASH_SECTOR_SIZE <= UINT32_MAX);
    qdev_prop_set_uint32(dev, "num-blocks", size / VIRT_FLASH_SECTOR_SIZE);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    memory_region_add_subregion(sysmem, base,
                                sysbus_mmio_get_region(SYS_BUS_DEVICE(dev),
                                                       0));
}

static void virt_flash_map(VirtMachineState *vms,
                           MemoryRegion *sysmem,
                           MemoryRegion *secure_sysmem)
{
    /*
     * Map two flash devices to fill the VIRT_FLASH space in the memmap.
     * sysmem is the system memory space. secure_sysmem is the secure view
     * of the system, and the first flash device should be made visible only
     * there. The second flash device is visible to both secure and nonsecure.
     * If sysmem == secure_sysmem this means there is no separate Secure
     * address space and both flash devices are generally visible.
     */
    hwaddr flashsize = vms->memmap[VIRT_FLASH].size / 2;
    hwaddr flashbase = vms->memmap[VIRT_FLASH].base;

    virt_flash_map1(vms->flash[0], flashbase, flashsize,
                    secure_sysmem);
    virt_flash_map1(vms->flash[1], flashbase + flashsize, flashsize,
                    sysmem);
}

static void virt_flash_fdt(VirtMachineState *vms,
                           MemoryRegion *sysmem,
                           MemoryRegion *secure_sysmem)
{
    hwaddr flashsize = vms->memmap[VIRT_FLASH].size / 2;
    hwaddr flashbase = vms->memmap[VIRT_FLASH].base;
    MachineState *ms = MACHINE(vms);
    char *nodename;

    if (sysmem == secure_sysmem) {
        /* Report both flash devices as a single node in the DT */
        nodename = g_strdup_printf("/flash@%" PRIx64, flashbase);
        qemu_fdt_add_subnode(ms->fdt, nodename);
        qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cfi-flash");
        qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                     2, flashbase, 2, flashsize,
                                     2, flashbase + flashsize, 2, flashsize);
        qemu_fdt_setprop_cell(ms->fdt, nodename, "bank-width", 4);
        g_free(nodename);
    } else {
        /*
         * Report the devices as separate nodes so we can mark one as
         * only visible to the secure world.
         */
        nodename = g_strdup_printf("/secflash@%" PRIx64, flashbase);
        qemu_fdt_add_subnode(ms->fdt, nodename);
        qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cfi-flash");
        qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                     2, flashbase, 2, flashsize);
        qemu_fdt_setprop_cell(ms->fdt, nodename, "bank-width", 4);
        qemu_fdt_setprop_string(ms->fdt, nodename, "status", "disabled");
        qemu_fdt_setprop_string(ms->fdt, nodename, "secure-status", "okay");
        g_free(nodename);

        nodename = g_strdup_printf("/flash@%" PRIx64, flashbase + flashsize);
        qemu_fdt_add_subnode(ms->fdt, nodename);
        qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cfi-flash");
        qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                     2, flashbase + flashsize, 2, flashsize);
        qemu_fdt_setprop_cell(ms->fdt, nodename, "bank-width", 4);
        g_free(nodename);
    }
}

static bool virt_firmware_init(VirtMachineState *vms,
                               MemoryRegion *sysmem,
                               MemoryRegion *secure_sysmem)
{
    int i;
    const char *bios_name;
    BlockBackend *pflash_blk0;

    /* Map legacy -drive if=pflash to machine properties */
    for (i = 0; i < ARRAY_SIZE(vms->flash); i++) {
        pflash_cfi01_legacy_drive(vms->flash[i],
                                  drive_get(IF_PFLASH, 0, i));
    }

    virt_flash_map(vms, sysmem, secure_sysmem);

    pflash_blk0 = pflash_cfi01_get_blk(vms->flash[0]);

    bios_name = MACHINE(vms)->firmware;
    if (bios_name) {
        char *fname;
        MemoryRegion *mr;
        ssize_t image_size;

        if (pflash_blk0) {
            error_report("The contents of the first flash device may be "
                         "specified with -bios or with -drive if=pflash... "
                         "but you cannot use both options at once");
            exit(1);
        }

        /* Fall back to -bios */
        fname = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
        if (!fname) {
            error_report("Could not find ROM image '%s'", bios_name);
            exit(1);
        }
        mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(vms->flash[0]), 0);
        image_size = load_image_mr(fname, mr);
        g_free(fname);
        if (image_size < 0) {
            error_report("Could not load ROM image '%s'", bios_name);
            exit(1);
        }
    }

    return pflash_blk0 || bios_name;
}

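/*
 * fw_cfg register layout within the VIRT_FW_CFG window (assuming the
 * standard fw_cfg_init_mem_wide(ctl_addr, data_addr, data_width, dma_addr,
 * as) signature): data register at base (8 bytes wide), selector/control
 * register at base + 8, DMA address register at base + 16.
 */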
static FWCfgState *create_fw_cfg(const VirtMachineState *vms, AddressSpace *as)
{
    MachineState *ms = MACHINE(vms);
    hwaddr base = vms->memmap[VIRT_FW_CFG].base;
    hwaddr size = vms->memmap[VIRT_FW_CFG].size;
    FWCfgState *fw_cfg;
    char *nodename;

    fw_cfg = fw_cfg_init_mem_wide(base + 8, base, 8, base + 16, as);
    fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)ms->smp.cpus);

    nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base);
    qemu_fdt_add_subnode(ms->fdt, nodename);
    qemu_fdt_setprop_string(ms->fdt, nodename,
                            "compatible", "qemu,fw-cfg-mmio");
    qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                 2, base, 2, size);
    qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0);
    g_free(nodename);
    return fw_cfg;
}

static void create_pcie_irq_map(const MachineState *ms,
                                uint32_t gic_phandle,
                                int first_irq, const char *nodename)
{
    int devfn, pin;
    uint32_t full_irq_map[4 * 4 * 10] = { 0 };
    uint32_t *irq_map = full_irq_map;

    for (devfn = 0; devfn <= 0x18; devfn += 0x8) {
        for (pin = 0; pin < 4; pin++) {
            int irq_type = GIC_FDT_IRQ_TYPE_SPI;
            int irq_nr = first_irq + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS);
            int irq_level = GIC_FDT_IRQ_FLAGS_LEVEL_HI;
            int i;
            uint32_t map[] = {
                devfn << 8, 0, 0,                           /* devfn */
                pin + 1,                                    /* PCI pin */
                gic_phandle, 0, 0, irq_type, irq_nr, irq_level }; /* GIC irq */

            /* Convert map to big endian */
            for (i = 0; i < 10; i++) {
                irq_map[i] = cpu_to_be32(map[i]);
            }
            irq_map += 10;
        }
    }

    qemu_fdt_setprop(ms->fdt, nodename, "interrupt-map",
                     full_irq_map, sizeof(full_irq_map));

    qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupt-map-mask",
                           cpu_to_be16(PCI_DEVFN(3, 0)), /* Slot 3 */
                           0, 0,
                           0x7           /* PCI irq */);
}

static void create_smmu(const VirtMachineState *vms,
                        PCIBus *bus)
{
    char *node;
    const char compat[] = "arm,smmu-v3";
    int irq = vms->irqmap[VIRT_SMMU];
    int i;
    hwaddr base = vms->memmap[VIRT_SMMU].base;
    hwaddr size = vms->memmap[VIRT_SMMU].size;
    const char irq_names[] = "eventq\0priq\0cmdq-sync\0gerror";
    DeviceState *dev;
    MachineState *ms = MACHINE(vms);

    if (vms->iommu != VIRT_IOMMU_SMMUV3 || !vms->iommu_phandle) {
        return;
    }

    dev = qdev_new(TYPE_ARM_SMMUV3);

    object_property_set_link(OBJECT(dev), "primary-bus", OBJECT(bus),
                             &error_abort);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    for (i = 0; i < NUM_SMMU_IRQS; i++) {
        sysbus_connect_irq(SYS_BUS_DEVICE(dev), i,
                           qdev_get_gpio_in(vms->gic, irq + i));
    }

    node = g_strdup_printf("/smmuv3@%" PRIx64, base);
    qemu_fdt_add_subnode(ms->fdt, node);
    qemu_fdt_setprop(ms->fdt, node, "compatible", compat, sizeof(compat));
    qemu_fdt_setprop_sized_cells(ms->fdt, node, "reg", 2, base, 2, size);

    qemu_fdt_setprop_cells(ms->fdt, node, "interrupts",
                           GIC_FDT_IRQ_TYPE_SPI, irq, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI,
                           GIC_FDT_IRQ_TYPE_SPI, irq + 1, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI,
                           GIC_FDT_IRQ_TYPE_SPI, irq + 2, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI,
                           GIC_FDT_IRQ_TYPE_SPI, irq + 3, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI);

    qemu_fdt_setprop(ms->fdt, node, "interrupt-names", irq_names,
                     sizeof(irq_names));

    qemu_fdt_setprop(ms->fdt, node, "dma-coherent", NULL, 0);

    qemu_fdt_setprop_cell(ms->fdt, node, "#iommu-cells", 1);

    qemu_fdt_setprop_cell(ms->fdt, node, "phandle", vms->iommu_phandle);
    g_free(node);
}

static void create_virtio_iommu_dt_bindings(VirtMachineState *vms)
{
    const char compat[] = "virtio,pci-iommu\0pci1af4,1057";
    uint16_t bdf = vms->virtio_iommu_bdf;
    MachineState *ms = MACHINE(vms);
    char *node;

    vms->iommu_phandle = qemu_fdt_alloc_phandle(ms->fdt);

    node = g_strdup_printf("%s/virtio_iommu@%x,%x", vms->pciehb_nodename,
                           PCI_SLOT(bdf), PCI_FUNC(bdf));
    qemu_fdt_add_subnode(ms->fdt, node);
    qemu_fdt_setprop(ms->fdt, node, "compatible", compat, sizeof(compat));
    qemu_fdt_setprop_sized_cells(ms->fdt, node, "reg",
                                 1, bdf << 8, 1, 0, 1, 0,
                                 1, 0, 1, 0);

    qemu_fdt_setprop_cell(ms->fdt, node, "#iommu-cells", 1);
    qemu_fdt_setprop_cell(ms->fdt, node, "phandle", vms->iommu_phandle);
    g_free(node);

    qemu_fdt_setprop_cells(ms->fdt, vms->pciehb_nodename, "iommu-map",
                           0x0, vms->iommu_phandle, 0x0, bdf,
                           bdf + 1, vms->iommu_phandle, bdf + 1, 0xffff - bdf);
}

static void create_pcie(VirtMachineState *vms)
{
    hwaddr base_mmio = vms->memmap[VIRT_PCIE_MMIO].base;
    hwaddr size_mmio = vms->memmap[VIRT_PCIE_MMIO].size;
    hwaddr base_mmio_high = vms->memmap[VIRT_HIGH_PCIE_MMIO].base;
    hwaddr size_mmio_high = vms->memmap[VIRT_HIGH_PCIE_MMIO].size;
    hwaddr base_pio = vms->memmap[VIRT_PCIE_PIO].base;
    hwaddr size_pio = vms->memmap[VIRT_PCIE_PIO].size;
    hwaddr base_ecam, size_ecam;
    hwaddr base = base_mmio;
    int nr_pcie_buses;
    int irq = vms->irqmap[VIRT_PCIE];
    MemoryRegion *mmio_alias;
    MemoryRegion *mmio_reg;
    MemoryRegion *ecam_alias;
    MemoryRegion *ecam_reg;
    DeviceState *dev;
    char *nodename;
    int i, ecam_id;
    PCIHostState *pci;
    MachineState *ms = MACHINE(vms);

    dev = qdev_new(TYPE_GPEX_HOST);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    ecam_id = VIRT_ECAM_ID(vms->highmem_ecam);
    base_ecam = vms->memmap[ecam_id].base;
    size_ecam = vms->memmap[ecam_id].size;
    nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN;
    /* Map only the first size_ecam bytes of ECAM space */
    ecam_alias = g_new0(MemoryRegion, 1);
    ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
    memory_region_init_alias(ecam_alias, OBJECT(dev), "pcie-ecam",
                             ecam_reg, 0, size_ecam);
    memory_region_add_subregion(get_system_memory(), base_ecam, ecam_alias);

    /* Map the MMIO window into system address space so as to expose
     * the section of PCI MMIO space which starts at the same base address
     * (ie 1:1 mapping for that part of PCI MMIO space visible through
     * the window).
     */
    mmio_alias = g_new0(MemoryRegion, 1);
    mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
    memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
                             mmio_reg, base_mmio, size_mmio);
    memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias);

    if (vms->highmem_mmio) {
        /* Map high MMIO space */
        MemoryRegion *high_mmio_alias = g_new0(MemoryRegion, 1);

        memory_region_init_alias(high_mmio_alias, OBJECT(dev), "pcie-mmio-high",
                                 mmio_reg, base_mmio_high, size_mmio_high);
        memory_region_add_subregion(get_system_memory(), base_mmio_high,
                                    high_mmio_alias);
    }

    /* Map IO port space */
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_pio);

    for (i = 0; i < GPEX_NUM_IRQS; i++) {
        sysbus_connect_irq(SYS_BUS_DEVICE(dev), i,
                           qdev_get_gpio_in(vms->gic, irq + i));
        gpex_set_irq_num(GPEX_HOST(dev), i, irq + i);
    }

    pci = PCI_HOST_BRIDGE(dev);
    pci->bypass_iommu = vms->default_bus_bypass_iommu;
    vms->bus = pci->bus;
    if (vms->bus) {
        for (i = 0; i < nb_nics; i++) {
            NICInfo *nd = &nd_table[i];

            if (!nd->model) {
                nd->model = g_strdup("virtio");
            }

            pci_nic_init_nofail(nd, pci->bus, nd->model, NULL);
        }
    }

    nodename = vms->pciehb_nodename = g_strdup_printf("/pcie@%" PRIx64, base);
    qemu_fdt_add_subnode(ms->fdt, nodename);
    qemu_fdt_setprop_string(ms->fdt, nodename,
                            "compatible", "pci-host-ecam-generic");
    qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "pci");
    qemu_fdt_setprop_cell(ms->fdt, nodename, "#address-cells", 3);
    qemu_fdt_setprop_cell(ms->fdt, nodename, "#size-cells", 2);
    qemu_fdt_setprop_cell(ms->fdt, nodename, "linux,pci-domain", 0);
    qemu_fdt_setprop_cells(ms->fdt, nodename, "bus-range", 0,
                           nr_pcie_buses - 1);
    qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0);

    if (vms->msi_phandle) {
        qemu_fdt_setprop_cells(ms->fdt, nodename, "msi-map",
                               0, vms->msi_phandle, 0, 0x10000);
    }

    qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
                                 2, base_ecam, 2, size_ecam);

    if (vms->highmem_mmio) {
        qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "ranges",
                                     1, FDT_PCI_RANGE_IOPORT, 2, 0,
                                     2, base_pio, 2, size_pio,
                                     1, FDT_PCI_RANGE_MMIO, 2, base_mmio,
                                     2, base_mmio, 2, size_mmio,
                                     1, FDT_PCI_RANGE_MMIO_64BIT,
                                     2, base_mmio_high,
                                     2, base_mmio_high, 2, size_mmio_high);
    } else {
        qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "ranges",
                                     1, FDT_PCI_RANGE_IOPORT, 2, 0,
                                     2, base_pio, 2, size_pio,
                                     1, FDT_PCI_RANGE_MMIO, 2, base_mmio,
                                     2, base_mmio, 2, size_mmio);
    }

    qemu_fdt_setprop_cell(ms->fdt, nodename, "#interrupt-cells", 1);
    create_pcie_irq_map(ms, vms->gic_phandle, irq, nodename);

    if (vms->iommu) {
        vms->iommu_phandle = qemu_fdt_alloc_phandle(ms->fdt);

        switch (vms->iommu) {
        case VIRT_IOMMU_SMMUV3:
            create_smmu(vms, vms->bus);
            qemu_fdt_setprop_cells(ms->fdt, nodename, "iommu-map",
                                   0x0, vms->iommu_phandle, 0x0, 0x10000);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void create_platform_bus(VirtMachineState *vms)
{
    DeviceState *dev;
    SysBusDevice *s;
    int i;
    MemoryRegion *sysmem = get_system_memory();

    dev = qdev_new(TYPE_PLATFORM_BUS_DEVICE);
    dev->id = g_strdup(TYPE_PLATFORM_BUS_DEVICE);
    qdev_prop_set_uint32(dev, "num_irqs", PLATFORM_BUS_NUM_IRQS);
    qdev_prop_set_uint32(dev, "mmio_size", vms->memmap[VIRT_PLATFORM_BUS].size);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    vms->platform_bus_dev = dev;

    s = SYS_BUS_DEVICE(dev);
    for (i = 0; i < PLATFORM_BUS_NUM_IRQS; i++) {
        int irq = vms->irqmap[VIRT_PLATFORM_BUS] + i;
        sysbus_connect_irq(s, i, qdev_get_gpio_in(vms->gic, irq));
    }

    memory_region_add_subregion(sysmem,
                                vms->memmap[VIRT_PLATFORM_BUS].base,
                                sysbus_mmio_get_region(s, 0));
}

static void create_tag_ram(MemoryRegion *tag_sysmem,
                           hwaddr base, hwaddr size,
                           const char *name)
{
    MemoryRegion *tagram = g_new(MemoryRegion, 1);

    memory_region_init_ram(tagram, NULL, name, size / 32, &error_fatal);
    memory_region_add_subregion(tag_sysmem, base / 32, tagram);
}

static void create_secure_ram(VirtMachineState *vms,
                              MemoryRegion *secure_sysmem,
                              MemoryRegion *secure_tag_sysmem)
{
    MemoryRegion *secram = g_new(MemoryRegion, 1);
    char *nodename;
    hwaddr base = vms->memmap[VIRT_SECURE_MEM].base;
    hwaddr size = vms->memmap[VIRT_SECURE_MEM].size;
    MachineState *ms = MACHINE(vms);

    memory_region_init_ram(secram, NULL, "virt.secure-ram", size,
                           &error_fatal);
    memory_region_add_subregion(secure_sysmem, base, secram);

    nodename = g_strdup_printf("/secram@%" PRIx64, base);
    qemu_fdt_add_subnode(ms->fdt, nodename);
    qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "memory");
    qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", 2, base, 2, size);
    qemu_fdt_setprop_string(ms->fdt, nodename, "status", "disabled");
    qemu_fdt_setprop_string(ms->fdt, nodename, "secure-status", "okay");

    if (secure_tag_sysmem) {
        create_tag_ram(secure_tag_sysmem, base, size, "mach-virt.secure-tag");
    }

    g_free(nodename);
}

static void *machvirt_dtb(const struct arm_boot_info *binfo, int *fdt_size)
{
    const VirtMachineState *board = container_of(binfo, VirtMachineState,
                                                 bootinfo);
    MachineState *ms = MACHINE(board);

    *fdt_size = board->fdt_size;
    return ms->fdt;
}

static void virt_build_smbios(VirtMachineState *vms)
{
    MachineClass *mc = MACHINE_GET_CLASS(vms);
    MachineState *ms = MACHINE(vms);
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
    uint8_t *smbios_tables, *smbios_anchor;
    size_t smbios_tables_len, smbios_anchor_len;
    struct smbios_phys_mem_area mem_array;
    const char *product = "QEMU Virtual Machine";

    if (kvm_enabled()) {
        product = "KVM Virtual Machine";
    }

    smbios_set_defaults("QEMU", product,
                        vmc->smbios_old_sys_ver ? "1.0" : mc->name, false,
                        true, SMBIOS_ENTRY_POINT_TYPE_64);

    /* build the array of physical mem area from base_memmap */
    mem_array.address = vms->memmap[VIRT_MEM].base;
    mem_array.length = ms->ram_size;

    smbios_get_tables(ms, &mem_array, 1,
                      &smbios_tables, &smbios_tables_len,
                      &smbios_anchor, &smbios_anchor_len,
                      &error_fatal);

    if (smbios_anchor) {
        fw_cfg_add_file(vms->fw_cfg, "etc/smbios/smbios-tables",
                        smbios_tables, smbios_tables_len);
        fw_cfg_add_file(vms->fw_cfg, "etc/smbios/smbios-anchor",
                        smbios_anchor, smbios_anchor_len);
    }
}

void virt_machine_done(Notifier *notifier, void *data)
{
    VirtMachineState *vms = container_of(notifier, VirtMachineState,
                                         machine_done);
    MachineState *ms = MACHINE(vms);
    ARMCPU *cpu = ARM_CPU(first_cpu);
    struct arm_boot_info *info = &vms->bootinfo;
    AddressSpace *as = arm_boot_address_space(cpu, info);

    /*
     * If the user provided a dtb, we assume the dynamic sysbus nodes
     * already are integrated there. This corresponds to a use case where
     * the dynamic sysbus nodes are complex and their generation is not yet
     * supported. In that case the user can take charge of the guest dt
     * while qemu takes charge of the qom stuff.
     */
    if (info->dtb_filename == NULL) {
        platform_bus_add_all_fdt_nodes(ms->fdt, "/intc",
                                       vms->memmap[VIRT_PLATFORM_BUS].base,
                                       vms->memmap[VIRT_PLATFORM_BUS].size,
                                       vms->irqmap[VIRT_PLATFORM_BUS]);
    }
    if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as, ms) < 0) {
        exit(1);
    }

    fw_cfg_add_extra_pci_roots(vms->bus, vms->fw_cfg);

    virt_acpi_setup(vms);
    virt_build_smbios(vms);
}

static uint64_t virt_cpu_mp_affinity(VirtMachineState *vms, int idx)
{
    uint8_t clustersz = ARM_DEFAULT_CPUS_PER_CLUSTER;
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);

    if (!vmc->disallow_affinity_adjustment) {
        /* Adjust MPIDR like 64-bit KVM hosts, which incorporate the
         * GIC's target-list limitations. 32-bit KVM hosts currently
         * always create clusters of 4 CPUs, but that is expected to
         * change when they gain support for gicv3. When KVM is enabled
         * it will override the changes we make here, therefore our
         * purposes are to make TCG consistent (with 64-bit KVM hosts)
         * and to improve SGI efficiency.
         */
        if (vms->gic_version == VIRT_GIC_VERSION_2) {
            clustersz = GIC_TARGETLIST_BITS;
        } else {
            clustersz = GICV3_TARGETLIST_BITS;
        }
    }
    return arm_cpu_mp_affinity(idx, clustersz);
}
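/*
 * Illustrative example (not part of the original board code): the CPU
 * index is split into MPIDR affinity fields using the cluster size,
 * roughly Aff1 = idx / clustersz and Aff0 = idx % clustersz.  With a
 * cluster size of 16 (the GICv3 case), CPU index 20 therefore becomes
 * Aff1 = 1, Aff0 = 4; with a cluster size of 8 (the GICv2 case) the
 * same index becomes Aff1 = 2, Aff0 = 4.
 */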
static inline bool *virt_get_high_memmap_enabled(VirtMachineState *vms,
                                                 int index)
{
    bool *enabled_array[] = {
        &vms->highmem_redists,
        &vms->highmem_ecam,
        &vms->highmem_mmio,
    };

    assert(ARRAY_SIZE(extended_memmap) - VIRT_LOWMEMMAP_LAST ==
           ARRAY_SIZE(enabled_array));
    assert(index - VIRT_LOWMEMMAP_LAST < ARRAY_SIZE(enabled_array));

    return enabled_array[index - VIRT_LOWMEMMAP_LAST];
}
static void virt_set_high_memmap(VirtMachineState *vms,
                                 hwaddr base, int pa_bits)
{
    hwaddr region_base, region_size;
    bool *region_enabled, fits;
    int i;

    for (i = VIRT_LOWMEMMAP_LAST; i < ARRAY_SIZE(extended_memmap); i++) {
        region_enabled = virt_get_high_memmap_enabled(vms, i);
        region_base = ROUND_UP(base, extended_memmap[i].size);
        region_size = extended_memmap[i].size;

        vms->memmap[i].base = region_base;
        vms->memmap[i].size = region_size;

        /*
         * Check each device to see if it fits in the PA space,
         * moving highest_gpa as we go. For compatibility, move
         * highest_gpa for disabled fitting devices as well, if
         * the compact layout has been disabled.
         *
         * For each device that doesn't fit, disable it.
         */
        fits = (region_base + region_size) <= BIT_ULL(pa_bits);
        *region_enabled &= fits;
        if (vms->highmem_compact && !*region_enabled) {
            continue;
        }

        base = region_base + region_size;
        if (fits) {
            vms->highest_gpa = base - 1;
        }
    }
}
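/*
 * Illustrative example (not part of the original board code): a high
 * region of size S is placed at ROUND_UP(base, S) and kept only if it
 * ends at or below 2^pa_bits.  For instance, with pa_bits = 40 (1 TiB
 * of guest-physical space) a region that would have to start at 768 GiB
 * and span 512 GiB does not fit (768 GiB + 512 GiB > 1 TiB), so its
 * enable flag is cleared; with highmem_compact enabled it then also
 * stops consuming address space for the regions that follow it.
 */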
static void virt_set_memmap(VirtMachineState *vms, int pa_bits)
{
    MachineState *ms = MACHINE(vms);
    hwaddr base, device_memory_base, device_memory_size, memtop;
    int i;

    vms->memmap = extended_memmap;

    for (i = 0; i < ARRAY_SIZE(base_memmap); i++) {
        vms->memmap[i] = base_memmap[i];
    }

    if (ms->ram_slots > ACPI_MAX_RAM_SLOTS) {
        error_report("unsupported number of memory slots: %"PRIu64,
                     ms->ram_slots);
        exit(EXIT_FAILURE);
    }

    /*
     * !highmem is exactly the same as limiting the PA space to 32bit,
     * irrespective of the underlying capabilities of the HW.
     */
    if (!vms->highmem) {
        pa_bits = 32;
    }

    /*
     * We compute the base of the high IO region depending on the
     * amount of initial and device memory. The device memory start/size
     * is aligned on 1GiB. We never put the high IO region below 256GiB
     * so that if maxram_size is < 255GiB we keep the legacy memory map.
     * The device region size assumes 1GiB page max alignment per slot.
     */
    device_memory_base =
        ROUND_UP(vms->memmap[VIRT_MEM].base + ms->ram_size, GiB);
    device_memory_size = ms->maxram_size - ms->ram_size + ms->ram_slots * GiB;

    /* Base address of the high IO region */
    memtop = base = device_memory_base + ROUND_UP(device_memory_size, GiB);
    if (memtop > BIT_ULL(pa_bits)) {
        error_report("Addressing limited to %d bits, but memory exceeds it by %llu bytes",
                     pa_bits, memtop - BIT_ULL(pa_bits));
        exit(EXIT_FAILURE);
    }
    if (base < device_memory_base) {
        error_report("maxmem/slots too huge");
        exit(EXIT_FAILURE);
    }
    if (base < vms->memmap[VIRT_MEM].base + LEGACY_RAMLIMIT_BYTES) {
        base = vms->memmap[VIRT_MEM].base + LEGACY_RAMLIMIT_BYTES;
    }

    /* We know for sure that at least the memory fits in the PA space */
    vms->highest_gpa = memtop - 1;

    virt_set_high_memmap(vms, base, pa_bits);

    if (device_memory_size > 0) {
        ms->device_memory = g_malloc0(sizeof(*ms->device_memory));
        ms->device_memory->base = device_memory_base;
        memory_region_init(&ms->device_memory->mr, OBJECT(vms),
                           "device-memory", device_memory_size);
    }
}
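/*
 * Worked example (illustrative only, assuming the usual 1 GiB RAM base
 * and 255 GiB legacy RAM limit): with "-m 8G,slots=2,maxmem=16G",
 * device_memory_base = ROUND_UP(1 GiB + 8 GiB, 1 GiB) = 9 GiB and
 * device_memory_size = 16 GiB - 8 GiB + 2 * 1 GiB = 10 GiB, so memtop
 * is 19 GiB.  Since 19 GiB is below 1 GiB + 255 GiB, the high IO base
 * is pushed up to 256 GiB, preserving the legacy memory map for small
 * configurations.
 */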
static VirtGICType finalize_gic_version_do(const char *accel_name,
                                           VirtGICType gic_version,
                                           int gics_supported,
                                           unsigned int max_cpus)
{
    /* Convert host/max/nosel to GIC version number */
    switch (gic_version) {
    case VIRT_GIC_VERSION_HOST:
        if (!kvm_enabled()) {
            error_report("gic-version=host requires KVM");
            exit(1);
        }

        /* For KVM, gic-version=host means gic-version=max */
        return finalize_gic_version_do(accel_name, VIRT_GIC_VERSION_MAX,
                                       gics_supported, max_cpus);
    case VIRT_GIC_VERSION_MAX:
        if (gics_supported & VIRT_GIC_VERSION_4_MASK) {
            gic_version = VIRT_GIC_VERSION_4;
        } else if (gics_supported & VIRT_GIC_VERSION_3_MASK) {
            gic_version = VIRT_GIC_VERSION_3;
        } else {
            gic_version = VIRT_GIC_VERSION_2;
        }
        break;
    case VIRT_GIC_VERSION_NOSEL:
        if ((gics_supported & VIRT_GIC_VERSION_2_MASK) &&
            max_cpus <= GIC_NCPU) {
            gic_version = VIRT_GIC_VERSION_2;
        } else if (gics_supported & VIRT_GIC_VERSION_3_MASK) {
            /*
             * in case the host does not support v2 emulation or
             * the end-user requested more than 8 VCPUs we now default
             * to v3. In any case defaulting to v2 would be broken.
             */
            gic_version = VIRT_GIC_VERSION_3;
        } else if (max_cpus > GIC_NCPU) {
            error_report("%s only supports GICv2 emulation but more than 8 "
                         "vcpus are requested", accel_name);
            exit(1);
        }
        break;
    case VIRT_GIC_VERSION_2:
    case VIRT_GIC_VERSION_3:
    case VIRT_GIC_VERSION_4:
        break;
    }

    /* Check chosen version is effectively supported */
    switch (gic_version) {
    case VIRT_GIC_VERSION_2:
        if (!(gics_supported & VIRT_GIC_VERSION_2_MASK)) {
            error_report("%s does not support GICv2 emulation", accel_name);
            exit(1);
        }
        break;
    case VIRT_GIC_VERSION_3:
        if (!(gics_supported & VIRT_GIC_VERSION_3_MASK)) {
            error_report("%s does not support GICv3 emulation", accel_name);
            exit(1);
        }
        break;
    case VIRT_GIC_VERSION_4:
        if (!(gics_supported & VIRT_GIC_VERSION_4_MASK)) {
            error_report("%s does not support GICv4 emulation, is virtualization=on?",
                         accel_name);
            exit(1);
        }
        break;
    default:
        error_report("logic error in finalize_gic_version");
        exit(1);
        break;
    }

    return gic_version;
}
/*
 * finalize_gic_version - Determines the final gic_version
 * according to the gic-version property
 *
 * Default GIC type is v2
 */
static void finalize_gic_version(VirtMachineState *vms)
{
    const char *accel_name = current_accel_name();
    unsigned int max_cpus = MACHINE(vms)->smp.max_cpus;
    int gics_supported = 0;

    /* Determine which GIC versions the current environment supports */
    if (kvm_enabled() && kvm_irqchip_in_kernel()) {
        int probe_bitmap = kvm_arm_vgic_probe();

        if (!probe_bitmap) {
            error_report("Unable to determine GIC version supported by host");
            exit(1);
        }

        if (probe_bitmap & KVM_ARM_VGIC_V2) {
            gics_supported |= VIRT_GIC_VERSION_2_MASK;
        }
        if (probe_bitmap & KVM_ARM_VGIC_V3) {
            gics_supported |= VIRT_GIC_VERSION_3_MASK;
        }
    } else if (kvm_enabled() && !kvm_irqchip_in_kernel()) {
        /* KVM w/o kernel irqchip can only deal with GICv2 */
        gics_supported |= VIRT_GIC_VERSION_2_MASK;
        accel_name = "KVM with kernel-irqchip=off";
    } else if (tcg_enabled() || hvf_enabled() || qtest_enabled()) {
        gics_supported |= VIRT_GIC_VERSION_2_MASK;
        if (module_object_class_by_name("arm-gicv3")) {
            gics_supported |= VIRT_GIC_VERSION_3_MASK;
            if (vms->virt) {
                /* GICv4 only makes sense if CPU has EL2 */
                gics_supported |= VIRT_GIC_VERSION_4_MASK;
            }
        }
    } else {
        error_report("Unsupported accelerator, can not determine GIC support");
        exit(1);
    }

    /*
     * Then convert helpers like host/max to concrete GIC versions and ensure
     * the desired version is supported
     */
    vms->gic_version = finalize_gic_version_do(accel_name, vms->gic_version,
                                               gics_supported, max_cpus);
}
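/*
 * Illustrative example (not part of the original board code): under TCG
 * with the GICv3 model built in and virtualization=off, gics_supported
 * ends up as VIRT_GIC_VERSION_2_MASK | VIRT_GIC_VERSION_3_MASK, so
 * "gic-version=max" resolves to GICv3; with virtualization=on the v4
 * mask is added as well and "max" resolves to GICv4 instead.
 */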
/*
 * virt_cpu_post_init() must be called after the CPUs have
 * been realized and the GIC has been created.
 */
static void virt_cpu_post_init(VirtMachineState *vms, MemoryRegion *sysmem)
{
    int max_cpus = MACHINE(vms)->smp.max_cpus;
    bool aarch64, pmu, steal_time;
    CPUState *cpu;

    aarch64 = object_property_get_bool(OBJECT(first_cpu), "aarch64", NULL);
    pmu = object_property_get_bool(OBJECT(first_cpu), "pmu", NULL);
    steal_time = object_property_get_bool(OBJECT(first_cpu),
                                          "kvm-steal-time", NULL);

    if (kvm_enabled()) {
        hwaddr pvtime_reg_base = vms->memmap[VIRT_PVTIME].base;
        hwaddr pvtime_reg_size = vms->memmap[VIRT_PVTIME].size;

        if (steal_time) {
            MemoryRegion *pvtime = g_new(MemoryRegion, 1);
            hwaddr pvtime_size = max_cpus * PVTIME_SIZE_PER_CPU;

            /* The memory region size must be a multiple of host page size. */
            pvtime_size = REAL_HOST_PAGE_ALIGN(pvtime_size);

            if (pvtime_size > pvtime_reg_size) {
                error_report("pvtime requires a %" HWADDR_PRId
                             " byte memory region for %d CPUs,"
                             " but only %" HWADDR_PRId " has been reserved",
                             pvtime_size, max_cpus, pvtime_reg_size);
                exit(1);
            }

            memory_region_init_ram(pvtime, NULL, "pvtime", pvtime_size, NULL);
            memory_region_add_subregion(sysmem, pvtime_reg_base, pvtime);
        }

        CPU_FOREACH(cpu) {
            if (pmu) {
                assert(arm_feature(&ARM_CPU(cpu)->env, ARM_FEATURE_PMU));
                if (kvm_irqchip_in_kernel()) {
                    kvm_arm_pmu_set_irq(cpu, PPI(VIRTUAL_PMU_IRQ));
                }
                kvm_arm_pmu_init(cpu);
            }
            if (steal_time) {
                kvm_arm_pvtime_init(cpu, pvtime_reg_base +
                                         cpu->cpu_index * PVTIME_SIZE_PER_CPU);
            }
        }
    } else {
        if (aarch64 && vms->highmem) {
            int requested_pa_size = 64 - clz64(vms->highest_gpa);
            int pamax = arm_pamax(ARM_CPU(first_cpu));

            if (pamax < requested_pa_size) {
                error_report("VCPU supports less PA bits (%d) than "
                             "requested by the memory map (%d)",
                             pamax, requested_pa_size);
                exit(1);
            }
        }
    }
}
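/*
 * Illustrative note (not part of the original board code): the pvtime
 * region holds one stolen-time record of PVTIME_SIZE_PER_CPU bytes per
 * possible vCPU, rounded up to the host page size.  With 8 vCPUs and a
 * 4 KiB host page, for example, 8 * PVTIME_SIZE_PER_CPU bytes round up
 * to a single 4 KiB page placed at vms->memmap[VIRT_PVTIME].base.
 */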
static void machvirt_init(MachineState *machine)
{
    VirtMachineState *vms = VIRT_MACHINE(machine);
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(machine);
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    const CPUArchIdList *possible_cpus;
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *secure_sysmem = NULL;
    MemoryRegion *tag_sysmem = NULL;
    MemoryRegion *secure_tag_sysmem = NULL;
    int n, virt_max_cpus;
    bool firmware_loaded;
    bool aarch64 = true;
    bool has_ged = !vmc->no_ged;
    unsigned int smp_cpus = machine->smp.cpus;
    unsigned int max_cpus = machine->smp.max_cpus;

    if (!cpu_type_valid(machine->cpu_type)) {
        error_report("mach-virt: CPU type %s not supported", machine->cpu_type);
        exit(1);
    }

    possible_cpus = mc->possible_cpu_arch_ids(machine);

    /*
     * In accelerated mode, the memory map is computed earlier in kvm_type()
     * to create a VM with the right number of IPA bits.
     */
    if (!vms->memmap) {
        Object *cpuobj;
        ARMCPU *armcpu;
        int pa_bits;

        /*
         * Instantiate a temporary CPU object to find out about what
         * we are about to deal with. Once this is done, get rid of
         * the object.
         */
        cpuobj = object_new(possible_cpus->cpus[0].type);
        armcpu = ARM_CPU(cpuobj);

        pa_bits = arm_pamax(armcpu);

        object_unref(cpuobj);

        virt_set_memmap(vms, pa_bits);
    }

    /* We can probe only here because during property set
     * KVM is not available yet
     */
    finalize_gic_version(vms);

    if (vms->secure) {
        /*
         * The Secure view of the world is the same as the NonSecure,
         * but with a few extra devices. Create it as a container region
         * containing the system memory at low priority; any secure-only
         * devices go in at higher priority and take precedence.
         */
        secure_sysmem = g_new(MemoryRegion, 1);
        memory_region_init(secure_sysmem, OBJECT(machine), "secure-memory",
                           UINT64_MAX);
        memory_region_add_subregion_overlap(secure_sysmem, 0, sysmem, -1);
    }

    firmware_loaded = virt_firmware_init(vms, sysmem,
                                         secure_sysmem ?: sysmem);

    /* If we have an EL3 boot ROM then the assumption is that it will
     * implement PSCI itself, so disable QEMU's internal implementation
     * so it doesn't get in the way. Instead of starting secondary
     * CPUs in PSCI powerdown state we will start them all running and
     * let the boot ROM sort them out.
     * The usual case is that we do use QEMU's PSCI implementation;
     * if the guest has EL2 then we will use SMC as the conduit,
     * and otherwise we will use HVC (for backwards compatibility and
     * because if we're using KVM then we must use HVC).
     */
    if (vms->secure && firmware_loaded) {
        vms->psci_conduit = QEMU_PSCI_CONDUIT_DISABLED;
    } else if (vms->virt) {
        vms->psci_conduit = QEMU_PSCI_CONDUIT_SMC;
    } else {
        vms->psci_conduit = QEMU_PSCI_CONDUIT_HVC;
    }

    /*
     * The maximum number of CPUs depends on the GIC version, or on how
     * many redistributors we can fit into the memory map (which in turn
     * depends on whether this is a GICv3 or v4).
     */
    if (vms->gic_version == VIRT_GIC_VERSION_2) {
        virt_max_cpus = GIC_NCPU;
    } else {
        virt_max_cpus = virt_redist_capacity(vms, VIRT_GIC_REDIST);
        if (vms->highmem_redists) {
            virt_max_cpus += virt_redist_capacity(vms, VIRT_HIGH_GIC_REDIST2);
        }
    }

    if (max_cpus > virt_max_cpus) {
        error_report("Number of SMP CPUs requested (%d) exceeds max CPUs "
                     "supported by machine 'mach-virt' (%d)",
                     max_cpus, virt_max_cpus);
        if (vms->gic_version != VIRT_GIC_VERSION_2 && !vms->highmem_redists) {
            error_printf("Try 'highmem-redists=on' for more CPUs\n");
        }

        exit(1);
    }

    if (vms->secure && (kvm_enabled() || hvf_enabled())) {
        error_report("mach-virt: %s does not support providing "
                     "Security extensions (TrustZone) to the guest CPU",
                     current_accel_name());
        exit(1);
    }

    if (vms->virt && (kvm_enabled() || hvf_enabled())) {
        error_report("mach-virt: %s does not support providing "
                     "Virtualization extensions to the guest CPU",
                     current_accel_name());
        exit(1);
    }

    if (vms->mte && (kvm_enabled() || hvf_enabled())) {
        error_report("mach-virt: %s does not support providing "
                     "MTE to the guest CPU",
                     current_accel_name());
        exit(1);
    }

    create_fdt(vms);

    assert(possible_cpus->len == max_cpus);
    for (n = 0; n < possible_cpus->len; n++) {
        Object *cpuobj;
        CPUState *cs;

        if (n >= smp_cpus) {
            break;
        }

        cpuobj = object_new(possible_cpus->cpus[n].type);
        object_property_set_int(cpuobj, "mp-affinity",
                                possible_cpus->cpus[n].arch_id, NULL);

        cs = CPU(cpuobj);
        cs->cpu_index = n;

        numa_cpu_pre_plug(&possible_cpus->cpus[cs->cpu_index], DEVICE(cpuobj),
                          &error_fatal);

        aarch64 &= object_property_get_bool(cpuobj, "aarch64", NULL);

        if (!vms->secure) {
            object_property_set_bool(cpuobj, "has_el3", false, NULL);
        }

        if (!vms->virt && object_property_find(cpuobj, "has_el2")) {
            object_property_set_bool(cpuobj, "has_el2", false, NULL);
        }

        if (vmc->kvm_no_adjvtime &&
            object_property_find(cpuobj, "kvm-no-adjvtime")) {
            object_property_set_bool(cpuobj, "kvm-no-adjvtime", true, NULL);
        }

        if (vmc->no_kvm_steal_time &&
            object_property_find(cpuobj, "kvm-steal-time")) {
            object_property_set_bool(cpuobj, "kvm-steal-time", false, NULL);
        }

        if (vmc->no_pmu && object_property_find(cpuobj, "pmu")) {
            object_property_set_bool(cpuobj, "pmu", false, NULL);
        }

        if (vmc->no_tcg_lpa2 && object_property_find(cpuobj, "lpa2")) {
            object_property_set_bool(cpuobj, "lpa2", false, NULL);
        }

        if (object_property_find(cpuobj, "reset-cbar")) {
            object_property_set_int(cpuobj, "reset-cbar",
                                    vms->memmap[VIRT_CPUPERIPHS].base,
                                    &error_abort);
        }

        object_property_set_link(cpuobj, "memory", OBJECT(sysmem),
                                 &error_abort);
        if (vms->secure) {
            object_property_set_link(cpuobj, "secure-memory",
                                     OBJECT(secure_sysmem), &error_abort);
        }

        if (vms->mte) {
            /* Create the memory region only once, but link to all cpus. */
            if (!tag_sysmem) {
                /*
                 * The property exists only if MemTag is supported.
                 * If it is, we must allocate the ram to back that up.
                 */
                if (!object_property_find(cpuobj, "tag-memory")) {
                    error_report("MTE requested, but not supported "
                                 "by the guest CPU");
                    exit(1);
                }

                tag_sysmem = g_new(MemoryRegion, 1);
                memory_region_init(tag_sysmem, OBJECT(machine),
                                   "tag-memory", UINT64_MAX / 32);

                if (vms->secure) {
                    secure_tag_sysmem = g_new(MemoryRegion, 1);
                    memory_region_init(secure_tag_sysmem, OBJECT(machine),
                                       "secure-tag-memory", UINT64_MAX / 32);

                    /* As with ram, secure-tag takes precedence over tag. */
                    memory_region_add_subregion_overlap(secure_tag_sysmem, 0,
                                                        tag_sysmem, -1);
                }
            }

            object_property_set_link(cpuobj, "tag-memory", OBJECT(tag_sysmem),
                                     &error_abort);
            if (vms->secure) {
                object_property_set_link(cpuobj, "secure-tag-memory",
                                         OBJECT(secure_tag_sysmem),
                                         &error_abort);
            }
        }

        qdev_realize(DEVICE(cpuobj), NULL, &error_fatal);
        object_unref(cpuobj);
    }
    fdt_add_timer_nodes(vms);
    fdt_add_cpu_nodes(vms);

    memory_region_add_subregion(sysmem, vms->memmap[VIRT_MEM].base,
                                machine->ram);
    if (machine->device_memory) {
        memory_region_add_subregion(sysmem, machine->device_memory->base,
                                    &machine->device_memory->mr);
    }

    virt_flash_fdt(vms, sysmem, secure_sysmem ?: sysmem);

    create_gic(vms, sysmem);

    virt_cpu_post_init(vms, sysmem);

    fdt_add_pmu_nodes(vms);

    create_uart(vms, VIRT_UART, sysmem, serial_hd(0));

    if (vms->secure) {
        create_secure_ram(vms, secure_sysmem, secure_tag_sysmem);
        create_uart(vms, VIRT_SECURE_UART, secure_sysmem, serial_hd(1));
    }

    if (tag_sysmem) {
        create_tag_ram(tag_sysmem, vms->memmap[VIRT_MEM].base,
                       machine->ram_size, "mach-virt.tag");
    }

    vms->highmem_ecam &= (!firmware_loaded || aarch64);

    create_rtc(vms);

    create_pcie(vms);

    if (has_ged && aarch64 && firmware_loaded && virt_is_acpi_enabled(vms)) {
        vms->acpi_dev = create_acpi_ged(vms);
    } else {
        create_gpio_devices(vms, VIRT_GPIO, sysmem);
    }

    if (vms->secure && !vmc->no_secure_gpio) {
        create_gpio_devices(vms, VIRT_SECURE_GPIO, secure_sysmem);
    }

    /* connect powerdown request */
    vms->powerdown_notifier.notify = virt_powerdown_req;
    qemu_register_powerdown_notifier(&vms->powerdown_notifier);

    /* Create mmio transports, so the user can create virtio backends
     * (which will be automatically plugged in to the transports). If
     * no backend is created the transport will just sit harmlessly idle.
     */
    create_virtio_devices(vms);

    vms->fw_cfg = create_fw_cfg(vms, &address_space_memory);
    rom_set_fw(vms->fw_cfg);

    create_platform_bus(vms);

    if (machine->nvdimms_state->is_enabled) {
        const struct AcpiGenericAddress arm_virt_nvdimm_acpi_dsmio = {
            .space_id = AML_AS_SYSTEM_MEMORY,
            .address = vms->memmap[VIRT_NVDIMM_ACPI].base,
            .bit_width = NVDIMM_ACPI_IO_LEN << 3
        };

        nvdimm_init_acpi_state(machine->nvdimms_state, sysmem,
                               arm_virt_nvdimm_acpi_dsmio,
                               vms->fw_cfg, OBJECT(vms));
    }

    vms->bootinfo.ram_size = machine->ram_size;
    vms->bootinfo.board_id = -1;
    vms->bootinfo.loader_start = vms->memmap[VIRT_MEM].base;
    vms->bootinfo.get_dtb = machvirt_dtb;
    vms->bootinfo.skip_dtb_autoload = true;
    vms->bootinfo.firmware_loaded = firmware_loaded;
    vms->bootinfo.psci_conduit = vms->psci_conduit;
    arm_load_kernel(ARM_CPU(first_cpu), machine, &vms->bootinfo);

    vms->machine_done.notify = virt_machine_done;
    qemu_add_machine_init_done_notifier(&vms->machine_done);
}
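/*
 * Illustrative usage (not part of the original board code): the paths
 * above that deal with TrustZone, EL2 and MTE are only exercised when
 * the corresponding machine options are set, e.g. something like
 *
 *   qemu-system-aarch64 -M virt,secure=on,virtualization=on,mte=on \
 *       -cpu max -accel tcg ...
 *
 * under TCG; KVM and HVF reject these options as shown above.
 */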
static bool virt_get_secure(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->secure;
}

static void virt_set_secure(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->secure = value;
}

static bool virt_get_virt(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->virt;
}

static void virt_set_virt(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->virt = value;
}
static bool virt_get_highmem(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->highmem;
}

static void virt_set_highmem(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->highmem = value;
}

static bool virt_get_compact_highmem(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->highmem_compact;
}

static void virt_set_compact_highmem(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->highmem_compact = value;
}

static bool virt_get_highmem_redists(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->highmem_redists;
}

static void virt_set_highmem_redists(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->highmem_redists = value;
}

static bool virt_get_highmem_ecam(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->highmem_ecam;
}

static void virt_set_highmem_ecam(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->highmem_ecam = value;
}

static bool virt_get_highmem_mmio(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->highmem_mmio;
}

static void virt_set_highmem_mmio(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->highmem_mmio = value;
}
static bool virt_get_its(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->its;
}

static void virt_set_its(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->its = value;
}

static bool virt_get_dtb_randomness(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->dtb_randomness;
}

static void virt_set_dtb_randomness(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->dtb_randomness = value;
}
static char *virt_get_oem_id(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return g_strdup(vms->oem_id);
}

static void virt_set_oem_id(Object *obj, const char *value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);
    size_t len = strlen(value);

    if (len > 6) {
        error_setg(errp,
                   "User specified oem-id value is bigger than 6 bytes in size");
        return;
    }

    strncpy(vms->oem_id, value, 6);
}

static char *virt_get_oem_table_id(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return g_strdup(vms->oem_table_id);
}

static void virt_set_oem_table_id(Object *obj, const char *value,
                                  Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);
    size_t len = strlen(value);

    if (len > 8) {
        error_setg(errp,
                   "User specified oem-table-id value is bigger than 8 bytes in size");
        return;
    }

    strncpy(vms->oem_table_id, value, 8);
}
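/*
 * Illustrative note (not part of the original board code): a value
 * shorter than the field, e.g. "-M virt,x-oem-id=QEMU", is copied with
 * strncpy(), so the remaining bytes of the 6-byte OEMID field are
 * NUL-padded; anything longer than 6 (or 8 for x-oem-table-id) bytes
 * is rejected with the errors above.
 */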
bool virt_is_acpi_enabled(VirtMachineState *vms)
{
    if (vms->acpi == ON_OFF_AUTO_OFF) {
        return false;
    }
    return true;
}

static void virt_get_acpi(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);
    OnOffAuto acpi = vms->acpi;

    visit_type_OnOffAuto(v, name, &acpi, errp);
}

static void virt_set_acpi(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    visit_type_OnOffAuto(v, name, &vms->acpi, errp);
}
static bool virt_get_ras(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->ras;
}

static void virt_set_ras(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->ras = value;
}

static bool virt_get_mte(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->mte;
}

static void virt_set_mte(Object *obj, bool value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->mte = value;
}
static char *virt_get_gic_version(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);
    const char *val;

    switch (vms->gic_version) {
    case VIRT_GIC_VERSION_4:
        val = "4";
        break;
    case VIRT_GIC_VERSION_3:
        val = "3";
        break;
    default:
        val = "2";
        break;
    }
    return g_strdup(val);
}

static void virt_set_gic_version(Object *obj, const char *value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    if (!strcmp(value, "4")) {
        vms->gic_version = VIRT_GIC_VERSION_4;
    } else if (!strcmp(value, "3")) {
        vms->gic_version = VIRT_GIC_VERSION_3;
    } else if (!strcmp(value, "2")) {
        vms->gic_version = VIRT_GIC_VERSION_2;
    } else if (!strcmp(value, "host")) {
        vms->gic_version = VIRT_GIC_VERSION_HOST; /* Will probe later */
    } else if (!strcmp(value, "max")) {
        vms->gic_version = VIRT_GIC_VERSION_MAX; /* Will probe later */
    } else {
        error_setg(errp, "Invalid gic-version value");
        error_append_hint(errp, "Valid values are 2, 3, 4, host, max.\n");
    }
}
static char *virt_get_iommu(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    switch (vms->iommu) {
    case VIRT_IOMMU_NONE:
        return g_strdup("none");
    case VIRT_IOMMU_SMMUV3:
        return g_strdup("smmuv3");
    default:
        g_assert_not_reached();
    }
}

static void virt_set_iommu(Object *obj, const char *value, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    if (!strcmp(value, "smmuv3")) {
        vms->iommu = VIRT_IOMMU_SMMUV3;
    } else if (!strcmp(value, "none")) {
        vms->iommu = VIRT_IOMMU_NONE;
    } else {
        error_setg(errp, "Invalid iommu value");
        error_append_hint(errp, "Valid values are none, smmuv3.\n");
    }
}
static bool virt_get_default_bus_bypass_iommu(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->default_bus_bypass_iommu;
}

static void virt_set_default_bus_bypass_iommu(Object *obj, bool value,
                                              Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->default_bus_bypass_iommu = value;
}
static CpuInstanceProperties
virt_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);

    assert(cpu_index < possible_cpus->len);
    return possible_cpus->cpus[cpu_index].props;
}
static int64_t virt_get_default_cpu_node_id(const MachineState *ms, int idx)
{
    int64_t socket_id = ms->possible_cpus->cpus[idx].props.socket_id;

    return socket_id % ms->numa_state->num_nodes;
}
static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
{
    int n;
    unsigned int max_cpus = ms->smp.max_cpus;
    VirtMachineState *vms = VIRT_MACHINE(ms);
    MachineClass *mc = MACHINE_GET_CLASS(vms);

    if (ms->possible_cpus) {
        assert(ms->possible_cpus->len == max_cpus);
        return ms->possible_cpus;
    }

    ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
                                  sizeof(CPUArchId) * max_cpus);
    ms->possible_cpus->len = max_cpus;
    for (n = 0; n < ms->possible_cpus->len; n++) {
        ms->possible_cpus->cpus[n].type = ms->cpu_type;
        ms->possible_cpus->cpus[n].arch_id =
            virt_cpu_mp_affinity(vms, n);

        assert(!mc->smp_props.dies_supported);
        ms->possible_cpus->cpus[n].props.has_socket_id = true;
        ms->possible_cpus->cpus[n].props.socket_id =
            n / (ms->smp.clusters * ms->smp.cores * ms->smp.threads);
        ms->possible_cpus->cpus[n].props.has_cluster_id = true;
        ms->possible_cpus->cpus[n].props.cluster_id =
            (n / (ms->smp.cores * ms->smp.threads)) % ms->smp.clusters;
        ms->possible_cpus->cpus[n].props.has_core_id = true;
        ms->possible_cpus->cpus[n].props.core_id =
            (n / ms->smp.threads) % ms->smp.cores;
        ms->possible_cpus->cpus[n].props.has_thread_id = true;
        ms->possible_cpus->cpus[n].props.thread_id =
            n % ms->smp.threads;
    }
    return ms->possible_cpus;
}
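/*
 * Worked example (illustrative only): with
 * "-smp 16,sockets=2,clusters=2,cores=2,threads=2", CPU index 13 gets
 * socket_id = 13 / (2 * 2 * 2) = 1, cluster_id = (13 / (2 * 2)) % 2 = 1,
 * core_id = (13 / 2) % 2 = 0 and thread_id = 13 % 2 = 1, matching the
 * formulas above.
 */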
static void virt_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                 Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(hotplug_dev);
    const MachineState *ms = MACHINE(hotplug_dev);
    const bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);

    if (!vms->acpi_dev) {
        error_setg(errp,
                   "memory hotplug is not enabled: missing acpi-ged device");
        return;
    }

    if (vms->mte) {
        error_setg(errp, "memory hotplug is not enabled: MTE is enabled");
        return;
    }

    if (is_nvdimm && !ms->nvdimms_state->is_enabled) {
        error_setg(errp, "nvdimm is not enabled: add 'nvdimm=on' to '-M'");
        return;
    }

    pc_dimm_pre_plug(PC_DIMM(dev), MACHINE(hotplug_dev), NULL, errp);
}
static void virt_memory_plug(HotplugHandler *hotplug_dev,
                             DeviceState *dev, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(hotplug_dev);
    MachineState *ms = MACHINE(hotplug_dev);
    bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);

    pc_dimm_plug(PC_DIMM(dev), MACHINE(vms));

    if (is_nvdimm) {
        nvdimm_plug(ms->nvdimms_state);
    }

    hotplug_handler_plug(HOTPLUG_HANDLER(vms->acpi_dev),
                         dev, &error_abort);
}
static void virt_virtio_md_pci_pre_plug(HotplugHandler *hotplug_dev,
                                        DeviceState *dev, Error **errp)
{
    HotplugHandler *hotplug_dev2 = qdev_get_bus_hotplug_handler(dev);
    Error *local_err = NULL;

    if (!hotplug_dev2 && dev->hotplugged) {
        /*
         * Without a bus hotplug handler, we cannot control the plug/unplug
         * order. We should never reach this point when hotplugging on ARM.
         * However, it's nice to add a safety net, similar to what we have
         * on x86.
         */
        error_setg(errp, "hotplug of virtio based memory devices not supported"
                   " on this bus.");
        return;
    }
    /*
     * First, see if we can plug this memory device at all. If that
     * succeeds, branch off to the actual hotplug handler.
     */
    memory_device_pre_plug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev), NULL,
                           &local_err);
    if (!local_err && hotplug_dev2) {
        hotplug_handler_pre_plug(hotplug_dev2, dev, &local_err);
    }
    error_propagate(errp, local_err);
}
static void virt_virtio_md_pci_plug(HotplugHandler *hotplug_dev,
                                    DeviceState *dev, Error **errp)
{
    HotplugHandler *hotplug_dev2 = qdev_get_bus_hotplug_handler(dev);
    Error *local_err = NULL;

    /*
     * Plug the memory device first and then branch off to the actual
     * hotplug handler. If that one fails, we can easily undo the memory
     * device plugging.
     */
    memory_device_plug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev));
    if (hotplug_dev2) {
        hotplug_handler_plug(hotplug_dev2, dev, &local_err);
        if (local_err) {
            memory_device_unplug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev));
        }
    }
    error_propagate(errp, local_err);
}
static void virt_virtio_md_pci_unplug_request(HotplugHandler *hotplug_dev,
                                              DeviceState *dev, Error **errp)
{
    /* We don't support hot unplug of virtio based memory devices */
    error_setg(errp, "virtio based memory devices cannot be unplugged.");
}
static void virt_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev,
                                            DeviceState *dev, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(hotplug_dev);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        virt_memory_pre_plug(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
        virt_virtio_md_pci_pre_plug(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
        hwaddr db_start = 0, db_end = 0;
        char *resv_prop_str;

        if (vms->iommu != VIRT_IOMMU_NONE) {
            error_setg(errp, "virt machine does not support multiple IOMMUs");
            return;
        }

        switch (vms->msi_controller) {
        case VIRT_MSI_CTRL_NONE:
            return;
        case VIRT_MSI_CTRL_ITS:
            /* GITS_TRANSLATER page */
            db_start = base_memmap[VIRT_GIC_ITS].base + 0x10000;
            db_end = base_memmap[VIRT_GIC_ITS].base +
                     base_memmap[VIRT_GIC_ITS].size - 1;
            break;
        case VIRT_MSI_CTRL_GICV2M:
            /* MSI_SETSPI_NS page */
            db_start = base_memmap[VIRT_GIC_V2M].base;
            db_end = db_start + base_memmap[VIRT_GIC_V2M].size - 1;
            break;
        }
        resv_prop_str = g_strdup_printf("0x%"PRIx64":0x%"PRIx64":%u",
                                        db_start, db_end,
                                        VIRTIO_IOMMU_RESV_MEM_T_MSI);

        object_property_set_uint(OBJECT(dev), "len-reserved-regions", 1, errp);
        object_property_set_str(OBJECT(dev), "reserved-regions[0]",
                                resv_prop_str, errp);
        g_free(resv_prop_str);
    }
}
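/*
 * Illustrative note (not part of the original board code): with the
 * default memory map and an ITS MSI controller, the property string
 * built above would look like "0x8090000:0x809ffff:1", i.e. the
 * GITS_TRANSLATER doorbell page expressed as start:end:type with type
 * VIRTIO_IOMMU_RESV_MEM_T_MSI, telling the virtio-iommu to treat that
 * window as an MSI reserved region.
 */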
static void virt_machine_device_plug_cb(HotplugHandler *hotplug_dev,
                                        DeviceState *dev, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(hotplug_dev);

    if (vms->platform_bus_dev) {
        MachineClass *mc = MACHINE_GET_CLASS(vms);

        if (device_is_dynamic_sysbus(mc, dev)) {
            platform_bus_link_device(PLATFORM_BUS_DEVICE(vms->platform_bus_dev),
                                     SYS_BUS_DEVICE(dev));
        }
    }
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        virt_memory_plug(hotplug_dev, dev, errp);
    }
    if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
        virt_virtio_md_pci_plug(hotplug_dev, dev, errp);
    }

    if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
        PCIDevice *pdev = PCI_DEVICE(dev);

        vms->iommu = VIRT_IOMMU_VIRTIO;
        vms->virtio_iommu_bdf = pci_get_bdf(pdev);
        create_virtio_iommu_dt_bindings(vms);
    }
}
static void virt_dimm_unplug_request(HotplugHandler *hotplug_dev,
                                     DeviceState *dev, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(hotplug_dev);

    if (!vms->acpi_dev) {
        error_setg(errp,
                   "memory hotplug is not enabled: missing acpi-ged device");
        return;
    }

    if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
        error_setg(errp, "nvdimm device hot unplug is not supported yet.");
        return;
    }

    hotplug_handler_unplug_request(HOTPLUG_HANDLER(vms->acpi_dev), dev,
                                   errp);
}
static void virt_dimm_unplug(HotplugHandler *hotplug_dev,
                             DeviceState *dev, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(hotplug_dev);
    Error *local_err = NULL;

    hotplug_handler_unplug(HOTPLUG_HANDLER(vms->acpi_dev), dev, &local_err);
    if (local_err) {
        goto out;
    }

    pc_dimm_unplug(PC_DIMM(dev), MACHINE(vms));
    qdev_unrealize(dev);

out:
    error_propagate(errp, local_err);
}
static void virt_machine_device_unplug_request_cb(HotplugHandler *hotplug_dev,
                                                  DeviceState *dev, Error **errp)
{
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        virt_dimm_unplug_request(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
        virt_virtio_md_pci_unplug_request(hotplug_dev, dev, errp);
    } else {
        error_setg(errp, "device unplug request for unsupported device"
                   " type: %s", object_get_typename(OBJECT(dev)));
    }
}
static void virt_machine_device_unplug_cb(HotplugHandler *hotplug_dev,
                                          DeviceState *dev, Error **errp)
{
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        virt_dimm_unplug(hotplug_dev, dev, errp);
    } else {
        error_setg(errp, "virt: device unplug for unsupported device"
                   " type: %s", object_get_typename(OBJECT(dev)));
    }
}
static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine,
                                                        DeviceState *dev)
{
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    if (device_is_dynamic_sysbus(mc, dev) ||
        object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
        object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI) ||
        object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
        return HOTPLUG_HANDLER(machine);
    }
    return NULL;
}
/*
 * for arm64 kvm_type [7-0] encodes the requested number of bits
 * in the IPA address space
 */
static int virt_kvm_type(MachineState *ms, const char *type_str)
{
    VirtMachineState *vms = VIRT_MACHINE(ms);
    int max_vm_pa_size, requested_pa_size;
    bool fixed_ipa;

    max_vm_pa_size = kvm_arm_get_max_vm_ipa_size(ms, &fixed_ipa);

    /* we freeze the memory map to compute the highest gpa */
    virt_set_memmap(vms, max_vm_pa_size);

    requested_pa_size = 64 - clz64(vms->highest_gpa);

    /*
     * KVM requires the IPA size to be at least 32 bits.
     */
    if (requested_pa_size < 32) {
        requested_pa_size = 32;
    }

    if (requested_pa_size > max_vm_pa_size) {
        error_report("-m and ,maxmem option values "
                     "require an IPA range (%d bits) larger than "
                     "the one supported by the host (%d bits)",
                     requested_pa_size, max_vm_pa_size);
        exit(1);
    }
    /*
     * We return the requested PA log size, unless KVM only supports
     * the implicit legacy 40b IPA setting, in which case the kvm_type
     * must be 0.
     */
    return fixed_ipa ? 0 : requested_pa_size;
}
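/*
 * Illustrative example (not part of the original board code): if the
 * memory map computed above tops out at highest_gpa = 0x7fffffffff
 * (512 GiB - 1), then requested_pa_size = 64 - clz64(0x7fffffffff) = 39
 * and kvm_type reports a 39-bit IPA space to KVM; 0 is returned instead
 * when the host only supports the implicit legacy 40-bit setting.
 */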
static void virt_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);

    mc->init = machvirt_init;
    /* Start with max_cpus set to 512, which is the maximum supported by KVM.
     * The value may be reduced later when we have more information about the
     * configuration of the particular instance.
     */
    mc->max_cpus = 512;
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_CALXEDA_XGMAC);
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_AMD_XGBE);
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE);
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_PLATFORM);
#ifdef CONFIG_TPM
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS);
#endif
    mc->block_default_type = IF_VIRTIO;
    mc->no_cdrom = 1;
    mc->pci_allow_0_address = true;
    /* We know we will never create a pre-ARMv7 CPU which needs 1K pages */
    mc->minimum_page_bits = 12;
    mc->possible_cpu_arch_ids = virt_possible_cpu_arch_ids;
    mc->cpu_index_to_instance_props = virt_cpu_index_to_props;
#ifdef CONFIG_TCG
    mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a15");
#else
    mc->default_cpu_type = ARM_CPU_TYPE_NAME("max");
#endif
    mc->get_default_cpu_node_id = virt_get_default_cpu_node_id;
    mc->kvm_type = virt_kvm_type;
    assert(!mc->get_hotplug_handler);
    mc->get_hotplug_handler = virt_machine_get_hotplug_handler;
    hc->pre_plug = virt_machine_device_pre_plug_cb;
    hc->plug = virt_machine_device_plug_cb;
    hc->unplug_request = virt_machine_device_unplug_request_cb;
    hc->unplug = virt_machine_device_unplug_cb;
    mc->nvdimm_supported = true;
    mc->smp_props.clusters_supported = true;
    mc->auto_enable_numa_with_memhp = true;
    mc->auto_enable_numa_with_memdev = true;
    mc->default_ram_id = "mach-virt.ram";

    object_class_property_add(oc, "acpi", "OnOffAuto",
                              virt_get_acpi, virt_set_acpi,
                              NULL, NULL);
    object_class_property_set_description(oc, "acpi",
                                          "Enable ACPI");
    object_class_property_add_bool(oc, "secure", virt_get_secure,
                                   virt_set_secure);
    object_class_property_set_description(oc, "secure",
                                          "Set on/off to enable/disable the ARM "
                                          "Security Extensions (TrustZone)");

    object_class_property_add_bool(oc, "virtualization", virt_get_virt,
                                   virt_set_virt);
    object_class_property_set_description(oc, "virtualization",
                                          "Set on/off to enable/disable emulating a "
                                          "guest CPU which implements the ARM "
                                          "Virtualization Extensions");

    object_class_property_add_bool(oc, "highmem", virt_get_highmem,
                                   virt_set_highmem);
    object_class_property_set_description(oc, "highmem",
                                          "Set on/off to enable/disable using "
                                          "physical address space above 32 bits");

    object_class_property_add_bool(oc, "compact-highmem",
                                   virt_get_compact_highmem,
                                   virt_set_compact_highmem);
    object_class_property_set_description(oc, "compact-highmem",
                                          "Set on/off to enable/disable compact "
                                          "layout for high memory regions");

    object_class_property_add_bool(oc, "highmem-redists",
                                   virt_get_highmem_redists,
                                   virt_set_highmem_redists);
    object_class_property_set_description(oc, "highmem-redists",
                                          "Set on/off to enable/disable high "
                                          "memory region for GICv3 or GICv4 "
                                          "redistributors");

    object_class_property_add_bool(oc, "highmem-ecam",
                                   virt_get_highmem_ecam,
                                   virt_set_highmem_ecam);
    object_class_property_set_description(oc, "highmem-ecam",
                                          "Set on/off to enable/disable high "
                                          "memory region for PCI ECAM");

    object_class_property_add_bool(oc, "highmem-mmio",
                                   virt_get_highmem_mmio,
                                   virt_set_highmem_mmio);
    object_class_property_set_description(oc, "highmem-mmio",
                                          "Set on/off to enable/disable high "
                                          "memory region for PCI MMIO");

    object_class_property_add_str(oc, "gic-version", virt_get_gic_version,
                                  virt_set_gic_version);
    object_class_property_set_description(oc, "gic-version",
                                          "Set GIC version. "
                                          "Valid values are 2, 3, 4, host and max");

    object_class_property_add_str(oc, "iommu", virt_get_iommu, virt_set_iommu);
    object_class_property_set_description(oc, "iommu",
                                          "Set the IOMMU type. "
                                          "Valid values are none and smmuv3");

    object_class_property_add_bool(oc, "default-bus-bypass-iommu",
                                   virt_get_default_bus_bypass_iommu,
                                   virt_set_default_bus_bypass_iommu);
    object_class_property_set_description(oc, "default-bus-bypass-iommu",
                                          "Set on/off to enable/disable "
                                          "bypass_iommu for default root bus");

    object_class_property_add_bool(oc, "ras", virt_get_ras,
                                   virt_set_ras);
    object_class_property_set_description(oc, "ras",
                                          "Set on/off to enable/disable reporting host memory errors "
                                          "to a KVM guest using ACPI and guest external abort exceptions");

    object_class_property_add_bool(oc, "mte", virt_get_mte, virt_set_mte);
    object_class_property_set_description(oc, "mte",
                                          "Set on/off to enable/disable emulating a "
                                          "guest CPU which implements the ARM "
                                          "Memory Tagging Extension");

    object_class_property_add_bool(oc, "its", virt_get_its,
                                   virt_set_its);
    object_class_property_set_description(oc, "its",
                                          "Set on/off to enable/disable "
                                          "ITS instantiation");

    object_class_property_add_bool(oc, "dtb-randomness",
                                   virt_get_dtb_randomness,
                                   virt_set_dtb_randomness);
    object_class_property_set_description(oc, "dtb-randomness",
                                          "Set off to disable passing random or "
                                          "non-deterministic dtb nodes to guest");

    object_class_property_add_bool(oc, "dtb-kaslr-seed",
                                   virt_get_dtb_randomness,
                                   virt_set_dtb_randomness);
    object_class_property_set_description(oc, "dtb-kaslr-seed",
                                          "Deprecated synonym of dtb-randomness");

    object_class_property_add_str(oc, "x-oem-id",
                                  virt_get_oem_id,
                                  virt_set_oem_id);
    object_class_property_set_description(oc, "x-oem-id",
                                          "Override the default value of field OEMID "
                                          "in ACPI table header."
                                          "The string may be up to 6 bytes in size");

    object_class_property_add_str(oc, "x-oem-table-id",
                                  virt_get_oem_table_id,
                                  virt_set_oem_table_id);
    object_class_property_set_description(oc, "x-oem-table-id",
                                          "Override the default value of field OEM Table ID "
                                          "in ACPI table header."
                                          "The string may be up to 8 bytes in size");
}
static void virt_instance_init(Object *obj)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);
    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);

    /* EL3 is disabled by default on virt: this makes us consistent
     * between KVM and TCG for this board, and it also allows us to
     * boot UEFI blobs which assume no TrustZone support.
     */
    vms->secure = false;

    /* EL2 is also disabled by default, for similar reasons */
    vms->virt = false;

    /* High memory is enabled by default */
    vms->highmem = true;
    vms->highmem_compact = !vmc->no_highmem_compact;
    vms->gic_version = VIRT_GIC_VERSION_NOSEL;

    vms->highmem_ecam = !vmc->no_highmem_ecam;
    vms->highmem_mmio = true;
    vms->highmem_redists = true;

    if (vmc->no_its) {
        vms->its = false;
    } else {
        /* Default allows ITS instantiation */
        vms->its = true;

        if (vmc->no_tcg_its) {
            vms->tcg_its = false;
        } else {
            vms->tcg_its = true;
        }
    }

    /* Default disallows iommu instantiation */
    vms->iommu = VIRT_IOMMU_NONE;

    /* The default root bus is attached to iommu by default */
    vms->default_bus_bypass_iommu = false;

    /* Default disallows RAS instantiation */
    vms->ras = false;

    /* MTE is disabled by default. */
    vms->mte = false;

    /* Supply kaslr-seed and rng-seed by default */
    vms->dtb_randomness = true;

    vms->irqmap = a15irqmap;

    virt_flash_create(vms);

    vms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6);
    vms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8);
}
static const TypeInfo virt_machine_info = {
    .name          = TYPE_VIRT_MACHINE,
    .parent        = TYPE_MACHINE,
    .abstract      = true,
    .instance_size = sizeof(VirtMachineState),
    .class_size    = sizeof(VirtMachineClass),
    .class_init    = virt_machine_class_init,
    .instance_init = virt_instance_init,
    .interfaces = (InterfaceInfo[]) {
         { TYPE_HOTPLUG_HANDLER },
         { }
    },
};

static void machvirt_machine_init(void)
{
    type_register_static(&virt_machine_info);
}
type_init(machvirt_machine_init);
static void virt_machine_8_1_options(MachineClass *mc)
{
}
DEFINE_VIRT_MACHINE_AS_LATEST(8, 1)

static void virt_machine_8_0_options(MachineClass *mc)
{
    virt_machine_8_1_options(mc);
    compat_props_add(mc->compat_props, hw_compat_8_0, hw_compat_8_0_len);
}
DEFINE_VIRT_MACHINE(8, 0)

static void virt_machine_7_2_options(MachineClass *mc)
{
    virt_machine_8_0_options(mc);
    compat_props_add(mc->compat_props, hw_compat_7_2, hw_compat_7_2_len);
}
DEFINE_VIRT_MACHINE(7, 2)

static void virt_machine_7_1_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_7_2_options(mc);
    compat_props_add(mc->compat_props, hw_compat_7_1, hw_compat_7_1_len);
    /* Compact layout for high memory regions was introduced with 7.2 */
    vmc->no_highmem_compact = true;
}
DEFINE_VIRT_MACHINE(7, 1)

static void virt_machine_7_0_options(MachineClass *mc)
{
    virt_machine_7_1_options(mc);
    compat_props_add(mc->compat_props, hw_compat_7_0, hw_compat_7_0_len);
}
DEFINE_VIRT_MACHINE(7, 0)

static void virt_machine_6_2_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_7_0_options(mc);
    compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len);
    vmc->no_tcg_lpa2 = true;
}
DEFINE_VIRT_MACHINE(6, 2)

static void virt_machine_6_1_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_6_2_options(mc);
    compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len);
    mc->smp_props.prefer_sockets = true;
    vmc->no_cpu_topology = true;

    /* qemu ITS was introduced with 6.2 */
    vmc->no_tcg_its = true;
}
DEFINE_VIRT_MACHINE(6, 1)

static void virt_machine_6_0_options(MachineClass *mc)
{
    virt_machine_6_1_options(mc);
    compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len);
}
DEFINE_VIRT_MACHINE(6, 0)

static void virt_machine_5_2_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_6_0_options(mc);
    compat_props_add(mc->compat_props, hw_compat_5_2, hw_compat_5_2_len);
    vmc->no_secure_gpio = true;
}
DEFINE_VIRT_MACHINE(5, 2)

static void virt_machine_5_1_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_5_2_options(mc);
    compat_props_add(mc->compat_props, hw_compat_5_1, hw_compat_5_1_len);
    vmc->no_kvm_steal_time = true;
}
DEFINE_VIRT_MACHINE(5, 1)

static void virt_machine_5_0_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_5_1_options(mc);
    compat_props_add(mc->compat_props, hw_compat_5_0, hw_compat_5_0_len);
    mc->numa_mem_supported = true;
    vmc->acpi_expose_flash = true;
    mc->auto_enable_numa_with_memdev = false;
}
DEFINE_VIRT_MACHINE(5, 0)

static void virt_machine_4_2_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_5_0_options(mc);
    compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len);
    vmc->kvm_no_adjvtime = true;
}
DEFINE_VIRT_MACHINE(4, 2)

static void virt_machine_4_1_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_4_2_options(mc);
    compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len);
    vmc->no_ged = true;
    mc->auto_enable_numa_with_memhp = false;
}
DEFINE_VIRT_MACHINE(4, 1)

static void virt_machine_4_0_options(MachineClass *mc)
{
    virt_machine_4_1_options(mc);
    compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len);
}
DEFINE_VIRT_MACHINE(4, 0)

static void virt_machine_3_1_options(MachineClass *mc)
{
    virt_machine_4_0_options(mc);
    compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len);
}
DEFINE_VIRT_MACHINE(3, 1)

static void virt_machine_3_0_options(MachineClass *mc)
{
    virt_machine_3_1_options(mc);
    compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len);
}
DEFINE_VIRT_MACHINE(3, 0)

static void virt_machine_2_12_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_3_0_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len);
    vmc->no_highmem_ecam = true;
    mc->max_cpus = 255;
}
DEFINE_VIRT_MACHINE(2, 12)

static void virt_machine_2_11_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_2_12_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len);
    vmc->smbios_old_sys_ver = true;
}
DEFINE_VIRT_MACHINE(2, 11)

static void virt_machine_2_10_options(MachineClass *mc)
{
    virt_machine_2_11_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len);
    /* before 2.11 we never faulted accesses to bad addresses */
    mc->ignore_memory_transaction_failures = true;
}
DEFINE_VIRT_MACHINE(2, 10)

static void virt_machine_2_9_options(MachineClass *mc)
{
    virt_machine_2_10_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len);
}
DEFINE_VIRT_MACHINE(2, 9)

static void virt_machine_2_8_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_2_9_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len);
    /* For 2.8 and earlier we falsely claimed in the DT that
     * our timers were edge-triggered, not level-triggered.
     */
    vmc->claim_edge_triggered_timers = true;
}
DEFINE_VIRT_MACHINE(2, 8)

static void virt_machine_2_7_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_2_8_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len);
    /* ITS was introduced with 2.8 */
    vmc->no_its = true;
    /* Stick with 1K pages for migration compatibility */
    mc->minimum_page_bits = 0;
}
DEFINE_VIRT_MACHINE(2, 7)

static void virt_machine_2_6_options(MachineClass *mc)
{
    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

    virt_machine_2_7_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len);
    vmc->disallow_affinity_adjustment = true;
    /* Disable PMU for 2.6 as PMU support was first introduced in 2.7 */
    vmc->no_pmu = true;
}
DEFINE_VIRT_MACHINE(2, 6)