/*
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2019 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qemu/cutils.h"
#include "qemu/units.h"
#include "qemu-common.h"
#include "qemu/datadir.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qapi-visit-common.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-machine.h"
#include "qapi/visitor.h"
#include "sysemu/qtest.h"
#include "sysemu/whpx.h"
#include "sysemu/numa.h"
#include "sysemu/replay.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpu-timers.h"

#include "hw/i386/x86.h"
#include "target/i386/cpu.h"
#include "hw/i386/topology.h"
#include "hw/i386/fw_cfg.h"
#include "hw/intc/i8259.h"
#include "hw/rtc/mc146818rtc.h"

#include "hw/acpi/cpu_hotplug.h"

#include "hw/loader.h"
#include "multiboot.h"

#include "standard-headers/asm-x86/bootparam.h"
#include CONFIG_DEVICES
#include "kvm/kvm_i386.h"
/* Physical Address of PVH entry point read from kernel ELF NOTE */
static size_t pvh_start_addr;
inline void init_topo_info(X86CPUTopoInfo *topo_info,
                           const X86MachineState *x86ms)
{
    MachineState *ms = MACHINE(x86ms);

    topo_info->dies_per_pkg = ms->smp.dies;
    topo_info->cores_per_die = ms->smp.cores;
    topo_info->threads_per_core = ms->smp.threads;
}
/*
 * Calculates initial APIC ID for a specific CPU index
 *
 * Currently we need to be able to calculate the APIC ID from the CPU index
 * alone (without requiring a CPU object), as the QEMU<->SeaBIOS interfaces
 * have no concept of "CPU index", and the NUMA tables on fw_cfg need the
 * APIC ID of all CPUs up to max_cpus.
 */
uint32_t x86_cpu_apic_id_from_index(X86MachineState *x86ms,
                                    unsigned int cpu_index)
{
    X86MachineClass *x86mc = X86_MACHINE_GET_CLASS(x86ms);
    X86CPUTopoInfo topo_info;
    uint32_t correct_id;
    static bool warned;

    init_topo_info(&topo_info, x86ms);

    correct_id = x86_apicid_from_cpu_idx(&topo_info, cpu_index);
    if (x86mc->compat_apic_id_mode) {
        if (cpu_index != correct_id && !warned && !qtest_enabled()) {
            error_report("APIC IDs set in compatibility mode, "
                         "CPU topology won't match the configuration");
            warned = true;
        }
        return cpu_index;
    } else {
        return correct_id;
    }
}
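/*
 * Note on the APIC ID layout (a sketch based on the topology helpers in
 * hw/i386/topology.h, not part of the original comment): the SMT, core, die
 * and package IDs are packed into bit fields, each widened to a power of two.
 * For example, with -smp 6,cores=2,threads=3 the thread field is 2 bits wide,
 * so cpu_index 3 (core 1, thread 0) gets APIC ID 4; APIC IDs are therefore
 * not necessarily contiguous, which is why fw_cfg/ACPI consumers must use the
 * computed IDs rather than raw CPU indexes.
 */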
void x86_cpu_new(X86MachineState *x86ms, int64_t apic_id, Error **errp)
{
    Object *cpu = object_new(MACHINE(x86ms)->cpu_type);

    if (!object_property_set_uint(cpu, "apic-id", apic_id, errp)) {
        goto out;
    }
    qdev_realize(DEVICE(cpu), NULL, errp);

out:
    object_unref(cpu);
}
void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version)
{
    int i;
    const CPUArchIdList *possible_cpus;
    MachineState *ms = MACHINE(x86ms);
    MachineClass *mc = MACHINE_GET_CLASS(x86ms);

    x86_cpu_set_default_version(default_cpu_version);

    /*
     * Calculates the limit to CPU APIC ID values
     *
     * Limit for the APIC ID value, so that all
     * CPU APIC IDs are < x86ms->apic_id_limit.
     *
     * This is used for FW_CFG_MAX_CPUS. See comments on fw_cfg_arch_create().
     */
    x86ms->apic_id_limit = x86_cpu_apic_id_from_index(x86ms,
                                                      ms->smp.max_cpus - 1) + 1;
    possible_cpus = mc->possible_cpu_arch_ids(ms);
    for (i = 0; i < ms->smp.cpus; i++) {
        x86_cpu_new(x86ms, possible_cpus->cpus[i].arch_id, &error_fatal);
    }
}
void x86_rtc_set_cpus_count(ISADevice *rtc, uint16_t cpus_count)
{
    if (cpus_count > 0xff) {
        /*
         * If the number of CPUs can't be represented in 8 bits, the
         * BIOS must use "FW_CFG_NB_CPUS". Set RTC field to 0 just
         * to make old BIOSes fail more predictably.
         */
        rtc_set_memory(rtc, 0x5f, 0);
    } else {
        rtc_set_memory(rtc, 0x5f, cpus_count - 1);
    }
}
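/*
 * Sketch of the contract assumed here: CMOS offset 0x5f holds the boot CPU
 * count minus one and is read by the BIOS early at boot; once the count no
 * longer fits in 8 bits, firmware is expected to rely on the fw_cfg
 * FW_CFG_NB_CPUS entry instead (see x86_cpu_plug()/x86_cpu_unplug_cb()).
 */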
static int x86_apic_cmp(const void *a, const void *b)
{
    CPUArchId *apic_a = (CPUArchId *)a;
    CPUArchId *apic_b = (CPUArchId *)b;

    return apic_a->arch_id - apic_b->arch_id;
}
/*
 * Returns a pointer to the CPUArchId descriptor in ms->possible_cpus->cpus
 * that matches the CPU's apic_id, or NULL if ms->possible_cpus->cpus has no
 * entry for that apic_id.
 */
CPUArchId *x86_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
{
    CPUArchId apic_id, *found_cpu;

    apic_id.arch_id = id;
    found_cpu = bsearch(&apic_id, ms->possible_cpus->cpus,
                        ms->possible_cpus->len,
                        sizeof(*ms->possible_cpus->cpus),
                        x86_apic_cmp);
    if (found_cpu && idx) {
        *idx = found_cpu - ms->possible_cpus->cpus;
    }
    return found_cpu;
}
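/*
 * Note (an assumption documented here rather than stated in the original):
 * bsearch() is valid because ms->possible_cpus->cpus[] is filled in
 * cpu_index order by x86_possible_cpu_arch_ids() and
 * x86_cpu_apic_id_from_index() is monotonic in cpu_index, so the array is
 * already sorted by arch_id.
 */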
void x86_cpu_plug(HotplugHandler *hotplug_dev,
                  DeviceState *dev, Error **errp)
{
    CPUArchId *found_cpu;
    Error *local_err = NULL;
    X86CPU *cpu = X86_CPU(dev);
    X86MachineState *x86ms = X86_MACHINE(hotplug_dev);

    if (x86ms->acpi_dev) {
        hotplug_handler_plug(x86ms->acpi_dev, dev, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* increment the number of CPUs */
    x86ms->boot_cpus++;
    if (x86ms->rtc) {
        x86_rtc_set_cpus_count(x86ms->rtc, x86ms->boot_cpus);
    }
    if (x86ms->fw_cfg) {
        fw_cfg_modify_i16(x86ms->fw_cfg, FW_CFG_NB_CPUS, x86ms->boot_cpus);
    }

    found_cpu = x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, NULL);
    found_cpu->cpu = OBJECT(dev);
out:
    error_propagate(errp, local_err);
}
void x86_cpu_unplug_request_cb(HotplugHandler *hotplug_dev,
                               DeviceState *dev, Error **errp)
{
    int idx = -1;
    X86CPU *cpu = X86_CPU(dev);
    X86MachineState *x86ms = X86_MACHINE(hotplug_dev);

    if (!x86ms->acpi_dev) {
        error_setg(errp, "CPU hot unplug not supported without ACPI");
        return;
    }

    x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, &idx);
    assert(idx != -1);
    if (idx == 0) {
        error_setg(errp, "Boot CPU is unpluggable");
        return;
    }

    hotplug_handler_unplug_request(x86ms->acpi_dev, dev,
                                   errp);
}
void x86_cpu_unplug_cb(HotplugHandler *hotplug_dev,
                       DeviceState *dev, Error **errp)
{
    CPUArchId *found_cpu;
    Error *local_err = NULL;
    X86CPU *cpu = X86_CPU(dev);
    X86MachineState *x86ms = X86_MACHINE(hotplug_dev);

    hotplug_handler_unplug(x86ms->acpi_dev, dev, &local_err);
    if (local_err) {
        goto out;
    }

    found_cpu = x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, NULL);
    found_cpu->cpu = NULL;
    qdev_unrealize(dev);

    /* decrement the number of CPUs */
    x86ms->boot_cpus--;
    /* Update the number of CPUs in CMOS */
    x86_rtc_set_cpus_count(x86ms->rtc, x86ms->boot_cpus);
    fw_cfg_modify_i16(x86ms->fw_cfg, FW_CFG_NB_CPUS, x86ms->boot_cpus);
 out:
    error_propagate(errp, local_err);
}
void x86_cpu_pre_plug(HotplugHandler *hotplug_dev,
                      DeviceState *dev, Error **errp)
{
    int idx;
    CPUState *cs;
    CPUArchId *cpu_slot;
    X86CPUTopoIDs topo_ids;
    X86CPU *cpu = X86_CPU(dev);
    CPUX86State *env = &cpu->env;
    MachineState *ms = MACHINE(hotplug_dev);
    X86MachineState *x86ms = X86_MACHINE(hotplug_dev);
    unsigned int smp_cores = ms->smp.cores;
    unsigned int smp_threads = ms->smp.threads;
    X86CPUTopoInfo topo_info;

    if (!object_dynamic_cast(OBJECT(cpu), ms->cpu_type)) {
        error_setg(errp, "Invalid CPU type, expected cpu type: '%s'",
                   ms->cpu_type);
        return;
    }

    if (x86ms->acpi_dev) {
        Error *local_err = NULL;

        hotplug_handler_pre_plug(HOTPLUG_HANDLER(x86ms->acpi_dev), dev,
                                 &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    init_topo_info(&topo_info, x86ms);

    env->nr_dies = ms->smp.dies;
    /*
     * If APIC ID is not set,
     * set it based on socket/die/core/thread properties.
     */
    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        int max_socket = (ms->smp.max_cpus - 1) /
                                smp_threads / smp_cores / ms->smp.dies;

        /*
         * die-id was optional in QEMU 4.0 and older, so keep it optional
         * if there's only one die per socket.
         */
        if (cpu->die_id < 0 && ms->smp.dies == 1) {
            cpu->die_id = 0;
        }

        if (cpu->socket_id < 0) {
            error_setg(errp, "CPU socket-id is not set");
            return;
        } else if (cpu->socket_id > max_socket) {
            error_setg(errp, "Invalid CPU socket-id: %u must be in range 0:%u",
                       cpu->socket_id, max_socket);
            return;
        }
        if (cpu->die_id < 0) {
            error_setg(errp, "CPU die-id is not set");
            return;
        } else if (cpu->die_id > ms->smp.dies - 1) {
            error_setg(errp, "Invalid CPU die-id: %u must be in range 0:%u",
                       cpu->die_id, ms->smp.dies - 1);
            return;
        }
        if (cpu->core_id < 0) {
            error_setg(errp, "CPU core-id is not set");
            return;
        } else if (cpu->core_id > (smp_cores - 1)) {
            error_setg(errp, "Invalid CPU core-id: %u must be in range 0:%u",
                       cpu->core_id, smp_cores - 1);
            return;
        }
        if (cpu->thread_id < 0) {
            error_setg(errp, "CPU thread-id is not set");
            return;
        } else if (cpu->thread_id > (smp_threads - 1)) {
            error_setg(errp, "Invalid CPU thread-id: %u must be in range 0:%u",
                       cpu->thread_id, smp_threads - 1);
            return;
        }

        topo_ids.pkg_id = cpu->socket_id;
        topo_ids.die_id = cpu->die_id;
        topo_ids.core_id = cpu->core_id;
        topo_ids.smt_id = cpu->thread_id;
        cpu->apic_id = x86_apicid_from_topo_ids(&topo_info, &topo_ids);
    }
    cpu_slot = x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, &idx);
    if (!cpu_slot) {
        MachineState *ms = MACHINE(x86ms);

        x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
        error_setg(errp,
            "Invalid CPU [socket: %u, die: %u, core: %u, thread: %u] with"
            " APIC ID %" PRIu32 ", valid index range 0:%d",
            topo_ids.pkg_id, topo_ids.die_id, topo_ids.core_id, topo_ids.smt_id,
            cpu->apic_id, ms->possible_cpus->len - 1);
        return;
    }

    if (cpu_slot->cpu) {
        error_setg(errp, "CPU[%d] with APIC ID %" PRIu32 " exists",
                   idx, cpu->apic_id);
        return;
    }
    /* if 'address' properties socket-id/core-id/thread-id are not set, set them
     * so that machine_query_hotpluggable_cpus would show correct values
     */
    /* TODO: move socket_id/core_id/thread_id checks into x86_cpu_realizefn()
     * once -smp refactoring is complete and there will be CPU private
     * CPUState::nr_cores and CPUState::nr_threads fields instead of globals */
    x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
    if (cpu->socket_id != -1 && cpu->socket_id != topo_ids.pkg_id) {
        error_setg(errp, "property socket-id: %u doesn't match set apic-id:"
            " 0x%x (socket-id: %u)", cpu->socket_id, cpu->apic_id,
            topo_ids.pkg_id);
        return;
    }
    cpu->socket_id = topo_ids.pkg_id;

    if (cpu->die_id != -1 && cpu->die_id != topo_ids.die_id) {
        error_setg(errp, "property die-id: %u doesn't match set apic-id:"
            " 0x%x (die-id: %u)", cpu->die_id, cpu->apic_id, topo_ids.die_id);
        return;
    }
    cpu->die_id = topo_ids.die_id;

    if (cpu->core_id != -1 && cpu->core_id != topo_ids.core_id) {
        error_setg(errp, "property core-id: %u doesn't match set apic-id:"
            " 0x%x (core-id: %u)", cpu->core_id, cpu->apic_id,
            topo_ids.core_id);
        return;
    }
    cpu->core_id = topo_ids.core_id;

    if (cpu->thread_id != -1 && cpu->thread_id != topo_ids.smt_id) {
        error_setg(errp, "property thread-id: %u doesn't match set apic-id:"
            " 0x%x (thread-id: %u)", cpu->thread_id, cpu->apic_id,
            topo_ids.smt_id);
        return;
    }
    cpu->thread_id = topo_ids.smt_id;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) &&
        !kvm_hv_vpindex_settable()) {
        error_setg(errp, "kernel doesn't allow setting HyperV VP_INDEX");
        return;
    }

    cs = CPU(cpu);
    cs->cpu_index = idx;

    numa_cpu_pre_plug(cpu_slot, dev, errp);
}
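/*
 * Illustrative hotplug usage exercising the checks above (example only, not
 * taken from this file); with -smp 1,maxcpus=2,sockets=2,cores=1,threads=1:
 *
 *   (qemu) device_add qemu64-x86_64-cpu,id=cpu1,socket-id=1,core-id=0,thread-id=0
 *
 * x86_cpu_pre_plug() derives the APIC ID from the socket/core/thread
 * properties, finds the free slot, and back-fills any unset topology
 * properties from that APIC ID.
 */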
CpuInstanceProperties
x86_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);

    assert(cpu_index < possible_cpus->len);
    return possible_cpus->cpus[cpu_index].props;
}
int64_t x86_get_default_cpu_node_id(const MachineState *ms, int idx)
{
    X86CPUTopoIDs topo_ids;
    X86MachineState *x86ms = X86_MACHINE(ms);
    X86CPUTopoInfo topo_info;

    init_topo_info(&topo_info, x86ms);

    assert(idx < ms->possible_cpus->len);
    x86_topo_ids_from_apicid(ms->possible_cpus->cpus[idx].arch_id,
                             &topo_info, &topo_ids);
    return topo_ids.pkg_id % ms->numa_state->num_nodes;
}
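/*
 * Sketch of the default mapping: CPUs are spread over NUMA nodes round-robin
 * by package, e.g. with 2 sockets and 2 nodes, socket 0 maps to node 0 and
 * socket 1 to node 1, unless the user overrides it with -numa cpu,... options.
 */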
const CPUArchIdList *x86_possible_cpu_arch_ids(MachineState *ms)
{
    X86MachineState *x86ms = X86_MACHINE(ms);
    unsigned int max_cpus = ms->smp.max_cpus;
    X86CPUTopoInfo topo_info;
    int i;

    if (ms->possible_cpus) {
        /*
         * make sure that max_cpus hasn't changed since the first use, i.e.
         * -smp hasn't been parsed after it
         */
        assert(ms->possible_cpus->len == max_cpus);
        return ms->possible_cpus;
    }

    ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
                                  sizeof(CPUArchId) * max_cpus);
    ms->possible_cpus->len = max_cpus;

    init_topo_info(&topo_info, x86ms);

    for (i = 0; i < ms->possible_cpus->len; i++) {
        X86CPUTopoIDs topo_ids;

        ms->possible_cpus->cpus[i].type = ms->cpu_type;
        ms->possible_cpus->cpus[i].vcpus_count = 1;
        ms->possible_cpus->cpus[i].arch_id =
            x86_cpu_apic_id_from_index(x86ms, i);
        x86_topo_ids_from_apicid(ms->possible_cpus->cpus[i].arch_id,
                                 &topo_info, &topo_ids);
        ms->possible_cpus->cpus[i].props.has_socket_id = true;
        ms->possible_cpus->cpus[i].props.socket_id = topo_ids.pkg_id;
        if (ms->smp.dies > 1) {
            ms->possible_cpus->cpus[i].props.has_die_id = true;
            ms->possible_cpus->cpus[i].props.die_id = topo_ids.die_id;
        }
        ms->possible_cpus->cpus[i].props.has_core_id = true;
        ms->possible_cpus->cpus[i].props.core_id = topo_ids.core_id;
        ms->possible_cpus->cpus[i].props.has_thread_id = true;
        ms->possible_cpus->cpus[i].props.thread_id = topo_ids.smt_id;
    }
    return ms->possible_cpus;
}
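/*
 * The list built here backs query-hotpluggable-cpus and the NUMA/fw_cfg
 * tables; die-id is only filled in when more than one die is configured, so
 * single-die topologies keep the traditional socket/core/thread description.
 */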
static void x86_nmi(NMIState *n, int cpu_index, Error **errp)
{
    /* cpu index isn't used */
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);

        if (!cpu->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(cpu->apic_state);
        }
    }
}
static long get_file_size(FILE *f)
{
    long where, size;

    /* XXX: on Unix systems, using fstat() probably makes more sense */

    where = ftell(f);
    fseek(f, 0, SEEK_END);
    size = ftell(f);
    fseek(f, where, SEEK_SET);

    return size;
}
uint64_t cpu_get_tsc(CPUX86State *env)
{
    return cpus_get_elapsed_ticks();
}
static void pic_irq_request(void *opaque, int irq, int level)
{
    CPUState *cs = first_cpu;
    X86CPU *cpu = X86_CPU(cs);

    trace_x86_pic_interrupt(irq, level);
    if (cpu->apic_state && !kvm_irqchip_in_kernel() &&
        !whpx_apic_in_platform()) {
        CPU_FOREACH(cs) {
            cpu = X86_CPU(cs);
            if (apic_accept_pic_intr(cpu->apic_state)) {
                apic_deliver_pic_intr(cpu->apic_state, level);
            }
        }
    } else {
        if (level) {
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
}
qemu_irq x86_allocate_cpu_irq(void)
{
    return qemu_allocate_irq(pic_irq_request, NULL, 0);
}
int cpu_get_pic_interrupt(CPUX86State *env)
{
    X86CPU *cpu = env_archcpu(env);
    int intno;

    if (!kvm_irqchip_in_kernel() && !whpx_apic_in_platform()) {
        intno = apic_get_interrupt(cpu->apic_state);
        if (intno >= 0) {
            return intno;
        }
        /* read the irq from the PIC */
        if (!apic_accept_pic_intr(cpu->apic_state)) {
            return -1;
        }
    }

    intno = pic_read_irq(isa_pic);
    return intno;
}
DeviceState *cpu_get_current_apic(void)
{
    if (current_cpu) {
        X86CPU *cpu = X86_CPU(current_cpu);
        return cpu->apic_state;
    } else {
        return NULL;
    }
}
void gsi_handler(void *opaque, int n, int level)
{
    GSIState *s = opaque;

    trace_x86_gsi_interrupt(n, level);
    switch (n) {
    case 0 ... ISA_NUM_IRQS - 1:
        if (s->i8259_irq[n]) {
            /* Under KVM, Kernel will forward to both PIC and IOAPIC */
            qemu_set_irq(s->i8259_irq[n], level);
        }
        /* fall through */
    case ISA_NUM_IRQS ... IOAPIC_NUM_PINS - 1:
        qemu_set_irq(s->ioapic_irq[n], level);
        break;
    case IO_APIC_SECONDARY_IRQBASE
        ... IO_APIC_SECONDARY_IRQBASE + IOAPIC_NUM_PINS - 1:
        qemu_set_irq(s->ioapic2_irq[n - IO_APIC_SECONDARY_IRQBASE], level);
        break;
    }
}
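/*
 * Routing sketch for the switch above: GSIs 0..15 are the ISA IRQs and are
 * delivered to both the i8259 pair and the corresponding IOAPIC pins, GSIs
 * up to IOAPIC_NUM_PINS - 1 go to the IOAPIC only, and GSIs starting at
 * IO_APIC_SECONDARY_IRQBASE are steered to the optional second IOAPIC.
 */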
void ioapic_init_gsi(GSIState *gsi_state, const char *parent_name)
{
    DeviceState *dev;
    SysBusDevice *d;
    unsigned int i;

    assert(parent_name);
    if (kvm_ioapic_in_kernel()) {
        dev = qdev_new(TYPE_KVM_IOAPIC);
    } else {
        dev = qdev_new(TYPE_IOAPIC);
    }
    object_property_add_child(object_resolve_path(parent_name, NULL),
                              "ioapic", OBJECT(dev));
    d = SYS_BUS_DEVICE(dev);
    sysbus_realize_and_unref(d, &error_fatal);
    sysbus_mmio_map(d, 0, IO_APIC_DEFAULT_ADDRESS);

    for (i = 0; i < IOAPIC_NUM_PINS; i++) {
        gsi_state->ioapic_irq[i] = qdev_get_gpio_in(dev, i);
    }
}
DeviceState *ioapic_init_secondary(GSIState *gsi_state)
{
    DeviceState *dev;
    SysBusDevice *d;
    unsigned int i;

    dev = qdev_new(TYPE_IOAPIC);
    d = SYS_BUS_DEVICE(dev);
    sysbus_realize_and_unref(d, &error_fatal);
    sysbus_mmio_map(d, 0, IO_APIC_SECONDARY_ADDRESS);

    for (i = 0; i < IOAPIC_NUM_PINS; i++) {
        gsi_state->ioapic2_irq[i] = qdev_get_gpio_in(dev, i);
    }
    return dev;
}
struct setup_data {
    uint64_t next;
    uint32_t type;
    uint32_t len;
    uint8_t data[];
} __attribute__((packed));
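/*
 * Layout note: struct setup_data mirrors the Linux boot protocol's
 * setup_data linked list (Documentation/x86/boot.rst); 'next' is the
 * physical address of the next node, 'type' is e.g. SETUP_DTB, and 'data'
 * holds 'len' payload bytes appended right after the header.
 */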
/*
 * The entry point into the kernel for PVH boot is different from
 * the native entry point.  The PVH entry is defined by the x86/HVM
 * direct boot ABI and is available in an ELFNOTE in the kernel binary.
 *
 * This function is passed to load_elf() when it is called from
 * load_elfboot() which then additionally checks for an ELF Note of
 * type XEN_ELFNOTE_PHYS32_ENTRY and passes it to this function to
 * parse the PVH entry address from the ELF Note.
 *
 * Due to trickery in elf_opts.h, load_elf() is actually available as
 * load_elf32() or load_elf64() and this routine needs to be able
 * to deal with being called as 32 or 64 bit.
 *
 * The address of the PVH entry point is saved to the 'pvh_start_addr'
 * global variable.  (although the entry point is 32-bit, the kernel
 * binary can be either 32-bit or 64-bit).
 */
static uint64_t read_pvh_start_addr(void *arg1, void *arg2, bool is64)
{
    size_t *elf_note_data_addr;

    /* Check if ELF Note header passed in is valid */
    if (arg1 == NULL) {
        return 0;
    }

    if (is64) {
        struct elf64_note *nhdr64 = (struct elf64_note *)arg1;
        uint64_t nhdr_size64 = sizeof(struct elf64_note);
        uint64_t phdr_align = *(uint64_t *)arg2;
        uint64_t nhdr_namesz = nhdr64->n_namesz;

        elf_note_data_addr =
            ((void *)nhdr64) + nhdr_size64 +
            QEMU_ALIGN_UP(nhdr_namesz, phdr_align);

        pvh_start_addr = *elf_note_data_addr;
    } else {
        struct elf32_note *nhdr32 = (struct elf32_note *)arg1;
        uint32_t nhdr_size32 = sizeof(struct elf32_note);
        uint32_t phdr_align = *(uint32_t *)arg2;
        uint32_t nhdr_namesz = nhdr32->n_namesz;

        elf_note_data_addr =
            ((void *)nhdr32) + nhdr_size32 +
            QEMU_ALIGN_UP(nhdr_namesz, phdr_align);

        pvh_start_addr = *(uint32_t *)elf_note_data_addr;
    }

    return pvh_start_addr;
}
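/*
 * ELF note layout assumed above (standard ELF, see elf(5)):
 *
 *   Elf{32,64}_Nhdr { n_namesz; n_descsz; n_type; }
 *   name  (n_namesz bytes, padded up to the note alignment)
 *   desc  (n_descsz bytes) -- for XEN_ELFNOTE_PHYS32_ENTRY this is the
 *                             32-bit physical entry point read above.
 */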
static bool load_elfboot(const char *kernel_filename,
                         int kernel_file_size,
                         uint8_t *header,
                         size_t pvh_xen_start_addr,
                         FWCfgState *fw_cfg)
{
    uint32_t mh_load_addr = 0;
    uint32_t elf_kernel_size = 0;
    uint64_t elf_entry;
    uint64_t elf_low, elf_high;
    int kernel_size;

    if (ldl_p(header) != 0x464c457f) {
        return false; /* no elfboot */
    }

    bool elf_is64 = header[EI_CLASS] == ELFCLASS64;
    uint32_t flags = elf_is64 ?
        ((Elf64_Ehdr *)header)->e_flags : ((Elf32_Ehdr *)header)->e_flags;

    if (flags & 0x00010004) { /* LOAD_ELF_HEADER_HAS_ADDR */
        error_report("elfboot unsupported flags = %x", flags);
        exit(1);
    }

    uint64_t elf_note_type = XEN_ELFNOTE_PHYS32_ENTRY;
    kernel_size = load_elf(kernel_filename, read_pvh_start_addr,
                           NULL, &elf_note_type, &elf_entry,
                           &elf_low, &elf_high, NULL, 0, I386_ELF_MACHINE,
                           0, 0);

    if (kernel_size < 0) {
        error_report("Error while loading elf kernel");
        exit(1);
    }
    mh_load_addr = elf_low;
    elf_kernel_size = elf_high - elf_low;

    if (pvh_start_addr == 0) {
        error_report("Error loading uncompressed kernel without PVH ELF Note");
        exit(1);
    }
    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ENTRY, pvh_start_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, mh_load_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, elf_kernel_size);

    return true;
}
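/*
 * Usage sketch (not from this file): an uncompressed vmlinux built with
 * CONFIG_PVH=y carries the XEN_ELFNOTE_PHYS32_ENTRY note, so "-kernel vmlinux"
 * takes this path and the guest is then started through the pvh.bin option
 * ROM registered by x86_load_linux() below.
 */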
void x86_load_linux(X86MachineState *x86ms,
                    FWCfgState *fw_cfg,
                    int acpi_data_size,
                    bool pvh_enabled,
                    bool linuxboot_dma_enabled)
{
    int setup_size, kernel_size, cmdline_size;
    int dtb_size, setup_data_offset;
    int protocol;
    uint32_t initrd_max;
    uint8_t header[8192], *setup, *kernel;
    hwaddr real_addr, prot_addr, cmdline_addr, initrd_addr = 0;
    FILE *f;
    char *vmode;
    MachineState *machine = MACHINE(x86ms);
    struct setup_data *setup_data;
    const char *kernel_filename = machine->kernel_filename;
    const char *initrd_filename = machine->initrd_filename;
    const char *dtb_filename = machine->dtb;
    const char *kernel_cmdline = machine->kernel_cmdline;

    /* Align to 16 bytes as a paranoia measure */
    cmdline_size = (strlen(kernel_cmdline) + 16) & ~15;

    /* load the kernel header */
    f = fopen(kernel_filename, "rb");
    if (!f) {
        fprintf(stderr, "qemu: could not open kernel file '%s': %s\n",
                kernel_filename, strerror(errno));
        exit(1);
    }

    kernel_size = get_file_size(f);
    if (!kernel_size ||
        fread(header, 1, MIN(ARRAY_SIZE(header), kernel_size), f) !=
        MIN(ARRAY_SIZE(header), kernel_size)) {
        fprintf(stderr, "qemu: could not load kernel '%s': %s\n",
                kernel_filename, strerror(errno));
        exit(1);
    }

    /* kernel protocol version */
    if (ldl_p(header + 0x202) == 0x53726448) {
        protocol = lduw_p(header + 0x206);
    } else {
        /*
         * This could be a multiboot kernel. If it is, let's stop treating it
         * like a Linux kernel.
         * Note: some multiboot images could be in the ELF format (the same as
         * PVH), so we try multiboot first since we check the multiboot magic
         * header before loading it.
         */
        if (load_multiboot(fw_cfg, f, kernel_filename, initrd_filename,
                           kernel_cmdline, kernel_size, header)) {
            return;
        }
        /*
         * Check if the file is an uncompressed kernel file (ELF) and load it,
         * saving the PVH entry point used by the x86/HVM direct boot ABI.
         * If load_elfboot() is successful, populate the fw_cfg info.
         */
        if (pvh_enabled &&
            load_elfboot(kernel_filename, kernel_size,
                         header, pvh_start_addr, fw_cfg)) {
            fclose(f);

            fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE,
                           strlen(kernel_cmdline) + 1);
            fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline);

            fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, sizeof(header));
            fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA,
                             header, sizeof(header));

            /* load initrd */
            if (initrd_filename) {
                GMappedFile *mapped_file;
                gsize initrd_size;
                gchar *initrd_data;
                GError *gerr = NULL;

                mapped_file = g_mapped_file_new(initrd_filename, false, &gerr);
                if (!mapped_file) {
                    fprintf(stderr, "qemu: error reading initrd %s: %s\n",
                            initrd_filename, gerr->message);
                    exit(1);
                }
                x86ms->initrd_mapped_file = mapped_file;

                initrd_data = g_mapped_file_get_contents(mapped_file);
                initrd_size = g_mapped_file_get_length(mapped_file);
                initrd_max = x86ms->below_4g_mem_size - acpi_data_size - 1;
                if (initrd_size >= initrd_max) {
                    fprintf(stderr, "qemu: initrd is too large, cannot support."
                            "(max: %"PRIu32", need %"PRId64")\n",
                            initrd_max, (uint64_t)initrd_size);
                    exit(1);
                }

                initrd_addr = (initrd_max - initrd_size) & ~4095;

                fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr);
                fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size);
                fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, initrd_data,
                                 initrd_size);
            }

            option_rom[nb_option_roms].bootindex = 0;
            option_rom[nb_option_roms].name = "pvh.bin";
            nb_option_roms++;

            return;
        }
        protocol = 0;
    }

    if (protocol < 0x200 || !(header[0x211] & 0x01)) {
        /* Low kernel */
        real_addr    = 0x90000;
        cmdline_addr = 0x9a000 - cmdline_size;
        prot_addr    = 0x10000;
    } else if (protocol < 0x202) {
        /* High but ancient kernel */
        real_addr    = 0x90000;
        cmdline_addr = 0x9a000 - cmdline_size;
        prot_addr    = 0x100000;
    } else {
        /* High and recent kernel */
        real_addr    = 0x10000;
        cmdline_addr = 0x20000;
        prot_addr    = 0x100000;
    }

    /* highest address for loading the initrd */
    if (protocol >= 0x20c &&
        lduw_p(header + 0x236) & XLF_CAN_BE_LOADED_ABOVE_4G) {
        /*
         * Linux has supported initrd up to 4 GB for a very long time (2007,
         * long before XLF_CAN_BE_LOADED_ABOVE_4G which was added in 2013),
         * though it only sets initrd_max to 2 GB to "work around bootloader
         * bugs". Luckily, QEMU firmware (which does something like a
         * bootloader) has supported this.
         *
         * It's believed that if XLF_CAN_BE_LOADED_ABOVE_4G is set, initrd can
         * be loaded into any address.
         *
         * In addition, initrd_max is uint32_t simply because QEMU doesn't
         * support the 64-bit boot protocol (specifically the ext_ramdisk_image
         * field).
         *
         * Therefore here just limit initrd_max to UINT32_MAX simply as well.
         */
        initrd_max = UINT32_MAX;
    } else if (protocol >= 0x203) {
        /* Use the initrd_addr_max field of the boot protocol header */
        initrd_max = ldl_p(header + 0x22c);
    } else {
        /* Ancient kernels did not advertise a limit */
        initrd_max = 0x37ffffff;
    }

    if (initrd_max >= x86ms->below_4g_mem_size - acpi_data_size) {
        initrd_max = x86ms->below_4g_mem_size - acpi_data_size - 1;
    }

    fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_ADDR, cmdline_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(kernel_cmdline) + 1);
    fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline);

    if (protocol >= 0x202) {
        stl_p(header + 0x228, cmdline_addr);
    } else {
        stw_p(header + 0x20, 0xA33F);
        stw_p(header + 0x22, cmdline_addr - real_addr);
    }

    /* handle vga= parameter */
    vmode = strstr(kernel_cmdline, "vga=");
    if (vmode) {
        unsigned int video_mode;
        const char *end;
        int ret;
        /* skip "vga=" */
        vmode += 4;
        if (!strncmp(vmode, "normal", 6)) {
            video_mode = 0xffff;
        } else if (!strncmp(vmode, "ext", 3)) {
            video_mode = 0xfffe;
        } else if (!strncmp(vmode, "ask", 3)) {
            video_mode = 0xfffd;
        } else {
            ret = qemu_strtoui(vmode, &end, 0, &video_mode);
            if (ret != 0 || (*end && *end != ' ')) {
                fprintf(stderr, "qemu: invalid 'vga=' kernel parameter.\n");
                exit(1);
            }
        }
        stw_p(header + 0x1fa, video_mode);
    }

    /* loader type */
    /*
     * High nybble = B reserved for QEMU; low nybble is revision number.
     * If this code is substantially changed, you may want to consider
     * incrementing the revision.
     */
    if (protocol >= 0x200) {
        header[0x210] = 0xB0;
    }
    /* heap */
    if (protocol >= 0x201) {
        header[0x211] |= 0x80; /* CAN_USE_HEAP */
        stw_p(header + 0x224, cmdline_addr - real_addr - 0x200);
    }

    /* load initrd */
    if (initrd_filename) {
        GMappedFile *mapped_file;
        gsize initrd_size;
        gchar *initrd_data;
        GError *gerr = NULL;

        if (protocol < 0x200) {
            fprintf(stderr, "qemu: linux kernel too old to load a ram disk\n");
            exit(1);
        }

        mapped_file = g_mapped_file_new(initrd_filename, false, &gerr);
        if (!mapped_file) {
            fprintf(stderr, "qemu: error reading initrd %s: %s\n",
                    initrd_filename, gerr->message);
            exit(1);
        }
        x86ms->initrd_mapped_file = mapped_file;

        initrd_data = g_mapped_file_get_contents(mapped_file);
        initrd_size = g_mapped_file_get_length(mapped_file);
        if (initrd_size >= initrd_max) {
            fprintf(stderr, "qemu: initrd is too large, cannot support."
                    "(max: %"PRIu32", need %"PRId64")\n",
                    initrd_max, (uint64_t)initrd_size);
            exit(1);
        }

        initrd_addr = (initrd_max - initrd_size) & ~4095;

        fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr);
        fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size);
        fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, initrd_data, initrd_size);

        stl_p(header + 0x218, initrd_addr);
        stl_p(header + 0x21c, initrd_size);
    }

    /* load kernel and setup */
    setup_size = header[0x1f1];
    if (setup_size == 0) {
        setup_size = 4;
    }
    setup_size = (setup_size + 1) * 512;
    if (setup_size > kernel_size) {
        fprintf(stderr, "qemu: invalid kernel header\n");
        exit(1);
    }
    kernel_size -= setup_size;

    setup  = g_malloc(setup_size);
    kernel = g_malloc(kernel_size);
    fseek(f, 0, SEEK_SET);
    if (fread(setup, 1, setup_size, f) != setup_size) {
        fprintf(stderr, "fread() failed\n");
        exit(1);
    }
    if (fread(kernel, 1, kernel_size, f) != kernel_size) {
        fprintf(stderr, "fread() failed\n");
        exit(1);
    }
    fclose(f);

    /* append dtb to kernel */
    if (dtb_filename) {
        if (protocol < 0x209) {
            fprintf(stderr, "qemu: Linux kernel too old to load a dtb\n");
            exit(1);
        }

        dtb_size = get_image_size(dtb_filename);
        if (dtb_size <= 0) {
            fprintf(stderr, "qemu: error reading dtb %s: %s\n",
                    dtb_filename, strerror(errno));
            exit(1);
        }

        setup_data_offset = QEMU_ALIGN_UP(kernel_size, 16);
        kernel_size = setup_data_offset + sizeof(struct setup_data) + dtb_size;
        kernel = g_realloc(kernel, kernel_size);

        stq_p(header + 0x250, prot_addr + setup_data_offset);

        setup_data = (struct setup_data *)(kernel + setup_data_offset);
        setup_data->next = 0;
        setup_data->type = cpu_to_le32(SETUP_DTB);
        setup_data->len = cpu_to_le32(dtb_size);

        load_image_size(dtb_filename, setup_data->data, dtb_size);
    }

    memcpy(setup, header, MIN(sizeof(header), setup_size));

    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size);
    fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size);

    fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, setup_size);
    fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA, setup, setup_size);

    option_rom[nb_option_roms].bootindex = 0;
    option_rom[nb_option_roms].name = "linuxboot.bin";
    if (linuxboot_dma_enabled && fw_cfg_dma_enabled(fw_cfg)) {
        option_rom[nb_option_roms].name = "linuxboot_dma.bin";
    }
    nb_option_roms++;
}
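/*
 * Illustrative direct-kernel-boot invocation exercising this loader
 * (example only, not taken from this file):
 *
 *   qemu-system-x86_64 -kernel bzImage -initrd initrd.img \
 *                      -append "console=ttyS0 root=/dev/vda"
 *
 * fw_cfg carries the kernel, setup blob, command line and initrd, and
 * linuxboot(_dma).bin copies them into guest RAM at the addresses computed
 * above before jumping to the real-mode entry point.
 */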
void x86_bios_rom_init(MachineState *ms, const char *default_firmware,
                       MemoryRegion *rom_memory, bool isapc_ram_fw)
{
    const char *bios_name;
    char *filename;
    MemoryRegion *bios, *isa_bios;
    int bios_size, isa_bios_size;
    int ret;

    /* BIOS load */
    bios_name = ms->firmware ?: default_firmware;
    filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
    if (filename) {
        bios_size = get_image_size(filename);
    } else {
        bios_size = -1;
    }
    if (bios_size <= 0 ||
        (bios_size % 65536) != 0) {
        goto bios_error;
    }
    bios = g_malloc(sizeof(*bios));
    memory_region_init_ram(bios, NULL, "pc.bios", bios_size, &error_fatal);
    if (!isapc_ram_fw) {
        memory_region_set_readonly(bios, true);
    }
    ret = rom_add_file_fixed(bios_name, (uint32_t)(-bios_size), -1);
    if (ret != 0) {
    bios_error:
        fprintf(stderr, "qemu: could not load PC BIOS '%s'\n", bios_name);
        exit(1);
    }
    g_free(filename);

    /* map the last 128KB of the BIOS in ISA space */
    isa_bios_size = MIN(bios_size, 128 * KiB);
    isa_bios = g_malloc(sizeof(*isa_bios));
    memory_region_init_alias(isa_bios, NULL, "isa-bios", bios,
                             bios_size - isa_bios_size, isa_bios_size);
    memory_region_add_subregion_overlap(rom_memory,
                                        0x100000 - isa_bios_size,
                                        isa_bios,
                                        1);
    if (!isapc_ram_fw) {
        memory_region_set_readonly(isa_bios, true);
    }

    /* map all the bios at the top of memory */
    memory_region_add_subregion(rom_memory,
                                (uint32_t)(-bios_size),
                                bios);
}
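/*
 * Mapping sketch: with a 256 KiB firmware image the full ROM ends up at
 * 0xFFFC0000..0xFFFFFFFF (top of the 32-bit address space), and its last
 * 128 KiB are additionally aliased at 0xE0000..0xFFFFF so real-mode code can
 * reach the legacy BIOS area.
 */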
bool x86_machine_is_smm_enabled(const X86MachineState *x86ms)
{
    bool smm_available = false;

    if (x86ms->smm == ON_OFF_AUTO_OFF) {
        return false;
    }

    if (tcg_enabled() || qtest_enabled()) {
        smm_available = true;
    } else if (kvm_enabled()) {
        smm_available = kvm_has_smm();
    }

    if (smm_available) {
        return true;
    }

    if (x86ms->smm == ON_OFF_AUTO_ON) {
        error_report("System Management Mode not supported by this hypervisor.");
        exit(1);
    }
    return false;
}
static void x86_machine_get_smm(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    OnOffAuto smm = x86ms->smm;

    visit_type_OnOffAuto(v, name, &smm, errp);
}

static void x86_machine_set_smm(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    visit_type_OnOffAuto(v, name, &x86ms->smm, errp);
}

bool x86_machine_is_acpi_enabled(const X86MachineState *x86ms)
{
    if (x86ms->acpi == ON_OFF_AUTO_OFF) {
        return false;
    }
    return true;
}

static void x86_machine_get_acpi(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    OnOffAuto acpi = x86ms->acpi;

    visit_type_OnOffAuto(v, name, &acpi, errp);
}

static void x86_machine_set_acpi(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    visit_type_OnOffAuto(v, name, &x86ms->acpi, errp);
}
static char *x86_machine_get_oem_id(Object *obj, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    return g_strdup(x86ms->oem_id);
}

static void x86_machine_set_oem_id(Object *obj, const char *value, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    size_t len = strlen(value);

    if (len > 6) {
        error_setg(errp,
                   "User specified "X86_MACHINE_OEM_ID" value is bigger than "
                   "6 bytes in size");
        return;
    }

    strncpy(x86ms->oem_id, value, 6);
}

static char *x86_machine_get_oem_table_id(Object *obj, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    return g_strdup(x86ms->oem_table_id);
}

static void x86_machine_set_oem_table_id(Object *obj, const char *value,
                                         Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    size_t len = strlen(value);

    if (len > 8) {
        error_setg(errp,
                   "User specified "X86_MACHINE_OEM_TABLE_ID
                   " value is bigger than "
                   "8 bytes in size");
        return;
    }

    strncpy(x86ms->oem_table_id, value, 8);
}
static void x86_machine_get_bus_lock_ratelimit(Object *obj, Visitor *v,
                                const char *name, void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    uint64_t bus_lock_ratelimit = x86ms->bus_lock_ratelimit;

    visit_type_uint64(v, name, &bus_lock_ratelimit, errp);
}

static void x86_machine_set_bus_lock_ratelimit(Object *obj, Visitor *v,
                               const char *name, void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    visit_type_uint64(v, name, &x86ms->bus_lock_ratelimit, errp);
}

static void machine_get_sgx_epc(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    SgxEPCList *list = x86ms->sgx_epc_list;

    visit_type_SgxEPCList(v, name, &list, errp);
}

static void machine_set_sgx_epc(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    SgxEPCList *list;

    list = x86ms->sgx_epc_list;
    visit_type_SgxEPCList(v, name, &x86ms->sgx_epc_list, errp);

    qapi_free_SgxEPCList(list);
}

static void x86_machine_initfn(Object *obj)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    x86ms->smm = ON_OFF_AUTO_AUTO;
    x86ms->acpi = ON_OFF_AUTO_AUTO;
    x86ms->pci_irq_mask = ACPI_BUILD_PCI_IRQS;
    x86ms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6);
    x86ms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8);
    x86ms->bus_lock_ratelimit = 0;
}
static void x86_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    X86MachineClass *x86mc = X86_MACHINE_CLASS(oc);
    NMIClass *nc = NMI_CLASS(oc);

    mc->cpu_index_to_instance_props = x86_cpu_index_to_props;
    mc->get_default_cpu_node_id = x86_get_default_cpu_node_id;
    mc->possible_cpu_arch_ids = x86_possible_cpu_arch_ids;
    x86mc->compat_apic_id_mode = false;
    x86mc->save_tsc_khz = true;
    nc->nmi_monitor_handler = x86_nmi;

    object_class_property_add(oc, X86_MACHINE_SMM, "OnOffAuto",
        x86_machine_get_smm, x86_machine_set_smm,
        NULL, NULL);
    object_class_property_set_description(oc, X86_MACHINE_SMM,
        "Enable SMM");

    object_class_property_add(oc, X86_MACHINE_ACPI, "OnOffAuto",
        x86_machine_get_acpi, x86_machine_set_acpi,
        NULL, NULL);
    object_class_property_set_description(oc, X86_MACHINE_ACPI,
        "Enable ACPI");

    object_class_property_add_str(oc, X86_MACHINE_OEM_ID,
                                  x86_machine_get_oem_id,
                                  x86_machine_set_oem_id);
    object_class_property_set_description(oc, X86_MACHINE_OEM_ID,
                                          "Override the default value of field OEMID "
                                          "in ACPI table header."
                                          "The string may be up to 6 bytes in size");

    object_class_property_add_str(oc, X86_MACHINE_OEM_TABLE_ID,
                                  x86_machine_get_oem_table_id,
                                  x86_machine_set_oem_table_id);
    object_class_property_set_description(oc, X86_MACHINE_OEM_TABLE_ID,
                                          "Override the default value of field OEM Table ID "
                                          "in ACPI table header."
                                          "The string may be up to 8 bytes in size");

    object_class_property_add(oc, X86_MACHINE_BUS_LOCK_RATELIMIT, "uint64_t",
                                x86_machine_get_bus_lock_ratelimit,
                                x86_machine_set_bus_lock_ratelimit, NULL, NULL);
    object_class_property_set_description(oc, X86_MACHINE_BUS_LOCK_RATELIMIT,
            "Set the ratelimit for the bus locks acquired in VMs");

    object_class_property_add(oc, "sgx-epc", "SgxEPC",
        machine_get_sgx_epc, machine_set_sgx_epc,
        NULL, NULL);
    object_class_property_set_description(oc, "sgx-epc",
        "SGX EPC device");
}
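/*
 * Usage sketch for the properties registered above (illustrative command
 * lines, not taken from this file; exact property spellings are defined by
 * the X86_MACHINE_* macros in hw/i386/x86.h and the QEMU SGX documentation):
 *
 *   -machine q35,smm=off,acpi=off
 *   -machine q35,bus-lock-ratelimit=1000
 *   -object memory-backend-epc,id=epc0,size=64M \
 *       -machine q35,sgx-epc.0.memdev=epc0,sgx-epc.0.node=0
 */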
static const TypeInfo x86_machine_info = {
    .name = TYPE_X86_MACHINE,
    .parent = TYPE_MACHINE,
    .abstract = true,
    .instance_size = sizeof(X86MachineState),
    .instance_init = x86_machine_initfn,
    .class_size = sizeof(X86MachineClass),
    .class_init = x86_machine_class_init,
    .interfaces = (InterfaceInfo[]) {
         { TYPE_NMI },
         { }
    },
};

static void x86_machine_register_types(void)
{
    type_register_static(&x86_machine_info);
}

type_init(x86_machine_register_types)