/*
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/error.h"
#include "qapi/visitor.h"

#include <sys/ioctl.h>
#include <sys/utsname.h>
#include <sys/syscall.h>
#include <sys/resource.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include "standard-headers/asm-x86/kvm_para.h"
#include "hw/xen/interface/arch-x86/cpuid.h"

#include "vmsr_energy.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "../confidential-guest.h"
#include "hyperv-proto.h"

#include "gdbstub/enums.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/ratelimit.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/memalign.h"
#include "hw/i386/x86.h"
#include "hw/i386/kvm/xen_evtchn.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/intel_iommu.h"
#include "hw/i386/topology.h"
#include "hw/i386/x86-iommu.h"
#include "hw/i386/e820_memory_layout.h"

#include "hw/xen/xen.h"

#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "migration/blocker.h"
#include "exec/memattrs.h"

#include CONFIG_DEVICES
#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/*
 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
 * In order to use vm86 mode, an EPT identity map and a TSS are needed.
 * Since these must be part of guest physical memory, we need to allocate
 * them, both by setting their start addresses in the kernel and by
 * creating a corresponding e820 entry. We need 4 pages before the BIOS,
 * so this value allows up to 16M BIOSes.
 */
#define KVM_IDENTITY_BASE 0xfeffc000
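/*
 * Editorial sketch (not part of the original file): the arrangement the
 * comment above describes boils down to reserving the four pages at
 * KVM_IDENTITY_BASE for the EPT identity map and the TSS, telling the kernel
 * where they live, and carving them out of the guest e820 map.  Roughly
 * (error handling omitted; the exact call site is an assumption):
 *
 *     uint64_t identity_base = KVM_IDENTITY_BASE;
 *
 *     kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
 *     kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
 *     e820_add_entry(identity_base, 0x4000, E820_RESERVED);
 *
 * The identity map takes one page and the TSS the remaining three, which is
 * why placing them at 0xfeffc000 still leaves room for a BIOS image of up to
 * 16M below 4G.
 */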
/* From arch/x86/kvm/lapic.h */
#define KVM_APIC_BUS_CYCLE_NS       1
#define KVM_APIC_BUS_FREQUENCY      (1000000000ULL / KVM_APIC_BUS_CYCLE_NS)

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
 * 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096
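/*
 * Editorial note (assumption, not from the original source): the arithmetic
 * behind MSR_BUF_SIZE is that struct kvm_msrs is an 8-byte header (nmsrs plus
 * padding) and each struct kvm_msr_entry is 16 bytes, so
 * 8 + 255 * 16 = 4088 bytes fit in one 4096-byte buffer with 8 bytes to
 * spare.  A build-time check along these lines would document the invariant:
 *
 *     QEMU_BUILD_BUG_ON(sizeof(struct kvm_msrs) +
 *                       255 * sizeof(struct kvm_msr_entry) > MSR_BUF_SIZE);
 */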
typedef bool QEMURDMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t *val);
typedef bool QEMUWRMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t val);
typedef struct {
    uint32_t msr;
    QEMURDMSRHandler *rdmsr;
    QEMUWRMSRHandler *wrmsr;
} KVMMSRHandlers;

static void kvm_init_msrs(X86CPU *cpu);
static bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr,
                           QEMUWRMSRHandler *wrmsr);
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_INFO(SIGNAL_MSI),
    KVM_CAP_INFO(IRQ_ROUTING),
    KVM_CAP_INFO(DEBUGREGS),
    KVM_CAP_INFO(VCPU_EVENTS),
    KVM_CAP_INFO(X86_ROBUST_SINGLESTEP),
    KVM_CAP_INFO(ADJUST_CLOCK),
    KVM_CAP_INFO(SET_IDENTITY_MAP_ADDR),
    KVM_CAP_LAST_INFO
};
static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool hv_vpindex_settable;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_hv_frequencies;
static bool has_msr_hv_reenlightenment;
static bool has_msr_hv_syndbg_options;
static bool has_msr_xss;
static bool has_msr_umwait;
static bool has_msr_spec_ctrl;
static bool has_tsc_scale_msr;
static bool has_msr_tsx_ctrl;
static bool has_msr_virt_ssbd;
static bool has_msr_smi_count;
static bool has_msr_arch_capabs;
static bool has_msr_core_capabs;
static bool has_msr_vmx_vmfunc;
static bool has_msr_ucode_rev;
static bool has_msr_vmx_procbased_ctls2;
static bool has_msr_perf_capabs;
static bool has_msr_pkrs;

static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
static uint32_t num_architectural_pmu_fixed_counters;

static int has_xsave2;
static int has_sregs2;
static int has_exception_payload;
static int has_triple_fault_event;

static bool has_msr_mcg_ext_ctl;

static struct kvm_cpuid2 *cpuid_cache;
static struct kvm_cpuid2 *hv_cpuid_cache;
static struct kvm_msr_list *kvm_feature_msrs;

static KVMMSRHandlers msr_handlers[KVM_MSR_FILTER_MAX_RANGES];

#define BUS_LOCK_SLICE_TIME 1000000000ULL /* ns */
static RateLimit bus_lock_ratelimit_ctrl;

static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value);
static const char *vm_type_name[] = {
    [KVM_X86_DEFAULT_VM] = "default",
    [KVM_X86_SEV_VM] = "SEV",
    [KVM_X86_SEV_ES_VM] = "SEV-ES",
    [KVM_X86_SNP_VM] = "SEV-SNP",
};
bool kvm_is_vm_type_supported(int type)
{
    uint32_t machine_types;

    /*
     * old KVM doesn't support KVM_CAP_VM_TYPES but KVM_X86_DEFAULT_VM
     * is always supported
     */
    if (type == KVM_X86_DEFAULT_VM) {
        return true;
    }

    machine_types = kvm_check_extension(KVM_STATE(current_machine->accelerator),
                                        KVM_CAP_VM_TYPES);
    return !!(machine_types & BIT(type));
}
int kvm_get_vm_type(MachineState *ms)
{
    int kvm_type = KVM_X86_DEFAULT_VM;

    if (ms->cgs) {
        if (!object_dynamic_cast(OBJECT(ms->cgs), TYPE_X86_CONFIDENTIAL_GUEST)) {
            error_report("configuration type %s not supported for x86 guests",
                         object_get_typename(OBJECT(ms->cgs)));
            exit(1);
        }
        kvm_type = x86_confidential_guest_kvm_type(
            X86_CONFIDENTIAL_GUEST(ms->cgs));
    }

    if (!kvm_is_vm_type_supported(kvm_type)) {
        error_report("vm-type %s not supported by KVM", vm_type_name[kvm_type]);
        exit(1);
    }

    return kvm_type;
}
bool kvm_enable_hypercall(uint64_t enable_mask)
{
    KVMState *s = KVM_STATE(current_accel());

    return !kvm_vm_enable_cap(s, KVM_CAP_EXIT_HYPERCALL, 0, enable_mask);
}

bool kvm_has_smm(void)
{
    return kvm_vm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

bool kvm_has_adjust_clock_stable(void)
{
    int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);

    return (ret & KVM_CLOCK_TSC_STABLE);
}

bool kvm_has_exception_payload(void)
{
    return has_exception_payload;
}

static bool kvm_x2apic_api_set_flags(uint64_t flags)
{
    KVMState *s = KVM_STATE(current_accel());

    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
}
#define MEMORIZE(fn, _result) \
    ({ \
        static bool _memorized; \
        \
        if (_memorized) { \
            return _result; \
        } \
        _memorized = true; \
        _result = fn; \
    })
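/*
 * Editorial sketch (not part of the original file): MEMORIZE(fn, _result)
 * evaluates 'fn' only on the first call of the enclosing function and caches
 * the outcome in '_result'.  kvm_enable_x2apic() below therefore behaves
 * roughly like:
 *
 *     bool kvm_enable_x2apic(void)
 *     {
 *         static bool memorized;
 *
 *         if (memorized) {
 *             return has_x2apic_api;
 *         }
 *         memorized = true;
 *         has_x2apic_api =
 *             kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
 *                                      KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK);
 *         return has_x2apic_api;
 *     }
 *
 * so the KVM_CAP_X2APIC_API enable is issued at most once per process.
 */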
static bool has_x2apic_api;

bool kvm_has_x2apic_api(void)
{
    return has_x2apic_api;
}

bool kvm_enable_x2apic(void)
{
    return MEMORIZE(
             kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
                                      KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
             has_x2apic_api);
}

bool kvm_hv_vpindex_settable(void)
{
    return hv_vpindex_settable;
}
static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t value;
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    env->tsc_valid = !runstate_is_running();

    ret = kvm_get_one_msr(cpu, MSR_IA32_TSC, &value);
    if (ret < 0) {
        return ret;
    }

    env->tsc = value;
    return 0;
}
static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_get_tsc(cpu);
}

void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
        }
    }
}
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}
/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    if (cpuid_cache != NULL) {
        return cpuid_cache;
    }
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    cpuid_cache = cpuid;
    return cpuid;
}
static bool host_tsx_broken(void)
{
    int family, model, stepping;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_cpu_vendor_fms(vendor, &family, &model, &stepping);

    /* Check if we are running on a Haswell host known to have broken TSX */
    return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
           (family == 6) &&
           ((model == 63 && stepping < 4) ||
            model == 60 || model == 69 || model == 70);
}
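/*
 * Editorial note (assumption, not from the original source): the model
 * numbers above correspond to Intel Haswell parts -- 60 (0x3C) client
 * Haswell, 63 (0x3F) Haswell-E/EP before stepping 4, 69 (0x45) Haswell-ULT
 * and 70 (0x46) Haswell with GT3e graphics -- the generation whose TSX
 * erratum caused HLE/RTM to be disabled by later microcode.
 */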
/* Returns the value for a specific register on the cpuid entry
 */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}
/* Find matching entry for function/index on kvm_cpuid2 struct
 */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}
uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx, unused;
    uint64_t bitmask;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
        /* KVM never reports CPUID_HT but QEMU can support when vcpus > 1 */
        ret |= CPUID_HT;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
            kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }

        if (enable_cpu_pm) {
            int disable_exits = kvm_check_extension(s,
                                                    KVM_CAP_X86_DISABLE_EXITS);

            if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
                ret |= CPUID_EXT_MONITOR;
            }
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 7 && index == 0 && reg == R_EBX) {
        /* Not new instructions, just an optimization. */
        uint32_t ebx;
        host_cpuid(7, 0, &unused, &ebx, &unused, &unused);
        ret |= ebx & CPUID_7_0_EBX_ERMS;

        if (host_tsx_broken()) {
            ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
        }
    } else if (function == 7 && index == 0 && reg == R_EDX) {
        /* Not new instructions, just an optimization. */
        uint32_t edx;
        host_cpuid(7, 0, &unused, &unused, &unused, &edx);
        ret |= edx & CPUID_7_0_EDX_FSRM;

        /*
         * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
         * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is
         * returned by KVM_GET_MSR_INDEX_LIST.
         */
        if (!has_msr_arch_capabs) {
            ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
        }
    } else if (function == 7 && index == 1 && reg == R_EAX) {
        /* Not new instructions, just an optimization. */
        uint32_t eax;
        host_cpuid(7, 1, &eax, &unused, &unused, &unused);
        ret |= eax & (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_FSRC);
    } else if (function == 7 && index == 2 && reg == R_EDX) {
        uint32_t edx;
        host_cpuid(7, 2, &unused, &unused, &unused, &edx);
        ret |= edx & CPUID_7_2_EDX_MCDT_NO;
    } else if (function == 0xd && index == 0 &&
               (reg == R_EAX || reg == R_EDX)) {
        /*
         * The value returned by KVM_GET_SUPPORTED_CPUID does not include
         * features that still have to be enabled with the arch_prctl
         * system call. QEMU needs the full value, which is retrieved
         * with KVM_GET_DEVICE_ATTR.
         */
        struct kvm_device_attr attr = {
            .group = 0,
            .attr = KVM_X86_XCOMP_GUEST_SUPP,
            .addr = (unsigned long) &bitmask
        };

        bool sys_attr = kvm_check_extension(s, KVM_CAP_SYS_ATTRIBUTES);
        if (!sys_attr) {
            return ret;
        }

        int rc = kvm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
        if (rc < 0) {
            if (rc != -ENXIO) {
                warn_report("KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) "
                            "error: %d", rc);
            }
            return ret;
        }
        ret = (reg == R_EAX) ? bitmask : bitmask >> 32;
    } else if (function == 0x80000001 && reg == R_ECX) {
        /*
         * It's safe to enable TOPOEXT even if it's not returned by
         * GET_SUPPORTED_CPUID. Unconditionally enabling TOPOEXT here allows
         * us to keep CPU models including TOPOEXT runnable on older kernels.
         */
        ret |= CPUID_EXT3_TOPOEXT;
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    } else if (function == 0x80000007 && reg == R_EBX) {
        ret |= CPUID_8000_0007_EBX_OVERFLOW_RECOV | CPUID_8000_0007_EBX_SUCCOR;
    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
        /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
         * be enabled without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
        }
        if (kvm_irqchip_is_split()) {
            ret |= 1U << KVM_FEATURE_MSI_EXT_DEST_ID;
        }
    } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
        ret |= 1U << KVM_HINTS_REALTIME;
    }

    if (current_machine->cgs) {
        ret = x86_confidential_guest_mask_cpuid_features(
            X86_CONFIDENTIAL_GUEST(current_machine->cgs),
            function, index, reg, ret);
    }
    return ret;
}
uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};
    uint64_t value;
    uint32_t ret, can_be_one, must_be_one;
    int i;

    if (kvm_feature_msrs == NULL) { /* Host doesn't support feature MSRs */
        return 0;
    }

    /* Check if requested MSR is supported feature MSR */
    for (i = 0; i < kvm_feature_msrs->nmsrs; i++)
        if (kvm_feature_msrs->indices[i] == index) {
            break;
        }
    if (i == kvm_feature_msrs->nmsrs) {
        return 0; /* if the feature MSR is not supported, simply return 0 */
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = index;

    ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data);
    if (ret != 1) {
        error_report("KVM get MSR (index=0x%x) feature failed, %s",
            index, strerror(-ret));
        exit(1);
    }

    value = msr_data.entries[0].data;
    switch (index) {
    case MSR_IA32_VMX_PROCBASED_CTLS2:
        if (!has_msr_vmx_procbased_ctls2) {
            /* KVM forgot to add these bits for some time, do this ourselves. */
            if (kvm_arch_get_supported_cpuid(s, 0xD, 1, R_ECX) &
                CPUID_XSAVE_XSAVES) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_XSAVES << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) &
                CPUID_EXT_RDRAND) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDRAND_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_INVPCID) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_ENABLE_INVPCID << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_RDSEED) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDSEED_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX) &
                CPUID_EXT2_RDTSCP) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDTSCP << 32;
            }
        }
        /* fall through */
    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        /*
         * Return true for bits that can be one, but do not have to be one.
         * The SDM tells us which bits could have a "must be one" setting,
         * so we can do the opposite transformation in make_vmx_msr_value.
         */
        must_be_one = (uint32_t)value;
        can_be_one = (uint32_t)(value >> 32);
        return can_be_one & ~must_be_one;

    default:
        return value;
    }
}
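/*
 * Editorial example (not from the original source): for the TRUE_*_CTLS MSRs
 * the low 32 bits hold the "must be one" settings and the high 32 bits the
 * "can be one" settings.  If such an MSR read 0x000000ff00000016, then
 * must_be_one = 0x16 and can_be_one = 0xff, and the code above returns
 * 0xff & ~0x16 = 0xe9 -- exactly the control bits a guest may toggle freely.
 */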
static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    *max_banks = kvm_check_extension(s, KVM_CAP_MCE);
    return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
}
static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_MISCV |
                      MCI_STATUS_ADDRV;
    uint64_t mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
    int flags = 0;

    if (!IS_AMD_CPU(env)) {
        status |= MCI_STATUS_S | MCI_STATUS_UC;
        if (code == BUS_MCEERR_AR) {
            status |= MCI_STATUS_AR | 0x134;
            mcg_status |= MCG_STATUS_EIPV;
        }
    } else {
        if (code == BUS_MCEERR_AR) {
            status |= MCI_STATUS_UC | MCI_STATUS_POISON;
            mcg_status |= MCG_STATUS_EIPV;
        } else {
            /* Setting the POISON bit for deferred errors indicates to the
             * guest kernel that the address provided by the MCE is valid
             * and usable which will ensure that the guest kernel will send
             * a SIGBUS_AO signal to the guest process. This allows for
             * more desirable behavior in the case that the guest process
             * with poisoned memory has set the MCE_KILL_EARLY prctl flag
             * which indicates that the process would prefer to handle or
             * shutdown due to the poisoned memory condition before the
             * memory has been accessed.
             *
             * While the POISON bit would not be set in a deferred error
             * sent from hardware, the bit is not meaningful for deferred
             * errors and can be reused in this scenario.
             */
            status |= MCI_STATUS_DEFERRED | MCI_STATUS_POISON;
        }
    }

    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
    /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
     * guest kernel back into env->mcg_ext_ctl.
     */
    cpu_synchronize_state(cs);
    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
        mcg_status |= MCG_STATUS_LMCE;
        flags = 0;
    }

    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
}
static void emit_hypervisor_memory_failure(MemoryFailureAction action, bool ar)
{
    MemoryFailureFlags mff = {.action_required = ar, .recursive = false};

    qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_HYPERVISOR, action,
                                   &mff);
}

static void hardware_memory_error(void *host_addr)
{
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_FATAL, true);
    error_report("QEMU got Hardware memory error at addr %p", host_addr);
    exit(1);
}
void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    /* If we get an action required MCE, it has been injected by KVM
     * while the VM was running. An action optional MCE instead should
     * be coming from the main thread, which qemu_init_sigbus identifies
     * as the "early kill" thread.
     */
    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if ((env->mcg_cap & MCG_SER_P) && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            kvm_mce_inject(cpu, paddr, code);

            /*
             * Use different logging severity based on error type.
             * If there is additional MCE reporting on the hypervisor, QEMU VA
             * could be another source to identify the PA and MCE details.
             */
            if (code == BUS_MCEERR_AR) {
                error_report("Guest MCE Memory Error at QEMU addr %p and "
                    "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                    addr, paddr, "BUS_MCEERR_AR");
            } else {
                warn_report("Guest MCE Memory Error at QEMU addr %p and "
                    "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                    addr, paddr, "BUS_MCEERR_AO");
            }

            return;
        }

        if (code == BUS_MCEERR_AO) {
            warn_report("Hardware memory error at addr %p of type %s "
                "for memory used by QEMU itself instead of guest system!",
                addr, "BUS_MCEERR_AO");
        }
    }

    if (code == BUS_MCEERR_AR) {
        hardware_memory_error(addr);
    }

    /* Hope we are lucky for AO MCE, just notify a event */
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, false);
}
static void kvm_queue_exception(CPUX86State *env,
                                int32_t exception_nr,
                                uint8_t exception_has_payload,
                                uint64_t exception_payload)
{
    assert(env->exception_nr == -1);
    assert(!env->exception_pending);
    assert(!env->exception_injected);
    assert(!env->exception_has_payload);

    env->exception_nr = exception_nr;

    if (has_exception_payload) {
        env->exception_pending = 1;

        env->exception_has_payload = exception_has_payload;
        env->exception_payload = exception_payload;
    } else {
        env->exception_injected = 1;

        if (exception_nr == EXCP01_DB) {
            assert(exception_has_payload);
            env->dr[6] = exception_payload;
        } else if (exception_nr == EXCP0E_PAGE) {
            assert(exception_has_payload);
            env->cr[2] = exception_payload;
        } else {
            assert(!exception_has_payload);
        }
    }
}
static void cpu_update_state(void *opaque, bool running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT                0x40000100
#endif

static bool hyperv_enabled(X86CPU *cpu)
{
    return kvm_check_extension(kvm_state, KVM_CAP_HYPERV) > 0 &&
        ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_NOTIFY) ||
         cpu->hyperv_features || cpu->hyperv_passthrough);
}
/*
 * Check whether target_freq is within conservative
 * ntp correctable bounds (250ppm) of freq
 */
static inline bool freq_within_bounds(int freq, int target_freq)
{
    int max_freq = freq + (freq * 250 / 1000000);
    int min_freq = freq - (freq * 250 / 1000000);

    if (target_freq >= min_freq && target_freq <= max_freq) {
        return true;
    }

    return false;
}
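/*
 * Editorial example (not from the original source): with a host TSC of
 * freq = 2,000,000 kHz, 250 ppm works out to 2,000,000 * 250 / 1,000,000 =
 * 500 kHz, so any requested frequency in [1,999,500 .. 2,000,500] kHz is
 * considered correctable by guest NTP and no TSC scaling is required.
 */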
static int kvm_arch_set_tsc_khz(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int r, cur_freq;
    bool set_ioctl = false;

    if (!env->tsc_khz) {
        return 0;
    }

    cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
               kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : -ENOTSUP;

    /*
     * If TSC scaling is supported, attempt to set TSC frequency.
     */
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL)) {
        set_ioctl = true;
    }

    /*
     * If desired TSC frequency is within bounds of NTP correction,
     * attempt to set TSC frequency.
     */
    if (cur_freq != -ENOTSUP && freq_within_bounds(cur_freq, env->tsc_khz)) {
        set_ioctl = true;
    }

    r = set_ioctl ?
        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
        -ENOTSUP;

    if (r < 0) {
        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
         * TSC frequency doesn't match the one we want.
         */
        cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
                   kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
                   -ENOTSUP;
        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
            warn_report("TSC frequency mismatch between "
                        "VM (%" PRId64 " kHz) and host (%d kHz), "
                        "and TSC scaling unavailable",
                        env->tsc_khz, cur_freq);
            return r;
        }
    }

    return 0;
}
*env
)
922 return (env
->features
[FEAT_8000_0007_EDX
] & CPUID_APM_INVTSC
)
923 || env
->user_tsc_khz
;
#define DEFAULT_EVMCS_VERSION ((1 << 8) | 1)

static struct {
    const char *desc;
    struct {
        uint32_t func;
        int reg;
        uint32_t bits;
    } flags[2];
    uint64_t dependencies;
} kvm_hyperv_properties[] = {
    [HYPERV_FEAT_RELAXED] = {
        .desc = "relaxed timing (hv-relaxed)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_RELAXED_TIMING_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_VAPIC] = {
        .desc = "virtual APIC (hv-vapic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_APIC_ACCESS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_TIME] = {
        .desc = "clocksources (hv-time)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_TIME_REF_COUNT_AVAILABLE | HV_REFERENCE_TSC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_CRASH] = {
        .desc = "crash MSRs (hv-crash)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RESET] = {
        .desc = "reset MSR (hv-reset)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_RESET_AVAILABLE}
        }
    },
    [HYPERV_FEAT_VPINDEX] = {
        .desc = "VP_INDEX MSR (hv-vpindex)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_INDEX_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RUNTIME] = {
        .desc = "VP_RUNTIME MSR (hv-runtime)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_RUNTIME_AVAILABLE}
        }
    },
    [HYPERV_FEAT_SYNIC] = {
        .desc = "synthetic interrupt controller (hv-synic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNIC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_STIMER] = {
        .desc = "synthetic timers (hv-stimer)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNTIMERS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
    },
    [HYPERV_FEAT_FREQUENCIES] = {
        .desc = "frequency MSRs (hv-frequencies)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_FREQUENCY_MSRS},
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_FREQUENCY_MSRS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_REENLIGHTENMENT] = {
        .desc = "reenlightenment MSRs (hv-reenlightenment)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
        }
    },
    [HYPERV_FEAT_TLBFLUSH] = {
        .desc = "paravirtualized TLB flush (hv-tlbflush)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_EVMCS] = {
        .desc = "enlightened VMCS (hv-evmcs)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
    [HYPERV_FEAT_IPI] = {
        .desc = "paravirtualized IPI (hv-ipi)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_CLUSTER_IPI_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_STIMER_DIRECT] = {
        .desc = "direct mode synthetic timers (hv-stimer-direct)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_STIMER_DIRECT_MODE_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_STIMER)
    },
    [HYPERV_FEAT_AVIC] = {
        .desc = "AVIC/APICv support (hv-avic/hv-apicv)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_DEPRECATING_AEOI_RECOMMENDED}
        }
    },
#ifdef CONFIG_SYNDBG
    [HYPERV_FEAT_SYNDBG] = {
        .desc = "Enable synthetic kernel debugger channel (hv-syndbg)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_FEATURE_DEBUG_MSRS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_RELAXED)
    },
#endif
    [HYPERV_FEAT_MSR_BITMAP] = {
        .desc = "enlightened MSR-Bitmap (hv-emsr-bitmap)",
        .flags = {
            {.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX,
             .bits = HV_NESTED_MSR_BITMAP}
        }
    },
    [HYPERV_FEAT_XMM_INPUT] = {
        .desc = "XMM fast hypercall input (hv-xmm-input)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_HYPERCALL_XMM_INPUT_AVAILABLE}
        }
    },
    [HYPERV_FEAT_TLBFLUSH_EXT] = {
        .desc = "Extended gva ranges for TLB flush hypercalls (hv-tlbflush-ext)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_EXT_GVA_RANGES_FLUSH_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_TLBFLUSH)
    },
    [HYPERV_FEAT_TLBFLUSH_DIRECT] = {
        .desc = "direct TLB flush (hv-tlbflush-direct)",
        .flags = {
            {.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX,
             .bits = HV_NESTED_DIRECT_FLUSH}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
};
*try_get_hv_cpuid(CPUState
*cs
, int max
,
1103 struct kvm_cpuid2
*cpuid
;
1106 size
= sizeof(*cpuid
) + max
* sizeof(*cpuid
->entries
);
1107 cpuid
= g_malloc0(size
);
1111 r
= kvm_ioctl(kvm_state
, KVM_GET_SUPPORTED_HV_CPUID
, cpuid
);
1113 r
= kvm_vcpu_ioctl(cs
, KVM_GET_SUPPORTED_HV_CPUID
, cpuid
);
1115 if (r
== 0 && cpuid
->nent
>= max
) {
1123 fprintf(stderr
, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n",
/*
 * Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs)
{
    struct kvm_cpuid2 *cpuid;
    /* 0x40000000..0x40000005, 0x4000000A, 0x40000080..0x40000082 leaves */
    int max = 10;
    int i;
    bool do_sys_ioctl;

    do_sys_ioctl =
        kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID) > 0;

    /*
     * Non-empty KVM context is needed when KVM_CAP_SYS_HYPERV_CPUID is
     * unsupported, kvm_hyperv_expand_features() checks for that.
     */
    assert(do_sys_ioctl || cs->kvm_state);

    /*
     * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with
     * -E2BIG, however, it doesn't report back the right size. Keep increasing
     * it and re-trying until we succeed.
     */
    while ((cpuid = try_get_hv_cpuid(cs, max, do_sys_ioctl)) == NULL) {
        max++;
    }

    /*
     * KVM_GET_SUPPORTED_HV_CPUID does not set EVMCS CPUID bit before
     * KVM_CAP_HYPERV_ENLIGHTENED_VMCS is enabled but we want to get the
     * information early, just check for the capability and set the bit
     * manually.
     */
    if (!do_sys_ioctl && kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        for (i = 0; i < cpuid->nent; i++) {
            if (cpuid->entries[i].function == HV_CPUID_ENLIGHTMENT_INFO) {
                cpuid->entries[i].eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
            }
        }
    }

    return cpuid;
}
/*
 * When KVM_GET_SUPPORTED_HV_CPUID is not supported we fill CPUID feature
 * leaves from KVM_CAP_HYPERV* and present MSRs data.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid2 *cpuid;
    struct kvm_cpuid_entry2 *entry_feat, *entry_recomm;

    /* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */
    cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries));
    cpuid->nent = 2;

    /* HV_CPUID_VENDOR_AND_MAX_FUNCTIONS */
    entry_feat = &cpuid->entries[0];
    entry_feat->function = HV_CPUID_FEATURES;

    entry_recomm = &cpuid->entries[1];
    entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
    entry_recomm->ebx = cpu->hyperv_spinlock_attempts;

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
        entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
        entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
        entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
        entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED;
        entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
        entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
        entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
    }

    if (has_msr_hv_frequencies) {
        entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
        entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE;
    }

    if (has_msr_hv_crash) {
        entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE;
    }

    if (has_msr_hv_reenlightenment) {
        entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
    }

    if (has_msr_hv_reset) {
        entry_feat->eax |= HV_RESET_AVAILABLE;
    }

    if (has_msr_hv_vpindex) {
        entry_feat->eax |= HV_VP_INDEX_AVAILABLE;
    }

    if (has_msr_hv_runtime) {
        entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE;
    }

    if (has_msr_hv_synic) {
        unsigned int cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;

        if (kvm_check_extension(cs->kvm_state, cap) > 0) {
            entry_feat->eax |= HV_SYNIC_AVAILABLE;
        }
    }

    if (has_msr_hv_stimer) {
        entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
    }

    if (has_msr_hv_syndbg_options) {
        entry_feat->edx |= HV_GUEST_DEBUGGING_AVAILABLE;
        entry_feat->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
        entry_feat->ebx |= HV_PARTITION_DEBUGGING_ALLOWED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_TLBFLUSH) > 0) {
        entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_SEND_IPI) > 0) {
        entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    return cpuid;
}
*cs
, uint32_t func
, int reg
)
1280 struct kvm_cpuid_entry2
*entry
;
1281 struct kvm_cpuid2
*cpuid
;
1283 if (hv_cpuid_cache
) {
1284 cpuid
= hv_cpuid_cache
;
1286 if (kvm_check_extension(kvm_state
, KVM_CAP_HYPERV_CPUID
) > 0) {
1287 cpuid
= get_supported_hv_cpuid(cs
);
1290 * 'cs->kvm_state' may be NULL when Hyper-V features are expanded
1291 * before KVM context is created but this is only done when
1292 * KVM_CAP_SYS_HYPERV_CPUID is supported and it implies
1293 * KVM_CAP_HYPERV_CPUID.
1295 assert(cs
->kvm_state
);
1297 cpuid
= get_supported_hv_cpuid_legacy(cs
);
1299 hv_cpuid_cache
= cpuid
;
1306 entry
= cpuid_find_entry(cpuid
, func
, 0);
1311 return cpuid_entry_get_reg(entry
, reg
);
1314 static bool hyperv_feature_supported(CPUState
*cs
, int feature
)
1316 uint32_t func
, bits
;
1319 for (i
= 0; i
< ARRAY_SIZE(kvm_hyperv_properties
[feature
].flags
); i
++) {
1321 func
= kvm_hyperv_properties
[feature
].flags
[i
].func
;
1322 reg
= kvm_hyperv_properties
[feature
].flags
[i
].reg
;
1323 bits
= kvm_hyperv_properties
[feature
].flags
[i
].bits
;
1329 if ((hv_cpuid_get_host(cs
, func
, reg
) & bits
) != bits
) {
/* Checks that all feature dependencies are enabled */
static bool hv_feature_check_deps(X86CPU *cpu, int feature, Error **errp)
{
    uint64_t deps;
    int dep_feat;

    deps = kvm_hyperv_properties[feature].dependencies;
    while (deps) {
        dep_feat = ctz64(deps);
        if (!(hyperv_feat_enabled(cpu, dep_feat))) {
            error_setg(errp, "Hyper-V %s requires Hyper-V %s",
                       kvm_hyperv_properties[feature].desc,
                       kvm_hyperv_properties[dep_feat].desc);
            return false;
        }
        deps &= ~(1ull << dep_feat);
    }

    return true;
}
static uint32_t hv_build_cpuid_leaf(CPUState *cs, uint32_t func, int reg)
{
    X86CPU *cpu = X86_CPU(cs);
    uint32_t r = 0;
    int i, j;

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties); i++) {
        if (!hyperv_feat_enabled(cpu, i)) {
            continue;
        }

        for (j = 0; j < ARRAY_SIZE(kvm_hyperv_properties[i].flags); j++) {
            if (kvm_hyperv_properties[i].flags[j].func != func) {
                continue;
            }
            if (kvm_hyperv_properties[i].flags[j].reg != reg) {
                continue;
            }

            r |= kvm_hyperv_properties[i].flags[j].bits;
        }
    }

    /* HV_CPUID_NESTED_FEATURES.EAX also encodes the supported eVMCS range */
    if (func == HV_CPUID_NESTED_FEATURES && reg == R_EAX) {
        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
            r |= DEFAULT_EVMCS_VERSION;
        }
    }

    return r;
}
/*
 * Expand Hyper-V CPU features. In particular, check that all the requested
 * features are supported by the host and the sanity of the configuration
 * (that all the required dependencies are included). Also, this takes care
 * of 'hv_passthrough' mode and fills the environment with all supported
 * Hyper-V features.
 */
bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int feat;

    if (!hyperv_enabled(cpu))
        return true;

    /*
     * When kvm_hyperv_expand_features is called at CPU feature expansion
     * time per-CPU kvm_state is not available yet so we can only proceed
     * when KVM_CAP_SYS_HYPERV_CPUID is supported.
     */
    if (!cs->kvm_state &&
        !kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID))
        return true;

    if (cpu->hyperv_passthrough) {
        cpu->hyperv_vendor_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EBX);
        cpu->hyperv_vendor_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_ECX);
        cpu->hyperv_vendor_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EDX);
        cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor,
                                       sizeof(cpu->hyperv_vendor_id) + 1);
        memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id,
               sizeof(cpu->hyperv_vendor_id));
        cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0;

        cpu->hyperv_interface_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EAX);
        cpu->hyperv_interface_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EBX);
        cpu->hyperv_interface_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_ECX);
        cpu->hyperv_interface_id[3] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EDX);

        cpu->hyperv_ver_id_build =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EAX);
        cpu->hyperv_ver_id_major =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) >> 16;
        cpu->hyperv_ver_id_minor =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) & 0xffff;
        cpu->hyperv_ver_id_sp =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_ECX);
        cpu->hyperv_ver_id_sb =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) >> 24;
        cpu->hyperv_ver_id_sn =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) & 0xffffff;

        cpu->hv_max_vps = hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS,
                                            R_EAX);
        cpu->hyperv_limits[0] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EBX);
        cpu->hyperv_limits[1] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_ECX);
        cpu->hyperv_limits[2] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EDX);

        cpu->hyperv_spinlock_attempts =
            hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EBX);

        /*
         * Mark feature as enabled in 'cpu->hyperv_features' as
         * hv_build_cpuid_leaf() uses this info to build guest CPUIDs.
         */
        for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
            if (hyperv_feature_supported(cs, feat)) {
                cpu->hyperv_features |= BIT(feat);
            }
        }
    }

    /* Check features availability and dependencies */
    for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
        /* If the feature was not requested skip it. */
        if (!hyperv_feat_enabled(cpu, feat)) {
            continue;
        }

        /* Check if the feature is supported by KVM */
        if (!hyperv_feature_supported(cs, feat)) {
            error_setg(errp, "Hyper-V %s is not supported by kernel",
                       kvm_hyperv_properties[feat].desc);
            return false;
        }

        /* Check dependencies */
        if (!hv_feature_check_deps(cpu, feat, &local_err)) {
            error_propagate(errp, local_err);
            return false;
        }
    }

    /* Additional dependencies not covered by kvm_hyperv_properties[] */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
        error_setg(errp, "Hyper-V %s requires Hyper-V %s",
                   kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
                   kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
        return false;
    }

    return true;
}
/*
 * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent.
 */
static int hyperv_fill_cpuids(CPUState *cs,
                              struct kvm_cpuid_entry2 *cpuid_ent)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    uint32_t cpuid_i = 0, max_cpuid_leaf = 0;
    uint32_t nested_eax =
        hv_build_cpuid_leaf(cs, HV_CPUID_NESTED_FEATURES, R_EAX);

    max_cpuid_leaf = nested_eax ? HV_CPUID_NESTED_FEATURES :
        HV_CPUID_IMPLEMENT_LIMITS;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) {
        max_cpuid_leaf =
            MAX(max_cpuid_leaf, HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
    }

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
    c->eax = max_cpuid_leaf;
    c->ebx = cpu->hyperv_vendor_id[0];
    c->ecx = cpu->hyperv_vendor_id[1];
    c->edx = cpu->hyperv_vendor_id[2];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_INTERFACE;
    c->eax = cpu->hyperv_interface_id[0];
    c->ebx = cpu->hyperv_interface_id[1];
    c->ecx = cpu->hyperv_interface_id[2];
    c->edx = cpu->hyperv_interface_id[3];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VERSION;
    c->eax = cpu->hyperv_ver_id_build;
    c->ebx = (uint32_t)cpu->hyperv_ver_id_major << 16 |
        cpu->hyperv_ver_id_minor;
    c->ecx = cpu->hyperv_ver_id_sp;
    c->edx = (uint32_t)cpu->hyperv_ver_id_sb << 24 |
        (cpu->hyperv_ver_id_sn & 0xffffff);

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_FEATURES;
    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EAX);
    c->ebx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EBX);
    c->edx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EDX);

    /* Unconditionally required with any Hyper-V enlightenment */
    c->eax |= HV_HYPERCALL_AVAILABLE;

    /* SynIC and Vmbus devices require messages/signals hypercalls */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only) {
        c->ebx |= HV_POST_MESSAGES | HV_SIGNAL_EVENTS;
    }

    /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
    c->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_ENLIGHTMENT_INFO;
    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX);
    c->ebx = cpu->hyperv_spinlock_attempts;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC) &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_AVIC)) {
        c->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
        c->eax |= HV_NO_NONARCH_CORESHARING;
    } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
        c->eax |= hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX) &
            HV_NO_NONARCH_CORESHARING;
    }

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_IMPLEMENT_LIMITS;
    c->eax = cpu->hv_max_vps;
    c->ebx = cpu->hyperv_limits[0];
    c->ecx = cpu->hyperv_limits[1];
    c->edx = cpu->hyperv_limits[2];

    if (nested_eax) {
        uint32_t function;

        /* Create zeroed 0x40000006..0x40000009 leaves */
        for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
             function < HV_CPUID_NESTED_FEATURES; function++) {
            c = &cpuid_ent[cpuid_i++];
            c->function = function;
        }

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_NESTED_FEATURES;
        c->eax = nested_eax;
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) {
        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS;
        c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ?
            HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
        memcpy(signature, "Microsoft VS", 12);
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_SYNDBG_INTERFACE;
        memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
        c->eax = signature[0];

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
        c->eax = HV_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
    }

    return cpuid_i;
}
static Error *hv_passthrough_mig_blocker;
static Error *hv_no_nonarch_cs_mig_blocker;
/* Checks that the exposed eVMCS version range is supported by KVM */
static bool evmcs_version_supported(uint16_t evmcs_version,
                                    uint16_t supported_evmcs_version)
{
    uint8_t min_version = evmcs_version & 0xff;
    uint8_t max_version = evmcs_version >> 8;
    uint8_t min_supported_version = supported_evmcs_version & 0xff;
    uint8_t max_supported_version = supported_evmcs_version >> 8;

    return (min_version >= min_supported_version) &&
           (max_version <= max_supported_version);
}
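/*
 * Editorial example (not from the original source): DEFAULT_EVMCS_VERSION is
 * ((1 << 8) | 1), i.e. min_version = 1 in the low byte and max_version = 1 in
 * the high byte, so the check above only passes when KVM reports an eVMCS
 * version range that includes version 1.
 */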
static int hyperv_init_vcpu(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int ret;

    if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
        error_setg(&hv_passthrough_mig_blocker,
                   "'hv-passthrough' CPU flag prevents migration, use explicit"
                   " set of hv-* flags instead");
        ret = migrate_add_blocker(&hv_passthrough_mig_blocker, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO &&
        hv_no_nonarch_cs_mig_blocker == NULL) {
        error_setg(&hv_no_nonarch_cs_mig_blocker,
                   "'hv-no-nonarch-coresharing=auto' CPU flag prevents migration"
                   " use explicit 'hv-no-nonarch-coresharing=on' instead (but"
                   " make sure SMT is disabled and/or that vCPUs are properly"
                   " pinned)");
        ret = migrate_add_blocker(&hv_no_nonarch_cs_mig_blocker, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
        /*
         * the kernel doesn't support setting vp_index; assert that its value
         * is in sync
         */
        uint64_t value;

        ret = kvm_get_one_msr(cpu, HV_X64_MSR_VP_INDEX, &value);
        if (ret < 0) {
            return ret;
        }

        if (value != hyperv_vp_index(CPU(cpu))) {
            error_report("kernel's vp_index != QEMU's vp_index");
            return -ENXIO;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
        ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
        if (ret < 0) {
            error_report("failed to turn on HyperV SynIC in KVM: %s",
                         strerror(-ret));
            return ret;
        }

        if (!cpu->hyperv_synic_kvm_only) {
            ret = hyperv_x86_synic_add(cpu);
            if (ret < 0) {
                error_report("failed to create HyperV SynIC: %s",
                             strerror(-ret));
                return ret;
            }
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
        uint16_t evmcs_version = DEFAULT_EVMCS_VERSION;
        uint16_t supported_evmcs_version;

        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
                                  (uintptr_t)&supported_evmcs_version);

        /*
         * KVM is required to support EVMCS ver.1. as that's what 'hv-evmcs'
         * option sets. Note: we hardcode the maximum supported eVMCS version
         * to '1' as well so 'hv-evmcs' feature is migratable even when (and if)
         * ver.2 is implemented. A new option (e.g. 'hv-evmcs=2') will then have
         * to be added.
         */
        if (ret < 0) {
            error_report("Hyper-V %s is not supported by kernel",
                         kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
            return ret;
        }

        if (!evmcs_version_supported(evmcs_version, supported_evmcs_version)) {
            error_report("eVMCS version range [%d..%d] is not supported by "
                         "kernel (supported: [%d..%d])", evmcs_version & 0xff,
                         evmcs_version >> 8, supported_evmcs_version & 0xff,
                         supported_evmcs_version >> 8);
            return -ENOTSUP;
        }
    }

    if (cpu->hyperv_enforce_cpuid) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENFORCE_CPUID, 0, 1);
        if (ret < 0) {
            error_report("failed to enable KVM_CAP_HYPERV_ENFORCE_CPUID: %s",
                         strerror(-ret));
            return ret;
        }
    }

    /* Skip SynIC and VP_INDEX since they are hard deps already */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_STIMER) &&
        hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC) &&
        hyperv_feat_enabled(cpu, HYPERV_FEAT_RUNTIME)) {
        hyperv_x86_set_vmbus_recommended_features_enabled();
    }

    return 0;
}
static Error *invtsc_mig_blocker;

#define KVM_MAX_CPUID_ENTRIES  100
static void kvm_init_xsave(CPUX86State *env)
{
    if (has_xsave2) {
        env->xsave_buf_len = QEMU_ALIGN_UP(has_xsave2, 4096);
    } else {
        env->xsave_buf_len = sizeof(struct kvm_xsave);
    }

    env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
    memset(env->xsave_buf, 0, env->xsave_buf_len);
    /*
     * The allocated storage must be large enough for all of the
     * possible XSAVE state components.
     */
    assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX) <=
           env->xsave_buf_len);
}
static void kvm_init_nested_state(CPUX86State *env)
{
    struct kvm_vmx_nested_state_hdr *vmx_hdr;
    uint32_t size;

    if (!env->nested_state) {
        return;
    }

    size = env->nested_state->size;

    memset(env->nested_state, 0, size);
    env->nested_state->size = size;

    if (cpu_has_vmx(env)) {
        env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
        vmx_hdr = &env->nested_state->hdr.vmx;
        vmx_hdr->vmxon_pa = -1ull;
        vmx_hdr->vmcs12_pa = -1ull;
    } else if (cpu_has_svm(env)) {
        env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM;
    }
}
static uint32_t kvm_x86_build_cpuid(CPUX86State *env,
                                    struct kvm_cpuid_entry2 *entries,
                                    uint32_t cpuid_i)
{
    uint32_t limit, i, j;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        j = 0;
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            goto full;
        }
        c = &entries[cpuid_i++];
        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    goto full;
                }
                c = &entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 0x1f:
            if (!x86_has_extended_topo(env->avail_cpu_topo)) {
                cpuid_i--;
                break;
            }
            /* fallthrough */
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }

                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0x1f && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    goto full;
                }
                c = &entries[cpuid_i++];
            }
            break;
        case 0x12:
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (j > 1 && (c->eax & 0xf) != 1) {
                    break;
                }

                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    goto full;
                }
                c = &entries[cpuid_i++];
            }
            break;
        case 0x7:
        case 0x14:
        case 0x1d:
        case 0x1e: {
            uint32_t times;

            c->function = i;
            c->index = 0;
            c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax;

            for (j = 1; j <= times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    goto full;
                }
                c = &entries[cpuid_i++];
                c->function = i;
                c->index = j;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
                /*
                 * KVM already returns all zeroes if a CPUID entry is missing,
                 * so we can omit it and avoid hitting KVM's 80-entry limit.
                 */
                cpuid_i--;
            }
            break;
        }
    }

    if (limit >= 0x0a) {
        uint32_t eax, edx;

        cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);

        has_architectural_pmu_version = eax & 0xff;
        if (has_architectural_pmu_version > 0) {
            num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;

            /* Shouldn't be more than 32, since that's the number of bits
             * available in EBX to tell us _which_ counters are available.
             * Play it safe.
             */
            if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
                num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
            }

            if (has_architectural_pmu_version > 1) {
                num_architectural_pmu_fixed_counters = edx & 0x1f;

                if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
                    num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
                }
            }
        }
    }

    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        j = 0;
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            goto full;
        }
        c = &entries[cpuid_i++];

        switch (i) {
        case 0x8000001d:
            /* Query for all AMD cache information leaves */
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (c->eax == 0) {
                    break;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    goto full;
                }
                c = &entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
                /*
                 * KVM already returns all zeroes if a CPUID entry is missing,
                 * so we can omit it and avoid hitting KVM's 80-entry limit.
                 */
                cpuid_i--;
            }
            break;
        }
    }

    /* Call Centaur's CPUID instructions if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            j = 0;
            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                goto full;
            }
            c = &entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    return cpuid_i;

full:
    fprintf(stderr, "cpuid_data is full, no space for "
            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
    abort();
}
2044 int kvm_arch_init_vcpu(CPUState
*cs
)
2047 struct kvm_cpuid2 cpuid
;
2048 struct kvm_cpuid_entry2 entries
[KVM_MAX_CPUID_ENTRIES
];
2051 * The kernel defines these structs with padding fields so there
2052 * should be no extra padding in our cpuid_data struct.
2054 QEMU_BUILD_BUG_ON(sizeof(cpuid_data
) !=
2055 sizeof(struct kvm_cpuid2
) +
2056 sizeof(struct kvm_cpuid_entry2
) * KVM_MAX_CPUID_ENTRIES
);
2058 X86CPU
*cpu
= X86_CPU(cs
);
2059 CPUX86State
*env
= &cpu
->env
;
2061 struct kvm_cpuid_entry2
*c
;
2062 uint32_t signature
[3];
2063 int kvm_base
= KVM_CPUID_SIGNATURE
;
2064 int max_nested_state_len
;
2066 Error
*local_err
= NULL
;
2068 memset(&cpuid_data
, 0, sizeof(cpuid_data
));
2072 has_xsave2
= kvm_check_extension(cs
->kvm_state
, KVM_CAP_XSAVE2
);
2074 r
= kvm_arch_set_tsc_khz(cs
);
2079 /* vcpu's TSC frequency is either specified by user, or following
2080 * the value used by KVM if the former is not present. In the
2081 * latter case, we query it from KVM and record in env->tsc_khz,
2082 * so that vcpu's TSC frequency can be migrated later via this field.
2084 if (!env
->tsc_khz
) {
2085 r
= kvm_check_extension(cs
->kvm_state
, KVM_CAP_GET_TSC_KHZ
) ?
2086 kvm_vcpu_ioctl(cs
, KVM_GET_TSC_KHZ
) :
2093 env
->apic_bus_freq
= KVM_APIC_BUS_FREQUENCY
;
2096 * kvm_hyperv_expand_features() is called here for the second time in case
2097 * KVM_CAP_SYS_HYPERV_CPUID is not supported. While we can't possibly handle
2098 * 'query-cpu-model-expansion' in this case as we don't have a KVM vCPU to
2099 * check which Hyper-V enlightenments are supported and which are not, we
2100 * can still proceed and check/expand Hyper-V enlightenments here so legacy
2101 * behavior is preserved.
2103 if (!kvm_hyperv_expand_features(cpu
, &local_err
)) {
2104 error_report_err(local_err
);
2108 if (hyperv_enabled(cpu
)) {
2109 r
= hyperv_init_vcpu(cpu
);
2114 cpuid_i
= hyperv_fill_cpuids(cs
, cpuid_data
.entries
);
2115 kvm_base
= KVM_CPUID_SIGNATURE_NEXT
;
2116 has_msr_hv_hypercall
= true;
2119 if (cs
->kvm_state
->xen_version
) {
2120 #ifdef CONFIG_XEN_EMU
2121 struct kvm_cpuid_entry2
*xen_max_leaf
;
2123 memcpy(signature
, "XenVMMXenVMM", 12);
2125 xen_max_leaf
= c
= &cpuid_data
.entries
[cpuid_i
++];
2126 c
->function
= kvm_base
+ XEN_CPUID_SIGNATURE
;
2127 c
->eax
= kvm_base
+ XEN_CPUID_TIME
;
2128 c
->ebx
= signature
[0];
2129 c
->ecx
= signature
[1];
2130 c
->edx
= signature
[2];
2132 c
= &cpuid_data
.entries
[cpuid_i
++];
2133 c
->function
= kvm_base
+ XEN_CPUID_VENDOR
;
2134 c
->eax
= cs
->kvm_state
->xen_version
;
2139 c
= &cpuid_data
.entries
[cpuid_i
++];
2140 c
->function
= kvm_base
+ XEN_CPUID_HVM_MSR
;
2141 /* Number of hypercall-transfer pages */
2143 /* Hypercall MSR base address */
2144 if (hyperv_enabled(cpu
)) {
2145 c
->ebx
= XEN_HYPERCALL_MSR_HYPERV
;
2146 kvm_xen_init(cs
->kvm_state
, c
->ebx
);
2148 c
->ebx
= XEN_HYPERCALL_MSR
;
2153 c
= &cpuid_data
.entries
[cpuid_i
++];
2154 c
->function
= kvm_base
+ XEN_CPUID_TIME
;
2155 c
->eax
= ((!!tsc_is_stable_and_known(env
) << 1) |
2156 (!!(env
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_RDTSCP
) << 2));
2157 /* default=0 (emulate if necessary) */
2159 /* guest tsc frequency */
2160 c
->ecx
= env
->user_tsc_khz
;
2161 /* guest tsc incarnation (migration count) */
2164 c
= &cpuid_data
.entries
[cpuid_i
++];
2165 c
->function
= kvm_base
+ XEN_CPUID_HVM
;
2166 xen_max_leaf
->eax
= kvm_base
+ XEN_CPUID_HVM
;
2167 if (cs
->kvm_state
->xen_version
>= XEN_VERSION(4, 5)) {
2168 c
->function
= kvm_base
+ XEN_CPUID_HVM
;
2170 if (cpu
->xen_vapic
) {
2171 c
->eax
|= XEN_HVM_CPUID_APIC_ACCESS_VIRT
;
2172 c
->eax
|= XEN_HVM_CPUID_X2APIC_VIRT
;
2175 c
->eax
|= XEN_HVM_CPUID_IOMMU_MAPPINGS
;
2177 if (cs
->kvm_state
->xen_version
>= XEN_VERSION(4, 6)) {
2178 c
->eax
|= XEN_HVM_CPUID_VCPU_ID_PRESENT
;
2179 c
->ebx
= cs
->cpu_index
;
2182 if (cs
->kvm_state
->xen_version
>= XEN_VERSION(4, 17)) {
2183 c
->eax
|= XEN_HVM_CPUID_UPCALL_VECTOR
;
2187 r
= kvm_xen_init_vcpu(cs
);
2193 #else /* CONFIG_XEN_EMU */
2194 /* This should never happen as kvm_arch_init() would have died first. */
2195 fprintf(stderr
, "Cannot enable Xen CPUID without Xen support\n");
2198 } else if (cpu
->expose_kvm
) {
2199 memcpy(signature
, "KVMKVMKVM\0\0\0", 12);
2200 c
= &cpuid_data
.entries
[cpuid_i
++];
2201 c
->function
= KVM_CPUID_SIGNATURE
| kvm_base
;
2202 c
->eax
= KVM_CPUID_FEATURES
| kvm_base
;
2203 c
->ebx
= signature
[0];
2204 c
->ecx
= signature
[1];
2205 c
->edx
= signature
[2];
2207 c
= &cpuid_data
.entries
[cpuid_i
++];
2208 c
->function
= KVM_CPUID_FEATURES
| kvm_base
;
2209 c
->eax
= env
->features
[FEAT_KVM
];
2210 c
->edx
= env
->features
[FEAT_KVM_HINTS
];
2213 if (cpu
->kvm_pv_enforce_cpuid
) {
2214 r
= kvm_vcpu_enable_cap(cs
, KVM_CAP_ENFORCE_PV_FEATURE_CPUID
, 0, 1);
2217 "failed to enable KVM_CAP_ENFORCE_PV_FEATURE_CPUID: %s",
2223 cpuid_i
= kvm_x86_build_cpuid(env
, cpuid_data
.entries
, cpuid_i
);
2224 cpuid_data
.cpuid
.nent
= cpuid_i
;
2226 if (((env
->cpuid_version
>> 8)&0xF) >= 6
2227 && (env
->features
[FEAT_1_EDX
] & (CPUID_MCE
| CPUID_MCA
)) ==
2228 (CPUID_MCE
| CPUID_MCA
)) {
2229 uint64_t mcg_cap
, unsupported_caps
;
2233 ret
= kvm_get_mce_cap_supported(cs
->kvm_state
, &mcg_cap
, &banks
);
2235 fprintf(stderr
, "kvm_get_mce_cap_supported: %s", strerror(-ret
));
2239 if (banks
< (env
->mcg_cap
& MCG_CAP_BANKS_MASK
)) {
2240 error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
2241 (int)(env
->mcg_cap
& MCG_CAP_BANKS_MASK
), banks
);
2245 unsupported_caps
= env
->mcg_cap
& ~(mcg_cap
| MCG_CAP_BANKS_MASK
);
2246 if (unsupported_caps
) {
2247 if (unsupported_caps
& MCG_LMCE_P
) {
2248 error_report("kvm: LMCE not supported");
2251 warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64
,
2255 env
->mcg_cap
&= mcg_cap
| MCG_CAP_BANKS_MASK
;
2256 ret
= kvm_vcpu_ioctl(cs
, KVM_X86_SETUP_MCE
, &env
->mcg_cap
);
2258 fprintf(stderr
, "KVM_X86_SETUP_MCE: %s", strerror(-ret
));
2263 cpu
->vmsentry
= qemu_add_vm_change_state_handler(cpu_update_state
, env
);
2265 c
= cpuid_find_entry(&cpuid_data
.cpuid
, 1, 0);
2267 has_msr_feature_control
= !!(c
->ecx
& CPUID_EXT_VMX
) ||
2268 !!(c
->ecx
& CPUID_EXT_SMX
);
2271 c
= cpuid_find_entry(&cpuid_data
.cpuid
, 7, 0);
2272 if (c
&& (c
->ebx
& CPUID_7_0_EBX_SGX
)) {
2273 has_msr_feature_control
= true;
2276 if (env
->mcg_cap
& MCG_LMCE_P
) {
2277 has_msr_mcg_ext_ctl
= has_msr_feature_control
= true;
2280 if (!env
->user_tsc_khz
) {
2281 if ((env
->features
[FEAT_8000_0007_EDX
] & CPUID_APM_INVTSC
) &&
2282 invtsc_mig_blocker
== NULL
) {
2283 error_setg(&invtsc_mig_blocker
,
2284 "State blocked by non-migratable CPU device"
2286 r
= migrate_add_blocker(&invtsc_mig_blocker
, &local_err
);
2288 error_report_err(local_err
);
2294 if (cpu
->vmware_cpuid_freq
2295 /* Guests depend on 0x40000000 to detect this feature, so only expose
2296 * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
2298 && kvm_base
== KVM_CPUID_SIGNATURE
2299 /* TSC clock must be stable and known for this feature. */
2300 && tsc_is_stable_and_known(env
)) {
2302 c
= &cpuid_data
.entries
[cpuid_i
++];
2303 c
->function
= KVM_CPUID_SIGNATURE
| 0x10;
2304 c
->eax
= env
->tsc_khz
;
2305 c
->ebx
= env
->apic_bus_freq
/ 1000; /* Hz to KHz */
2306 c
->ecx
= c
->edx
= 0;
2308 c
= cpuid_find_entry(&cpuid_data
.cpuid
, kvm_base
, 0);
2309 c
->eax
= MAX(c
->eax
, KVM_CPUID_SIGNATURE
| 0x10);
2312 cpuid_data
.cpuid
.nent
= cpuid_i
;
2314 cpuid_data
.cpuid
.padding
= 0;
2315 r
= kvm_vcpu_ioctl(cs
, KVM_SET_CPUID2
, &cpuid_data
);
2319 kvm_init_xsave(env
);
2321 max_nested_state_len
= kvm_max_nested_state_length();
2322 if (max_nested_state_len
> 0) {
2323 assert(max_nested_state_len
>= offsetof(struct kvm_nested_state
, data
));
2325 if (cpu_has_vmx(env
) || cpu_has_svm(env
)) {
2326 env
->nested_state
= g_malloc0(max_nested_state_len
);
2327 env
->nested_state
->size
= max_nested_state_len
;
2329 kvm_init_nested_state(env
);
2333 cpu
->kvm_msr_buf
= g_malloc0(MSR_BUF_SIZE
);
2335 if (!(env
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_RDTSCP
)) {
2336 has_msr_tsc_aux
= false;
2344 migrate_del_blocker(&invtsc_mig_blocker
);
int kvm_arch_destroy_vcpu(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    g_free(env->xsave_buf);

    g_free(cpu->kvm_msr_buf);
    cpu->kvm_msr_buf = NULL;

    g_free(env->nested_state);
    env->nested_state = NULL;

    qemu_del_vm_change_state_handler(cpu->vmsentry);

    return 0;
}
void kvm_arch_reset_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }

    /* enabled by default */
    env->poll_control_msr = 1;

    kvm_init_nested_state(env);

    sev_es_set_reset_vector(CPU(cpu));
}
void kvm_arch_after_reset_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i;

    /*
     * Reset SynIC after all other devices have been reset to let them remove
     * their SINT routes first.
     */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
            env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
        }

        hyperv_x86_synic_reset(cpu);
    }
}
void kvm_arch_do_init_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    /* APs get directly into wait-for-SIPI state. */
    if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
        env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
    }
}
static int kvm_get_supported_feature_msrs(KVMState *s)
{
    int ret = 0;

    if (kvm_feature_msrs != NULL) {
        return 0;
    }

    if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) {
        return 0;
    }

    struct kvm_msr_list msr_list;

    msr_list.nmsrs = 0;
    ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list);
    if (ret < 0 && ret != -E2BIG) {
        error_report("Fetch KVM feature MSR list failed: %s",
                     strerror(-ret));
        return ret;
    }

    assert(msr_list.nmsrs > 0);
    kvm_feature_msrs = g_malloc0(sizeof(msr_list) +
                                 msr_list.nmsrs * sizeof(msr_list.indices[0]));

    kvm_feature_msrs->nmsrs = msr_list.nmsrs;
    ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs);

    if (ret < 0) {
        error_report("Fetch KVM feature MSR list failed: %s",
                     strerror(-ret));
        g_free(kvm_feature_msrs);
        kvm_feature_msrs = NULL;
        return ret;
    }

    return 0;
}
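
/*
 * Note on the helper above: it follows the usual two-step KVM list protocol.
 * The first KVM_GET_MSR_FEATURE_INDEX_LIST call is made with nmsrs == 0 and
 * is allowed to fail with -E2BIG; it only serves to let the kernel report
 * how many feature MSRs exist.  The second call, issued with a buffer sized
 * from that count, then retrieves the actual index list.
 */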
2455 static int kvm_get_supported_msrs(KVMState
*s
)
2458 struct kvm_msr_list msr_list
, *kvm_msr_list
;
2461 * Obtain MSR list from KVM. These are the MSRs that we must
2465 ret
= kvm_ioctl(s
, KVM_GET_MSR_INDEX_LIST
, &msr_list
);
2466 if (ret
< 0 && ret
!= -E2BIG
) {
2470 * Old kernel modules had a bug and could write beyond the provided
2471 * memory. Allocate at least a safe amount of 1K.
2473 kvm_msr_list
= g_malloc0(MAX(1024, sizeof(msr_list
) +
2475 sizeof(msr_list
.indices
[0])));
2477 kvm_msr_list
->nmsrs
= msr_list
.nmsrs
;
2478 ret
= kvm_ioctl(s
, KVM_GET_MSR_INDEX_LIST
, kvm_msr_list
);
2482 for (i
= 0; i
< kvm_msr_list
->nmsrs
; i
++) {
2483 switch (kvm_msr_list
->indices
[i
]) {
2485 has_msr_star
= true;
2487 case MSR_VM_HSAVE_PA
:
2488 has_msr_hsave_pa
= true;
2491 has_msr_tsc_aux
= true;
2493 case MSR_TSC_ADJUST
:
2494 has_msr_tsc_adjust
= true;
2496 case MSR_IA32_TSCDEADLINE
:
2497 has_msr_tsc_deadline
= true;
2499 case MSR_IA32_SMBASE
:
2500 has_msr_smbase
= true;
2503 has_msr_smi_count
= true;
2505 case MSR_IA32_MISC_ENABLE
:
2506 has_msr_misc_enable
= true;
2508 case MSR_IA32_BNDCFGS
:
2509 has_msr_bndcfgs
= true;
2514 case MSR_IA32_UMWAIT_CONTROL
:
2515 has_msr_umwait
= true;
2517 case HV_X64_MSR_CRASH_CTL
:
2518 has_msr_hv_crash
= true;
2520 case HV_X64_MSR_RESET
:
2521 has_msr_hv_reset
= true;
2523 case HV_X64_MSR_VP_INDEX
:
2524 has_msr_hv_vpindex
= true;
2526 case HV_X64_MSR_VP_RUNTIME
:
2527 has_msr_hv_runtime
= true;
2529 case HV_X64_MSR_SCONTROL
:
2530 has_msr_hv_synic
= true;
2532 case HV_X64_MSR_STIMER0_CONFIG
:
2533 has_msr_hv_stimer
= true;
2535 case HV_X64_MSR_TSC_FREQUENCY
:
2536 has_msr_hv_frequencies
= true;
2538 case HV_X64_MSR_REENLIGHTENMENT_CONTROL
:
2539 has_msr_hv_reenlightenment
= true;
2541 case HV_X64_MSR_SYNDBG_OPTIONS
:
2542 has_msr_hv_syndbg_options
= true;
2544 case MSR_IA32_SPEC_CTRL
:
2545 has_msr_spec_ctrl
= true;
2547 case MSR_AMD64_TSC_RATIO
:
2548 has_tsc_scale_msr
= true;
2550 case MSR_IA32_TSX_CTRL
:
2551 has_msr_tsx_ctrl
= true;
2554 has_msr_virt_ssbd
= true;
2556 case MSR_IA32_ARCH_CAPABILITIES
:
2557 has_msr_arch_capabs
= true;
2559 case MSR_IA32_CORE_CAPABILITY
:
2560 has_msr_core_capabs
= true;
2562 case MSR_IA32_PERF_CAPABILITIES
:
2563 has_msr_perf_capabs
= true;
2565 case MSR_IA32_VMX_VMFUNC
:
2566 has_msr_vmx_vmfunc
= true;
2568 case MSR_IA32_UCODE_REV
:
2569 has_msr_ucode_rev
= true;
2571 case MSR_IA32_VMX_PROCBASED_CTLS2
:
2572 has_msr_vmx_procbased_ctls2
= true;
2575 has_msr_pkrs
= true;
2581 g_free(kvm_msr_list
);
static bool kvm_rdmsr_core_thread_count(X86CPU *cpu,
                                        uint32_t msr,
                                        uint64_t *val)
{
    CPUState *cs = CPU(cpu);

    *val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */
    *val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */

    return true;
}
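
/*
 * Illustrative example (made-up topology, not taken from a real guest): with
 * nr_cores = 4 and nr_threads = 2, the handler above reports 8 logical
 * processors in bits 15..0 and 4 cores in bits 31..16, so a guest rdmsr of
 * MSR_CORE_THREAD_COUNT would return 0x00040008.
 */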
static bool kvm_rdmsr_rapl_power_unit(X86CPU *cpu,
                                      uint32_t msr,
                                      uint64_t *val)
{
    CPUState *cs = CPU(cpu);

    *val = cs->kvm_state->msr_energy.msr_unit;

    return true;
}

static bool kvm_rdmsr_pkg_power_limit(X86CPU *cpu,
                                      uint32_t msr,
                                      uint64_t *val)
{
    CPUState *cs = CPU(cpu);

    *val = cs->kvm_state->msr_energy.msr_limit;

    return true;
}

static bool kvm_rdmsr_pkg_power_info(X86CPU *cpu,
                                     uint32_t msr,
                                     uint64_t *val)
{
    CPUState *cs = CPU(cpu);

    *val = cs->kvm_state->msr_energy.msr_info;

    return true;
}

static bool kvm_rdmsr_pkg_energy_status(X86CPU *cpu,
                                        uint32_t msr,
                                        uint64_t *val)
{
    CPUState *cs = CPU(cpu);

    *val = cs->kvm_state->msr_energy.msr_value[cs->cpu_index];

    return true;
}
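
/*
 * The four RAPL read handlers above back the guest-visible energy MSRs with
 * values kept in kvm_state->msr_energy: the unit, limit and info registers
 * are sampled once at setup time, while msr_value[] is refreshed
 * periodically by the energy worker thread and indexed with the reading
 * vCPU, so each vCPU sees the estimate for its own virtual package.
 */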
static Notifier smram_machine_done;
static KVMMemoryListener smram_listener;
static AddressSpace smram_address_space;
static MemoryRegion smram_as_root;
static MemoryRegion smram_as_mem;

static void register_smram_listener(Notifier *n, void *unused)
{
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    /* Outer container... */
    memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
    memory_region_set_enabled(&smram_as_root, true);

    /* ... with two regions inside: normal system memory with low
     * priority, and...
     */
    memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
                             get_system_memory(), 0, ~0ull);
    memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
    memory_region_set_enabled(&smram_as_mem, true);

    /* ... SMRAM with higher priority */
    memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
    memory_region_set_enabled(smram, true);

    address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
    kvm_memory_listener_register(kvm_state, &smram_listener,
                                 &smram_address_space, 1, "kvm-smram");
}
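
/*
 * The SMRAM address space built above contains two overlapping subregions:
 * an alias of normal system memory at priority 0 and the machine's "smram"
 * region at priority 10, so SMRAM contents shadow ordinary RAM whenever an
 * access is resolved through this address space (registered with KVM as
 * address-space id 1 via kvm_memory_listener_register()).
 */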
2679 static void *kvm_msr_energy_thread(void *data
)
2682 struct KVMMsrEnergy
*vmsr
= &s
->msr_energy
;
2684 g_autofree vmsr_package_energy_stat
*pkg_stat
= NULL
;
2685 g_autofree vmsr_thread_stat
*thd_stat
= NULL
;
2686 g_autofree CPUState
*cpu
= NULL
;
2687 g_autofree
unsigned int *vpkgs_energy_stat
= NULL
;
2688 unsigned int num_threads
= 0;
2690 X86CPUTopoIDs topo_ids
;
2692 rcu_register_thread();
2694 /* Allocate memory for each package energy status */
2695 pkg_stat
= g_new0(vmsr_package_energy_stat
, vmsr
->host_topo
.maxpkgs
);
2697 /* Allocate memory for thread stats */
2698 thd_stat
= g_new0(vmsr_thread_stat
, 1);
2700 /* Allocate memory for holding virtual package energy counter */
2701 vpkgs_energy_stat
= g_new0(unsigned int, vmsr
->guest_vsockets
);
2703 /* Populate the max tick of each packages */
2704 for (int i
= 0; i
< vmsr
->host_topo
.maxpkgs
; i
++) {
2706 * Max numbers of ticks per package
2707 * Time in second * Number of ticks/second * Number of cores/package
2708 * ex: 100 ticks/second/CPU, 12 CPUs per Package gives 1200 ticks max
2710 vmsr
->host_topo
.maxticks
[i
] = (MSR_ENERGY_THREAD_SLEEP_US
/ 1000000)
2711 * sysconf(_SC_CLK_TCK
)
2712 * vmsr
->host_topo
.pkg_cpu_count
[i
];
2716 /* Get all qemu threads id */
2717 g_autofree pid_t
*thread_ids
2718 = vmsr_get_thread_ids(vmsr
->pid
, &num_threads
);
2720 if (thread_ids
== NULL
) {
2724 thd_stat
= g_renew(vmsr_thread_stat
, thd_stat
, num_threads
);
2725 /* Unlike g_new0, g_renew0 function doesn't exist yet... */
2726 memset(thd_stat
, 0, num_threads
* sizeof(vmsr_thread_stat
));
2728 /* Populate all the thread stats */
2729 for (int i
= 0; i
< num_threads
; i
++) {
2730 thd_stat
[i
].utime
= g_new0(unsigned long long, 2);
2731 thd_stat
[i
].stime
= g_new0(unsigned long long, 2);
2732 thd_stat
[i
].thread_id
= thread_ids
[i
];
2733 vmsr_read_thread_stat(vmsr
->pid
,
2734 thd_stat
[i
].thread_id
,
2735 &thd_stat
[i
].utime
[0],
2736 &thd_stat
[i
].stime
[0],
2737 &thd_stat
[i
].cpu_id
);
2738 thd_stat
[i
].pkg_id
=
2739 vmsr_get_physical_package_id(thd_stat
[i
].cpu_id
);
2742 /* Retrieve all packages power plane energy counter */
2743 for (int i
= 0; i
< vmsr
->host_topo
.maxpkgs
; i
++) {
2744 for (int j
= 0; j
< num_threads
; j
++) {
2746 * Use the first thread we found that ran on the CPU
2747 * of the package to read the packages energy counter
2749 if (thd_stat
[j
].pkg_id
== i
) {
2750 pkg_stat
[i
].e_start
=
2751 vmsr_read_msr(MSR_PKG_ENERGY_STATUS
,
2753 thd_stat
[j
].thread_id
,
2754 s
->msr_energy
.sioc
);
2760 /* Sleep a short period while the other threads are working */
2761 usleep(MSR_ENERGY_THREAD_SLEEP_US
);
2764 * Retrieve all packages power plane energy counter
2765 * Calculate the delta of all packages
2767 for (int i
= 0; i
< vmsr
->host_topo
.maxpkgs
; i
++) {
2768 for (int j
= 0; j
< num_threads
; j
++) {
2770 * Use the first thread we found that ran on the CPU
2771 * of the package to read the packages energy counter
2773 if (thd_stat
[j
].pkg_id
== i
) {
2775 vmsr_read_msr(MSR_PKG_ENERGY_STATUS
,
2777 thd_stat
[j
].thread_id
,
2778 s
->msr_energy
.sioc
);
2780 * Prevent the case we have migrate the VM
2781 * during the sleep period or any other cases
2782 * were energy counter might be lower after
2785 if (pkg_stat
[i
].e_end
> pkg_stat
[i
].e_start
) {
2786 pkg_stat
[i
].e_delta
=
2787 pkg_stat
[i
].e_end
- pkg_stat
[i
].e_start
;
2789 pkg_stat
[i
].e_delta
= 0;
2796 /* Delta of ticks spend by each thread between the sample */
2797 for (int i
= 0; i
< num_threads
; i
++) {
2798 vmsr_read_thread_stat(vmsr
->pid
,
2799 thd_stat
[i
].thread_id
,
2800 &thd_stat
[i
].utime
[1],
2801 &thd_stat
[i
].stime
[1],
2802 &thd_stat
[i
].cpu_id
);
2804 if (vmsr
->pid
< 0) {
2806 * We don't count the dead thread
2807 * i.e threads that existed before the sleep
2810 thd_stat
[i
].delta_ticks
= 0;
2812 vmsr_delta_ticks(thd_stat
, i
);
2817 * Identify the vcpu threads
2818 * Calculate the number of vcpu per package
2821 for (int i
= 0; i
< num_threads
; i
++) {
2822 if (cpu
->thread_id
== thd_stat
[i
].thread_id
) {
2823 thd_stat
[i
].is_vcpu
= true;
2824 thd_stat
[i
].vcpu_id
= cpu
->cpu_index
;
2825 pkg_stat
[thd_stat
[i
].pkg_id
].nb_vcpu
++;
2826 thd_stat
[i
].acpi_id
= kvm_arch_vcpu_id(cpu
);
2832 /* Retrieve the virtual package number of each vCPU */
2833 for (int i
= 0; i
< vmsr
->guest_cpu_list
->len
; i
++) {
2834 for (int j
= 0; j
< num_threads
; j
++) {
2835 if ((thd_stat
[j
].acpi_id
==
2836 vmsr
->guest_cpu_list
->cpus
[i
].arch_id
)
2837 && (thd_stat
[j
].is_vcpu
== true)) {
2838 x86_topo_ids_from_apicid(thd_stat
[j
].acpi_id
,
2839 &vmsr
->guest_topo_info
, &topo_ids
);
2840 thd_stat
[j
].vpkg_id
= topo_ids
.pkg_id
;
2845 /* Calculate the total energy of all non-vCPU thread */
2846 for (int i
= 0; i
< num_threads
; i
++) {
2847 if ((thd_stat
[i
].is_vcpu
!= true) &&
2848 (thd_stat
[i
].delta_ticks
> 0)) {
2850 temp
= vmsr_get_ratio(pkg_stat
[thd_stat
[i
].pkg_id
].e_delta
,
2851 thd_stat
[i
].delta_ticks
,
2852 vmsr
->host_topo
.maxticks
[thd_stat
[i
].pkg_id
]);
2853 pkg_stat
[thd_stat
[i
].pkg_id
].e_ratio
2854 += (uint64_t)lround(temp
);
2858 /* Calculate the ratio per non-vCPU thread of each package */
2859 for (int i
= 0; i
< vmsr
->host_topo
.maxpkgs
; i
++) {
2860 if (pkg_stat
[i
].nb_vcpu
> 0) {
2861 pkg_stat
[i
].e_ratio
= pkg_stat
[i
].e_ratio
/ pkg_stat
[i
].nb_vcpu
;
2866 * Calculate the energy for each Package:
2867 * Energy Package = sum of each vCPU energy that belongs to the package
2869 for (int i
= 0; i
< num_threads
; i
++) {
2870 if ((thd_stat
[i
].is_vcpu
== true) && \
2871 (thd_stat
[i
].delta_ticks
> 0)) {
2873 temp
= vmsr_get_ratio(pkg_stat
[thd_stat
[i
].pkg_id
].e_delta
,
2874 thd_stat
[i
].delta_ticks
,
2875 vmsr
->host_topo
.maxticks
[thd_stat
[i
].pkg_id
]);
2876 vpkgs_energy_stat
[thd_stat
[i
].vpkg_id
] +=
2877 (uint64_t)lround(temp
);
2878 vpkgs_energy_stat
[thd_stat
[i
].vpkg_id
] +=
2879 pkg_stat
[thd_stat
[i
].pkg_id
].e_ratio
;
2884 * Finally populate the vmsr register of each vCPU with the total
2885 * package value to emulate the real hardware where each CPU return the
2886 * value of the package it belongs.
2888 for (int i
= 0; i
< num_threads
; i
++) {
2889 if ((thd_stat
[i
].is_vcpu
== true) && \
2890 (thd_stat
[i
].delta_ticks
> 0)) {
2891 vmsr
->msr_value
[thd_stat
[i
].vcpu_id
] = \
2892 vpkgs_energy_stat
[thd_stat
[i
].vpkg_id
];
2896 /* Freeing memory before zeroing the pointer */
2897 for (int i
= 0; i
< num_threads
; i
++) {
2898 g_free(thd_stat
[i
].utime
);
2899 g_free(thd_stat
[i
].stime
);
2904 rcu_unregister_thread();
2908 static int kvm_msr_energy_thread_init(KVMState
*s
, MachineState
*ms
)
2910 MachineClass
*mc
= MACHINE_GET_CLASS(ms
);
2911 struct KVMMsrEnergy
*r
= &s
->msr_energy
;
2916 * 1. Host cpu must be Intel cpu
2917 * 2. RAPL must be enabled on the Host
2919 if (!is_host_cpu_intel()) {
2920 error_report("The RAPL feature can only be enabled on hosts "
2921 "with Intel CPU models");
2926 if (!is_rapl_enabled()) {
2931 /* Retrieve the virtual topology */
2932 vmsr_init_topo_info(&r
->guest_topo_info
, ms
);
2934 /* Retrieve the number of vcpu */
2935 r
->guest_vcpus
= ms
->smp
.cpus
;
2937 /* Retrieve the number of virtual sockets */
2938 r
->guest_vsockets
= ms
->smp
.sockets
;
2940 /* Allocate register memory (MSR_PKG_STATUS) for each vcpu */
2941 r
->msr_value
= g_new0(uint64_t, r
->guest_vcpus
);
2943 /* Retrieve the CPUArchIDlist */
2944 r
->guest_cpu_list
= mc
->possible_cpu_arch_ids(ms
);
2946 /* Max number of cpus on the Host */
2947 r
->host_topo
.maxcpus
= vmsr_get_maxcpus();
2948 if (r
->host_topo
.maxcpus
== 0) {
2949 error_report("host max cpus = 0");
2954 /* Max number of packages on the host */
2955 r
->host_topo
.maxpkgs
= vmsr_get_max_physical_package(r
->host_topo
.maxcpus
);
2956 if (r
->host_topo
.maxpkgs
== 0) {
2957 error_report("host max pkgs = 0");
2962 /* Allocate memory for each package on the host */
2963 r
->host_topo
.pkg_cpu_count
= g_new0(unsigned int, r
->host_topo
.maxpkgs
);
2964 r
->host_topo
.maxticks
= g_new0(unsigned int, r
->host_topo
.maxpkgs
);
2966 vmsr_count_cpus_per_package(r
->host_topo
.pkg_cpu_count
,
2967 r
->host_topo
.maxpkgs
);
2968 for (int i
= 0; i
< r
->host_topo
.maxpkgs
; i
++) {
2969 if (r
->host_topo
.pkg_cpu_count
[i
] == 0) {
2970 error_report("cpu per packages = 0 on package_%d", i
);
2979 /* Compute the socket path if necessary */
2980 if (s
->msr_energy
.socket_path
== NULL
) {
2981 s
->msr_energy
.socket_path
= vmsr_compute_default_paths();
2984 /* Open socket with vmsr helper */
2985 s
->msr_energy
.sioc
= vmsr_open_socket(s
->msr_energy
.socket_path
);
2987 if (s
->msr_energy
.sioc
== NULL
) {
2988 error_report("vmsr socket opening failed");
2993 /* Those MSR values should not change */
2994 r
->msr_unit
= vmsr_read_msr(MSR_RAPL_POWER_UNIT
, 0, r
->pid
,
2995 s
->msr_energy
.sioc
);
2996 r
->msr_limit
= vmsr_read_msr(MSR_PKG_POWER_LIMIT
, 0, r
->pid
,
2997 s
->msr_energy
.sioc
);
2998 r
->msr_info
= vmsr_read_msr(MSR_PKG_POWER_INFO
, 0, r
->pid
,
2999 s
->msr_energy
.sioc
);
3000 if (r
->msr_unit
== 0 || r
->msr_limit
== 0 || r
->msr_info
== 0) {
3001 error_report("can't read any virtual msr");
3006 qemu_thread_create(&r
->msr_thr
, "kvm-msr",
3007 kvm_msr_energy_thread
,
3008 s
, QEMU_THREAD_JOINABLE
);
int kvm_arch_get_default_type(MachineState *ms)
{
    return 0;
}

static int kvm_vm_enable_exception_payload(KVMState *s)
{
    int ret = 0;

    has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD);
    if (has_exception_payload) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true);
        if (ret < 0) {
            error_report("kvm: Failed to enable exception payload cap: %s",
                         strerror(-ret));
        }
    }

    return ret;
}

static int kvm_vm_enable_triple_fault_event(KVMState *s)
{
    int ret = 0;

    has_triple_fault_event =
        kvm_check_extension(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT);
    if (has_triple_fault_event) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 0, true);
        if (ret < 0) {
            error_report("kvm: Failed to enable triple fault event cap: %s",
                         strerror(-ret));
        }
    }

    return ret;
}

static int kvm_vm_set_identity_map_addr(KVMState *s, uint64_t identity_base)
{
    return kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
}
static int kvm_vm_set_nr_mmu_pages(KVMState *s)
{
    uint64_t shadow_mem;
    int ret = 0;

    shadow_mem = object_property_get_int(OBJECT(s),
                                         "kvm-shadow-mem",
                                         &error_abort);
    if (shadow_mem != -1) {
        shadow_mem /= 4096;
        ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
    }

    return ret;
}

static int kvm_vm_set_tss_addr(KVMState *s, uint64_t tss_base)
{
    return kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, tss_base);
}
3073 static int kvm_vm_enable_disable_exits(KVMState
*s
)
3075 int disable_exits
= kvm_check_extension(s
, KVM_CAP_X86_DISABLE_EXITS
);
3076 /* Work around for kernel header with a typo. TODO: fix header and drop. */
3077 #if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
3078 #define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
3080 if (disable_exits
) {
3081 disable_exits
&= (KVM_X86_DISABLE_EXITS_MWAIT
|
3082 KVM_X86_DISABLE_EXITS_HLT
|
3083 KVM_X86_DISABLE_EXITS_PAUSE
|
3084 KVM_X86_DISABLE_EXITS_CSTATE
);
3087 return kvm_vm_enable_cap(s
, KVM_CAP_X86_DISABLE_EXITS
, 0,
3091 static int kvm_vm_enable_bus_lock_exit(KVMState
*s
)
3094 ret
= kvm_check_extension(s
, KVM_CAP_X86_BUS_LOCK_EXIT
);
3095 if (!(ret
& KVM_BUS_LOCK_DETECTION_EXIT
)) {
3096 error_report("kvm: bus lock detection unsupported");
3099 ret
= kvm_vm_enable_cap(s
, KVM_CAP_X86_BUS_LOCK_EXIT
, 0,
3100 KVM_BUS_LOCK_DETECTION_EXIT
);
3102 error_report("kvm: Failed to enable bus lock detection cap: %s",
3109 static int kvm_vm_enable_notify_vmexit(KVMState
*s
)
3112 if (s
->notify_vmexit
!= NOTIFY_VMEXIT_OPTION_DISABLE
) {
3113 uint64_t notify_window_flags
=
3114 ((uint64_t)s
->notify_window
<< 32) |
3115 KVM_X86_NOTIFY_VMEXIT_ENABLED
|
3116 KVM_X86_NOTIFY_VMEXIT_USER
;
3117 ret
= kvm_vm_enable_cap(s
, KVM_CAP_X86_NOTIFY_VMEXIT
, 0,
3118 notify_window_flags
);
3120 error_report("kvm: Failed to enable notify vmexit cap: %s",
3127 static int kvm_vm_enable_userspace_msr(KVMState
*s
)
3129 int ret
= kvm_vm_enable_cap(s
, KVM_CAP_X86_USER_SPACE_MSR
, 0,
3130 KVM_MSR_EXIT_REASON_FILTER
);
3132 error_report("Could not enable user space MSRs: %s",
3137 if (!kvm_filter_msr(s
, MSR_CORE_THREAD_COUNT
,
3138 kvm_rdmsr_core_thread_count
, NULL
)) {
3139 error_report("Could not install MSR_CORE_THREAD_COUNT handler!");
3146 static void kvm_vm_enable_energy_msrs(KVMState
*s
)
3149 if (s
->msr_energy
.enable
== true) {
3150 r
= kvm_filter_msr(s
, MSR_RAPL_POWER_UNIT
,
3151 kvm_rdmsr_rapl_power_unit
, NULL
);
3153 error_report("Could not install MSR_RAPL_POWER_UNIT \
3158 r
= kvm_filter_msr(s
, MSR_PKG_POWER_LIMIT
,
3159 kvm_rdmsr_pkg_power_limit
, NULL
);
3161 error_report("Could not install MSR_PKG_POWER_LIMIT \
3166 r
= kvm_filter_msr(s
, MSR_PKG_POWER_INFO
,
3167 kvm_rdmsr_pkg_power_info
, NULL
);
3169 error_report("Could not install MSR_PKG_POWER_INFO \
3173 r
= kvm_filter_msr(s
, MSR_PKG_ENERGY_STATUS
,
3174 kvm_rdmsr_pkg_energy_status
, NULL
);
3176 error_report("Could not install MSR_PKG_ENERGY_STATUS \
3184 int kvm_arch_init(MachineState
*ms
, KVMState
*s
)
3187 struct utsname utsname
;
3188 Error
*local_err
= NULL
;
3191 * Initialize SEV context, if required
3193 * If no memory encryption is requested (ms->cgs == NULL) this is
3196 * It's also a no-op if a non-SEV confidential guest support
3197 * mechanism is selected. SEV is the only mechanism available to
3198 * select on x86 at present, so this doesn't arise, but if new
3199 * mechanisms are supported in future (e.g. TDX), they'll need
3200 * their own initialization either here or elsewhere.
3203 ret
= confidential_guest_kvm_init(ms
->cgs
, &local_err
);
3205 error_report_err(local_err
);
3210 has_xcrs
= kvm_check_extension(s
, KVM_CAP_XCRS
);
3211 has_sregs2
= kvm_check_extension(s
, KVM_CAP_SREGS2
) > 0;
3213 hv_vpindex_settable
= kvm_check_extension(s
, KVM_CAP_HYPERV_VP_INDEX
);
3215 ret
= kvm_vm_enable_exception_payload(s
);
3220 ret
= kvm_vm_enable_triple_fault_event(s
);
3225 if (s
->xen_version
) {
3226 #ifdef CONFIG_XEN_EMU
3227 if (!object_dynamic_cast(OBJECT(ms
), TYPE_PC_MACHINE
)) {
3228 error_report("kvm: Xen support only available in PC machine");
3231 /* hyperv_enabled() doesn't work yet. */
3232 uint32_t msr
= XEN_HYPERCALL_MSR
;
3233 ret
= kvm_xen_init(s
, msr
);
3238 error_report("kvm: Xen support not enabled in qemu");
3243 ret
= kvm_get_supported_msrs(s
);
3248 kvm_get_supported_feature_msrs(s
);
3251 lm_capable_kernel
= strcmp(utsname
.machine
, "x86_64") == 0;
3253 ret
= kvm_vm_set_identity_map_addr(s
, KVM_IDENTITY_BASE
);
3258 /* Set TSS base one page after EPT identity map. */
3259 ret
= kvm_vm_set_tss_addr(s
, KVM_IDENTITY_BASE
+ 0x1000);
3264 /* Tell fw_cfg to notify the BIOS to reserve the range. */
3265 e820_add_entry(KVM_IDENTITY_BASE
, 0x4000, E820_RESERVED
);
3267 ret
= kvm_vm_set_nr_mmu_pages(s
);
3272 if (kvm_check_extension(s
, KVM_CAP_X86_SMM
) &&
3273 object_dynamic_cast(OBJECT(ms
), TYPE_X86_MACHINE
) &&
3274 x86_machine_is_smm_enabled(X86_MACHINE(ms
))) {
3275 smram_machine_done
.notify
= register_smram_listener
;
3276 qemu_add_machine_init_done_notifier(&smram_machine_done
);
3279 if (enable_cpu_pm
) {
3280 ret
= kvm_vm_enable_disable_exits(s
);
3282 error_report("kvm: guest stopping CPU not supported: %s",
3287 if (object_dynamic_cast(OBJECT(ms
), TYPE_X86_MACHINE
)) {
3288 X86MachineState
*x86ms
= X86_MACHINE(ms
);
3290 if (x86ms
->bus_lock_ratelimit
> 0) {
3291 ret
= kvm_vm_enable_bus_lock_exit(s
);
3295 ratelimit_init(&bus_lock_ratelimit_ctrl
);
3296 ratelimit_set_speed(&bus_lock_ratelimit_ctrl
,
3297 x86ms
->bus_lock_ratelimit
, BUS_LOCK_SLICE_TIME
);
3301 if (kvm_check_extension(s
, KVM_CAP_X86_NOTIFY_VMEXIT
)) {
3302 ret
= kvm_vm_enable_notify_vmexit(s
);
3308 if (kvm_vm_check_extension(s
, KVM_CAP_X86_USER_SPACE_MSR
)) {
3309 ret
= kvm_vm_enable_userspace_msr(s
);
3314 if (s
->msr_energy
.enable
== true) {
3315 kvm_vm_enable_energy_msrs(s
);
3316 if (kvm_msr_energy_thread_init(s
, ms
)) {
3317 error_report("kvm : error RAPL feature requirement not met");
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = !lhs->present;
}
static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                 ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
                 (rhs->dpl << DESC_DPL_SHIFT) |
                 (rhs->db << DESC_B_SHIFT) |
                 (rhs->s * DESC_S_MASK) |
                 (rhs->l << DESC_L_SHIFT) |
                 (rhs->g * DESC_G_MASK) |
                 (rhs->avl * DESC_AVL_MASK);
}
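
/*
 * set_seg() and get_seg() are inverses: set_seg() unpacks the cached QEMU
 * descriptor flags into the discrete kvm_segment fields and derives
 * 'unusable' from !present, while get_seg() repacks the kvm_segment fields
 * into a flags word and records a segment that KVM marked unusable as not
 * present.
 */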
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}
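
/*
 * Direction convention for kvm_getput_reg(): a non-zero 'set' copies the
 * QEMU register value into the kvm_regs field (put), a zero 'set' copies
 * the kvm_regs field back into QEMU state (get).
 */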
static int kvm_getput_regs(X86CPU *cpu, int set)
{
    CPUX86State *env = &cpu->env;
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
    }

    return ret;
}
*cpu
)
3428 CPUX86State
*env
= &cpu
->env
;
3429 void *xsave
= env
->xsave_buf
;
3431 x86_cpu_xsave_all_areas(cpu
, xsave
, env
->xsave_buf_len
);
3433 return kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_XSAVE
, xsave
);
3436 static int kvm_put_xcrs(X86CPU
*cpu
)
3438 CPUX86State
*env
= &cpu
->env
;
3439 struct kvm_xcrs xcrs
= {};
3447 xcrs
.xcrs
[0].xcr
= 0;
3448 xcrs
.xcrs
[0].value
= env
->xcr0
;
3449 return kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_XCRS
, &xcrs
);
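
/*
 * Only XCR0 is synchronized here: kvm_put_xcrs() hands KVM a single-entry
 * kvm_xcrs list describing XCR0, and kvm_get_xcrs() further below likewise
 * only picks the XCR0 entry out of whatever list KVM returns.
 */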
3452 static int kvm_put_sregs(X86CPU
*cpu
)
3454 CPUX86State
*env
= &cpu
->env
;
3455 struct kvm_sregs sregs
;
3458 * The interrupt_bitmap is ignored because KVM_SET_SREGS is
3459 * always followed by KVM_SET_VCPU_EVENTS.
3461 memset(sregs
.interrupt_bitmap
, 0, sizeof(sregs
.interrupt_bitmap
));
3463 if ((env
->eflags
& VM_MASK
)) {
3464 set_v8086_seg(&sregs
.cs
, &env
->segs
[R_CS
]);
3465 set_v8086_seg(&sregs
.ds
, &env
->segs
[R_DS
]);
3466 set_v8086_seg(&sregs
.es
, &env
->segs
[R_ES
]);
3467 set_v8086_seg(&sregs
.fs
, &env
->segs
[R_FS
]);
3468 set_v8086_seg(&sregs
.gs
, &env
->segs
[R_GS
]);
3469 set_v8086_seg(&sregs
.ss
, &env
->segs
[R_SS
]);
3471 set_seg(&sregs
.cs
, &env
->segs
[R_CS
]);
3472 set_seg(&sregs
.ds
, &env
->segs
[R_DS
]);
3473 set_seg(&sregs
.es
, &env
->segs
[R_ES
]);
3474 set_seg(&sregs
.fs
, &env
->segs
[R_FS
]);
3475 set_seg(&sregs
.gs
, &env
->segs
[R_GS
]);
3476 set_seg(&sregs
.ss
, &env
->segs
[R_SS
]);
3479 set_seg(&sregs
.tr
, &env
->tr
);
3480 set_seg(&sregs
.ldt
, &env
->ldt
);
3482 sregs
.idt
.limit
= env
->idt
.limit
;
3483 sregs
.idt
.base
= env
->idt
.base
;
3484 memset(sregs
.idt
.padding
, 0, sizeof sregs
.idt
.padding
);
3485 sregs
.gdt
.limit
= env
->gdt
.limit
;
3486 sregs
.gdt
.base
= env
->gdt
.base
;
3487 memset(sregs
.gdt
.padding
, 0, sizeof sregs
.gdt
.padding
);
3489 sregs
.cr0
= env
->cr
[0];
3490 sregs
.cr2
= env
->cr
[2];
3491 sregs
.cr3
= env
->cr
[3];
3492 sregs
.cr4
= env
->cr
[4];
3494 sregs
.cr8
= cpu_get_apic_tpr(cpu
->apic_state
);
3495 sregs
.apic_base
= cpu_get_apic_base(cpu
->apic_state
);
3497 sregs
.efer
= env
->efer
;
3499 return kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_SREGS
, &sregs
);
3502 static int kvm_put_sregs2(X86CPU
*cpu
)
3504 CPUX86State
*env
= &cpu
->env
;
3505 struct kvm_sregs2 sregs
;
3510 if ((env
->eflags
& VM_MASK
)) {
3511 set_v8086_seg(&sregs
.cs
, &env
->segs
[R_CS
]);
3512 set_v8086_seg(&sregs
.ds
, &env
->segs
[R_DS
]);
3513 set_v8086_seg(&sregs
.es
, &env
->segs
[R_ES
]);
3514 set_v8086_seg(&sregs
.fs
, &env
->segs
[R_FS
]);
3515 set_v8086_seg(&sregs
.gs
, &env
->segs
[R_GS
]);
3516 set_v8086_seg(&sregs
.ss
, &env
->segs
[R_SS
]);
3518 set_seg(&sregs
.cs
, &env
->segs
[R_CS
]);
3519 set_seg(&sregs
.ds
, &env
->segs
[R_DS
]);
3520 set_seg(&sregs
.es
, &env
->segs
[R_ES
]);
3521 set_seg(&sregs
.fs
, &env
->segs
[R_FS
]);
3522 set_seg(&sregs
.gs
, &env
->segs
[R_GS
]);
3523 set_seg(&sregs
.ss
, &env
->segs
[R_SS
]);
3526 set_seg(&sregs
.tr
, &env
->tr
);
3527 set_seg(&sregs
.ldt
, &env
->ldt
);
3529 sregs
.idt
.limit
= env
->idt
.limit
;
3530 sregs
.idt
.base
= env
->idt
.base
;
3531 memset(sregs
.idt
.padding
, 0, sizeof sregs
.idt
.padding
);
3532 sregs
.gdt
.limit
= env
->gdt
.limit
;
3533 sregs
.gdt
.base
= env
->gdt
.base
;
3534 memset(sregs
.gdt
.padding
, 0, sizeof sregs
.gdt
.padding
);
3536 sregs
.cr0
= env
->cr
[0];
3537 sregs
.cr2
= env
->cr
[2];
3538 sregs
.cr3
= env
->cr
[3];
3539 sregs
.cr4
= env
->cr
[4];
3541 sregs
.cr8
= cpu_get_apic_tpr(cpu
->apic_state
);
3542 sregs
.apic_base
= cpu_get_apic_base(cpu
->apic_state
);
3544 sregs
.efer
= env
->efer
;
3546 if (env
->pdptrs_valid
) {
3547 for (i
= 0; i
< 4; i
++) {
3548 sregs
.pdptrs
[i
] = env
->pdptrs
[i
];
3550 sregs
.flags
|= KVM_SREGS2_FLAGS_PDPTRS_VALID
;
3553 return kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_SREGS2
, &sregs
);
static void kvm_msr_buf_reset(X86CPU *cpu)
{
    memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
}

static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
{
    struct kvm_msrs *msrs = cpu->kvm_msr_buf;
    void *limit = ((void *)msrs) + MSR_BUF_SIZE;
    struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];

    assert((void *)(entry + 1) <= limit);

    entry->index = index;
    entry->reserved = 0;
    entry->data = value;
    msrs->nmsrs++;
}

static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
{
    kvm_msr_buf_reset(cpu);
    kvm_msr_entry_add(cpu, index, value);

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
}
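
/*
 * Two MSR write paths are used in this file: kvm_msr_entry_add() batches
 * entries into cpu->kvm_msr_buf for a later bulk KVM_SET_MSRS, while
 * kvm_put_one_msr() resets that buffer and issues an immediate KVM_SET_MSRS
 * with a single entry, which suits MSRs that must be written in isolation
 * (for example the feature control MSR handled below).
 */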
static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value)
{
    int ret;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {
        .info.nmsrs = 1,
        .entries[0].index = index,
    };

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }
    assert(ret == 1);
    *value = msr_data.entries[0].data;
    return ret;
}

void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
{
    int ret;

    ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
    assert(ret == 1);
}
static int kvm_put_tscdeadline_msr(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int ret;

    if (!has_msr_tsc_deadline) {
        return 0;
    }

    ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    return 0;
}

/*
 * Provide a separate write service for the feature control MSR in order to
 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
 * before writing any other state because forcibly leaving nested mode
 * invalidates the VCPU state.
 */
static int kvm_put_msr_feature_control(X86CPU *cpu)
{
    int ret;

    if (!has_msr_feature_control) {
        return 0;
    }

    ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
                          cpu->env.msr_ia32_feature_control);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    return 0;
}
static uint64_t make_vmx_msr_value(uint32_t index, uint32_t features)
{
    uint32_t default1, can_be_one, can_be_zero;
    uint32_t must_be_one;

    switch (index) {
    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
        default1 = 0x00000016;
        break;
    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
        default1 = 0x0401e172;
        break;
    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
        default1 = 0x000011ff;
        break;
    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        default1 = 0x00036dff;
        break;
    case MSR_IA32_VMX_PROCBASED_CTLS2:
        default1 = 0;
        break;
    default:
        abort();
    }

    /* If a feature bit is set, the control can be either set or clear.
     * Otherwise the value is limited to either 0 or 1 by default1.
     */
    can_be_one = features | default1;
    can_be_zero = features | ~default1;
    must_be_one = ~can_be_zero;

    /*
     * Bit 0:31 -> 0 if the control bit can be zero (i.e. 1 if it must be one).
     * Bit 32:63 -> 1 if the control bit can be one.
     */
    return must_be_one | (((uint64_t)can_be_one) << 32);
}
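
/*
 * Worked example for make_vmx_msr_value(), using small made-up numbers:
 * with default1 = 0b0110 and features = 0b0100, bit 1 (in default1 but not
 * in features) must be 1, bit 2 (in both) may take either value, and bit 3
 * (in neither) must be 0.  The returned capability value is therefore
 * 0b0010 in bits 0..31 (must-be-one) and 0b0110 in bits 32..63 (can-be-one).
 */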
3692 static void kvm_msr_entry_add_vmx(X86CPU
*cpu
, FeatureWordArray f
)
3694 uint64_t kvm_vmx_basic
=
3695 kvm_arch_get_supported_msr_feature(kvm_state
,
3696 MSR_IA32_VMX_BASIC
);
3698 if (!kvm_vmx_basic
) {
3699 /* If the kernel doesn't support VMX feature (kvm_intel.nested=0),
3700 * then kvm_vmx_basic will be 0 and KVM_SET_MSR will fail.
3705 uint64_t kvm_vmx_misc
=
3706 kvm_arch_get_supported_msr_feature(kvm_state
,
3708 uint64_t kvm_vmx_ept_vpid
=
3709 kvm_arch_get_supported_msr_feature(kvm_state
,
3710 MSR_IA32_VMX_EPT_VPID_CAP
);
3713 * If the guest is 64-bit, a value of 1 is allowed for the host address
3714 * space size vmexit control.
3716 uint64_t fixed_vmx_exit
= f
[FEAT_8000_0001_EDX
] & CPUID_EXT2_LM
3717 ? (uint64_t)VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE
<< 32 : 0;
3720 * Bits 0-30, 32-44 and 50-53 come from the host. KVM should
3721 * not change them for backwards compatibility.
3723 uint64_t fixed_vmx_basic
= kvm_vmx_basic
&
3724 (MSR_VMX_BASIC_VMCS_REVISION_MASK
|
3725 MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK
|
3726 MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK
);
3729 * Same for bits 0-4 and 25-27. Bits 16-24 (CR3 target count) can
3730 * change in the future but are always zero for now, clear them to be
3731 * future proof. Bits 32-63 in theory could change, though KVM does
3732 * not support dual-monitor treatment and probably never will; mask
3735 uint64_t fixed_vmx_misc
= kvm_vmx_misc
&
3736 (MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK
|
3737 MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK
);
3740 * EPT memory types should not change either, so we do not bother
3741 * adding features for them.
3743 uint64_t fixed_vmx_ept_mask
=
3744 (f
[FEAT_VMX_SECONDARY_CTLS
] & VMX_SECONDARY_EXEC_ENABLE_EPT
?
3745 MSR_VMX_EPT_UC
| MSR_VMX_EPT_WB
: 0);
3746 uint64_t fixed_vmx_ept_vpid
= kvm_vmx_ept_vpid
& fixed_vmx_ept_mask
;
3748 kvm_msr_entry_add(cpu
, MSR_IA32_VMX_TRUE_PROCBASED_CTLS
,
3749 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PROCBASED_CTLS
,
3750 f
[FEAT_VMX_PROCBASED_CTLS
]));
3751 kvm_msr_entry_add(cpu
, MSR_IA32_VMX_TRUE_PINBASED_CTLS
,
3752 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PINBASED_CTLS
,
3753 f
[FEAT_VMX_PINBASED_CTLS
]));
3754 kvm_msr_entry_add(cpu
, MSR_IA32_VMX_TRUE_EXIT_CTLS
,
3755 make_vmx_msr_value(MSR_IA32_VMX_TRUE_EXIT_CTLS
,
3756 f
[FEAT_VMX_EXIT_CTLS
]) | fixed_vmx_exit
);
3757 kvm_msr_entry_add(cpu
, MSR_IA32_VMX_TRUE_ENTRY_CTLS
,
3758 make_vmx_msr_value(MSR_IA32_VMX_TRUE_ENTRY_CTLS
,
3759 f
[FEAT_VMX_ENTRY_CTLS
]));
3760 kvm_msr_entry_add(cpu
, MSR_IA32_VMX_PROCBASED_CTLS2
,
3761 make_vmx_msr_value(MSR_IA32_VMX_PROCBASED_CTLS2
,
3762 f
[FEAT_VMX_SECONDARY_CTLS
]));
3763 kvm_msr_entry_add(cpu
, MSR_IA32_VMX_EPT_VPID_CAP
,
3764 f
[FEAT_VMX_EPT_VPID_CAPS
] | fixed_vmx_ept_vpid
);
3765 kvm_msr_entry_add(cpu
, MSR_IA32_VMX_BASIC
,
3766 f
[FEAT_VMX_BASIC
] | fixed_vmx_basic
);
3767 kvm_msr_entry_add(cpu
, MSR_IA32_VMX_MISC
,
3768 f
[FEAT_VMX_MISC
] | fixed_vmx_misc
);
3769 if (has_msr_vmx_vmfunc
) {
3770 kvm_msr_entry_add(cpu
, MSR_IA32_VMX_VMFUNC
, f
[FEAT_VMX_VMFUNC
]);
3774 * Just to be safe, write these with constant values. The CRn_FIXED1
3775 * MSRs are generated by KVM based on the vCPU's CPUID.
3777 kvm_msr_entry_add(cpu
, MSR_IA32_VMX_CR0_FIXED0
,
3778 CR0_PE_MASK
| CR0_PG_MASK
| CR0_NE_MASK
);
3779 kvm_msr_entry_add(cpu
, MSR_IA32_VMX_CR4_FIXED0
,
3782 if (f
[FEAT_7_1_EAX
] & CPUID_7_1_EAX_FRED
) {
3783 /* FRED injected-event data (0x2052). */
3784 kvm_msr_entry_add(cpu
, MSR_IA32_VMX_VMCS_ENUM
, 0x52);
3785 } else if (f
[FEAT_VMX_EXIT_CTLS
] &
3786 VMX_VM_EXIT_ACTIVATE_SECONDARY_CONTROLS
) {
3787 /* Secondary VM-exit controls (0x2044). */
3788 kvm_msr_entry_add(cpu
, MSR_IA32_VMX_VMCS_ENUM
, 0x44);
3789 } else if (f
[FEAT_VMX_SECONDARY_CTLS
] & VMX_SECONDARY_EXEC_TSC_SCALING
) {
3790 /* TSC multiplier (0x2032). */
3791 kvm_msr_entry_add(cpu
, MSR_IA32_VMX_VMCS_ENUM
, 0x32);
3793 /* Preemption timer (0x482E). */
3794 kvm_msr_entry_add(cpu
, MSR_IA32_VMX_VMCS_ENUM
, 0x2E);
static void kvm_msr_entry_add_perf(X86CPU *cpu, FeatureWordArray f)
{
    uint64_t kvm_perf_cap =
        kvm_arch_get_supported_msr_feature(kvm_state,
                                           MSR_IA32_PERF_CAPABILITIES);

    if (kvm_perf_cap) {
        kvm_msr_entry_add(cpu, MSR_IA32_PERF_CAPABILITIES,
                          kvm_perf_cap & f[FEAT_PERF_CAPABILITIES]);
    }
}

static int kvm_buf_set_msrs(X86CPU *cpu)
{
    int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    if (ret < cpu->kvm_msr_buf->nmsrs) {
        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
        error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
                     (uint32_t)e->index, (uint64_t)e->data);
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);

    return 0;
}
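
/*
 * kvm_buf_set_msrs() relies on the KVM_SET_MSRS return convention: the
 * ioctl returns the number of MSRs it processed, stopping at the first one
 * the kernel refuses, so on a partial success entries[ret] identifies the
 * offending MSR before the assertion fires.
 */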
static void kvm_init_msrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    kvm_msr_buf_reset(cpu);
    if (has_msr_arch_capabs) {
        kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
                          env->features[FEAT_ARCH_CAPABILITIES]);
    }

    if (has_msr_core_capabs) {
        kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
                          env->features[FEAT_CORE_CAPABILITY]);
    }

    if (has_msr_perf_capabs && cpu->enable_pmu) {
        kvm_msr_entry_add_perf(cpu, env->features);
    }

    if (has_msr_ucode_rev) {
        kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev);
    }

    /*
     * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
     * all kernels with MSR features should have them.
     */
    if (kvm_feature_msrs && cpu_has_vmx(env)) {
        kvm_msr_entry_add_vmx(cpu, env->features);
    }

    assert(kvm_buf_set_msrs(cpu) == 0);
}
3861 static int kvm_put_msrs(X86CPU
*cpu
, int level
)
3863 CPUX86State
*env
= &cpu
->env
;
3866 kvm_msr_buf_reset(cpu
);
3868 kvm_msr_entry_add(cpu
, MSR_IA32_SYSENTER_CS
, env
->sysenter_cs
);
3869 kvm_msr_entry_add(cpu
, MSR_IA32_SYSENTER_ESP
, env
->sysenter_esp
);
3870 kvm_msr_entry_add(cpu
, MSR_IA32_SYSENTER_EIP
, env
->sysenter_eip
);
3871 kvm_msr_entry_add(cpu
, MSR_PAT
, env
->pat
);
3873 kvm_msr_entry_add(cpu
, MSR_STAR
, env
->star
);
3875 if (has_msr_hsave_pa
) {
3876 kvm_msr_entry_add(cpu
, MSR_VM_HSAVE_PA
, env
->vm_hsave
);
3878 if (has_msr_tsc_aux
) {
3879 kvm_msr_entry_add(cpu
, MSR_TSC_AUX
, env
->tsc_aux
);
3881 if (has_msr_tsc_adjust
) {
3882 kvm_msr_entry_add(cpu
, MSR_TSC_ADJUST
, env
->tsc_adjust
);
3884 if (has_msr_misc_enable
) {
3885 kvm_msr_entry_add(cpu
, MSR_IA32_MISC_ENABLE
,
3886 env
->msr_ia32_misc_enable
);
3888 if (has_msr_smbase
) {
3889 kvm_msr_entry_add(cpu
, MSR_IA32_SMBASE
, env
->smbase
);
3891 if (has_msr_smi_count
) {
3892 kvm_msr_entry_add(cpu
, MSR_SMI_COUNT
, env
->msr_smi_count
);
3895 kvm_msr_entry_add(cpu
, MSR_IA32_PKRS
, env
->pkrs
);
3897 if (has_msr_bndcfgs
) {
3898 kvm_msr_entry_add(cpu
, MSR_IA32_BNDCFGS
, env
->msr_bndcfgs
);
3901 kvm_msr_entry_add(cpu
, MSR_IA32_XSS
, env
->xss
);
3903 if (has_msr_umwait
) {
3904 kvm_msr_entry_add(cpu
, MSR_IA32_UMWAIT_CONTROL
, env
->umwait
);
3906 if (has_msr_spec_ctrl
) {
3907 kvm_msr_entry_add(cpu
, MSR_IA32_SPEC_CTRL
, env
->spec_ctrl
);
3909 if (has_tsc_scale_msr
) {
3910 kvm_msr_entry_add(cpu
, MSR_AMD64_TSC_RATIO
, env
->amd_tsc_scale_msr
);
3913 if (has_msr_tsx_ctrl
) {
3914 kvm_msr_entry_add(cpu
, MSR_IA32_TSX_CTRL
, env
->tsx_ctrl
);
3916 if (has_msr_virt_ssbd
) {
3917 kvm_msr_entry_add(cpu
, MSR_VIRT_SSBD
, env
->virt_ssbd
);
3920 #ifdef TARGET_X86_64
3921 if (lm_capable_kernel
) {
3922 kvm_msr_entry_add(cpu
, MSR_CSTAR
, env
->cstar
);
3923 kvm_msr_entry_add(cpu
, MSR_KERNELGSBASE
, env
->kernelgsbase
);
3924 kvm_msr_entry_add(cpu
, MSR_FMASK
, env
->fmask
);
3925 kvm_msr_entry_add(cpu
, MSR_LSTAR
, env
->lstar
);
3926 if (env
->features
[FEAT_7_1_EAX
] & CPUID_7_1_EAX_FRED
) {
3927 kvm_msr_entry_add(cpu
, MSR_IA32_FRED_RSP0
, env
->fred_rsp0
);
3928 kvm_msr_entry_add(cpu
, MSR_IA32_FRED_RSP1
, env
->fred_rsp1
);
3929 kvm_msr_entry_add(cpu
, MSR_IA32_FRED_RSP2
, env
->fred_rsp2
);
3930 kvm_msr_entry_add(cpu
, MSR_IA32_FRED_RSP3
, env
->fred_rsp3
);
3931 kvm_msr_entry_add(cpu
, MSR_IA32_FRED_STKLVLS
, env
->fred_stklvls
);
3932 kvm_msr_entry_add(cpu
, MSR_IA32_FRED_SSP1
, env
->fred_ssp1
);
3933 kvm_msr_entry_add(cpu
, MSR_IA32_FRED_SSP2
, env
->fred_ssp2
);
3934 kvm_msr_entry_add(cpu
, MSR_IA32_FRED_SSP3
, env
->fred_ssp3
);
3935 kvm_msr_entry_add(cpu
, MSR_IA32_FRED_CONFIG
, env
->fred_config
);
3941 * The following MSRs have side effects on the guest or are too heavy
3942 * for normal writeback. Limit them to reset or full state updates.
3944 if (level
>= KVM_PUT_RESET_STATE
) {
3945 kvm_msr_entry_add(cpu
, MSR_IA32_TSC
, env
->tsc
);
3946 kvm_msr_entry_add(cpu
, MSR_KVM_SYSTEM_TIME
, env
->system_time_msr
);
3947 kvm_msr_entry_add(cpu
, MSR_KVM_WALL_CLOCK
, env
->wall_clock_msr
);
3948 if (env
->features
[FEAT_KVM
] & (1 << KVM_FEATURE_ASYNC_PF_INT
)) {
3949 kvm_msr_entry_add(cpu
, MSR_KVM_ASYNC_PF_INT
, env
->async_pf_int_msr
);
3951 if (env
->features
[FEAT_KVM
] & (1 << KVM_FEATURE_ASYNC_PF
)) {
3952 kvm_msr_entry_add(cpu
, MSR_KVM_ASYNC_PF_EN
, env
->async_pf_en_msr
);
3954 if (env
->features
[FEAT_KVM
] & (1 << KVM_FEATURE_PV_EOI
)) {
3955 kvm_msr_entry_add(cpu
, MSR_KVM_PV_EOI_EN
, env
->pv_eoi_en_msr
);
3957 if (env
->features
[FEAT_KVM
] & (1 << KVM_FEATURE_STEAL_TIME
)) {
3958 kvm_msr_entry_add(cpu
, MSR_KVM_STEAL_TIME
, env
->steal_time_msr
);
3961 if (env
->features
[FEAT_KVM
] & (1 << KVM_FEATURE_POLL_CONTROL
)) {
3962 kvm_msr_entry_add(cpu
, MSR_KVM_POLL_CONTROL
, env
->poll_control_msr
);
3965 if (has_architectural_pmu_version
> 0) {
3966 if (has_architectural_pmu_version
> 1) {
3967 /* Stop the counter. */
3968 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_FIXED_CTR_CTRL
, 0);
3969 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_GLOBAL_CTRL
, 0);
3972 /* Set the counter values. */
3973 for (i
= 0; i
< num_architectural_pmu_fixed_counters
; i
++) {
3974 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_FIXED_CTR0
+ i
,
3975 env
->msr_fixed_counters
[i
]);
3977 for (i
= 0; i
< num_architectural_pmu_gp_counters
; i
++) {
3978 kvm_msr_entry_add(cpu
, MSR_P6_PERFCTR0
+ i
,
3979 env
->msr_gp_counters
[i
]);
3980 kvm_msr_entry_add(cpu
, MSR_P6_EVNTSEL0
+ i
,
3981 env
->msr_gp_evtsel
[i
]);
3983 if (has_architectural_pmu_version
> 1) {
3984 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_GLOBAL_STATUS
,
3985 env
->msr_global_status
);
3986 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_GLOBAL_OVF_CTRL
,
3987 env
->msr_global_ovf_ctrl
);
3989 /* Now start the PMU. */
3990 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_FIXED_CTR_CTRL
,
3991 env
->msr_fixed_ctr_ctrl
);
3992 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_GLOBAL_CTRL
,
3993 env
->msr_global_ctrl
);
3997 * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add,
3998 * only sync them to KVM on the first cpu
4000 if (current_cpu
== first_cpu
) {
4001 if (has_msr_hv_hypercall
) {
4002 kvm_msr_entry_add(cpu
, HV_X64_MSR_GUEST_OS_ID
,
4003 env
->msr_hv_guest_os_id
);
4004 kvm_msr_entry_add(cpu
, HV_X64_MSR_HYPERCALL
,
4005 env
->msr_hv_hypercall
);
4007 if (hyperv_feat_enabled(cpu
, HYPERV_FEAT_TIME
)) {
4008 kvm_msr_entry_add(cpu
, HV_X64_MSR_REFERENCE_TSC
,
4011 if (hyperv_feat_enabled(cpu
, HYPERV_FEAT_REENLIGHTENMENT
)) {
4012 kvm_msr_entry_add(cpu
, HV_X64_MSR_REENLIGHTENMENT_CONTROL
,
4013 env
->msr_hv_reenlightenment_control
);
4014 kvm_msr_entry_add(cpu
, HV_X64_MSR_TSC_EMULATION_CONTROL
,
4015 env
->msr_hv_tsc_emulation_control
);
4016 kvm_msr_entry_add(cpu
, HV_X64_MSR_TSC_EMULATION_STATUS
,
4017 env
->msr_hv_tsc_emulation_status
);
4019 #ifdef CONFIG_SYNDBG
4020 if (hyperv_feat_enabled(cpu
, HYPERV_FEAT_SYNDBG
) &&
4021 has_msr_hv_syndbg_options
) {
4022 kvm_msr_entry_add(cpu
, HV_X64_MSR_SYNDBG_OPTIONS
,
4023 hyperv_syndbg_query_options());
4027 if (hyperv_feat_enabled(cpu
, HYPERV_FEAT_VAPIC
)) {
4028 kvm_msr_entry_add(cpu
, HV_X64_MSR_APIC_ASSIST_PAGE
,
4031 if (has_msr_hv_crash
) {
4034 for (j
= 0; j
< HV_CRASH_PARAMS
; j
++)
4035 kvm_msr_entry_add(cpu
, HV_X64_MSR_CRASH_P0
+ j
,
4036 env
->msr_hv_crash_params
[j
]);
4038 kvm_msr_entry_add(cpu
, HV_X64_MSR_CRASH_CTL
, HV_CRASH_CTL_NOTIFY
);
4040 if (has_msr_hv_runtime
) {
4041 kvm_msr_entry_add(cpu
, HV_X64_MSR_VP_RUNTIME
, env
->msr_hv_runtime
);
4043 if (hyperv_feat_enabled(cpu
, HYPERV_FEAT_VPINDEX
)
4044 && hv_vpindex_settable
) {
4045 kvm_msr_entry_add(cpu
, HV_X64_MSR_VP_INDEX
,
4046 hyperv_vp_index(CPU(cpu
)));
4048 if (hyperv_feat_enabled(cpu
, HYPERV_FEAT_SYNIC
)) {
4051 kvm_msr_entry_add(cpu
, HV_X64_MSR_SVERSION
, HV_SYNIC_VERSION
);
4053 kvm_msr_entry_add(cpu
, HV_X64_MSR_SCONTROL
,
4054 env
->msr_hv_synic_control
);
4055 kvm_msr_entry_add(cpu
, HV_X64_MSR_SIEFP
,
4056 env
->msr_hv_synic_evt_page
);
4057 kvm_msr_entry_add(cpu
, HV_X64_MSR_SIMP
,
4058 env
->msr_hv_synic_msg_page
);
4060 for (j
= 0; j
< ARRAY_SIZE(env
->msr_hv_synic_sint
); j
++) {
4061 kvm_msr_entry_add(cpu
, HV_X64_MSR_SINT0
+ j
,
4062 env
->msr_hv_synic_sint
[j
]);
4065 if (has_msr_hv_stimer
) {
4068 for (j
= 0; j
< ARRAY_SIZE(env
->msr_hv_stimer_config
); j
++) {
4069 kvm_msr_entry_add(cpu
, HV_X64_MSR_STIMER0_CONFIG
+ j
* 2,
4070 env
->msr_hv_stimer_config
[j
]);
4073 for (j
= 0; j
< ARRAY_SIZE(env
->msr_hv_stimer_count
); j
++) {
4074 kvm_msr_entry_add(cpu
, HV_X64_MSR_STIMER0_COUNT
+ j
* 2,
4075 env
->msr_hv_stimer_count
[j
]);
4078 if (env
->features
[FEAT_1_EDX
] & CPUID_MTRR
) {
4079 uint64_t phys_mask
= MAKE_64BIT_MASK(0, cpu
->phys_bits
);
4081 kvm_msr_entry_add(cpu
, MSR_MTRRdefType
, env
->mtrr_deftype
);
4082 kvm_msr_entry_add(cpu
, MSR_MTRRfix64K_00000
, env
->mtrr_fixed
[0]);
4083 kvm_msr_entry_add(cpu
, MSR_MTRRfix16K_80000
, env
->mtrr_fixed
[1]);
4084 kvm_msr_entry_add(cpu
, MSR_MTRRfix16K_A0000
, env
->mtrr_fixed
[2]);
4085 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_C0000
, env
->mtrr_fixed
[3]);
4086 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_C8000
, env
->mtrr_fixed
[4]);
4087 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_D0000
, env
->mtrr_fixed
[5]);
4088 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_D8000
, env
->mtrr_fixed
[6]);
4089 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_E0000
, env
->mtrr_fixed
[7]);
4090 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_E8000
, env
->mtrr_fixed
[8]);
4091 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_F0000
, env
->mtrr_fixed
[9]);
4092 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_F8000
, env
->mtrr_fixed
[10]);
4093 for (i
= 0; i
< MSR_MTRRcap_VCNT
; i
++) {
4094 /* The CPU GPs if we write to a bit above the physical limit of
4095 * the host CPU (and KVM emulates that)
4097 uint64_t mask
= env
->mtrr_var
[i
].mask
;
4100 kvm_msr_entry_add(cpu
, MSR_MTRRphysBase(i
),
4101 env
->mtrr_var
[i
].base
);
4102 kvm_msr_entry_add(cpu
, MSR_MTRRphysMask(i
), mask
);
4105 if (env
->features
[FEAT_7_0_EBX
] & CPUID_7_0_EBX_INTEL_PT
) {
4106 int addr_num
= kvm_arch_get_supported_cpuid(kvm_state
,
4107 0x14, 1, R_EAX
) & 0x7;
4109 kvm_msr_entry_add(cpu
, MSR_IA32_RTIT_CTL
,
4110 env
->msr_rtit_ctrl
);
4111 kvm_msr_entry_add(cpu
, MSR_IA32_RTIT_STATUS
,
4112 env
->msr_rtit_status
);
4113 kvm_msr_entry_add(cpu
, MSR_IA32_RTIT_OUTPUT_BASE
,
4114 env
->msr_rtit_output_base
);
4115 kvm_msr_entry_add(cpu
, MSR_IA32_RTIT_OUTPUT_MASK
,
4116 env
->msr_rtit_output_mask
);
4117 kvm_msr_entry_add(cpu
, MSR_IA32_RTIT_CR3_MATCH
,
4118 env
->msr_rtit_cr3_match
);
4119 for (i
= 0; i
< addr_num
; i
++) {
4120 kvm_msr_entry_add(cpu
, MSR_IA32_RTIT_ADDR0_A
+ i
,
4121 env
->msr_rtit_addrs
[i
]);
4125 if (env
->features
[FEAT_7_0_ECX
] & CPUID_7_0_ECX_SGX_LC
) {
4126 kvm_msr_entry_add(cpu
, MSR_IA32_SGXLEPUBKEYHASH0
,
4127 env
->msr_ia32_sgxlepubkeyhash
[0]);
4128 kvm_msr_entry_add(cpu
, MSR_IA32_SGXLEPUBKEYHASH1
,
4129 env
->msr_ia32_sgxlepubkeyhash
[1]);
4130 kvm_msr_entry_add(cpu
, MSR_IA32_SGXLEPUBKEYHASH2
,
4131 env
->msr_ia32_sgxlepubkeyhash
[2]);
4132 kvm_msr_entry_add(cpu
, MSR_IA32_SGXLEPUBKEYHASH3
,
4133 env
->msr_ia32_sgxlepubkeyhash
[3]);
4136 if (env
->features
[FEAT_XSAVE
] & CPUID_D_1_EAX_XFD
) {
4137 kvm_msr_entry_add(cpu
, MSR_IA32_XFD
,
4139 kvm_msr_entry_add(cpu
, MSR_IA32_XFD_ERR
,
4143 if (kvm_enabled() && cpu
->enable_pmu
&&
4144 (env
->features
[FEAT_7_0_EDX
] & CPUID_7_0_EDX_ARCH_LBR
)) {
4149 * Only migrate Arch LBR states when the host Arch LBR depth
4150 * equals that of source guest's, this is to avoid mismatch
4151 * of guest/host config for the msr hence avoid unexpected
4154 ret
= kvm_get_one_msr(cpu
, MSR_ARCH_LBR_DEPTH
, &depth
);
4156 if (ret
== 1 && !!depth
&& depth
== env
->msr_lbr_depth
) {
4157 kvm_msr_entry_add(cpu
, MSR_ARCH_LBR_CTL
, env
->msr_lbr_ctl
);
4158 kvm_msr_entry_add(cpu
, MSR_ARCH_LBR_DEPTH
, env
->msr_lbr_depth
);
4160 for (i
= 0; i
< ARCH_LBR_NR_ENTRIES
; i
++) {
4161 if (!env
->lbr_records
[i
].from
) {
4164 kvm_msr_entry_add(cpu
, MSR_ARCH_LBR_FROM_0
+ i
,
4165 env
->lbr_records
[i
].from
);
4166 kvm_msr_entry_add(cpu
, MSR_ARCH_LBR_TO_0
+ i
,
4167 env
->lbr_records
[i
].to
);
4168 kvm_msr_entry_add(cpu
, MSR_ARCH_LBR_INFO_0
+ i
,
4169 env
->lbr_records
[i
].info
);
4174 /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
4175 * kvm_put_msr_feature_control. */
4179 kvm_msr_entry_add(cpu
, MSR_MCG_STATUS
, env
->mcg_status
);
4180 kvm_msr_entry_add(cpu
, MSR_MCG_CTL
, env
->mcg_ctl
);
4181 if (has_msr_mcg_ext_ctl
) {
4182 kvm_msr_entry_add(cpu
, MSR_MCG_EXT_CTL
, env
->mcg_ext_ctl
);
4184 for (i
= 0; i
< (env
->mcg_cap
& 0xff) * 4; i
++) {
4185 kvm_msr_entry_add(cpu
, MSR_MC0_CTL
+ i
, env
->mce_banks
[i
]);
4189 return kvm_buf_set_msrs(cpu
);
static int kvm_get_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    void *xsave = env->xsave_buf;
    unsigned long type;
    int ret;

    type = has_xsave2 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE;
    ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave);
    if (ret < 0) {
        return ret;
    }
    x86_cpu_xrstor_all_areas(cpu, xsave, env->xsave_buf_len);

    return 0;
}
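
/*
 * KVM_GET_XSAVE2 is preferred over KVM_GET_XSAVE when available because the
 * legacy ioctl is limited to a 4KiB kvm_xsave buffer; has_xsave2 is assumed
 * to have been set up elsewhere from the corresponding KVM capability, and
 * env->xsave_buf/xsave_buf_len are sized to match.
 */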
static int kvm_get_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!has_xcrs) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[i].xcr == 0) {
            env->xcr0 = xcrs.xcrs[i].value;
            break;
        }
    }
    return 0;
}
4235 static int kvm_get_sregs(X86CPU
*cpu
)
4237 CPUX86State
*env
= &cpu
->env
;
4238 struct kvm_sregs sregs
;
4241 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_GET_SREGS
, &sregs
);
4247 * The interrupt_bitmap is ignored because KVM_GET_SREGS is
4248 * always preceded by KVM_GET_VCPU_EVENTS.
4251 get_seg(&env
->segs
[R_CS
], &sregs
.cs
);
4252 get_seg(&env
->segs
[R_DS
], &sregs
.ds
);
4253 get_seg(&env
->segs
[R_ES
], &sregs
.es
);
4254 get_seg(&env
->segs
[R_FS
], &sregs
.fs
);
4255 get_seg(&env
->segs
[R_GS
], &sregs
.gs
);
4256 get_seg(&env
->segs
[R_SS
], &sregs
.ss
);
4258 get_seg(&env
->tr
, &sregs
.tr
);
4259 get_seg(&env
->ldt
, &sregs
.ldt
);
4261 env
->idt
.limit
= sregs
.idt
.limit
;
4262 env
->idt
.base
= sregs
.idt
.base
;
4263 env
->gdt
.limit
= sregs
.gdt
.limit
;
4264 env
->gdt
.base
= sregs
.gdt
.base
;
4266 env
->cr
[0] = sregs
.cr0
;
4267 env
->cr
[2] = sregs
.cr2
;
4268 env
->cr
[3] = sregs
.cr3
;
4269 env
->cr
[4] = sregs
.cr4
;
4271 env
->efer
= sregs
.efer
;
4272 if (sev_es_enabled() && env
->efer
& MSR_EFER_LME
&&
4273 env
->cr
[0] & CR0_PG_MASK
) {
4274 env
->efer
|= MSR_EFER_LMA
;
4277 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
4278 x86_update_hflags(env
);
4283 static int kvm_get_sregs2(X86CPU
*cpu
)
4285 CPUX86State
*env
= &cpu
->env
;
4286 struct kvm_sregs2 sregs
;
4289 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_GET_SREGS2
, &sregs
);
4294 get_seg(&env
->segs
[R_CS
], &sregs
.cs
);
4295 get_seg(&env
->segs
[R_DS
], &sregs
.ds
);
4296 get_seg(&env
->segs
[R_ES
], &sregs
.es
);
4297 get_seg(&env
->segs
[R_FS
], &sregs
.fs
);
4298 get_seg(&env
->segs
[R_GS
], &sregs
.gs
);
4299 get_seg(&env
->segs
[R_SS
], &sregs
.ss
);
4301 get_seg(&env
->tr
, &sregs
.tr
);
4302 get_seg(&env
->ldt
, &sregs
.ldt
);
4304 env
->idt
.limit
= sregs
.idt
.limit
;
4305 env
->idt
.base
= sregs
.idt
.base
;
4306 env
->gdt
.limit
= sregs
.gdt
.limit
;
4307 env
->gdt
.base
= sregs
.gdt
.base
;
4309 env
->cr
[0] = sregs
.cr0
;
4310 env
->cr
[2] = sregs
.cr2
;
4311 env
->cr
[3] = sregs
.cr3
;
4312 env
->cr
[4] = sregs
.cr4
;
4314 env
->efer
= sregs
.efer
;
4315 if (sev_es_enabled() && env
->efer
& MSR_EFER_LME
&&
4316 env
->cr
[0] & CR0_PG_MASK
) {
4317 env
->efer
|= MSR_EFER_LMA
;
4320 env
->pdptrs_valid
= sregs
.flags
& KVM_SREGS2_FLAGS_PDPTRS_VALID
;
4322 if (env
->pdptrs_valid
) {
4323 for (i
= 0; i
< 4; i
++) {
4324 env
->pdptrs
[i
] = sregs
.pdptrs
[i
];
4328 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
4329 x86_update_hflags(env
);
static int kvm_get_msrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
    int ret, i;
    uint64_t mtrr_top_bits;

    kvm_msr_buf_reset(cpu);

    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
    kvm_msr_entry_add(cpu, MSR_PAT, 0);
    if (has_msr_star) {
        kvm_msr_entry_add(cpu, MSR_STAR, 0);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
    }
    if (has_msr_tsc_aux) {
        kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
    }
    if (has_msr_tsc_deadline) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
    }
    if (has_msr_smbase) {
        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
    }
    if (has_msr_smi_count) {
        kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
    }
    if (has_msr_feature_control) {
        kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
    }
    if (has_msr_pkrs) {
        kvm_msr_entry_add(cpu, MSR_IA32_PKRS, 0);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
    }
    if (has_msr_xss) {
        kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
    }
    if (has_msr_umwait) {
        kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, 0);
    }
    if (has_msr_spec_ctrl) {
        kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
    }
    if (has_tsc_scale_msr) {
        kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, 0);
    }
    if (has_msr_tsx_ctrl) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, 0);
    }
    if (has_msr_virt_ssbd) {
        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
    }
    if (!env->tsc_valid) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
        env->tsc_valid = !runstate_is_running();
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
        kvm_msr_entry_add(cpu, MSR_FMASK, 0);
        kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
        if (env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED) {
            kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP0, 0);
            kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP1, 0);
            kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP2, 0);
            kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP3, 0);
            kvm_msr_entry_add(cpu, MSR_IA32_FRED_STKLVLS, 0);
            kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP1, 0);
            kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP2, 0);
            kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP3, 0);
            kvm_msr_entry_add(cpu, MSR_IA32_FRED_CONFIG, 0);
        }
    }
#endif
    kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
    kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
        kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
        kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
        kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
        kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
        kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
    }
    if (has_architectural_pmu_version > 0) {
        if (has_architectural_pmu_version > 1) {
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
        }
        for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
        }
        for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
            kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
            kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
        }
    }

    if (env->mcg_cap) {
        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
        if (has_msr_mcg_ext_ctl) {
            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
        }
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
        }
    }

    if (has_msr_hv_hypercall) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0);
    }
    if (has_msr_hv_syndbg_options) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS, 0);
    }
    if (has_msr_hv_crash) {
        int j;

        for (j = 0; j < HV_CRASH_PARAMS; j++) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
        }
    }
    if (has_msr_hv_runtime) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        uint32_t msr;

        kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
        for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (has_msr_hv_stimer) {
        uint32_t msr;

        for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
             msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
        kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
        for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
            kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
            kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
        }
    }

    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
        int addr_num =
            kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;

        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
        for (i = 0; i < addr_num; i++) {
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
        }
    }

    if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) {
        kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, 0);
    }

    if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) {
        kvm_msr_entry_add(cpu, MSR_IA32_XFD, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0);
    }

    if (kvm_enabled() && cpu->enable_pmu &&
        (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
        uint64_t depth;

        ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
        if (ret == 1 && depth == ARCH_LBR_NR_ENTRIES) {
            kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, 0);
            kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, 0);

            for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i, 0);
                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i, 0);
                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i, 0);
            }
        }
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    if (ret < cpu->kvm_msr_buf->nmsrs) {
        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
        error_report("error: failed to get MSR 0x%" PRIx32,
                     (uint32_t)e->index);
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);
    /*
     * MTRR masks: Each mask consists of 5 parts
     * a  10..0: must be zero
     * b  11   : valid bit
     * c n-1.12: actual mask bits
     * d  51..n: reserved must be zero
     * e  63.52: reserved must be zero
     *
     * 'n' is the number of physical bits supported by the CPU and is
     * apparently always <= 52.  We know our 'n' but don't know what
     * the destination's 'n' is; it might be smaller, in which case
     * it masks (c) on loading. It might be larger, in which case
     * we fill 'd' so that d..c is consistent irrespective of the 'n'
     * we're migrating to.
     */
    if (cpu->fill_mtrr_mask) {
        QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
        assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
        mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
    } else {
        mtrr_top_bits = 0;
    }

    for (i = 0; i < ret; i++) {
        uint32_t index = msrs[i].index;
        switch (index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_PAT:
            env->pat = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
        case MSR_IA32_FRED_RSP0:
            env->fred_rsp0 = msrs[i].data;
            break;
        case MSR_IA32_FRED_RSP1:
            env->fred_rsp1 = msrs[i].data;
            break;
        case MSR_IA32_FRED_RSP2:
            env->fred_rsp2 = msrs[i].data;
            break;
        case MSR_IA32_FRED_RSP3:
            env->fred_rsp3 = msrs[i].data;
            break;
        case MSR_IA32_FRED_STKLVLS:
            env->fred_stklvls = msrs[i].data;
            break;
        case MSR_IA32_FRED_SSP1:
            env->fred_ssp1 = msrs[i].data;
            break;
        case MSR_IA32_FRED_SSP2:
            env->fred_ssp2 = msrs[i].data;
            break;
        case MSR_IA32_FRED_SSP3:
            env->fred_ssp3 = msrs[i].data;
            break;
        case MSR_IA32_FRED_CONFIG:
            env->fred_config = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_TSC_AUX:
            env->tsc_aux = msrs[i].data;
            break;
        case MSR_TSC_ADJUST:
            env->tsc_adjust = msrs[i].data;
            break;
        case MSR_IA32_TSCDEADLINE:
            env->tsc_deadline = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
        case MSR_MCG_EXT_CTL:
            env->mcg_ext_ctl = msrs[i].data;
            break;
        case MSR_IA32_MISC_ENABLE:
            env->msr_ia32_misc_enable = msrs[i].data;
            break;
        case MSR_IA32_SMBASE:
            env->smbase = msrs[i].data;
            break;
        case MSR_SMI_COUNT:
            env->msr_smi_count = msrs[i].data;
            break;
        case MSR_IA32_FEATURE_CONTROL:
            env->msr_ia32_feature_control = msrs[i].data;
            break;
        case MSR_IA32_BNDCFGS:
            env->msr_bndcfgs = msrs[i].data;
            break;
        case MSR_IA32_XSS:
            env->xss = msrs[i].data;
            break;
        case MSR_IA32_UMWAIT_CONTROL:
            env->umwait = msrs[i].data;
            break;
        case MSR_IA32_PKRS:
            env->pkrs = msrs[i].data;
            break;
        default:
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
            break;
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
        case MSR_KVM_ASYNC_PF_INT:
            env->async_pf_int_msr = msrs[i].data;
            break;
        case MSR_KVM_PV_EOI_EN:
            env->pv_eoi_en_msr = msrs[i].data;
            break;
        case MSR_KVM_STEAL_TIME:
            env->steal_time_msr = msrs[i].data;
            break;
        case MSR_KVM_POLL_CONTROL: {
            env->poll_control_msr = msrs[i].data;
            break;
        }
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
            env->msr_fixed_ctr_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_CTRL:
            env->msr_global_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
            env->msr_global_status = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
            env->msr_global_ovf_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
            env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
            break;
        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
            break;
        case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
            break;
        case HV_X64_MSR_HYPERCALL:
            env->msr_hv_hypercall = msrs[i].data;
            break;
        case HV_X64_MSR_GUEST_OS_ID:
            env->msr_hv_guest_os_id = msrs[i].data;
            break;
        case HV_X64_MSR_APIC_ASSIST_PAGE:
            env->msr_hv_vapic = msrs[i].data;
            break;
        case HV_X64_MSR_REFERENCE_TSC:
            env->msr_hv_tsc = msrs[i].data;
            break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
            env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
            break;
        case HV_X64_MSR_VP_RUNTIME:
            env->msr_hv_runtime = msrs[i].data;
            break;
        case HV_X64_MSR_SCONTROL:
            env->msr_hv_synic_control = msrs[i].data;
            break;
        case HV_X64_MSR_SIEFP:
            env->msr_hv_synic_evt_page = msrs[i].data;
            break;
        case HV_X64_MSR_SIMP:
            env->msr_hv_synic_msg_page = msrs[i].data;
            break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
            env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG:
            env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
                                msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT:
            env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
                                msrs[i].data;
            break;
        case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
            env->msr_hv_reenlightenment_control = msrs[i].data;
            break;
        case HV_X64_MSR_TSC_EMULATION_CONTROL:
            env->msr_hv_tsc_emulation_control = msrs[i].data;
            break;
        case HV_X64_MSR_TSC_EMULATION_STATUS:
            env->msr_hv_tsc_emulation_status = msrs[i].data;
            break;
        case HV_X64_MSR_SYNDBG_OPTIONS:
            env->msr_hv_syndbg_options = msrs[i].data;
            break;
        case MSR_MTRRdefType:
            env->mtrr_deftype = msrs[i].data;
            break;
        case MSR_MTRRfix64K_00000:
            env->mtrr_fixed[0] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_80000:
            env->mtrr_fixed[1] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_A0000:
            env->mtrr_fixed[2] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C0000:
            env->mtrr_fixed[3] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C8000:
            env->mtrr_fixed[4] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D0000:
            env->mtrr_fixed[5] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D8000:
            env->mtrr_fixed[6] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E0000:
            env->mtrr_fixed[7] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E8000:
            env->mtrr_fixed[8] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F0000:
            env->mtrr_fixed[9] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F8000:
            env->mtrr_fixed[10] = msrs[i].data;
            break;
        case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
            if (index & 1) {
                env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
                                                               mtrr_top_bits;
            } else {
                env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
            }
            break;
        case MSR_IA32_SPEC_CTRL:
            env->spec_ctrl = msrs[i].data;
            break;
        case MSR_AMD64_TSC_RATIO:
            env->amd_tsc_scale_msr = msrs[i].data;
            break;
        case MSR_IA32_TSX_CTRL:
            env->tsx_ctrl = msrs[i].data;
            break;
        case MSR_VIRT_SSBD:
            env->virt_ssbd = msrs[i].data;
            break;
        case MSR_IA32_RTIT_CTL:
            env->msr_rtit_ctrl = msrs[i].data;
            break;
        case MSR_IA32_RTIT_STATUS:
            env->msr_rtit_status = msrs[i].data;
            break;
        case MSR_IA32_RTIT_OUTPUT_BASE:
            env->msr_rtit_output_base = msrs[i].data;
            break;
        case MSR_IA32_RTIT_OUTPUT_MASK:
            env->msr_rtit_output_mask = msrs[i].data;
            break;
        case MSR_IA32_RTIT_CR3_MATCH:
            env->msr_rtit_cr3_match = msrs[i].data;
            break;
        case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
            env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
            break;
        case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
            env->msr_ia32_sgxlepubkeyhash[index - MSR_IA32_SGXLEPUBKEYHASH0] =
                           msrs[i].data;
            break;
        case MSR_IA32_XFD:
            env->msr_xfd = msrs[i].data;
            break;
        case MSR_IA32_XFD_ERR:
            env->msr_xfd_err = msrs[i].data;
            break;
        case MSR_ARCH_LBR_CTL:
            env->msr_lbr_ctl = msrs[i].data;
            break;
        case MSR_ARCH_LBR_DEPTH:
            env->msr_lbr_depth = msrs[i].data;
            break;
        case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
            env->lbr_records[index - MSR_ARCH_LBR_FROM_0].from = msrs[i].data;
            break;
        case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
            env->lbr_records[index - MSR_ARCH_LBR_TO_0].to = msrs[i].data;
            break;
        case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
            env->lbr_records[index - MSR_ARCH_LBR_INFO_0].info = msrs[i].data;
            break;
        }
    }

    return 0;
}
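/*
 * Illustrative note on the fill_mtrr_mask handling in kvm_get_msrs() above
 * (example numbers only, not taken from the code): for a CPU with
 * phys_bits = 40, the variable-range MTRR masks read back from KVM are
 * padded with MAKE_64BIT_MASK(40, 52 - 40) = 0x000FFF0000000000, i.e. the
 * reserved bits 51..40 are forced to 1 in the saved mask, so a migration
 * destination with a different physical address width still loads a
 * consistent mask value.
 */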
static int kvm_put_mp_state(X86CPU *cpu)
{
    struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
}
static int kvm_get_mp_state(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}
static int kvm_get_apic(X86CPU *cpu)
{
    DeviceState *apic = cpu->apic_state;
    struct kvm_lapic_state kapic;
    int ret;

    if (apic && kvm_irqchip_in_kernel()) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
        if (ret < 0) {
            return ret;
        }

        kvm_get_apic_state(apic, &kapic);
    }
    return 0;
}
static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events = {};

    if (has_exception_payload) {
        events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
        events.exception.pending = env->exception_pending;
        events.exception_has_payload = env->exception_has_payload;
        events.exception_payload = env->exception_payload;
    }
    events.exception.nr = env->exception_nr;
    events.exception.injected = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);

    events.sipi_vector = env->sipi_vector;

    if (has_msr_smbase) {
        events.flags |= KVM_VCPUEVENT_VALID_SMM;
        events.smi.smm = !!(env->hflags & HF_SMM_MASK);
        events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
        if (kvm_irqchip_in_kernel()) {
            /* As soon as these are moved to the kernel, remove them
             * from cs->interrupt_request.
             */
            events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
            events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
            cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
        } else {
            /* Keep these in cs->interrupt_request. */
            events.smi.pending = 0;
            events.smi.latched_init = 0;
        }
    }

    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
        if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
            events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
        }
    }

    if (has_triple_fault_event) {
        events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
        events.triple_fault.pending = env->triple_fault_pending;
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
}
static int kvm_get_vcpu_events(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    memset(&events, 0, sizeof(events));
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
        env->exception_pending = events.exception.pending;
        env->exception_has_payload = events.exception_has_payload;
        env->exception_payload = events.exception_payload;
    } else {
        env->exception_pending = 0;
        env->exception_has_payload = false;
    }
    env->exception_injected = events.exception.injected;
    env->exception_nr =
        (env->exception_pending || env->exception_injected) ?
        events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
        if (events.smi.smm) {
            env->hflags |= HF_SMM_MASK;
        } else {
            env->hflags &= ~HF_SMM_MASK;
        }
        if (events.smi.pending) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        }
        if (events.smi.smm_inside_nmi) {
            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
        } else {
            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
        }
        if (events.smi.latched_init) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        }
    }

    if (events.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) {
        env->triple_fault_pending = events.triple_fault.pending;
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}
static int kvm_put_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i;

    memset(&dbgregs, 0, sizeof(dbgregs));
    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
}
static int kvm_get_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}
static int kvm_put_nested_state(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int max_nested_state_len = kvm_max_nested_state_length();

    if (!env->nested_state) {
        return 0;
    }

    /*
     * Copy flags that are affected by reset from env->hflags and env->hflags2.
     */
    if (env->hflags & HF_GUEST_MASK) {
        env->nested_state->flags |= KVM_STATE_NESTED_GUEST_MODE;
    } else {
        env->nested_state->flags &= ~KVM_STATE_NESTED_GUEST_MODE;
    }

    /* Don't set KVM_STATE_NESTED_GIF_SET on VMX as it is illegal */
    if (cpu_has_svm(env) && (env->hflags2 & HF2_GIF_MASK)) {
        env->nested_state->flags |= KVM_STATE_NESTED_GIF_SET;
    } else {
        env->nested_state->flags &= ~KVM_STATE_NESTED_GIF_SET;
    }

    assert(env->nested_state->size <= max_nested_state_len);
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
}
static int kvm_get_nested_state(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int max_nested_state_len = kvm_max_nested_state_length();
    int ret;

    if (!env->nested_state) {
        return 0;
    }

    /*
     * It is possible that migration restored a smaller size into
     * nested_state->hdr.size than what our kernel supports.
     * We preserve the migration origin's nested_state->hdr.size for
     * the call to KVM_SET_NESTED_STATE, but want the next call to
     * KVM_GET_NESTED_STATE to use the maximum size our kernel supports.
     */
    env->nested_state->size = max_nested_state_len;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state);
    if (ret < 0) {
        return ret;
    }

    /*
     * Copy flags that are affected by reset to env->hflags and env->hflags2.
     */
    if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
        env->hflags |= HF_GUEST_MASK;
    } else {
        env->hflags &= ~HF_GUEST_MASK;
    }

    /* Keep HF2_GIF_MASK set on !SVM as x86_cpu_pending_interrupt() needs it */
    if (cpu_has_svm(env)) {
        if (env->nested_state->flags & KVM_STATE_NESTED_GIF_SET) {
            env->hflags2 |= HF2_GIF_MASK;
        } else {
            env->hflags2 &= ~HF2_GIF_MASK;
        }
    }

    return ret;
}
int kvm_arch_put_registers(CPUState *cpu, int level, Error **errp)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    /*
     * Put MSR_IA32_FEATURE_CONTROL first, this ensures the VM gets out of VMX
     * root operation upon vCPU reset. kvm_put_msr_feature_control() should also
     * precede kvm_put_nested_state() when 'real' nested state is set.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_msr_feature_control(x86_cpu);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Failed to set feature control MSR");
            return ret;
        }
    }

    /* must be before kvm_put_nested_state so that EFER.SVME is set */
    ret = has_sregs2 ? kvm_put_sregs2(x86_cpu) : kvm_put_sregs(x86_cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to set special registers");
        return ret;
    }

    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_nested_state(x86_cpu);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Failed to set nested state");
            return ret;
        }
    }

    if (level == KVM_PUT_FULL_STATE) {
        /* We don't check for kvm_arch_set_tsc_khz() errors here,
         * because TSC frequency mismatch shouldn't abort migration,
         * unless the user explicitly asked for a more strict TSC
         * setting (e.g. using an explicit "tsc-freq" option).
         */
        kvm_arch_set_tsc_khz(cpu);
    }

#ifdef CONFIG_XEN_EMU
    if (xen_mode == XEN_EMULATE && level == KVM_PUT_FULL_STATE) {
        ret = kvm_put_xen_state(cpu);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Failed to set Xen state");
            return ret;
        }
    }
#endif

    ret = kvm_getput_regs(x86_cpu, 1);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to set general purpose registers");
        return ret;
    }
    ret = kvm_put_xsave(x86_cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to set XSAVE");
        return ret;
    }
    ret = kvm_put_xcrs(x86_cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to set XCRs");
        return ret;
    }
    ret = kvm_put_msrs(x86_cpu, level);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to set MSRs");
        return ret;
    }
    ret = kvm_put_vcpu_events(x86_cpu, level);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to set vCPU events");
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(x86_cpu);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Failed to set MP state");
            return ret;
        }
    }

    ret = kvm_put_tscdeadline_msr(x86_cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to set TSC deadline MSR");
        return ret;
    }
    ret = kvm_put_debugregs(x86_cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to set debug registers");
        return ret;
    }
    return 0;
}
int kvm_arch_get_registers(CPUState *cs, Error **errp)
{
    X86CPU *cpu = X86_CPU(cs);
    int ret;

    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));

    ret = kvm_get_vcpu_events(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to get vCPU events");
        goto out;
    }
    /*
     * KVM_GET_MPSTATE can modify CS and RIP, call it before
     * KVM_GET_REGS and KVM_GET_SREGS.
     */
    ret = kvm_get_mp_state(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to get MP state");
        goto out;
    }
    ret = kvm_getput_regs(cpu, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to get general purpose registers");
        goto out;
    }
    ret = kvm_get_xsave(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to get XSAVE");
        goto out;
    }
    ret = kvm_get_xcrs(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to get XCRs");
        goto out;
    }
    ret = has_sregs2 ? kvm_get_sregs2(cpu) : kvm_get_sregs(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to get special registers");
        goto out;
    }
    ret = kvm_get_msrs(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to get MSRs");
        goto out;
    }
    ret = kvm_get_apic(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to get APIC");
        goto out;
    }
    ret = kvm_get_debugregs(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to get debug registers");
        goto out;
    }
    ret = kvm_get_nested_state(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to get nested state");
        goto out;
    }
#ifdef CONFIG_XEN_EMU
    if (xen_mode == XEN_EMULATE) {
        ret = kvm_get_xen_state(cs);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Failed to get Xen state");
            goto out;
        }
    }
#endif
    ret = 0;
 out:
    cpu_sync_bndcs_hflags(&cpu->env);
    return ret;
}
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret;

    /* Inject NMI and SMI */
    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            bql_lock();
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            bql_unlock();
            DPRINTF("injected NMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                        strerror(-ret));
            }
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            bql_lock();
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
            bql_unlock();
            DPRINTF("injected SMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
                        strerror(-ret));
            }
        }
    }

    if (!kvm_pic_in_kernel()) {
        bql_lock();
    }

    /* Force the VCPU out of its inner loop to process any INIT requests
     * or (for userspace APIC, but it is cheap to combine the checks here)
     * pending TPR access reports.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
        }
    }

    if (!kvm_pic_in_kernel()) {
        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit.  This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts. */
        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);

        bql_unlock();
    }
}
static void kvm_rate_limit_on_bus_lock(void)
{
    uint64_t delay_ns = ratelimit_calculate_delay(&bus_lock_ratelimit_ctrl, 1);

    if (delay_ns) {
        g_usleep(delay_ns / SCALE_US);
    }
}
MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    if (run->flags & KVM_RUN_X86_SMM) {
        env->hflags |= HF_SMM_MASK;
    } else {
        env->hflags &= ~HF_SMM_MASK;
    }
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }
    if (run->flags & KVM_RUN_X86_BUS_LOCK) {
        kvm_rate_limit_on_bus_lock();
    }

#ifdef CONFIG_XEN_EMU
    /*
     * If the callback is asserted as a GSI (or PCI INTx) then check if
     * vcpu_info->evtchn_upcall_pending has been cleared, and deassert
     * the callback IRQ if so. Ideally we could hook into the PIC/IOAPIC
     * EOI and only resample then, exactly how the VFIO eventfd pairs
     * are designed to work for level triggered interrupts.
     */
    if (x86_cpu->env.xen_callback_asserted) {
        kvm_xen_maybe_deassert_callback(cpu);
    }
#endif

    /* We need to protect the apic state against concurrent accesses from
     * different threads in case the userspace irqchip is used. */
    if (!kvm_irqchip_in_kernel()) {
        bql_lock();
    }
    cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
    cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
    if (!kvm_irqchip_in_kernel()) {
        bql_unlock();
    }
    return cpu_get_mem_attrs(env);
}
int kvm_arch_process_async_events(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(cs);

        if (env->exception_nr == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            cs->exit_request = 1;
            return 0;
        }
        kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
        env->has_error_code = 0;

        cs->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_init(cpu);
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_sipi(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
        kvm_cpu_synchronize_state(cs);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return cs->halted;
}
static int kvm_handle_halt(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}
static int kvm_handle_tpr_access(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
                                                           : TPR_ACCESS_READ);
    return 1;
}
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0)) {
        return -EINVAL;
    }
    if (int3 != 0xcc) {
        return 0;
    }
    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}

int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}
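/*
 * Illustrative note on the length/alignment checks above (example addresses
 * only, not taken from the code): x86 debug registers require watchpoints to
 * be naturally aligned, which is what the "addr & (len - 1)" test enforces.
 * A 4-byte watchpoint at 0x1004 passes (0x1004 & 0x3 == 0), while one at
 * 0x1006 is rejected (0x1006 & 0x3 == 2). GDB_BREAKPOINT_HW always uses
 * len = 1, so it never fails the alignment check.
 */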
static CPUWatchpoint hw_watchpoint;

static int kvm_handle_debug(X86CPU *cpu,
                            struct kvm_debug_exit_arch *arch_info)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    int n;

    if (arch_info->exception == EXCP01_DB) {
        if (arch_info->dr6 & DR6_BS) {
            if (cs->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cs);
        assert(env->exception_nr == -1);

        /* pass to guest */
        kvm_queue_exception(env, arch_info->exception,
                            arch_info->exception == EXCP01_DB,
                            arch_info->dr6);
        env->has_error_code = 0;
    }

    return ret;
}
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(cpu)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
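/*
 * Illustrative note (example values only): for a single 4-byte write
 * watchpoint in slot 0, the loop above builds
 *
 *     debugreg[7] = 0x0600 | (2 << 0) | (0x1 << 16) | (0x3 << 18) = 0x000D0602
 *
 * i.e. G0 set, R/W0 = write, LEN0 = 4 bytes, with the GE/LE bits kept from
 * the 0x0600 base value.
 */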
static bool kvm_install_msr_filters(KVMState *s)
{
    uint64_t zero = 0;
    struct kvm_msr_filter filter = {
        .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
    };
    int r, i, j = 0;

    for (i = 0; i < KVM_MSR_FILTER_MAX_RANGES; i++) {
        KVMMSRHandlers *handler = &msr_handlers[i];
        if (handler->msr) {
            struct kvm_msr_filter_range *range = &filter.ranges[j++];

            *range = (struct kvm_msr_filter_range) {
                .flags = 0,
                .nmsrs = 1,
                .base = handler->msr,
                .bitmap = (__u8 *)&zero,
            };

            if (handler->rdmsr) {
                range->flags |= KVM_MSR_FILTER_READ;
            }

            if (handler->wrmsr) {
                range->flags |= KVM_MSR_FILTER_WRITE;
            }
        }
    }

    r = kvm_vm_ioctl(s, KVM_X86_SET_MSR_FILTER, &filter);
    if (r) {
        return false;
    }

    return true;
}

static bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr,
                           QEMUWRMSRHandler *wrmsr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
        if (!msr_handlers[i].msr) {
            msr_handlers[i] = (KVMMSRHandlers) {
                .msr = msr,
                .rdmsr = rdmsr,
                .wrmsr = wrmsr,
            };

            if (!kvm_install_msr_filters(s)) {
                msr_handlers[i] = (KVMMSRHandlers) { };
                return false;
            }

            return true;
        }
    }

    return false;
}
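/*
 * Usage sketch for kvm_filter_msr() above (hypothetical MSR and handler
 * names, shown only to illustrate the registration flow): a caller installs
 * a read and/or write callback for one MSR, after which KVM exits to
 * userspace with KVM_EXIT_X86_RDMSR / KVM_EXIT_X86_WRMSR for that MSR:
 *
 *     static bool example_rdmsr(X86CPU *cpu, uint32_t msr, uint64_t *val)
 *     {
 *         *val = 0;         // value returned to the guest
 *         return true;      // returning false makes KVM inject #GP
 *     }
 *
 *     if (!kvm_filter_msr(s, MSR_EXAMPLE, example_rdmsr, NULL)) {
 *         // filtering unavailable; fall back or report an error
 *     }
 */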
static int kvm_handle_rdmsr(X86CPU *cpu, struct kvm_run *run)
{
    bool r;
    int i;

    for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
        KVMMSRHandlers *handler = &msr_handlers[i];
        if (run->msr.index == handler->msr) {
            if (handler->rdmsr) {
                r = handler->rdmsr(cpu, handler->msr,
                                   (uint64_t *)&run->msr.data);
                run->msr.error = r ? 0 : 1;
                return 0;
            }
        }
    }

    g_assert_not_reached();
}

static int kvm_handle_wrmsr(X86CPU *cpu, struct kvm_run *run)
{
    bool r;
    int i;

    for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
        KVMMSRHandlers *handler = &msr_handlers[i];
        if (run->msr.index == handler->msr) {
            if (handler->wrmsr) {
                r = handler->wrmsr(cpu, handler->msr, run->msr.data);
                run->msr.error = r ? 0 : 1;
                return 0;
            }
        }
    }

    g_assert_not_reached();
}
static bool has_sgx_provisioning;

static bool __kvm_enable_sgx_provisioning(KVMState *s)
{
    int fd, ret;

    if (!kvm_vm_check_extension(s, KVM_CAP_SGX_ATTRIBUTE)) {
        return false;
    }

    fd = qemu_open_old("/dev/sgx_provision", O_RDONLY);
    if (fd < 0) {
        return false;
    }

    ret = kvm_vm_enable_cap(s, KVM_CAP_SGX_ATTRIBUTE, 0, fd);
    if (ret) {
        error_report("Could not enable SGX PROVISIONKEY: %s", strerror(-ret));
        exit(1);
    }
    close(fd);
    return true;
}

bool kvm_enable_sgx_provisioning(KVMState *s)
{
    return MEMORIZE(__kvm_enable_sgx_provisioning(s), has_sgx_provisioning);
}
static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}
 * Currently the handling here only supports use of KVM_HC_MAP_GPA_RANGE
 * to service guest-initiated memory attribute update requests so that
 * KVM_SET_MEMORY_ATTRIBUTES can update whether or not a page should be
 * backed by the private memory pool provided by guest_memfd, and as such
 * is only applicable to guest_memfd-backed guests (e.g. SNP/TDX).
 *
 * Other use-cases for KVM_HC_MAP_GPA_RANGE, such as for SEV live
 * migration, are not implemented here currently.
 *
 * For the guest_memfd use-case, these exits will generally be synthesized
 * by KVM based on platform-specific hypercalls, like GHCB requests in the
 * case of SEV-SNP, and not issued directly within the guest through the
 * KVM_HC_MAP_GPA_RANGE hypercall. So in this case, KVM_HC_MAP_GPA_RANGE is
 * not actually advertised to guests via the KVM CPUID feature bit, as
 * opposed to SEV live migration where it would be. Since it is unlikely the
 * SEV live migration use-case would be useful for guest_memfd-backed guests,
 * because private/shared page tracking is already provided through other
 * means, these 2 use-cases should be treated as being mutually exclusive.
 */
static int kvm_handle_hc_map_gpa_range(struct kvm_run *run)
{
    uint64_t gpa, size, attributes;

    if (!machine_require_guest_memfd(current_machine))
        return -EINVAL;

    gpa = run->hypercall.args[0];
    size = run->hypercall.args[1] * TARGET_PAGE_SIZE;
    attributes = run->hypercall.args[2];

    trace_kvm_hc_map_gpa_range(gpa, size, attributes, run->hypercall.flags);

    return kvm_convert_memory(gpa, size, attributes & KVM_MAP_GPA_RANGE_ENCRYPTED);
}

static int kvm_handle_hypercall(struct kvm_run *run)
{
    if (run->hypercall.nr == KVM_HC_MAP_GPA_RANGE)
        return kvm_handle_hc_map_gpa_range(run);

    return -EINVAL;
}
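/*
 * Illustrative note on the hypercall argument layout above (example numbers
 * only, not from the code): a KVM_HC_MAP_GPA_RANGE exit with args[0] =
 * 0x100000, args[1] = 16 and KVM_MAP_GPA_RANGE_ENCRYPTED set in args[2]
 * requests that the 16 pages (16 * TARGET_PAGE_SIZE = 64KiB with 4KiB pages)
 * starting at GPA 0x100000 be converted to private guest_memfd-backed
 * memory; with the bit clear the same range would be converted to shared.
 */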
#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    X86CPU *cpu = X86_CPU(cs);
    uint64_t code;
    int ret;
    bool ctx_invalid;
    KVMState *state;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        bql_lock();
        ret = kvm_handle_halt(cpu);
        bql_unlock();
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_TPR_ACCESS:
        bql_lock();
        ret = kvm_handle_tpr_access(cpu);
        bql_unlock();
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure can be most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode\n"
                    "which is not supported on less recent Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        bql_lock();
        ret = kvm_handle_debug(cpu, &run->debug.arch);
        bql_unlock();
        break;
    case KVM_EXIT_HYPERV:
        ret = kvm_hv_handle_exit(cpu, &run->hyperv);
        break;
    case KVM_EXIT_IOAPIC_EOI:
        ioapic_eoi_broadcast(run->eoi.vector);
        ret = 0;
        break;
    case KVM_EXIT_X86_BUS_LOCK:
        /* already handled in kvm_arch_post_run */
        ret = 0;
        break;
    case KVM_EXIT_NOTIFY:
        ctx_invalid = !!(run->notify.flags & KVM_NOTIFY_CONTEXT_INVALID);
        state = KVM_STATE(current_accel());
        if (ctx_invalid ||
            state->notify_vmexit == NOTIFY_VMEXIT_OPTION_INTERNAL_ERROR) {
            warn_report("KVM internal error: Encountered a notify exit "
                        "with invalid context in guest.");
            ret = -1;
        } else {
            warn_report_once("KVM: Encountered a notify exit with valid "
                             "context in guest. "
                             "The guest could be misbehaving.");
            ret = 0;
        }
        break;
    case KVM_EXIT_X86_RDMSR:
        /* We only enable MSR filtering, any other exit is bogus */
        assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
        ret = kvm_handle_rdmsr(cpu, run);
        break;
    case KVM_EXIT_X86_WRMSR:
        /* We only enable MSR filtering, any other exit is bogus */
        assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
        ret = kvm_handle_wrmsr(cpu, run);
        break;
#ifdef CONFIG_XEN_EMU
    case KVM_EXIT_XEN:
        ret = kvm_xen_handle_exit(cpu, &run->xen);
        break;
#endif
    case KVM_EXIT_HYPERCALL:
        ret = kvm_handle_hypercall(run);
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    kvm_cpu_synchronize_state(cs);
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}
void kvm_arch_init_irq_routing(KVMState *s)
{
    /* We know at this point that we're using the in-kernel
     * irqchip, so we can use irqfds, and on x86 we know
     * we can use msi via irqfd and GSI routing.
     */
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;

    if (kvm_irqchip_is_split()) {
        KVMRouteChange c = kvm_irqchip_begin_route_changes(s);
        int i;

        /* If the ioapic is in QEMU and the lapics are in KVM, reserve
           MSI routes for signaling interrupts to the local apics. */
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
            if (kvm_irqchip_add_msi_route(&c, 0, NULL) < 0) {
                error_report("Could not enable split IRQ mode.");
                exit(1);
            }
        }
        kvm_irqchip_commit_route_changes(&c);
    }
}
int kvm_arch_irqchip_create(KVMState *s)
{
    int ret;

    if (kvm_kernel_irqchip_split()) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
        if (ret) {
            error_report("Could not enable split irqchip mode: %s",
                         strerror(-ret));
            exit(1);
        } else {
            DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
            kvm_split_irqchip = true;
            return 1;
        }
    } else {
        return 0;
    }
}
uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address)
{
    CPUX86State *env;
    uint64_t ext_id;

    if (!first_cpu) {
        return address;
    }
    env = &X86_CPU(first_cpu)->env;
    if (!(env->features[FEAT_KVM] & (1 << KVM_FEATURE_MSI_EXT_DEST_ID))) {
        return address;
    }

    /*
     * If the remappable format bit is set, or the upper bits are
     * already set in address_hi, or the low extended bits aren't
     * there anyway, do nothing.
     */
    ext_id = address & (0xff << MSI_ADDR_DEST_IDX_SHIFT);
    if (!ext_id || (ext_id & (1 << MSI_ADDR_DEST_IDX_SHIFT)) || (address >> 32)) {
        return address;
    }

    address &= ~ext_id;
    address |= ext_id << 35;
    return address;
}
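/*
 * Illustrative note (example values; assumes MSI_ADDR_DEST_IDX_SHIFT == 4):
 * to target APIC ID 0x123, the low 8 bits (0x23) sit in the regular MSI
 * destination field and the remaining bits (0x1) are encoded in address
 * bits 11:5, giving a low address such as 0xFEE23020. ext_id is then 0x20,
 * the remappable-format bit (bit 4) is clear, and the swizzle above turns
 * this into address_hi = 0x100, i.e. destination bits 31:8 = 0x1 as expected
 * by kernels advertising KVM_FEATURE_MSI_EXT_DEST_ID.
 */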
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    X86IOMMUState *iommu = x86_iommu_get_default();

    if (iommu) {
        X86IOMMUClass *class = X86_IOMMU_DEVICE_GET_CLASS(iommu);

        if (class->int_remap) {
            int ret;
            MSIMessage src, dst;

            src.address = route->u.msi.address_hi;
            src.address <<= VTD_MSI_ADDR_HI_SHIFT;
            src.address |= route->u.msi.address_lo;
            src.data = route->u.msi.data;

            ret = class->int_remap(iommu, &src, &dst, dev ?     \
                                   pci_requester_id(dev) :      \
                                   X86_IOMMU_SID_INVALID);
            if (ret) {
                trace_kvm_x86_fixup_msi_error(route->gsi);
                return 1;
            }

            /*
             * Handled untranslated compatibility format interrupt with
             * extended destination ID in the low bits 11-5. */
            dst.address = kvm_swizzle_msi_ext_dest_id(dst.address);

            route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
            route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
            route->u.msi.data = dst.data;
            return 0;
        }
    }

#ifdef CONFIG_XEN_EMU
    if (xen_mode == XEN_EMULATE) {
        int handled = xen_evtchn_translate_pirq_msi(route, address, data);

        /*
         * If it was a PIRQ and successfully routed (handled == 0) or it was
         * an error (handled < 0), return. If it wasn't a PIRQ, keep going.
         */
        if (handled <= 0) {
            return handled;
        }
    }
#endif

    address = kvm_swizzle_msi_ext_dest_id(address);
    route->u.msi.address_hi = address >> VTD_MSI_ADDR_HI_SHIFT;
    route->u.msi.address_lo = address & VTD_MSI_ADDR_LO_MASK;
    return 0;
}
typedef struct MSIRouteEntry MSIRouteEntry;

struct MSIRouteEntry {
    PCIDevice *dev;             /* Device pointer */
    int vector;                 /* MSI/MSIX vector index */
    int virq;                   /* Virtual IRQ index */
    QLIST_ENTRY(MSIRouteEntry) list;
};

/* List of used GSI routes */
static QLIST_HEAD(, MSIRouteEntry) msi_route_list = \
    QLIST_HEAD_INITIALIZER(msi_route_list);
void kvm_update_msi_routes_all(void *private, bool global,
                               uint32_t index, uint32_t mask)
{
    int cnt = 0, vector;
    MSIRouteEntry *entry;
    MSIMessage msg;
    PCIDevice *dev;

    /* TODO: explicit route update */
    QLIST_FOREACH(entry, &msi_route_list, list) {
        cnt++;
        vector = entry->vector;
        dev = entry->dev;
        if (msix_enabled(dev) && !msix_is_masked(dev, vector)) {
            msg = msix_get_message(dev, vector);
        } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) {
            msg = msi_get_message(dev, vector);
        } else {
            /*
             * Either MSI/MSIX is disabled for the device, or the
             * specific message was masked out.  Skip this one.
             */
            continue;
        }
        kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
    }
    kvm_irqchip_commit_routes(kvm_state);
    trace_kvm_x86_update_msi_routes(cnt);
}
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    static bool notify_list_inited = false;
    MSIRouteEntry *entry;

    if (!dev) {
        /* These are (possibly) IOAPIC routes only used for split
         * kernel irqchip mode, while what we are housekeeping are
         * PCI devices only. */
        return 0;
    }

    entry = g_new0(MSIRouteEntry, 1);
    entry->dev = dev;
    entry->vector = vector;
    entry->virq = route->gsi;
    QLIST_INSERT_HEAD(&msi_route_list, entry, list);

    trace_kvm_x86_add_msi_route(route->gsi);

    if (!notify_list_inited) {
        /* For the first time we do add route, add ourselves into
         * IOMMU's IEC notify list if needed. */
        X86IOMMUState *iommu = x86_iommu_get_default();
        if (iommu) {
            x86_iommu_iec_register_notifier(iommu,
                                            kvm_update_msi_routes_all,
                                            NULL);
        }
        notify_list_inited = true;
    }
    return 0;
}
)
6317 MSIRouteEntry
*entry
, *next
;
6318 QLIST_FOREACH_SAFE(entry
, &msi_route_list
, list
, next
) {
6319 if (entry
->virq
== virq
) {
6320 trace_kvm_x86_remove_msi_route(virq
);
6321 QLIST_REMOVE(entry
, list
);
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

bool kvm_has_waitpkg(void)
{
    return has_msr_umwait;
}
#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025

void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask)
{
    KVMState *s = kvm_state;
    uint64_t supported;

    mask &= XSTATE_DYNAMIC_MASK;
    if (!mask) {
        return;
    }
    /*
     * Just ignore bits that are not in CPUID[EAX=0xD,ECX=0].
     * ARCH_REQ_XCOMP_GUEST_PERM would fail, and QEMU has warned
     * about them already because they are not supported features.
     */
    supported = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
    supported |= (uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32;
    mask &= supported;

    while (mask) {
        int bit = ctz64(mask);
        int rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit);
        if (rc) {
            /*
             * Older kernel versions (< 5.17) do not support
             * ARCH_REQ_XCOMP_GUEST_PERM, but also do not return
             * any dynamic feature from kvm_arch_get_supported_cpuid.
             */
            warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure "
                        "for feature bit %d", bit);
        }
        mask &= ~BIT_ULL(bit);
    }
}
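/*
 * Illustrative note: on current kernels the only dynamic XSAVE component is
 * the AMX tile data state (XTILEDATA, bit 18 of XCR0), so a typical call to
 * kvm_request_xsave_components() boils down to a single
 * arch_prctl(ARCH_REQ_XCOMP_GUEST_PERM, 18) syscall. The exact mask/bit
 * macro names live in cpu.h and may differ between QEMU versions.
 */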
static int kvm_arch_get_notify_vmexit(Object *obj, Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    return s->notify_vmexit;
}

static void kvm_arch_set_notify_vmexit(Object *obj, int value, Error **errp)
{
    KVMState *s = KVM_STATE(obj);

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    s->notify_vmexit = value;
}

static void kvm_arch_get_notify_window(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value = s->notify_window;

    visit_type_uint32(v, name, &value, errp);
}

static void kvm_arch_set_notify_window(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    s->notify_window = value;
}
static void kvm_arch_get_xen_version(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value = s->xen_version;

    visit_type_uint32(v, name, &value, errp);
}

static void kvm_arch_set_xen_version(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    Error *error = NULL;
    uint32_t value;

    visit_type_uint32(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }

    s->xen_version = value;
    if (value && xen_mode == XEN_DISABLED) {
        xen_mode = XEN_EMULATE;
    }
}

static void kvm_arch_get_xen_gnttab_max_frames(Object *obj, Visitor *v,
                                               const char *name, void *opaque,
                                               Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint16_t value = s->xen_gnttab_max_frames;

    visit_type_uint16(v, name, &value, errp);
}

static void kvm_arch_set_xen_gnttab_max_frames(Object *obj, Visitor *v,
                                               const char *name, void *opaque,
                                               Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    Error *error = NULL;
    uint16_t value;

    visit_type_uint16(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }

    s->xen_gnttab_max_frames = value;
}

static void kvm_arch_get_xen_evtchn_max_pirq(Object *obj, Visitor *v,
                                             const char *name, void *opaque,
                                             Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint16_t value = s->xen_evtchn_max_pirq;

    visit_type_uint16(v, name, &value, errp);
}

static void kvm_arch_set_xen_evtchn_max_pirq(Object *obj, Visitor *v,
                                             const char *name, void *opaque,
                                             Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    Error *error = NULL;
    uint16_t value;

    visit_type_uint16(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }

    s->xen_evtchn_max_pirq = value;
}
void kvm_arch_accel_class_init(ObjectClass *oc)
{
    object_class_property_add_enum(oc, "notify-vmexit", "NotifyVMexitOption",
                                   &NotifyVmexitOption_lookup,
                                   kvm_arch_get_notify_vmexit,
                                   kvm_arch_set_notify_vmexit);
    object_class_property_set_description(oc, "notify-vmexit",
                                          "Enable notify VM exit");

    object_class_property_add(oc, "notify-window", "uint32",
                              kvm_arch_get_notify_window,
                              kvm_arch_set_notify_window,
                              NULL, NULL);
    object_class_property_set_description(oc, "notify-window",
                                          "Clock cycles without an event window "
                                          "after which a notification VM exit occurs");

    object_class_property_add(oc, "xen-version", "uint32",
                              kvm_arch_get_xen_version,
                              kvm_arch_set_xen_version,
                              NULL, NULL);
    object_class_property_set_description(oc, "xen-version",
                                          "Xen version to be emulated "
                                          "(in XENVER_version form "
                                          "e.g. 0x4000a for 4.10)");

    object_class_property_add(oc, "xen-gnttab-max-frames", "uint16",
                              kvm_arch_get_xen_gnttab_max_frames,
                              kvm_arch_set_xen_gnttab_max_frames,
                              NULL, NULL);
    object_class_property_set_description(oc, "xen-gnttab-max-frames",
                                          "Maximum number of grant table frames");

    object_class_property_add(oc, "xen-evtchn-max-pirq", "uint16",
                              kvm_arch_get_xen_evtchn_max_pirq,
                              kvm_arch_set_xen_evtchn_max_pirq,
                              NULL, NULL);
    object_class_property_set_description(oc, "xen-evtchn-max-pirq",
                                          "Maximum number of Xen PIRQs");
}
void kvm_set_max_apic_id(uint32_t max_apic_id)
{
    kvm_vm_enable_cap(kvm_state, KVM_CAP_MAX_VCPU_ID, 0, max_apic_id);
}