// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 */
#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"
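
/*
 * Compute the size in bytes of the XSAVE area described by @xstate_bv.
 * For the non-compacted format this is the largest offset + size pair
 * reported by CPUID.(EAX=0DH,ECX=n); for the compacted format the
 * extended components are packed back to back after the legacy region
 * and the XSAVE header.
 */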
static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XFEATURE_MASK_EXTEND;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			u32 eax, ebx, ecx, edx, offset;
			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
			offset = compacted ? ret : ebx;
			ret = max(ret, offset + eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}
bool kvm_mpx_supported(void)
{
	return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		&& kvm_x86_ops->mpx_supported());
}
EXPORT_SYMBOL_GPL(kvm_mpx_supported);
u64 kvm_supported_xcr0(void)
{
	u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;

	if (!kvm_mpx_supported())
		xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

	return xcr0;
}
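
/*
 * Re-derive the dynamic CPUID bits (OSXSAVE, APIC, OSPKE, XSAVE sizes,
 * MWAIT, PV features) and the cached MAXPHYADDR after the guest CPUID
 * table or the relevant vCPU state has changed.
 */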
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;
	struct kvm_lapic *apic = vcpu->arch.apic;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (!best)
		return 0;

	/* Update OSXSAVE bit */
	if (boot_cpu_has(X86_FEATURE_XSAVE) && best->function == 0x1) {
		best->ecx &= ~F(OSXSAVE);
		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
			best->ecx |= F(OSXSAVE);
	}

	best->edx &= ~F(APIC);
	if (vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)
		best->edx |= F(APIC);

	if (apic) {
		if (best->ecx & F(TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;
	}

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (best) {
		/* Update OSPKE bit */
		if (boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) {
			best->ecx &= ~F(OSPKE);
			if (kvm_read_cr4_bits(vcpu, X86_CR4_PKE))
				best->ecx |= F(OSPKE);
		}
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (!best) {
		vcpu->arch.guest_supported_xcr0 = 0;
		vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
	} else {
		vcpu->arch.guest_supported_xcr0 =
			(best->eax | ((u64)best->edx << 32)) &
			kvm_supported_xcr0();
		vcpu->arch.guest_xstate_size = best->ebx =
			xstate_required_size(vcpu->arch.xcr0, false);
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
	if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	/*
	 * The existing code assumes virtual address is 48-bit or 57-bit in the
	 * canonical address checks; exit if it is ever changed.
	 */
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best) {
		int vaddr_bits = (best->eax & 0xff00) >> 8;

		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
			return -EINVAL;
	}

	best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
	    (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);

	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
		best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
		if (best) {
			if (vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT)
				best->ecx |= F(MWAIT);
			else
				best->ecx &= ~F(MWAIT);
		}
	}

	/* Update physical-address width */
	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
	kvm_mmu_reset_context(vcpu);

	kvm_pmu_refresh(vcpu);
	return 0;
}
static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
		entry->edx &= ~F(NX);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}
EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);
/* when an old userspace process fills a new kernel module */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries = NULL;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	if (cpuid->nent) {
		cpuid_entries =
			vmalloc(array_size(sizeof(struct kvm_cpuid_entry),
					   cpuid->nent));
		r = -ENOMEM;
		if (!cpuid_entries)
			goto out;
		r = -EFAULT;
		if (copy_from_user(cpuid_entries, entries,
				   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
			goto out;
	}
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);

out:
	vfree(cpuid_entries);
	return r;
}
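
/* Fill the vCPU's CPUID table from the kvm_cpuid_entry2 format (KVM_SET_CPUID2). */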
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);
out:
	return r;
}
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}
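
/*
 * Clear feature bits the host kernel itself does not have, using the
 * boot CPU's capability words as the mask.
 */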
static __always_inline void cpuid_mask(u32 *word, int wordnum)
{
	reverse_cpuid_check(wordnum);
	*word &= boot_cpu_data.x86_capability[wordnum];
}
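
/*
 * Fill one entry with the host's raw CPUID output for @function/@index and
 * mark it as stateful (function 2) or index-significant where appropriate.
 */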
static void do_host_cpuid(struct kvm_cpuid_entry2 *entry, u32 function,
			  int index)
{
	entry->function = function;
	entry->index = index;
	entry->flags = 0;

	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);

	switch (function) {
	case 2:
		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		break;
	case 4:
	case 7:
	case 0xb:
	case 0xd:
	case 0xf:
	case 0x10:
	case 0x12:
	case 0x14:
	case 0x17:
	case 0x18:
	case 0x1f:
	case 0x8000001d:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		break;
	}
}
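
/*
 * KVM_GET_EMULATED_CPUID: report only features that KVM emulates entirely
 * in software (e.g. MOVBE, RDPID), independent of host hardware support.
 */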
static int __do_cpuid_func_emulated(struct kvm_cpuid_entry2 *entry,
				    u32 func, int *nent, int maxnent)
{
	entry->function = func;
	entry->index = 0;
	entry->flags = 0;

	switch (func) {
	case 0:
		entry->eax = 7;
		++*nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++*nent;
		break;
	case 7:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		entry->eax = 0;
		entry->ecx = F(RDPID);
		++*nent;
	default:
		break;
	}

	return 0;
}
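
/*
 * Mask the host's CPUID.7 sub-leaf @index down to what KVM can expose,
 * taking the vendor module's capabilities (INVPCID, MPX, UMIP, PT, PKU)
 * into account.
 */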
static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
{
	unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
	unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
	unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
	unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
	unsigned f_la57;
	unsigned f_pku = kvm_x86_ops->pku_supported() ? F(PKU) : 0;

	/* cpuid 7.0.ebx */
	const u32 kvm_cpuid_7_0_ebx_x86_features =
		F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
		F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
		F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
		F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
		F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | f_intel_pt;

	/* cpuid 7.0.ecx */
	const u32 kvm_cpuid_7_0_ecx_x86_features =
		F(AVX512VBMI) | F(LA57) | 0 /*PKU*/ | 0 /*OSPKE*/ | F(RDPID) |
		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
		F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/;

	/* cpuid 7.0.edx */
	const u32 kvm_cpuid_7_0_edx_x86_features =
		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
		F(MD_CLEAR);

	/* cpuid 7.1.eax */
	const u32 kvm_cpuid_7_1_eax_x86_features =
		F(AVX512_BF16);

	switch (index) {
	case 0:
		entry->eax = min(entry->eax, 1u);
		entry->ebx &= kvm_cpuid_7_0_ebx_x86_features;
		cpuid_mask(&entry->ebx, CPUID_7_0_EBX);
		/* TSC_ADJUST is emulated */
		entry->ebx |= F(TSC_ADJUST);

		entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
		f_la57 = entry->ecx & F(LA57);
		cpuid_mask(&entry->ecx, CPUID_7_ECX);
		/* Set LA57 based on hardware capability. */
		entry->ecx |= f_la57;
		entry->ecx |= f_umip;
		entry->ecx |= f_pku;
		/* PKU is not yet implemented for shadow paging. */
		if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
			entry->ecx &= ~F(PKU);

		entry->edx &= kvm_cpuid_7_0_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_7_EDX);
		if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
			entry->edx |= F(SPEC_CTRL);
		if (boot_cpu_has(X86_FEATURE_STIBP))
			entry->edx |= F(INTEL_STIBP);
		if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		    boot_cpu_has(X86_FEATURE_AMD_SSBD))
			entry->edx |= F(SPEC_CTRL_SSBD);
		/*
		 * We emulate ARCH_CAPABILITIES in software even
		 * if the host doesn't support it.
		 */
		entry->edx |= F(ARCH_CAPABILITIES);
		break;
	case 1:
		entry->eax &= kvm_cpuid_7_1_eax_x86_features;
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	default:
		WARN_ON_ONCE(1);
		entry->eax = 0;
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	}
}
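
/*
 * Build the KVM_GET_SUPPORTED_CPUID entries for @function, starting from
 * the host's CPUID output and masking each leaf down to the feature set
 * KVM is able to virtualize.
 */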
static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
				  int *nent, int maxnent)
{
	int r;
	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
				? F(GBPAGES) : 0;
	unsigned f_lm = F(LM);
#else
	unsigned f_gbpages = 0;
	unsigned f_lm = 0;
#endif
	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
	unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
	unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;

	/* cpuid 1.edx */
	const u32 kvm_cpuid_1_edx_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */;
	/* cpuid 0x80000001.edx */
	const u32 kvm_cpuid_8000_0001_edx_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
	/* cpuid 1.ecx */
	const u32 kvm_cpuid_1_ecx_x86_features =
		/* NOTE: MONITOR (and MWAIT) are emulated as NOP,
		 * but *not* advertised to guests via CPUID ! */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND);
	/* cpuid 0x80000001.ecx */
	const u32 kvm_cpuid_8000_0001_ecx_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
		F(TOPOEXT) | F(PERFCTR_CORE);

	/* cpuid 0x80000008.ebx */
	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
		F(CLZERO) | F(XSAVEERPTR) |
		F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
		F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON);

	/* cpuid 0xC0000001.edx */
	const u32 kvm_cpuid_C000_0001_edx_x86_features =
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN);

	/* cpuid 0xD.1.eax */
	const u32 kvm_cpuid_D_1_eax_x86_features =
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;
	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	if (WARN_ON(*nent >= maxnent))
		goto out;

	do_host_cpuid(entry, function, 0);
	++*nent;

	switch (function) {
	case 0:
		/* Limited to the highest leaf implemented in KVM. */
		entry->eax = min(entry->eax, 0x1fU);
		break;
	case 1:
		entry->edx &= kvm_cpuid_1_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_1_EDX);
		entry->ecx &= kvm_cpuid_1_ecx_x86_features;
		cpuid_mask(&entry->ecx, CPUID_1_ECX);
		/* we support x2apic emulation even if host does not support
		 * it since we emulate x2apic in software */
		entry->ecx |= F(X2APIC);
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times; ++t) {
			if (*nent >= maxnent)
				goto out;

			do_host_cpuid(&entry[t], function, 0);
			++*nent;
		}
		break;
	}
	/* functions 4 and 0x8000001d have additional index. */
	case 4:
	case 0x8000001d: {
		int i, cache_type;

		/* read more entries until cache_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_host_cpuid(&entry[i], function, i);
			++*nent;
		}
		break;
	}
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	/* function 7 has additional index. */
	case 7: {
		int i;

		for (i = 0; ; ) {
			do_cpuid_7_mask(&entry[i], i);
			if (i == entry->eax)
				break;
			if (*nent >= maxnent)
				goto out;

			++i;
			do_host_cpuid(&entry[i], function, i);
			++*nent;
		}
		break;
	}
	case 0xa: { /* Architectural Performance Monitoring */
		struct x86_pmu_capability cap;
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		perf_get_x86_pmu_capability(&cap);

		/*
		 * Only support guest architectural pmu on a host
		 * with architectural pmu.
		 */
		if (!cap.version)
			memset(&cap, 0, sizeof(cap));

		eax.split.version_id = min(cap.version, 2);
		eax.split.num_counters = cap.num_counters_gp;
		eax.split.bit_width = cap.bit_width_gp;
		eax.split.mask_length = cap.events_mask_len;

		edx.split.num_counters_fixed = cap.num_counters_fixed;
		edx.split.bit_width_fixed = cap.bit_width_fixed;
		edx.split.reserved = 0;

		entry->eax = eax.full;
		entry->ebx = cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
	/*
	 * Per Intel's SDM, the 0x1f is a superset of 0xb,
	 * thus they can be handled by common code.
	 */
	case 0x1f:
	case 0xb: {
		int i;

		/*
		 * We filled in entry[0] for CPUID(EAX=<function>,
		 * ECX=00H) above.  If its level type (ECX[15:8]) is
		 * zero, then the leaf is unimplemented, and we're
		 * done.  Otherwise, continue to populate entries
		 * until the level type (ECX[15:8]) of the previously
		 * added entry is zero.
		 */
		for (i = 1; entry[i - 1].ecx & 0xff00; ++i) {
			if (*nent >= maxnent)
				goto out;

			do_host_cpuid(&entry[i], function, i);
			++*nent;
		}
		break;
	}
	case 0xd: {
		int idx, i;
		u64 supported = kvm_supported_xcr0();

		entry->eax &= supported;
		entry->ebx = xstate_required_size(supported, false);
		entry->ecx = entry->ebx;
		entry->edx &= supported >> 32;
		if (!supported)
			break;

		for (idx = 1, i = 1; idx < 64; ++idx) {
			u64 mask = ((u64)1 << idx);
			if (*nent >= maxnent)
				goto out;

			do_host_cpuid(&entry[i], function, idx);
			if (idx == 1) {
				entry[i].eax &= kvm_cpuid_D_1_eax_x86_features;
				cpuid_mask(&entry[i].eax, CPUID_D_1_EAX);
				entry[i].ebx = 0;
				if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
					entry[i].ebx =
						xstate_required_size(supported,
								     true);
			} else {
				if (entry[i].eax == 0 || !(supported & mask))
					continue;
				if (WARN_ON_ONCE(entry[i].ecx & 1))
					continue;
			}
			entry[i].ecx = 0;
			entry[i].edx = 0;
			++*nent;
			++i;
		}
		break;
	}
	/* Intel PT */
	case 0x14: {
		int t, times = entry->eax;

		if (!f_intel_pt)
			break;

		for (t = 1; t <= times; ++t) {
			if (*nent >= maxnent)
				goto out;
			do_host_cpuid(&entry[t], function, t);
			++*nent;
		}
		break;
	}
	case KVM_CPUID_SIGNATURE: {
		static const char signature[12] = "KVMKVMKVM\0\0";
		const u32 *sigptr = (const u32 *)signature;
		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT) |
			     (1 << KVM_FEATURE_PV_TLB_FLUSH) |
			     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
			     (1 << KVM_FEATURE_PV_SEND_IPI) |
			     (1 << KVM_FEATURE_POLL_CONTROL) |
			     (1 << KVM_FEATURE_PV_SCHED_YIELD);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001f);
		break;
	case 0x80000001:
		entry->edx &= kvm_cpuid_8000_0001_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_8000_0001_EDX);
		entry->ecx &= kvm_cpuid_8000_0001_ecx_x86_features;
		cpuid_mask(&entry->ecx, CPUID_8000_0001_ECX);
		break;
	case 0x80000007: /* Advanced power management */
		/* invariant TSC is CPUID.80000007H:EDX[8] */
		entry->edx &= (1 << 8);
		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;

		if (!g_phys_as)
			g_phys_as = phys_as;
		entry->eax = g_phys_as | (virt_as << 8);
		entry->edx = 0;
		entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
		cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
		/*
		 * AMD has separate bits for each SPEC_CTRL bit.
		 * arch/x86/kernel/cpu/bugs.c is kind enough to
		 * record that in cpufeatures so use them.
		 */
		if (boot_cpu_has(X86_FEATURE_IBPB))
			entry->ebx |= F(AMD_IBPB);
		if (boot_cpu_has(X86_FEATURE_IBRS))
			entry->ebx |= F(AMD_IBRS);
		if (boot_cpu_has(X86_FEATURE_STIBP))
			entry->ebx |= F(AMD_STIBP);
		if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		    boot_cpu_has(X86_FEATURE_AMD_SSBD))
			entry->ebx |= F(AMD_SSBD);
		if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			entry->ebx |= F(AMD_SSB_NO);
		/*
		 * The preference is to use SPEC CTRL MSR instead of the
		 * VIRT_SPEC MSR.
		 */
		if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
		    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
			entry->ebx |= F(VIRT_SSBD);
		break;
	}
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
	case 0x8000001e:
		break;
	/* Support memory encryption cpuid if host supports it */
	case 0x8000001F:
		if (!boot_cpu_has(X86_FEATURE_SEV))
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 now. */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		entry->edx &= kvm_cpuid_C000_0001_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_C000_0001_EDX);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	kvm_x86_ops->set_supported_cpuid(function, entry);

	r = 0;

out:
	put_cpu();

	return r;
}
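
/* Dispatch to the emulated-only or the full "supported" CPUID builder. */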
static int do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 func,
			 int *nent, int maxnent, unsigned int type)
{
	if (*nent >= maxnent)
		return -E2BIG;

	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_func_emulated(entry, func, nent, maxnent);

	return __do_cpuid_func(entry, func, nent, maxnent);
}
struct kvm_cpuid_param {
	u32 func;
	bool (*qualifier)(const struct kvm_cpuid_param *param);
};
static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
{
	return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}
static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we
	 * have to give ourselves satisfied only with the emulated side. /me
	 * sheds a tear.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}
	return false;
}
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG, i;
	u32 func;
	static const struct kvm_cpuid_param param[] = {
		{ .func = 0 },
		{ .func = 0x80000000 },
		{ .func = 0xC0000000, .qualifier = is_centaur_cpu },
		{ .func = KVM_CPUID_SIGNATURE },
	};

	if (cpuid->nent < 1)
		goto out;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	r = -ENOMEM;
	cpuid_entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2),
					   cpuid->nent));
	if (!cpuid_entries)
		goto out;

	r = 0;
	for (i = 0; i < ARRAY_SIZE(param); i++) {
		const struct kvm_cpuid_param *ent = &param[i];

		if (ent->qualifier && !ent->qualifier(ent))
			continue;

		r = do_cpuid_func(&cpuid_entries[nent], ent->func,
				  &nent, cpuid->nent, type);

		if (r)
			goto out_free;

		limit = cpuid_entries[nent - 1].eax;
		for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
			r = do_cpuid_func(&cpuid_entries[nent], func,
					  &nent, cpuid->nent, type);

		if (r)
			goto out_free;
	}

	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}
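
/*
 * Stateful CPUID functions (function 2) rotate through their entries;
 * advance the STATE_READ_NEXT marker to the next entry of the same
 * function.
 */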
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	struct kvm_cpuid_entry2 *ej;
	int j = i;
	int nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	do {
		j = (j + 1) % nent;
		ej = &vcpu->arch.cpuid_entries[j];
	} while (ej->function != e->function);

	ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;

	return j;
}
/* find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
	u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	int i;
	struct kvm_cpuid_entry2 *best = NULL;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		struct kvm_cpuid_entry2 *e;

		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
	}
	return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
/*
 * If the basic or extended CPUID leaf requested is higher than the
 * maximum supported basic or extended leaf, respectively, then it is
 * out of range.
 */
static bool cpuid_function_in_range(struct kvm_vcpu *vcpu, u32 function)
{
	struct kvm_cpuid_entry2 *max;

	max = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
	return max && function <= max->eax;
}
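
/*
 * Core CPUID emulation: look up the guest's entry for *eax/*ecx and apply
 * the Intel/AMD out-of-range semantics described below when no entry
 * exists.
 */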
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool check_limit)
{
	u32 function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *entry;
	struct kvm_cpuid_entry2 *max;
	bool found;

	entry = kvm_find_cpuid_entry(vcpu, function, index);
	found = entry;
	/*
	 * Intel CPUID semantics treats any query for an out-of-range
	 * leaf as if the highest basic leaf (i.e. CPUID.0H:EAX) were
	 * requested. AMD CPUID semantics returns all zeroes for any
	 * undefined leaf, whether or not the leaf is in range.
	 */
	if (!entry && check_limit && !guest_cpuid_is_amd(vcpu) &&
	    !cpuid_function_in_range(vcpu, function)) {
		max = kvm_find_cpuid_entry(vcpu, 0, 0);
		if (max) {
			function = max->eax;
			entry = kvm_find_cpuid_entry(vcpu, function, index);
		}
	}
	if (entry) {
		*eax = entry->eax;
		*ebx = entry->ebx;
		*ecx = entry->ecx;
		*edx = entry->edx;
		if (function == 7 && index == 0) {
			u64 data;
			if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
			    (data & TSX_CTRL_CPUID_CLEAR))
				*ebx &= ~(F(RTM) | F(HLE));
		}
	} else {
		*eax = *ebx = *ecx = *edx = 0;
		/*
		 * When leaf 0BH or 1FH is defined, CL is pass-through
		 * and EDX is always the x2APIC ID, even for undefined
		 * subleaves. Index 1 will exist iff the leaf is
		 * implemented, so we pass through CL iff leaf 1
		 * exists. EDX can be copied from any existing index.
		 */
		if (function == 0xb || function == 0x1f) {
			entry = kvm_find_cpuid_entry(vcpu, function, 1);
			if (entry) {
				*ecx = index & 0xff;
				*edx = entry->edx;
			}
		}
	}
	trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, found);
	return found;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);
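
/* Handle a CPUID intercept: read RAX/RCX, emulate, write back all four GPRs. */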
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 eax, ebx, ecx, edx;

	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
		return 1;

	eax = kvm_rax_read(vcpu);
	ecx = kvm_rcx_read(vcpu);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true);
	kvm_rax_write(vcpu, eax);
	kvm_rbx_write(vcpu, ebx);
	kvm_rcx_write(vcpu, ecx);
	kvm_rdx_write(vcpu, edx);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);