/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include <asm/cpu.h>	/* x86_family(), x86_model(), x86_stepping() */
#include <asm/processor.h>
int kvm_update_cpuid(struct kvm_vcpu *vcpu);
bool kvm_mpx_supported(void);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool check_limit);
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

struct cpuid_reg {
	u32 function;
	u32 index;
	int reg;
};
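/*
 * Reverse CPUID lookup table: maps each X86_FEATURE_* word (bit number / 32)
 * back to the CPUID function, index and output register that the kernel's
 * feature word is filled from, so guest feature bits can be located
 * generically rather than through per-feature helpers.
 */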
static const struct cpuid_reg reverse_cpuid[] = {
	[CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
	[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
	[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
	[CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
	[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
	[CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
};
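/*
 * Both BUILD_BUG_ONs below require x86_feature to be a compile-time
 * constant, which is why this helper and its callers are __always_inline.
 */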
static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
{
	unsigned x86_leaf = x86_feature / 32;

	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);

	return reverse_cpuid[x86_leaf];
}
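/*
 * Returns a pointer to the guest's copy of the CPUID output register that
 * carries the requested feature bit, or NULL if the corresponding leaf is
 * absent from the guest's CPUID entries.
 */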
static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
						     unsigned x86_feature)
{
	struct kvm_cpuid_entry2 *entry;
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);

	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	switch (cpuid.reg) {
	case CPUID_EAX:
		return &entry->eax;
	case CPUID_EBX:
		return &entry->ebx;
	case CPUID_ECX:
		return &entry->ecx;
	case CPUID_EDX:
		return &entry->edx;
	default:
		BUILD_BUG();
		return NULL;
	}
}
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
					    unsigned x86_feature)
{
	int *reg;

	if (x86_feature == X86_FEATURE_XSAVE &&
	    !static_cpu_has(X86_FEATURE_XSAVE))
		return false;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & bit(x86_feature);
}
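/*
 * Illustrative use only (not part of this header): emulation paths
 * typically gate on guest CPUID bits before exposing state, e.g.
 *
 *	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
 *		return 1;
 */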
static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
					      unsigned x86_feature)
{
	int *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~bit(x86_feature);
}
static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_family(best->eax);
}
static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_model(best->eax);
}
static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}
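/*
 * CPUID faulting: a guest that sees MSR_PLATFORM_INFO_CPUID_FAULT may set
 * MSR_MISC_FEATURES_ENABLES_CPUID_FAULT, after which CPUID executed at
 * CPL > 0 is expected to raise #GP instead of succeeding.
 */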
static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}
static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

#endif