#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include <asm/cpu.h>

int kvm_update_cpuid(struct kvm_vcpu *vcpu);
bool kvm_mpx_supported(void);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

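/*
 * cpuid_maxphyaddr() returns the guest MAXPHYADDR value cached in
 * vcpu->arch.maxphyaddr; cpuid_query_maxphyaddr() derives it from the
 * vCPU's CPUID entries.
 */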
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

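/*
 * The guest_cpuid_has_*() helpers below report whether a feature bit is
 * present in the CPUID tables that userspace configured for this vCPU:
 * each one looks up the relevant leaf/index with kvm_find_cpuid_entry()
 * and tests the corresponding register bit.
 */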
static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	if (!static_cpu_has(X86_FEATURE_XSAVE))
		return false;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->edx & bit(X86_FEATURE_MTRR));
}

static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
}

static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMEP));
}

static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMAP));
}

static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
}

static inline bool guest_cpuid_has_pku(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ecx & bit(X86_FEATURE_PKU));
}

static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_LM));
}

static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->ecx & bit(X86_FEATURE_OSVW));
}

static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_PCID));
}

static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
}

static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}

static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_GBPAGES));
}

static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_RTM));
}

static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_MPX));
}

static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_RDTSCP));
}

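/*
 * IBPB and IBRS/SSBD can be enumerated either via the AMD leaf
 * (0x80000008 EBX) or via the Intel leaf (7 EDX), so the two helpers
 * below accept either form of enumeration in the guest's CPUID.
 */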
static inline bool guest_cpuid_has_ibpb(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best && (best->ebx & bit(X86_FEATURE_AMD_IBPB)))
		return true;
	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
}

static inline bool guest_cpuid_has_spec_ctrl(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best && (best->ebx & (bit(X86_FEATURE_AMD_IBRS) | bit(X86_FEATURE_AMD_SSBD))))
		return true;
	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_SPEC_CTRL_SSBD)));
}

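/*
 * Typical (hypothetical) caller pattern: gate MSR accesses on what the
 * guest's CPUID advertises, e.g. an MSR_IA32_SPEC_CTRL write handler
 * might do:
 *
 *	if (!guest_cpuid_has_spec_ctrl(vcpu))
 *		return 1;	(reject the access, causing a #GP)
 */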
static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->edx & bit(X86_FEATURE_ARCH_CAPABILITIES));
}

static inline bool guest_cpuid_has_virt_ssbd(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	return best && (best->ebx & bit(X86_FEATURE_VIRT_SSBD));
}

/*
 * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
 */
#define BIT_NRIPS	3

static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);

	/*
	 * NRIPS is a scattered cpuid feature, so we can't use
	 * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit
	 * position 8, not 3).
	 */
	return best && (best->edx & bit(BIT_NRIPS));
}
#undef BIT_NRIPS

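/*
 * The helpers below decode the family/model/stepping fields of the guest's
 * CPUID leaf 1 EAX using the x86_family()/x86_model()/x86_stepping()
 * decoders; they return -1 if the leaf is missing.
 */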
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

#endif