1 #ifndef ARCH_X86_KVM_CPUID_H
2 #define ARCH_X86_KVM_CPUID_H
/* Recompute derived CPUID state for @vcpu after its CPUID entries change. */
int kvm_update_cpuid(struct kvm_vcpu *vcpu);
8 struct kvm_cpuid_entry2
*kvm_find_cpuid_entry(struct kvm_vcpu
*vcpu
,
9 u32 function
, u32 index
);
10 int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2
*cpuid
,
11 struct kvm_cpuid_entry2 __user
*entries
,
13 int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu
*vcpu
,
14 struct kvm_cpuid
*cpuid
,
15 struct kvm_cpuid_entry __user
*entries
);
16 int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu
*vcpu
,
17 struct kvm_cpuid2
*cpuid
,
18 struct kvm_cpuid_entry2 __user
*entries
);
19 int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu
*vcpu
,
20 struct kvm_cpuid2
*cpuid
,
21 struct kvm_cpuid_entry2 __user
*entries
);
22 void kvm_cpuid(struct kvm_vcpu
*vcpu
, u32
*eax
, u32
*ebx
, u32
*ecx
, u32
*edx
);
/* Derive the guest's physical address width from its CPUID entries. */
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
26 static inline int cpuid_maxphyaddr(struct kvm_vcpu
*vcpu
)
28 return vcpu
->arch
.maxphyaddr
;
31 static inline bool guest_cpuid_has_xsave(struct kvm_vcpu
*vcpu
)
33 struct kvm_cpuid_entry2
*best
;
35 if (!static_cpu_has(X86_FEATURE_XSAVE
))
38 best
= kvm_find_cpuid_entry(vcpu
, 1, 0);
39 return best
&& (best
->ecx
& bit(X86_FEATURE_XSAVE
));
42 static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu
*vcpu
)
44 struct kvm_cpuid_entry2
*best
;
46 best
= kvm_find_cpuid_entry(vcpu
, 1, 0);
47 return best
&& (best
->edx
& bit(X86_FEATURE_MTRR
));
50 static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu
*vcpu
)
52 struct kvm_cpuid_entry2
*best
;
54 best
= kvm_find_cpuid_entry(vcpu
, 7, 0);
55 return best
&& (best
->ebx
& bit(X86_FEATURE_TSC_ADJUST
));
58 static inline bool guest_cpuid_has_smep(struct kvm_vcpu
*vcpu
)
60 struct kvm_cpuid_entry2
*best
;
62 best
= kvm_find_cpuid_entry(vcpu
, 7, 0);
63 return best
&& (best
->ebx
& bit(X86_FEATURE_SMEP
));
66 static inline bool guest_cpuid_has_smap(struct kvm_vcpu
*vcpu
)
68 struct kvm_cpuid_entry2
*best
;
70 best
= kvm_find_cpuid_entry(vcpu
, 7, 0);
71 return best
&& (best
->ebx
& bit(X86_FEATURE_SMAP
));
74 static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu
*vcpu
)
76 struct kvm_cpuid_entry2
*best
;
78 best
= kvm_find_cpuid_entry(vcpu
, 7, 0);
79 return best
&& (best
->ebx
& bit(X86_FEATURE_FSGSBASE
));
82 static inline bool guest_cpuid_has_longmode(struct kvm_vcpu
*vcpu
)
84 struct kvm_cpuid_entry2
*best
;
86 best
= kvm_find_cpuid_entry(vcpu
, 0x80000001, 0);
87 return best
&& (best
->edx
& bit(X86_FEATURE_LM
));
90 static inline bool guest_cpuid_has_osvw(struct kvm_vcpu
*vcpu
)
92 struct kvm_cpuid_entry2
*best
;
94 best
= kvm_find_cpuid_entry(vcpu
, 0x80000001, 0);
95 return best
&& (best
->ecx
& bit(X86_FEATURE_OSVW
));
98 static inline bool guest_cpuid_has_pcid(struct kvm_vcpu
*vcpu
)
100 struct kvm_cpuid_entry2
*best
;
102 best
= kvm_find_cpuid_entry(vcpu
, 1, 0);
103 return best
&& (best
->ecx
& bit(X86_FEATURE_PCID
));
106 static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu
*vcpu
)
108 struct kvm_cpuid_entry2
*best
;
110 best
= kvm_find_cpuid_entry(vcpu
, 1, 0);
111 return best
&& (best
->ecx
& bit(X86_FEATURE_X2APIC
));
114 static inline bool guest_cpuid_is_amd(struct kvm_vcpu
*vcpu
)
116 struct kvm_cpuid_entry2
*best
;
118 best
= kvm_find_cpuid_entry(vcpu
, 0, 0);
119 return best
&& best
->ebx
== X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx
;
122 static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu
*vcpu
)
124 struct kvm_cpuid_entry2
*best
;
126 best
= kvm_find_cpuid_entry(vcpu
, 0x80000001, 0);
127 return best
&& (best
->edx
& bit(X86_FEATURE_GBPAGES
));
130 static inline bool guest_cpuid_has_rtm(struct kvm_vcpu
*vcpu
)
132 struct kvm_cpuid_entry2
*best
;
134 best
= kvm_find_cpuid_entry(vcpu
, 7, 0);
135 return best
&& (best
->ebx
& bit(X86_FEATURE_RTM
));
138 static inline bool guest_cpuid_has_mpx(struct kvm_vcpu
*vcpu
)
140 struct kvm_cpuid_entry2
*best
;
142 best
= kvm_find_cpuid_entry(vcpu
, 7, 0);
143 return best
&& (best
->ebx
& bit(X86_FEATURE_MPX
));
146 static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu
*vcpu
)
148 struct kvm_cpuid_entry2
*best
;
150 best
= kvm_find_cpuid_entry(vcpu
, 7, 0);
151 return best
&& (best
->ebx
& bit(X86_FEATURE_PCOMMIT
));
154 static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu
*vcpu
)
156 struct kvm_cpuid_entry2
*best
;
158 best
= kvm_find_cpuid_entry(vcpu
, 0x80000001, 0);
159 return best
&& (best
->edx
& bit(X86_FEATURE_RDTSCP
));
163 * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
167 static inline bool guest_cpuid_has_nrips(struct kvm_vcpu
*vcpu
)
169 struct kvm_cpuid_entry2
*best
;
171 best
= kvm_find_cpuid_entry(vcpu
, 0x8000000a, 0);
174 * NRIPS is a scattered cpuid feature, so we can't use
175 * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit
176 * position 8, not 3).
178 return best
&& (best
->edx
& bit(BIT_NRIPS
));
182 static inline int guest_cpuid_family(struct kvm_vcpu
*vcpu
)
184 struct kvm_cpuid_entry2
*best
;
186 best
= kvm_find_cpuid_entry(vcpu
, 0x1, 0);
190 return x86_family(best
->eax
);
193 static inline int guest_cpuid_model(struct kvm_vcpu
*vcpu
)
195 struct kvm_cpuid_entry2
*best
;
197 best
= kvm_find_cpuid_entry(vcpu
, 0x1, 0);
201 return x86_model(best
->eax
);
204 static inline int guest_cpuid_stepping(struct kvm_vcpu
*vcpu
)
206 struct kvm_cpuid_entry2
*best
;
208 best
= kvm_find_cpuid_entry(vcpu
, 0x1, 0);
212 return x86_stepping(best
->eax
);