/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <asm/processor.h>
#include <asm/mwait.h>
#include <linux/kvm_host.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
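
/*
 * Architectural reset value of IA32_PAT: reading from the low byte up,
 * PA0..PA3 are WB, WT, UC- and UC, repeated for PA4..PA7.
 */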
#define MSR_IA32_CR_PAT_DEFAULT	0x0007040600070406ULL

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.pending = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.pending ||
		vcpu->arch.nmi_injected;
}
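
/*
 * #BP (raised by INT3) and #OF (raised by INTO) are the two software
 * exception vectors; when reinjecting them the vendor code must, for
 * example on VMX, supply the faulting instruction's length rather than
 * replaying a hardware-generated event.
 */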
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}
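
/*
 * EFER.LMA only says the vCPU is in IA-32e mode; the current code segment
 * can still be a 32-bit compatibility-mode segment, so CS.L is checked too.
 */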
static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return (vcpu->arch.efer & EFER_LMA) &&
		kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
#else
	return 0;
#endif
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}
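
/*
 * Callers such as the F() macro in cpuid.c pass X86_FEATURE_* constants,
 * which encode "word * 32 + bit"; masking with 31 keeps only the bit
 * position within the corresponding 32-bit CPUID register.
 */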
static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}
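
/*
 * Guest virtual addresses are 57 bits wide when 5-level paging (CR4.LA57)
 * is enabled, and 48 bits wide under classic 4-level paging.
 */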
static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}
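
/*
 * A canonical address has bits [63:vaddr_bits-1] all equal to bit
 * (vaddr_bits - 1). The shift pair below sign-extends that bit: with
 * 48-bit addresses, for example, 0x0000800000000000 becomes
 * 0xffff800000000000.
 */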
static inline u64 get_canonical(u64 la, u8 vaddr_bits)
{
	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
#else
	return false;
#endif
}

static inline bool emul_is_noncanonical_address(u64 la,
						struct x86_emulate_ctxt *ctxt)
{
#ifdef CONFIG_X86_64
	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
#else
	return false;
#endif
}

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & 1))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}
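
/*
 * An odd memslots generation means an update is in progress, which is why
 * vcpu_cache_mmio_info() above bails out early: the cached translation could
 * otherwise go stale once the update completes. vcpu_match_mmio_gen() below
 * likewise invalidates the cache whenever the generation has moved on.
 */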
static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}
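
/*
 * Outside 64-bit mode the guest's general-purpose registers are effectively
 * 32 bits wide, so the "l" accessors below truncate on read and write.
 */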
static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
				       enum kvm_reg reg,
				       unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write(vcpu, reg, val);
}
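
/*
 * A quirk is considered present unless userspace disabled it through
 * KVM_CAP_DISABLE_QUIRKS, e.g.
 * kvm_check_has_quirk(kvm, KVM_X86_QUIRK_CD_NW_CLEARED) in the SVM code.
 */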
static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);

#define KVM_SUPPORTED_XCR0	(XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
				| XFEATURE_MASK_PKRU)
extern u64 host_xcr0;

extern u64 kvm_supported_xcr0(void);

extern unsigned int min_timer_period_us;

extern unsigned int lapic_timer_advance_ns;

extern struct static_key kvm_no_apic_vcpu;

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}
263 /* Same "calling convention" as do_div:
264 * - divide (n << 32) by base
268 #define do_shl32_div32(n, base) \
271 asm("divl %2" : "=a" (__quot), "=d" (__rem) \
272 : "rm" (base), "0" (0), "1" ((u32) n)); \

static inline bool kvm_mwait_in_guest(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_MWAIT))
		return false;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		/* All AMD CPUs have a working MWAIT implementation */
		return true;
	case X86_VENDOR_INTEL:
		/* Handle Intel below */
		break;
	default:
		return false;
	}

	/*
	 * Intel CPUs without CPUID5_ECX_INTERRUPT_BREAK are problematic as
	 * they would allow guest to stop the CPU completely by disabling
	 * interrupts then invoking MWAIT.
	 */
	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return false;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	if (!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
		return false;

	return true;
}

#endif