/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

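/*
 * CR0/CR4 bits that may be guest-owned, i.e. that the guest can modify
 * without triggering a VM-Exit.  When any of these bits are guest-owned,
 * the value cached in vcpu->arch.cr0/cr4 can be stale and must be
 * refreshed from hardware before it is consumed.
 */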
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)

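/*
 * Build kvm_<lname>_read()/kvm_<lname>_write() accessors for each GPR.
 * These operate directly on the register cache in vcpu->arch.regs; GPR
 * state is expected to be saved on every VM-Exit, so no availability
 * check is needed here (unlike RIP/RSP, which should be accessed via
 * kvm_register_read()/kvm_register_write()).
 */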
#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									      \
	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
}									      \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
						unsigned long val)	      \
{									      \
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
}
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

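/*
 * Lazy register caching: regs_avail tracks which registers hold a valid
 * value in the software cache (some registers must first be read out of
 * the VMCS/VMCB), and regs_dirty tracks which cached values KVM has
 * modified and must propagate back to hardware before the next VM-Enter.
 */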
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

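/*
 * If the target register is not marked available, pull it from hardware
 * via the vendor's cache_reg() callback before reading the cache.
 */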
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;

	if (!kvm_register_is_available(vcpu, reg))
		kvm_x86_ops.cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

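/*
 * Writes only touch the cache; marking the register dirty ensures the
 * new value is flushed to hardware before the next VM-Enter.
 */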
static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg,
				      unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RSP, val);
}

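/*
 * PDPTRs (page-directory-pointer-table entries) are used by the guest
 * when PAE paging is active; on SVM, refreshing them can sleep, hence
 * the might_sleep() below.
 */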
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

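/*
 * Reading CR0/CR4 through a mask avoids decaching guest-owned bits from
 * hardware unless the caller actually asked for one of them.
 */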
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if (tmask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops.decache_cr0_guest_bits(vcpu);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if (tmask & vcpu->arch.cr4_guest_owned_bits)
		kvm_x86_ops.decache_cr4_guest_bits(vcpu);
	return vcpu->arch.cr4 & mask;
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

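/*
 * Assemble the 64-bit EDX:EAX pair produced by instructions such as
 * RDMSR and RDTSC (EAX holds the low 32 bits, EDX the high 32 bits).
 */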
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}

static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif