/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)
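
/*
 * Guest GPRs are cached in vcpu->arch.regs and fetched lazily: a register is
 * only read back from hardware state (via kvm_x86_ops->cache_reg) when its
 * bit in regs_avail is clear; regs_dirty marks registers that must be written
 * back before the next VM entry.
 */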
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg)
{
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}
static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      enum kvm_reg reg,
				      unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}
static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, (enum kvm_reg)VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}
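
/*
 * Control register reads follow the same lazy scheme: CR bits the guest is
 * allowed to own live in hardware and are only decached into vcpu->arch.cr*
 * when a caller actually asks for them.
 */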
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if (tmask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
	return vcpu->arch.cr0 & mask;
}
static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}
static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if (tmask & vcpu->arch.cr4_guest_owned_bits)
		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	return vcpu->arch.cr4 & mask;
}
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
		kvm_x86_ops->decache_cr3(vcpu);
	return vcpu->arch.cr3;
}
static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
}
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}
static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}
}
static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}
static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif