#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H
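
/*
 * CR0/CR4 bits that the vendor module may leave under direct guest
 * control ("guest-owned"): the guest can modify them without a VM exit,
 * so KVM's cached copies of CR0/CR4 can go stale for exactly these bits.
 * kvm_read_cr0_bits() and kvm_read_cr4_bits() below re-read them from
 * hardware on demand.
 */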
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS \
        (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
         | X86_CR4_OSXMMEXCPT | X86_CR4_PGE)
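
/*
 * Guest GPRs are cached in vcpu->arch.regs and read back from hardware
 * lazily: regs_avail is a bitmap of registers whose cached value is
 * current, and the vendor module's cache_reg() callback fills in a
 * missing one on first use.
 */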
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
                                              enum kvm_reg reg)
{
        if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
                kvm_x86_ops->cache_reg(vcpu, reg);

        return vcpu->arch.regs[reg];
}
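
/*
 * Writes go to the cache only; marking the register in regs_dirty tells
 * the vendor module to flush the new value to hardware before the next
 * VM entry, and marking it in regs_avail keeps later reads from
 * re-fetching the stale hardware value.
 */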
static inline void kvm_register_write(struct kvm_vcpu *vcpu,
                                      enum kvm_reg reg,
                                      unsigned long val)
{
        vcpu->arch.regs[reg] = val;
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
        return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
        kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}
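
/*
 * Typical use of the RIP accessors when emulation skips over an
 * instruction (illustrative only; "insn_len" is a stand-in for however
 * many bytes the caller decoded):
 *
 *      kvm_rip_write(vcpu, kvm_rip_read(vcpu) + insn_len);
 */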

static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
        might_sleep();  /* on svm: cache_reg() may read PDPTEs from guest memory */

        if (!test_bit(VCPU_EXREG_PDPTR,
                      (unsigned long *)&vcpu->arch.regs_avail))
                kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);

        return vcpu->arch.walk_mmu->pdptrs[index];
}
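
/*
 * Read CR0 through the cache.  Only bits that are both requested in
 * @mask and currently guest-owned can be stale, so hardware is consulted
 * (via decache_cr0_guest_bits()) just for that intersection.
 */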
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
        ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
        if (tmask & vcpu->arch.cr0_guest_owned_bits)
                kvm_x86_ops->decache_cr0_guest_bits(vcpu);
        return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
        ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
        if (tmask & vcpu->arch.cr4_guest_owned_bits)
                kvm_x86_ops->decache_cr4_guest_bits(vcpu);
        return vcpu->arch.cr4 & mask;
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
        if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
                kvm_x86_ops->decache_cr3(vcpu);
        return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, ~0UL);
}
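
/*
 * Assemble the EDX:EAX register pair into a u64, as produced by
 * instructions such as RDMSR and RDTSC; "& -1u" keeps only the low
 * 32 bits of each full-width GPR.
 */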
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
        return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
                | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
}
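
/*
 * PKRU is not part of the ordinary GPR cache; the vendor module reports
 * its current value through a dedicated callback.
 */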
static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu)
{
        return kvm_x86_ops->get_pkru(vcpu);
}
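
/*
 * hflags tracks extra processor state: HF_GUEST_MASK is set while the
 * vCPU runs a nested (L2) guest, and HF_SMM_MASK while it is in System
 * Management Mode.
 */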
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hflags &= ~HF_GUEST_MASK;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.hflags & HF_GUEST_MASK;
}

static inline bool is_smm(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif