#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE)
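
/*
 * Guest register state is cached lazily: regs_avail marks registers whose
 * cached value in vcpu->arch.regs is current, regs_dirty marks registers
 * that must be written back to hardware before the next guest entry.
 */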
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg)
{
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}
static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      enum kvm_reg reg,
				      unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}
static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}
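
/*
 * PAE page-directory pointers are tracked under VCPU_EXREG_PDPTR and are
 * refetched via kvm_x86_ops->cache_reg() if the cached copy is not valid.
 */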
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}
static inline u64 kvm_pdptr_read_mmu(struct kvm_vcpu *vcpu,
				     struct kvm_mmu *mmu, int index)
{
	load_pdptrs(vcpu, mmu, mmu->get_cr3(vcpu));

	return mmu->pdptrs[index];
}
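
/*
 * CR0/CR4 bits listed in KVM_POSSIBLE_CR*_GUEST_BITS may be guest-owned,
 * i.e. changed by the guest without a VM exit; such bits are read back
 * (decached) from hardware before the cached value is used.
 */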
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if (tmask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
	return vcpu->arch.cr0 & mask;
}
static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}
static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if (tmask & vcpu->arch.cr4_guest_owned_bits)
		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	return vcpu->arch.cr4 & mask;
}
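
/*
 * CR3 is cached as an "extra" register (VCPU_EXREG_CR3); decache it from
 * hardware if the cached value is not marked available.
 */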
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
		kvm_x86_ops->decache_cr3(vcpu);
	return vcpu->arch.cr3;
}
static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}
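
/* Combine the low 32 bits of RDX and RAX into one 64-bit value (EDX:EAX). */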
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
}
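
/* HF_GUEST_MASK in hflags tracks whether the vCPU is running a nested guest. */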
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}
static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;
}
static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

#endif