#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
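
/*
 * With PSE36, bits 13..16 of a 32-bit PDE carry physical-address bits
 * 32..35 of a 4MB page.  A minimal sketch of how this mask is consumed
 * (mmu.c has a helper along these lines; shown here only for
 * illustration, not declared by this header):
 *
 *	static inline u64 pse36_gfn_delta(u32 gpte)
 *	{
 *		int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
 *
 *		return (gpte & PT32_DIR_PSE36_MASK) << shift;
 *	}
 */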

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_PDPE_LEVEL 3
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_RSVD_MASK (1U << 3)
#define PFERR_FETCH_MASK (1U << 4)
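
/*
 * Hardware reports a page fault as a combination of these bits; e.g. a
 * guest user-mode write to a present but read-only page arrives as
 * (PFERR_PRESENT_MASK | PFERR_WRITE_MASK | PFERR_USER_MASK), i.e. 0x7.
 */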

int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);

/*
 * Return values of handle_mmio_page_fault_common:
 * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
 *			directly.
 * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
 *			fault path update the mmio spte.
 * RET_MMIO_PF_RETRY: let CPU fault again on the address.
 * RET_MMIO_PF_BUG: bug is detected.
 */
enum {
	RET_MMIO_PF_EMULATE = 1,
	RET_MMIO_PF_INVALID = 2,
	RET_MMIO_PF_RETRY = 0,
	RET_MMIO_PF_BUG = -1
};
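
/*
 * Callers are expected to dispatch on these values.  A minimal sketch,
 * with the per-caller actions left abstract (this is not a verbatim
 * caller from the tree):
 *
 *	ret = handle_mmio_page_fault_common(vcpu, addr, direct);
 *	if (ret == RET_MMIO_PF_EMULATE)
 *		... emulate the MMIO access ...
 *	else if (ret == RET_MMIO_PF_INVALID)
 *		... take the normal #PF path to refresh the spte ...
 *	else if (ret == RET_MMIO_PF_RETRY)
 *		... re-enter the guest and let it fault again ...
 *	else
 *		... RET_MMIO_PF_BUG: report the bug and bail out ...
 */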

int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
		bool execonly);

static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
}
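
/*
 * Shadow-page allocation is throttled against this count; a hedged
 * sketch of the usual pattern (the reclaim step is abbreviated, not a
 * verbatim caller):
 *
 *	if (kvm_mmu_available_pages(vcpu->kvm) < KVM_MIN_FREE_MMU_PAGES)
 *		... zap inactive shadow pages before allocating ...
 */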

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}
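
/*
 * The vcpu-entry path is expected to call this before entering the
 * guest so that a valid root is in place; sketch of the usual pattern:
 *
 *	r = kvm_mmu_reload(vcpu);
 *	if (unlikely(r))
 *		goto out;
 *
 * where a nonzero r means the root could not be (re)built, e.g. -ENOMEM.
 */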

static inline int is_present_gpte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

/*
 * Will a fault with a given page-fault error code (pfec) cause a permission
 * fault with the given access (in ACC_* format)?
 */
static inline bool permission_fault(struct kvm_mmu *mmu, unsigned pte_access,
				    unsigned pfec)
{
	return (mmu->permissions[pfec >> 1] >> pte_access) & 1;
}
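
/*
 * Worked example: bit 0 (PFERR_PRESENT_MASK) carries no information for
 * a permission check, so pfec is shifted right by one and only bits 1..4
 * index the precomputed table.  For a user-mode write,
 * pfec = PFERR_WRITE_MASK | PFERR_USER_MASK = 0x6, so the lookup reads
 * bit pte_access of mmu->permissions[0x3].
 */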

void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);

#endif