/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
	#define PT_NON_PTE_COPY_MASK PT64_NON_PTE_COPY_MASK
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
	#define PT_NON_PTE_COPY_MASK PT32_NON_PTE_COPY_MASK
#else
	#error Invalid PTTYPE value
#endif
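
/*
 * A sketch of the intended double inclusion (assumed here to live in the
 * including file, e.g. mmu.c): the header is pulled in once per pte size,
 * and each pass stamps out a complete walker/fault-handler family via
 * FNAME(), e.g. paging64_page_fault() and paging32_page_fault().
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */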
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	pt_element_t *table;
	pt_element_t inherited_ar;
};
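
/*
 * Typical walker lifecycle, as a sketch inferred from the call sites
 * below (all names are the ones defined in this file):
 *
 *	struct guest_walker walker;
 *
 *	FNAME(init_walker)(&walker, vcpu);   // kmaps the guest root table
 *	pte = FNAME(fetch_guest)(vcpu, &walker, level, addr);
 *	FNAME(release_walker)(&walker);      // kunmaps the current table
 */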
static void FNAME(init_walker)(struct guest_walker *walker,
			       struct kvm_vcpu *vcpu)
{
	hpa_t hpa;
	struct kvm_memory_slot *slot;

	walker->level = vcpu->mmu.root_level;
	slot = gfn_to_memslot(vcpu->kvm,
			      (vcpu->cr3 & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	hpa = safe_gpa_to_hpa(vcpu, vcpu->cr3 & PT64_BASE_ADDR_MASK);
	walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);

	ASSERT((!kvm_arch_ops->is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);

	walker->table = (pt_element_t *)((unsigned long)walker->table |
		(unsigned long)(vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)));
	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;
}
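
/*
 * Note on the pointer OR trick above (my reading of the code): in PAE
 * mode cr3 is only 32-byte aligned, so the low cr3 bits select which
 * pdpte quadruple to use within the page; folding them into the kmapped
 * table pointer preserves that sub-page offset.
 */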
static void FNAME(release_walker)(struct guest_walker *walker)
{
	kunmap_atomic(walker->table, KM_USER0);
}
static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
			   u64 *shadow_pte, u64 access_bits)
{
	ASSERT(*shadow_pte == 0);
	access_bits &= guest_pte;
	*shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
	set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
		       guest_pte & PT_DIRTY_MASK, access_bits);
}
static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
			   u64 *shadow_pte, u64 access_bits,
			   int index)
{
	gpa_t gaddr;

	ASSERT(*shadow_pte == 0);
	access_bits &= guest_pde;
	gaddr = (guest_pde & PT_DIR_BASE_ADDR_MASK) + PAGE_SIZE * index;
	if (PTTYPE == 32 && is_cpuid_PSE36())
		gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
			(32 - PT32_DIR_PSE36_SHIFT);
	*shadow_pte = (guest_pde & (PT_NON_PTE_COPY_MASK | PT_GLOBAL_MASK)) |
		      ((guest_pde & PT_DIR_PAT_MASK) >>
		       (PT_DIR_PAT_SHIFT - PT_PAT_SHIFT));
	set_pte_common(vcpu, shadow_pte, gaddr,
		       guest_pde & PT_DIRTY_MASK, access_bits);
}
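
/*
 * Worked example for the PSE36 path above (my arithmetic, not from the
 * original comments): assuming PT32_DIR_PSE36_SHIFT is 13, as with PSE36
 * hardware pdes, the masked bits of a 4MB pde are shifted left by
 * 32 - 13 = 19, which moves pde bit 13 up to physical address bit 32 and
 * lets a 32-bit guest map memory above 4GB.
 */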
/*
 * Fetch a guest pte from a specific level in the paging hierarchy.
 */
static pt_element_t *FNAME(fetch_guest)(struct kvm_vcpu *vcpu,
					struct guest_walker *walker,
					int level,
					u64 addr)
{
	ASSERT(level > 0 && level <= walker->level);

	for (;;) {
		int index = PT_INDEX(addr, walker->level);
		hpa_t paddr;

		ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
		       ((unsigned long)&walker->table[index] & PAGE_MASK));
		if (level == walker->level ||
		    !is_present_pte(walker->table[index]) ||
		    (walker->level == PT_DIRECTORY_LEVEL &&
		     (walker->table[index] & PT_PAGE_SIZE_MASK) &&
		     (PTTYPE == 64 || is_pse(vcpu))))
			return &walker->table[index];
		if (walker->level != 3 || kvm_arch_ops->is_long_mode(vcpu))
			walker->inherited_ar &= walker->table[index];
		paddr = safe_gpa_to_hpa(vcpu,
					walker->table[index] & PT_BASE_ADDR_MASK);
		kunmap_atomic(walker->table, KM_USER0);
		walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT),
					    KM_USER0);
		--walker->level;
	}
}
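
/*
 * Note: the pointer returned by fetch_guest() points into the guest page
 * table currently kmapped through walker->table, so it is only valid
 * until the next fetch_guest() descent or the release_walker() call.
 */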
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker)
{
	hpa_t shadow_addr;
	int level;
	u64 *prev_shadow_ent = NULL;

	shadow_addr = vcpu->mmu.root_hpa;
	level = vcpu->mmu.shadow_root_level;

	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		pt_element_t *guest_ent;

		if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				return shadow_ent;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			prev_shadow_ent = shadow_ent;
			continue;
		}

		if (PTTYPE == 32 && level > PT32_ROOT_LEVEL) {
			ASSERT(level == PT32E_ROOT_LEVEL);
			guest_ent = FNAME(fetch_guest)(vcpu, walker,
						       PT32_ROOT_LEVEL, addr);
		} else
			guest_ent = FNAME(fetch_guest)(vcpu, walker,
						       level, addr);

		if (!is_present_pte(*guest_ent))
			return NULL;

		/* Don't set accessed bit on PAE PDPTRs */
		if (vcpu->mmu.root_level != 3 || walker->level != 3)
			*guest_ent |= PT_ACCESSED_MASK;

		if (level == PT_PAGE_TABLE_LEVEL) {
			if (walker->level == PT_DIRECTORY_LEVEL) {
				if (prev_shadow_ent)
					*prev_shadow_ent |= PT_SHADOW_PS_MARK;
				FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
					       walker->inherited_ar,
					       PT_INDEX(addr, PT_PAGE_TABLE_LEVEL));
			} else {
				ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
				FNAME(set_pte)(vcpu, *guest_ent, shadow_ent,
					       walker->inherited_ar);
			}
			return shadow_ent;
		}

		shadow_addr = kvm_mmu_alloc_page(vcpu, shadow_ent);
		if (!VALID_PAGE(shadow_addr))
			return ERR_PTR(-ENOMEM);
		if (!kvm_arch_ops->is_long_mode(vcpu) && level == 3)
			*shadow_ent = shadow_addr |
				(*guest_ent & (PT_PRESENT_MASK | PT_PWT_MASK |
					       PT_PCD_MASK));
		else {
			*shadow_ent = shadow_addr |
				(*guest_ent & PT_NON_PTE_COPY_MASK);
			*shadow_ent |= (PT_WRITABLE_MASK | PT_USER_MASK);
		}
		prev_shadow_ent = shadow_ent;
	}
}
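
/*
 * The loop above walks the shadow and guest hierarchies in lockstep:
 * present shadow entries are followed directly, while the first
 * non-present shadow entry is filled from the matching guest entry,
 * allocating intermediate shadow pages with kvm_mmu_alloc_page() as
 * needed.
 */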
/*
 * The guest faulted for write.  We need to
 *
 * - check write permissions
 * - update the guest pte dirty bit
 * - update our own dirty page tracking structures
 */
static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
			       u64 *shadow_ent,
			       struct guest_walker *walker,
			       gva_t addr,
			       int user)
{
	pt_element_t *guest_ent;
	int writable_shadow;
	gfn_t gfn;

	if (is_writeble_pte(*shadow_ent))
		return 0;

	writable_shadow = *shadow_ent & PT_SHADOW_WRITABLE_MASK;
	if (user) {
		/*
		 * User mode access.  Fail if it's a kernel page or a
		 * read-only page.
		 */
		if (!(*shadow_ent & PT_SHADOW_USER_MASK) || !writable_shadow)
			return 0;
		ASSERT(*shadow_ent & PT_USER_MASK);
	} else {
		/*
		 * Kernel mode access.  Fail if it's a read-only page and
		 * supervisor write protection is enabled.
		 */
		if (!writable_shadow) {
			if (is_write_protection(vcpu))
				return 0;
			*shadow_ent &= ~PT_USER_MASK;
		}
	}

	guest_ent = FNAME(fetch_guest)(vcpu, walker, PT_PAGE_TABLE_LEVEL, addr);

	if (!is_present_pte(*guest_ent)) {
		*shadow_ent = 0;
		return 0;
	}

	gfn = (*guest_ent & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
	mark_page_dirty(vcpu->kvm, gfn);
	*shadow_ent |= PT_WRITABLE_MASK;
	*guest_ent |= PT_DIRTY_MASK;

	return 1;
}
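
/*
 * This is the write-protect based dirty logging path: shadow ptes start
 * out read only, so the first guest write to a page lands here;
 * mark_page_dirty() records the frame in the memslot dirty bitmap and
 * the shadow pte is made writable, so later writes run without faulting.
 */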
/*
 * Page fault handler.  There are several causes for a page fault:
 * - there is no shadow pte for the guest pte
 * - write access through a shadow pte marked read only so that we can set
 *   the dirty bit
 * - write access to a shadow pte marked read only so we can update the page
 *   dirty bitmap, when userspace requests it
 * - mmio access; in this case we will never install a present shadow pte
 * - normal guest page fault due to the guest pte marked not present, not
 *   writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int pte_present = error_code & PFERR_PRESENT_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int fixed;

	/*
	 * Look up the shadow pte for the faulting address.
	 */
	for (;;) {
		FNAME(init_walker)(&walker, vcpu);
		shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
		if (IS_ERR(shadow_pte)) {  /* must be -ENOMEM */
			nonpaging_flush(vcpu);
			FNAME(release_walker)(&walker);
			continue;
		}
		break;
	}

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!shadow_pte) {
		inject_page_fault(vcpu, addr, error_code);
		FNAME(release_walker)(&walker);
		return 0;
	}

	/*
	 * Update the shadow pte.
	 */
	if (write_fault)
		fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
					    user_fault);
	else
		fixed = fix_read_pf(shadow_pte);

	FNAME(release_walker)(&walker);

	/*
	 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (is_io_pte(*shadow_pte)) {
		if (may_access(*shadow_pte, write_fault, user_fault))
			return 1;
		pgprintk("%s: io work, no access\n", __FUNCTION__);
		inject_page_fault(vcpu, addr,
				  error_code | PFERR_PRESENT_MASK);
		return 0;
	}

	/*
	 * pte not present, guest page fault.
	 */
	if (pte_present && !fixed) {
		inject_page_fault(vcpu, addr, error_code);
		return 0;
	}

	return 0;
}
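
/*
 * Callers are assumed to treat a return value of 1 as "emulate the
 * faulting instruction" (the mmio case) and 0 as "resume the guest",
 * either because the shadow pte was fixed up or because the fault was
 * reflected back into the guest.
 */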
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	pt_element_t guest_pte;
	gpa_t gpa;

	FNAME(init_walker)(&walker, vcpu);
	guest_pte = *FNAME(fetch_guest)(vcpu, &walker, PT_PAGE_TABLE_LEVEL,
					vaddr);
	FNAME(release_walker)(&walker);

	if (!is_present_pte(guest_pte))
		return UNMAPPED_GVA;

	if (walker.level == PT_DIRECTORY_LEVEL) {
		ASSERT((guest_pte & PT_PAGE_SIZE_MASK));
		ASSERT(PTTYPE == 64 || is_pse(vcpu));

		gpa = (guest_pte & PT_DIR_BASE_ADDR_MASK) | (vaddr &
			(PT_LEVEL_MASK(PT_PAGE_TABLE_LEVEL) | ~PAGE_MASK));

		if (PTTYPE == 32 && is_cpuid_PSE36())
			gpa |= (guest_pte & PT32_DIR_PSE36_MASK) <<
				(32 - PT32_DIR_PSE36_SHIFT);
	} else {
		gpa = (guest_pte & PT_BASE_ADDR_MASK);
		gpa |= (vaddr & ~PAGE_MASK);
	}

	return gpa;
}
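
/*
 * Worked example for the 4MB-page path above (my numbers, not from the
 * original source): with PSE, a guest vaddr of 0xc0123456 takes gpa bits
 * 31:22 from the pde and bits 21:0 from the vaddr, i.e.
 * gpa = (pde & PT_DIR_BASE_ADDR_MASK) | 0x123456.
 */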
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_PTE_COPY_MASK
#undef PT_NON_PTE_COPY_MASK
#undef PT_DIR_BASE_ADDR_MASK