/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
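/*
 * (Illustration) mmu.c instantiates both variants by including this
 * header twice, along these lines:
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 * so every FNAME() function below exists once as paging64_<name> and
 * once as paging32_<name>.
 */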
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif
#define gpte_to_gfn FNAME(gpte_to_gfn)
#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	u32 error_code;
};
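/*
 * Example (sketch): for a 4-level 64-bit guest, walk_addr() below fills
 * these arrays from walker->level - 1 downward, so table_gfn[3] holds the
 * frame of the root (PML4) table and table_gfn[0] the frame of the
 * last-level page table; ptes[] and pte_gpa[] record the pte seen and its
 * guest-physical address at each level.
 */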
static gfn_t gpte_to_gfn(pt_element_t gpte)
{
	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
{
	return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
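/*
 * Example (sketch): for a large (2MB/4MB) guest page, walk_addr() below
 * combines this helper with the low-level index, roughly
 *
 *	gfn = gpte_to_gfn_pde(pde) + PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
 *
 * so the pde supplies the large-frame base and the faulting address
 * supplies the 4K sub-frame within it.
 */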
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
			 gfn_t table_gfn, unsigned index,
			 pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(kvm, table_gfn);
	up_read(&current->mm->mmap_sem);

	table = kmap_atomic(page, KM_USER0);

	ret = CMPXCHG(&table[index], orig_pte, new_pte);

	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}
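/*
 * Usage note: walk_addr() below uses this helper to set the accessed and
 * dirty bits in guest ptes atomically; a true return means the guest pte
 * changed under us, and the caller restarts the walk (goto walk) rather
 * than operate on stale data.
 */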
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
	if (is_nx(vcpu))
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}
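/*
 * Worked example (assuming, as in mmu.c, ACC_EXEC_MASK == 1 and
 * PT64_NX_SHIFT == 63): a gpte with W=1, U=1, NX=1 first yields
 * access = PT_WRITABLE_MASK | PT_USER_MASK | ACC_EXEC_MASK; the shift
 * then moves the NX bit down onto bit 0, so the ~-mask clears exactly
 * the exec permission, leaving write+user.
 */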
/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index, pt_access, pte_access;
	gpa_t pte_gpa;

	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
walk:
	walker->level = vcpu->arch.mmu.root_level;
	pte = vcpu->arch.cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
		if (!is_present_pte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = gfn_to_gpa(table_gfn);
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;
		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
			 walker->level - 1, table_gfn);

		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));

		if (!is_present_pte(pte))
			goto not_present;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(pte & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			pte |= PT_ACCESSED_MASK;
		}

		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = gpte_to_gfn(pte);
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (pte & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = gpte_to_gfn_pde(pte);
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			if (PTTYPE == 32 && is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);
			break;
		}

		pt_access = pte_access;
		--walker->level;
	}

	if (write_fault && !is_dirty_pte(pte)) {
		bool ret;

		mark_page_dirty(vcpu->kvm, table_gfn);
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
			    pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		pte |= PT_DIRTY_MASK;
		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __FUNCTION__, (u64)pte, pt_access, pte_access);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	return 0;
}
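/*
 * Example of the resulting error_code: a guest user-mode write to a
 * present but read-only page takes the access_error path above and
 * leaves PFERR_PRESENT_MASK | PFERR_WRITE_MASK | PFERR_USER_MASK in the
 * walker, which page_fault() below injects into the guest unchanged.
 */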
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte, int bytes,
			      int offset_in_pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	struct page *npage;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!offset_in_pte && !is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	if (bytes < sizeof(pt_element_t))
		return;
	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
		return;
	npage = vcpu->arch.update_pte.page;
	if (!npage)
		return;
	get_page(npage);
	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
		     gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte), npage);
}
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker,
			 int user_fault, int write_fault, int *ptwrite,
			 struct page *page)
{
	hpa_t shadow_addr;
	int level;
	u64 *shadow_ent;
	unsigned access = walker->pt_access;

	if (!is_present_pte(walker->ptes[walker->level - 1]))
		return NULL;

	shadow_addr = vcpu->arch.mmu.root_hpa;
	level = vcpu->arch.mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}

	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		struct kvm_mmu_page *shadow_page;
		u64 shadow_pte;
		int metaphysical;
		gfn_t table_gfn;

		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		if (level == PT_PAGE_TABLE_LEVEL)
			break;
		if (is_shadow_present_pte(*shadow_ent)) {
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			continue;
		}

		if (level - 1 == PT_PAGE_TABLE_LEVEL
		    && walker->level == PT_DIRECTORY_LEVEL) {
			metaphysical = 1;
			if (!is_dirty_pte(walker->ptes[level - 1]))
				access &= ~ACC_WRITE_MASK;
			table_gfn = gpte_to_gfn(walker->ptes[level - 1]);
		} else {
			metaphysical = 0;
			table_gfn = walker->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       metaphysical, access,
					       shadow_ent);
		if (!metaphysical) {
			int r;
			pt_element_t curr_pte;
			r = kvm_read_guest_atomic(vcpu->kvm,
						  walker->pte_gpa[level - 2],
						  &curr_pte, sizeof(curr_pte));
			if (r || curr_pte != walker->ptes[level - 2]) {
				kvm_release_page_clean(page);
				return NULL;
			}
		}
		shadow_addr = __pa(shadow_page->spt);
		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
	}

	mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
		     user_fault, write_fault,
		     walker->ptes[walker->level-1] & PT_DIRTY_MASK,
		     ptwrite, walker->gfn, page);

	return shadow_ent;
}
/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;
	struct page *page;

	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	down_read(&vcpu->kvm->slots_lock);
	/*
	 * Look up the shadow pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __FUNCTION__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
		up_read(&vcpu->kvm->slots_lock);
		return 0;
	}

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, walker.gfn);
	up_read(&current->mm->mmap_sem);

	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  &write_pt, page);
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	/*
	 * mmio: emulate if accessible, otherwise its a guest fault.
	 */
	if (shadow_pte && is_io_pte(*shadow_pte)) {
		spin_unlock(&vcpu->kvm->mmu_lock);
		up_read(&vcpu->kvm->slots_lock);
		return 1;
	}

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");
	spin_unlock(&vcpu->kvm->mmu_lock);
	up_read(&vcpu->kvm->slots_lock);

	return write_pt;
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}
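/*
 * mmu.c installs this as the per-context translation hook (e.g.
 * context->gva_to_gpa = paging64_gva_to_gpa for 64-bit guests), so the
 * emulator can translate a guest-virtual address outside the fault
 * path; callers check for UNMAPPED_GVA on failure.
 */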
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, offset = 0, r = 0;
	pt_element_t pt;

	if (sp->role.metaphysical
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		gpa_t pte_gpa = gfn_to_gpa(sp->gfn);
		pte_gpa += (i+offset) * sizeof(pt_element_t);

		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &pt,
					  sizeof(pt_element_t));
		if (r || is_present_pte(pt))
			sp->spt[i] = shadow_trap_nonpresent_pte;
		else
			sp->spt[i] = shadow_notrap_nonpresent_pte;
	}
}
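/*
 * Note on the two nonpresent patterns used above (a sketch of the
 * intent, as set up by mmu.c): shadow_trap_nonpresent_pte makes an
 * access exit to KVM so the mapping can be built, and is used when the
 * guest pte is (or may be) present; shadow_notrap_nonpresent_pte marks
 * slots whose guest pte is known not-present, so with the
 * bypass_guest_pf optimization the fault can be reflected straight
 * into the guest.
 */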
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_pde
#undef CMPXCHG