/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif
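/*
 * For reference, mmu.c instantiates this template once per pte size,
 * roughly as follows (a sketch; the authoritative #include sites are
 * in mmu.c):
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 * so FNAME(walk_addr), for example, expands to paging64_walk_addr in
 * the first inclusion and paging32_walk_addr in the second.
 */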
#define gpte_to_gfn FNAME(gpte_to_gfn)
#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	u32 error_code;
};
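/*
 * The arrays are indexed by level - 1, so for a 4-level 64-bit guest
 * table_gfn[3] holds the gfn of the top-level table and table_gfn[0]
 * the gfn of the last-level page table, mirroring walker->level as the
 * walk in walk_addr() below descends.
 */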
static gfn_t gpte_to_gfn(pt_element_t gpte)
{
	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
{
	return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
				gfn_t table_gfn, unsigned index,
				pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	page = gfn_to_page(kvm, table_gfn);

	table = kmap_atomic(page, KM_USER0);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}
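/*
 * Callers use the return value to detect a lost race with the guest:
 * a true return means the gpte changed between the walk and the
 * update, in which case the caller restarts the walk rather than
 * acting on a stale pte (see the accessed/dirty updates in walk_addr
 * below).
 */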
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
	if (is_nx(vcpu))
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}
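/*
 * The shift trick above relies on ACC_EXEC_MASK being bit 0 and
 * PT64_NX_SHIFT being 63 (the NX bit): shifting the gpte right by 63
 * drops the guest's NX bit onto the exec-permission bit, so e.g. a
 * writable, user, NX pte yields write and user access with exec
 * permission cleared.
 */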
/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index, pt_access, pte_access;
	gpa_t pte_gpa;
	int rsvd_fault = 0;

	pgprintk("%s: addr %lx\n", __func__, addr);
walk:
	walker->level = vcpu->arch.mmu.root_level;
	pte = vcpu->arch.cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
		if (!is_present_pte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = gfn_to_gpa(table_gfn);
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;
		pgprintk("%s: table_gfn[%d] %lx\n", __func__,
			 walker->level - 1, table_gfn);

		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
		if (!is_present_pte(pte))
			goto not_present;

		rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level);
		if (rsvd_fault)
			goto access_error;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif
		if (!(pte & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			pte |= PT_ACCESSED_MASK;
		}
		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = gpte_to_gfn(pte);
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (pte & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = gpte_to_gfn_pde(pte);
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			if (PTTYPE == 32 && is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);
			break;
		}

		pt_access = pte_access;
		--walker->level;
	}
	if (write_fault && !is_dirty_pte(pte)) {
		bool ret;

		mark_page_dirty(vcpu->kvm, table_gfn);
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
					  pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		pte |= PT_DIRTY_MASK;
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pt_access, pte_access);
	return 1;
not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	if (rsvd_fault)
		walker->error_code |= PFERR_RSVD_MASK;
	return 0;
}
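/*
 * For a 64-bit guest, PT_INDEX() selects 9 bits of the virtual address
 * per level, roughly:
 *
 *	index = (addr >> (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS))
 *		& ((1 << PT64_LEVEL_BITS) - 1);
 *
 * so level 4 indexes the top-level table, level 3 the next directory,
 * level 2 the page directory and level 1 the page table.
 */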
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	pfn_t pfn;
	int largepage = vcpu->arch.update_pte.largepage;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
		return;
	pfn = vcpu->arch.update_pte.pfn;
	if (is_error_pfn(pfn))
		return;
	if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
		return;
	kvm_get_pfn(pfn);
	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
		     gpte & PT_DIRTY_MASK, NULL, largepage,
		     gpte_to_gfn(gpte), pfn, true);
}
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *gw,
			 int user_fault, int write_fault, int largepage,
			 int *ptwrite, pfn_t pfn)
{
	unsigned access = gw->pt_access;
	struct kvm_mmu_page *shadow_page;
	u64 spte, *sptep = NULL;
	int direct;
	gfn_t table_gfn;
	int r;
	int level;
	pt_element_t curr_pte;
	struct kvm_shadow_walk_iterator iterator;
	if (!is_present_pte(gw->ptes[gw->level - 1]))
		return NULL;

	for_each_shadow_entry(vcpu, addr, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;
		if (level == PT_PAGE_TABLE_LEVEL
		    || (largepage && level == PT_DIRECTORY_LEVEL)) {
			mmu_set_spte(vcpu, sptep, access,
				     gw->pte_access & access,
				     user_fault, write_fault,
				     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
				     ptwrite, largepage,
				     gw->gfn, pfn, false);
			break;
		}
		if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
			continue;

		if (is_large_pte(*sptep)) {
			rmap_remove(vcpu->kvm, sptep);
			set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
			kvm_flush_remote_tlbs(vcpu->kvm);
		}

		if (level == PT_DIRECTORY_LEVEL
		    && gw->level == PT_DIRECTORY_LEVEL) {
			direct = 1;
			if (!is_dirty_pte(gw->ptes[level - 1]))
				access &= ~ACC_WRITE_MASK;
			table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
		} else {
			direct = 0;
			table_gfn = gw->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       direct, access, sptep);
		if (!direct) {
			r = kvm_read_guest_atomic(vcpu->kvm,
						  gw->pte_gpa[level - 2],
						  &curr_pte, sizeof(curr_pte));
			if (r || curr_pte != gw->ptes[level - 2]) {
				kvm_mmu_put_page(shadow_page, sptep);
				kvm_release_pfn_clean(pfn);
				sptep = NULL;
				break;
			}
		}

		spte = __pa(shadow_page->spt)
			| PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*sptep = spte;
	}

	return sptep;
}
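/*
 * Note that the non-leaf shadow ptes built above carry full
 * PT_WRITABLE_MASK | PT_USER_MASK permissions; the permissions
 * accumulated during the guest walk (gw->pte_access) are enforced only
 * at the leaf, which is why mmu_set_spte() is called solely for the
 * final level.
 */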
/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;
	pfn_t pfn;
	int largepage = 0;
	unsigned long mmu_seq;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;
	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}
	if (walker.level == PT_DIRECTORY_LEVEL) {
		gfn_t large_gfn;

		large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
		if (is_largepage_backed(vcpu, large_gfn)) {
			walker.gfn = large_gfn;
			largepage = 1;
		}
	}
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
	/* mmio */
	if (is_error_pfn(pfn)) {
		pgprintk("gfn %lx is mmio\n", walker.gfn);
		kvm_release_pfn_clean(pfn);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  largepage, &write_pt, pfn);
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");
	spin_unlock(&vcpu->kvm->mmu_lock);

	return write_pt;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}
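/*
 * For reference, the error_code decoded at the top of this function
 * follows the x86 page fault error code layout: PFERR_PRESENT_MASK is
 * bit 0 (fault on a present pte), PFERR_WRITE_MASK bit 1,
 * PFERR_USER_MASK bit 2, PFERR_RSVD_MASK bit 3 and PFERR_FETCH_MASK
 * bit 4 - the same bits walk_addr() assembles for inject_page_fault().
 */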
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	pt_element_t gpte;
	gpa_t pte_gpa = -1;
	int level;
	u64 *sptep;
	int need_flush = 0;

	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		/* FIXME: properly handle invlpg on large guest pages */
		if (level == PT_PAGE_TABLE_LEVEL ||
		    ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
			struct kvm_mmu_page *sp = page_header(__pa(sptep));

			pte_gpa = (sp->gfn << PAGE_SHIFT);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (is_shadow_present_pte(*sptep)) {
				rmap_remove(vcpu->kvm, sptep);
				if (is_large_pte(*sptep))
					--vcpu->kvm->stat.lpages;
				need_flush = 1;
			}
			set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
			break;
		}
		if (!is_shadow_present_pte(*sptep))
			break;
	}

	if (need_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);
	spin_unlock(&vcpu->kvm->mmu_lock);

	if (pte_gpa == -1)
		return;
	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
				  sizeof(pt_element_t)))
		return;
	if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) {
		if (mmu_topup_memory_caches(vcpu))
			return;
		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
				  sizeof(pt_element_t), 0);
	}
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, j, offset, r;
	pt_element_t pt[256 / sizeof(pt_element_t)];
	gpa_t pte_gpa;

	if (sp->role.direct
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	pte_gpa = gfn_to_gpa(sp->gfn);
	if (PTTYPE == 32) {
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
		pte_gpa += offset * sizeof(pt_element_t);
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
		for (j = 0; j < ARRAY_SIZE(pt); ++j)
			if (r || is_present_pte(pt[j]))
				sp->spt[i+j] = shadow_trap_nonpresent_pte;
			else
				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
	}
}
/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 * - Alias changes zap the entire shadow cache.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, offset, nr_present;

	offset = nr_present = 0;

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn = sp->gfns[i];

		if (!is_shadow_present_pte(sp->spt[i]))
			continue;

		pte_gpa = gfn_to_gpa(sp->gfn);
		pte_gpa += (i+offset) * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		if (gpte_to_gfn(gpte) != gfn || !is_present_pte(gpte) ||
		    !(gpte & PT_ACCESSED_MASK)) {
			u64 nonpresent;

			rmap_remove(vcpu->kvm, &sp->spt[i]);
			if (is_present_pte(gpte))
				nonpresent = shadow_trap_nonpresent_pte;
			else
				nonpresent = shadow_notrap_nonpresent_pte;
			set_shadow_pte(&sp->spt[i], nonpresent);
			continue;
		}

		nr_present++;
		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
			 is_dirty_pte(gpte), 0, gfn,
			 spte_to_pfn(sp->spt[i]), true, false);
	}

	return !nr_present;
}
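/*
 * The quadrant offset mirrors prefetch_page() above: with PTTYPE == 32
 * a guest page table holds 1024 entries while a shadow page holds only
 * 512, so sp->role.quadrant selects which half of the guest table this
 * shadow page covers and offset rebases the guest pte lookup
 * accordingly.
 */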
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_pde
#undef CMPXCHG