/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#define PT_HAVE_ACCESSED_DIRTY(mmu) true
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#define PT_HAVE_ACCESSED_DIRTY(mmu) true
	#define CMPXCHG cmpxchg
#elif PTTYPE == PTTYPE_EPT
	#define pt_element_t u64
	#define guest_walker guest_walkerEPT
	#define FNAME(name) ept_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#define PT_GUEST_DIRTY_SHIFT 9
	#define PT_GUEST_ACCESSED_SHIFT 8
	#define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 4
#else
	#error Invalid PTTYPE value
#endif

#define PT_GUEST_DIRTY_MASK    (1 << PT_GUEST_DIRTY_SHIFT)
#define PT_GUEST_ACCESSED_MASK (1 << PT_GUEST_ACCESSED_SHIFT)
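/*
 * As a rough illustration (assuming the usual x86 PTE layout where
 * PT_ACCESSED_SHIFT == 5 and PT_DIRTY_SHIFT == 6), the template expands to:
 *
 *	PTTYPE == 64:          FNAME(walk_addr) -> paging64_walk_addr,
 *	                       PT_GUEST_ACCESSED_MASK == (1 << 5)
 *	PTTYPE == PTTYPE_EPT:  FNAME(walk_addr) -> ept_walk_addr,
 *	                       PT_GUEST_ACCESSED_MASK == (1 << 8)
 */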
#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	unsigned max_level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
	bool pte_writable[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	struct x86_exception fault;
};
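/*
 * For example, in a 4-level 64-bit walk the per-level arrays above are
 * indexed by (level - 1): table_gfn[3]/ptes[3] hold the PML4 frame and the
 * PML4E read from it, while ptes[0] would hold the leaf PTE.  Only the slice
 * between (walker->level - 1) and (walker->max_level - 1) is valid.
 */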
static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}
static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
					     unsigned gpte)
{
	unsigned mask;

	/* dirty bit is not supported, so no need to track it */
	if (!PT_HAVE_ACCESSED_DIRTY(mmu))
		return;

	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);

	mask = (unsigned)~ACC_WRITE_MASK;
	/* Allow write access to dirty gptes */
	mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
		PT_WRITABLE_MASK;
	*access &= mask;
}
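/*
 * Worked example, assuming the legacy bit layout (PT_WRITABLE_SHIFT == 1,
 * PT_GUEST_DIRTY_SHIFT == 6): the shift above is 5, so the gpte's dirty bit
 * lands on ACC_WRITE_MASK in the mask.  A clean gpte therefore leaves
 * ACC_WRITE_MASK out of the mask and the caller's *access loses write
 * permission until the dirty bit gets set.
 */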
static inline int FNAME(is_present_gpte)(unsigned long pte)
{
#if PTTYPE != PTTYPE_EPT
	return pte & PT_PRESENT_MASK;
#else
	return pte & 7;
#endif
}
static bool FNAME(is_bad_mt_xwr)(struct rsvd_bits_validate *rsvd_check, u64 gpte)
{
#if PTTYPE != PTTYPE_EPT
	return false;
#else
	return __is_bad_mt_xwr(rsvd_check, gpte);
#endif
}
static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
{
	return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) ||
	       FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);
}
static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       pt_element_t __user *ptep_user, unsigned index,
			       pt_element_t orig_pte, pt_element_t new_pte)
{
	int npages;
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page);
	if (likely(npages == 1)) {
		table = kmap_atomic(page);
		ret = CMPXCHG(&table[index], orig_pte, new_pte);
		kunmap_atomic(table);

		kvm_release_page_dirty(page);
	} else {
		struct vm_area_struct *vma;
		unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK;
		unsigned long pfn;
		unsigned long paddr;

		down_read(&current->mm->mmap_sem);
		vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
		if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			return -EFAULT;
		}
		pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		paddr = pfn << PAGE_SHIFT;
		table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
		if (!table) {
			up_read(&current->mm->mmap_sem);
			return -EFAULT;
		}
		ret = CMPXCHG(&table[index], orig_pte, new_pte);
		memunmap(table);
		up_read(&current->mm->mmap_sem);
	}

	return (ret != orig_pte);
}
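/*
 * A non-zero return (ret != orig_pte) means the gpte changed between the
 * walker's earlier __copy_from_user() and the CMPXCHG here, e.g. because
 * another vcpu or the CPU's own A/D assist rewrote it; the caller treats
 * that as "restart the walk" rather than as a hard error.
 */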
static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *spte,
				  u64 gpte)
{
	if (!FNAME(is_present_gpte)(gpte))
		goto no_present;

	/* if accessed bit is not supported prefetch non accessed gpte */
	if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&
	    !(gpte & PT_GUEST_ACCESSED_MASK))
		goto no_present;

	if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
		goto no_present;

	return false;

no_present:
	drop_spte(vcpu->kvm, spte);
	return true;
}
/*
 * For PTTYPE_EPT, a page table can be executable but not readable
 * on supported processors. Therefore, set_spte does not automatically
 * set bit 0 if execute only is supported. Here, we repurpose ACC_USER_MASK
 * to signify readability since it isn't used in the EPT case.
 */
static inline unsigned FNAME(gpte_access)(u64 gpte)
{
	unsigned access;
#if PTTYPE == PTTYPE_EPT
	access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
		((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
		((gpte & VMX_EPT_READABLE_MASK) ? ACC_USER_MASK : 0);
#else
	BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK);
	BUILD_BUG_ON(ACC_EXEC_MASK != 1);
	access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK);
	/* Combine NX with P (which is set here) to get ACC_EXEC_MASK. */
	access ^= (gpte >> PT64_NX_SHIFT);
#endif

	return access;
}
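/*
 * Example of the NX trick in the non-EPT branch: PT64_NX_SHIFT is 63, so
 * (gpte >> 63) leaves only the NX bit in bit 0.  A present gpte already has
 * bit 0 (== PT_PRESENT_MASK == ACC_EXEC_MASK) set, so the XOR produces 1
 * exactly when NX is clear, i.e. when the page is executable.
 */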
static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
					     struct kvm_mmu *mmu,
					     struct guest_walker *walker,
					     int write_fault)
{
	unsigned level, index;
	pt_element_t pte, orig_pte;
	pt_element_t __user *ptep_user;
	gfn_t table_gfn;
	int ret;

	/* dirty/accessed bits are not supported, so no need to update them */
	if (!PT_HAVE_ACCESSED_DIRTY(mmu))
		return 0;

	for (level = walker->max_level; level >= walker->level; --level) {
		pte = orig_pte = walker->ptes[level - 1];
		table_gfn = walker->table_gfn[level - 1];
		ptep_user = walker->ptep_user[level - 1];
		index = offset_in_page(ptep_user) / sizeof(pt_element_t);
		if (!(pte & PT_GUEST_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
			pte |= PT_GUEST_ACCESSED_MASK;
		}
		if (level == walker->level && write_fault &&
				!(pte & PT_GUEST_DIRTY_MASK)) {
			trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
#if PTTYPE == PTTYPE_EPT
			if (kvm_arch_write_log_dirty(vcpu))
				return -EINVAL;
#endif
			pte |= PT_GUEST_DIRTY_MASK;
		}
		if (pte == orig_pte)
			continue;

		/*
		 * If the slot is read-only, simply do not process the accessed
		 * and dirty bits.  This is the correct thing to do if the slot
		 * is ROM, and page tables in read-as-ROM/write-as-MMIO slots
		 * are only supported if the accessed and dirty bits are already
		 * set in the ROM (so that MMIO writes are never needed).
		 *
		 * Note that NPT does not allow this at all and faults, since
		 * it always wants nested page table entries for the guest
		 * page tables to be writable.  And EPT works but will simply
		 * overwrite the read-only memory to set the accessed and dirty
		 * bits.
		 */
		if (unlikely(!walker->pte_writable[level - 1]))
			continue;

		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
		if (ret)
			return ret;

		kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
		walker->ptes[level - 1] = pte;
	}
	return 0;
}
static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
{
	unsigned pkeys = 0;
#if PTTYPE == 64
	pte_t pte = {.pte = gpte};

	pkeys = pte_flags_pkey(pte_flags(pte));
#endif
	return pkeys;
}
/*
 * Fetch a guest pte for a guest virtual address, or for an L2's GPA.
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gpa_t addr, u32 access)
{
	int ret;
	pt_element_t pte;
	pt_element_t __user *uninitialized_var(ptep_user);
	gfn_t table_gfn;
	u64 pt_access, pte_access;
	unsigned index, accessed_dirty, pte_pkey;
	unsigned nested_access;
	gpa_t pte_gpa;
	bool have_ad;
	int offset;
	u64 walk_nx_mask = 0;
	const int write_fault = access & PFERR_WRITE_MASK;
	const int user_fault  = access & PFERR_USER_MASK;
	const int fetch_fault = access & PFERR_FETCH_MASK;
	u16 errcode = 0;
	gpa_t real_gpa;
	gfn_t gfn;

	trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
	walker->level = mmu->root_level;
	pte           = mmu->get_cr3(vcpu);
	have_ad       = PT_HAVE_ACCESSED_DIRTY(mmu);

#if PTTYPE == 64
	walk_nx_mask = 1ULL << PT64_NX_SHIFT;
	if (walker->level == PT32E_ROOT_LEVEL) {
		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!FNAME(is_present_gpte)(pte))
			goto error;
		--walker->level;
	}
#endif
	walker->max_level = walker->level;
	ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));

	/*
	 * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging
	 * by the MOV to CR instruction are treated as reads and do not cause the
	 * processor to set the dirty flag in any EPT paging-structure entry.
	 */
	nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK;

	pte_access = ~0;
	++walker->level;

	do {
		gfn_t real_gfn;
		unsigned long host_addr;

		pt_access = pte_access;
		--walker->level;

		index = PT_INDEX(addr, walker->level);
		table_gfn = gpte_to_gfn(pte);
		offset    = index * sizeof(pt_element_t);
		pte_gpa   = gfn_to_gpa(table_gfn) + offset;

		BUG_ON(walker->level < 1);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
					      nested_access,
					      &walker->fault);

		/*
		 * FIXME: This can happen if emulation (e.g. of an INS/OUTS
		 * instruction) triggers a nested page fault.  The exit
		 * qualification / exit info field will incorrectly have
		 * "guest page access" as the nested page fault's cause,
		 * instead of "guest page structure access".  To fix this,
		 * the x86_exception struct should be augmented with enough
		 * information to fix the exit_qualification or exit_info_1
		 * fields.
		 */
		if (unlikely(real_gfn == UNMAPPED_GVA))
			return 0;

		real_gfn = gpa_to_gfn(real_gfn);

		host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, real_gfn,
					    &walker->pte_writable[walker->level - 1]);
		if (unlikely(kvm_is_error_hva(host_addr)))
			goto error;

		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
		if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
			goto error;
		walker->ptep_user[walker->level - 1] = ptep_user;

		trace_kvm_mmu_paging_element(pte, walker->level);

		/*
		 * Inverting the NX bit lets us AND it like the other
		 * permission bits.
		 */
		pte_access = pt_access & (pte ^ walk_nx_mask);

		if (unlikely(!FNAME(is_present_gpte)(pte)))
			goto error;

		if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, walker->level))) {
			errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
			goto error;
		}

		walker->ptes[walker->level - 1] = pte;
	} while (!is_last_gpte(mmu, walker->level, pte));

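	/*
	 * At this point pte_access is the AND, over every level walked, of
	 * (gpte ^ walk_nx_mask): e.g. for a 4-level walk a permission bit
	 * survives only if all four gptes grant it (W/U) or none of them
	 * set NX.
	 */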
	pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
	accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;

	/* Convert to ACC_*_MASK flags for struct guest_walker.  */
	walker->pt_access = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
	walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
	errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
	if (unlikely(errcode))
		goto error;

	gfn = gpte_to_gfn_lvl(pte, walker->level);
	gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;

	if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36())
		gfn += pse36_gfn_delta(pte);

	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
	if (real_gpa == UNMAPPED_GVA)
		return 0;

	walker->gfn = real_gpa >> PAGE_SHIFT;

	if (!write_fault)
		FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
	else
		/*
		 * On a write fault, fold the dirty bit into accessed_dirty.
		 * For modes without A/D bits support accessed_dirty will be
		 * always clear.
		 */
		accessed_dirty &= pte >>
			(PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
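	/*
	 * The shift lines the dirty bit up with the accessed bit: e.g. legacy
	 * paging has D at bit 6 and A at bit 5, while EPT has D at bit 9 and
	 * A at bit 8, so in both cases the gpte is shifted right by one
	 * before the AND above.
	 */
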
	if (unlikely(!accessed_dirty)) {
		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
		if (unlikely(ret < 0))
			goto error;
		else if (ret)
			goto retry_walk;
	}

	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, walker->pte_access, walker->pt_access);
	return 1;

error:
	errcode |= write_fault | user_fault;
	if (fetch_fault && (mmu->nx ||
			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
		errcode |= PFERR_FETCH_MASK;

	walker->fault.vector = PF_VECTOR;
	walker->fault.error_code_valid = true;
	walker->fault.error_code = errcode;

#if PTTYPE == PTTYPE_EPT
	/*
	 * Use PFERR_RSVD_MASK in error_code to tell if an EPT
	 * misconfiguration needs to be injected. The detection is
	 * done by is_rsvd_bits_set() above.
	 *
	 * We set up the value of exit_qualification to inject:
	 * [2:0] - Derive from the access bits. The exit_qualification might be
	 *         out of date if it is serving an EPT misconfiguration.
	 * [5:3] - Calculated by the page walk of the guest EPT page tables
	 * [7:8] - Derived from [7:8] of real exit_qualification
	 *
	 * The other bits are set to 0.
	 */
	if (!(errcode & PFERR_RSVD_MASK)) {
		vcpu->arch.exit_qualification &= 0x180;
		if (write_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_WRITE;
		if (user_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_READ;
		if (fetch_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_INSTR;
		vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3;
	}
#endif
	walker->fault.address = addr;
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

	trace_kvm_mmu_walker_error(walker->fault.error_code);
	return 0;
}
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gpa_t addr, u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
					access);
}

#if PTTYPE != PTTYPE_EPT
static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, access);
}
#endif
static bool
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
		     u64 *spte, pt_element_t gpte, bool no_dirty_log)
{
	unsigned pte_access;
	gfn_t gfn;
	kvm_pfn_t pfn;

	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
		return false;

	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);

	gfn = gpte_to_gfn(gpte);
	pte_access = sp->role.access & FNAME(gpte_access)(gpte);
	FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
			no_dirty_log && (pte_access & ACC_WRITE_MASK));
	if (is_error_pfn(pfn))
		return false;

	/*
	 * we call mmu_set_spte() with host_writable = true because
	 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
	 */
	mmu_set_spte(vcpu, spte, pte_access, 0, PT_PAGE_TABLE_LEVEL, gfn, pfn,
		     true, true);

	kvm_release_pfn_clean(pfn);
	return true;
}
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte = *(const pt_element_t *)pte;

	FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
}
static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{
	int r;
	pt_element_t curr_pte;
	gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
	u64 mask;
	int index;

	if (level == PT_PAGE_TABLE_LEVEL) {
		mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
		base_gpa = pte_gpa & ~mask;
		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

		r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa,
				gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
		curr_pte = gw->prefetch_ptes[index];
	} else
		r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa,
				  &curr_pte, sizeof(curr_pte));

	return r || curr_pte != gw->ptes[level - 1];
}
static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
{
	struct kvm_mmu_page *sp;
	pt_element_t *gptep = gw->prefetch_ptes;
	u64 *spte;
	int i;

	sp = page_header(__pa(sptep));

	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return;

	if (sp->role.direct)
		return __direct_pte_prefetch(vcpu, sp, sptep);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (spte == sptep)
			continue;

		if (is_shadow_present_pte(*spte))
			continue;

		if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
			break;
	}
}
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation, return 1 to indicate this case.
 */
static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
			struct guest_walker *gw,
			int write_fault, int max_level,
			kvm_pfn_t pfn, bool map_writable, bool prefault,
			bool lpage_disallowed)
{
	struct kvm_mmu_page *sp = NULL;
	struct kvm_shadow_walk_iterator it;
	unsigned direct_access, access = gw->pt_access;
	int top_level, hlevel, ret;
	gfn_t base_gfn = gw->gfn;

	direct_access = gw->pte_access;

	top_level = vcpu->arch.mmu->root_level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;
	/*
	 * Verify that the top-level gpte is still there.  Since the page
	 * is a root page, it is either write protected (and cannot be
	 * changed from now on) or it is invalid (in which case, we don't
	 * really care if it changes underneath us after this point).
	 */
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;

	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
		goto out_gpte_changed;

	for (shadow_walk_init(&it, vcpu, addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {
		gfn_t table_gfn;

		clear_sp_write_flooding_count(it.sptep);
		drop_large_spte(vcpu, it.sptep);

		sp = NULL;
		if (!is_shadow_present_pte(*it.sptep)) {
			table_gfn = gw->table_gfn[it.level - 2];
			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
					      false, access);
		}

		/*
		 * Verify that the gpte in the page we've just write
		 * protected is still there.
		 */
		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
			goto out_gpte_changed;

		if (sp)
			link_shadow_page(vcpu, it.sptep, sp);
	}

	hlevel = kvm_mmu_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn);

	trace_kvm_mmu_spte_requested(addr, gw->level, pfn);

	for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
		clear_sp_write_flooding_count(it.sptep);

		/*
		 * We cannot overwrite existing page tables with an NX
		 * large page, as the leaf could be executable.
		 */
		disallowed_hugepage_adjust(it, gw->gfn, &pfn, &hlevel);

		base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
		if (it.level == hlevel)
			break;

		validate_direct_spte(vcpu, it.sptep, direct_access);

		drop_large_spte(vcpu, it.sptep);

		if (!is_shadow_present_pte(*it.sptep)) {
			sp = kvm_mmu_get_page(vcpu, base_gfn, addr,
					      it.level - 1, true, direct_access);
			link_shadow_page(vcpu, it.sptep, sp);
			if (lpage_disallowed)
				account_huge_nx_page(vcpu->kvm, sp);
		}
	}

	ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
			   it.level, base_gfn, pfn, prefault, map_writable);
	FNAME(pte_prefetch)(vcpu, gw, it.sptep);
	++vcpu->stat.pf_fixed;
	return ret;

out_gpte_changed:
	return RET_PF_RETRY;
}
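/*
 * For instance, when the guest maps with a 2MiB page (gw->level == 2) but the
 * host backing only allows 4KiB (hlevel == PT_PAGE_TABLE_LEVEL), the first
 * loop above stops at the guest's level and the second loop keeps descending
 * with direct shadow pages until it can install the final 4KiB spte.
 */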
/*
 * To see whether the mapped gfn can write its page table in the current
 * mapping.
 *
 * This is a helper for FNAME(page_fault). When the guest uses a large page
 * to map a writable gfn that is itself in use as a guest page table, force
 * kvm to map it with a small page: the shadow page that kvm creates for the
 * guest page table would break the large mapping anyway, and doing it early
 * avoids unnecessary #PFs and emulation.
 *
 * @write_fault_to_shadow_pgtable will return true if the fault gfn is
 * currently used as its page table.
 *
 * Note: the PDPT page table is not checked for PAE-32 bit guests. That is ok
 * since the PDPT is always shadowed, which means a large page can never be
 * used to map the gfn that holds the PDPT.
 */
static bool
FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
			      struct guest_walker *walker, int user_fault,
			      bool *write_fault_to_shadow_pgtable)
{
	int level;
	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
	bool self_changed = false;

	if (!(walker->pte_access & ACC_WRITE_MASK ||
	      (!is_write_protection(vcpu) && !user_fault)))
		return false;

	for (level = walker->level; level <= walker->max_level; level++) {
		gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];

		self_changed |= !(gfn & mask);
		*write_fault_to_shadow_pgtable |= !gfn;
	}

	return self_changed;
}
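/*
 * Example: for a 2MiB walker->level, mask clears the low 9 gfn bits, so
 * !(gfn & mask) means the faulting gfn and one of the walked page-table gfns
 * share the same 2MiB region, and !gfn means the fault hit the page table
 * page itself.
 */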
/*
 * Page fault handler.  There are several causes for a page fault:
 *           - there is no shadow pte for the guest pte
 *           - write access through a shadow pte marked read only so that we can set
 *             the dirty bit
 *           - write access to a shadow pte marked read only so we can update the page
 *             dirty bitmap, when userspace requests it
 *           - mmio access; in this case we will never install a present shadow pte
 *           - normal guest page fault due to the guest pte marked not present, not
 *             writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
			     bool prefault)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	int r;
	kvm_pfn_t pfn;
	unsigned long mmu_seq;
	bool map_writable, is_self_change_mapping;
	bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
				is_nx_huge_page_enabled();
	int max_level;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * If PFEC.RSVD is set, this is a shadow page fault.
	 * The bit needs to be cleared before walking guest page tables.
	 */
	error_code &= ~PFERR_RSVD_MASK;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		if (!prefault)
			inject_page_fault(vcpu, &walker.fault);

		return RET_PF_RETRY;
	}

	if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
		shadow_page_table_clear_flood(vcpu, addr);
		return RET_PF_EMULATE;
	}

	vcpu->arch.write_fault_to_shadow_pgtable = false;

	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);

	if (lpage_disallowed || is_self_change_mapping)
		max_level = PT_PAGE_TABLE_LEVEL;
	else
		max_level = walker.level;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
			 &map_writable))
		return RET_PF_RETRY;

	if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
		return r;

	/*
	 * Do not change pte_access if the pfn is a mmio page, otherwise
	 * we will cache the incorrect access into mmio spte.
	 */
	if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
	    !is_write_protection(vcpu) && !user_fault &&
	    !is_noslot_pfn(pfn)) {
		walker.pte_access |= ACC_WRITE_MASK;
		walker.pte_access &= ~ACC_USER_MASK;

		/*
		 * If we converted a user page to a kernel page,
		 * so that the kernel can write to it when cr0.wp=0,
		 * then we should prevent the kernel from executing it
		 * if SMEP is enabled.
		 */
		if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
			walker.pte_access &= ~ACC_EXEC_MASK;
	}

	r = RET_PF_RETRY;
	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;

	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
	if (make_mmu_pages_available(vcpu) < 0)
		goto out_unlock;
	r = FNAME(fetch)(vcpu, addr, &walker, write_fault, max_level, pfn,
			 map_writable, prefault, lpage_disallowed);
	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return r;
}
static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
	int offset = 0;

	WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}
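/*
 * role.quadrant matters for 32-bit guests shadowed with 64-bit sptes: a 4KiB
 * guest page table holds 1024 4-byte gptes while a shadow page covers only
 * 512 entries, so quadrant << PT64_LEVEL_BITS (i.e. quadrant * 512) selects
 * which half of the guest table this shadow page corresponds to.
 */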
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	int level;
	u64 *sptep;

	vcpu_clear_mmio_info(vcpu, gva);

	/*
	 * No need to check return value here, rmap_can_add() can
	 * help us to skip pte prefetch later.
	 */
	mmu_topup_memory_caches(vcpu);

	if (!VALID_PAGE(root_hpa)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = page_header(__pa(sptep));
		if (is_last_spte(*sptep, level)) {
			pt_element_t gpte;
			gpa_t pte_gpa;

			if (!sp->unsync)
				break;

			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
				kvm_flush_remote_tlbs_with_address(vcpu->kvm,
					sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));

			if (!rmap_can_add(vcpu))
				break;

			if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
						       sizeof(pt_element_t)))
				break;

			FNAME(update_pte)(vcpu, sp, sptep, &gpte);
		}

		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
			break;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
}
/* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access,
			       struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, addr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= addr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}
#if PTTYPE != PTTYPE_EPT
/* Note, gva_to_gpa_nested() is only used to translate L2 GVAs. */
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
				      u32 access,
				      struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

#ifndef CONFIG_X86_64
	/* A 64-bit GVA should be impossible on 32-bit KVM. */
	WARN_ON_ONCE(vaddr >> 32);
#endif

	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}
#endif
/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Note:
 *   We should flush all tlbs if the spte is dropped even though the guest is
 *   responsible for it. If we don't, kvm_mmu_notifier_invalidate_page and
 *   kvm_mmu_notifier_invalidate_range_start may decide the page is no longer
 *   mapped by the guest and skip the tlb flush, and the guest could then keep
 *   accessing the freed page through a stale tlb entry.
 *   We increase kvm->tlbs_dirty to delay the tlb flush in this case.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, nr_present = 0;
	bool host_writable;
	gpa_t first_pte_gpa;
	int set_spte_ret = 0;

	/* direct kvm_mmu_page can not be unsync. */
	BUG_ON(sp->role.direct);

	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn;

		if (!sp->spt[i])
			continue;

		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
					       sizeof(pt_element_t)))
			return 0;

		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
			/*
			 * Update spte before increasing tlbs_dirty to make
			 * sure no tlb flush is lost after spte is zapped; see
			 * the comments in kvm_flush_remote_tlbs().
			 */
			smp_wmb();
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		gfn = gpte_to_gfn(gpte);
		pte_access = sp->role.access;
		pte_access &= FNAME(gpte_access)(gpte);
		FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);

		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
		      &nr_present))
			continue;

		if (gfn != sp->gfns[i]) {
			drop_spte(vcpu->kvm, &sp->spt[i]);
			/*
			 * The same as above where we are doing
			 * prefetch_invalid_gpte().
			 */
			smp_wmb();
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		nr_present++;

		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

		set_spte_ret |= set_spte(vcpu, &sp->spt[i],
					 pte_access, PT_PAGE_TABLE_LEVEL,
					 gfn, spte_to_pfn(sp->spt[i]),
					 true, false, host_writable);
	}

	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
		kvm_flush_remote_tlbs(vcpu->kvm);

	return nr_present;
}
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG
#undef PT_GUEST_ACCESSED_MASK
#undef PT_GUEST_DIRTY_MASK
#undef PT_GUEST_DIRTY_SHIFT
#undef PT_GUEST_ACCESSED_SHIFT
#undef PT_HAVE_ACCESSED_DIRTY