/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "kvm_cache_regs.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/cmpxchg.h>
/*
 * When setting this variable to true it enables Two-Dimensional-Paging
 * where the hardware walks 2 page tables:
 * 1. the guest-virtual to guest-physical
 * 2. while doing 1. it walks guest-physical to host-physical
 * If the hardware supports that we don't need to do shadow paging.
 */
bool tdp_enabled = false;
	AUDIT_POST_PAGE_FAULT,

char *audit_point_name[] = {
#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#ifdef MMU_DEBUG
static bool dbg;
module_param(dbg, bool, 0644);
#endif

static bool oos_shadow = true;
module_param(oos_shadow, bool, 0644);
#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PTE_PREFETCH_NUM		8
#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
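/*
 * Illustrative example (editor's addition, not in the original source): with
 * PAGE_SHIFT == 12 and PT64_LEVEL_BITS == 9, PT64_INDEX() slices an address
 * into 9-bit table indices, one per paging level.
 */
#if 0	/* example only, never compiled */
static void example_pt64_index(void)
{
	u64 addr = 0x0000003fc0201000ULL;

	/* bits 20..12 of addr select the level-1 (4KiB PTE) slot */
	WARN_ON(PT64_INDEX(addr, 1) != 1);
	/* bits 29..21 of addr select the level-2 (PDE) slot */
	WARN_ON(PT64_INDEX(addr, 2) != 1);
}
#endif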
#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LVL_OFFSET_MASK(level) \
	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT32_LEVEL_BITS))) - 1))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
#define PT64_LVL_ADDR_MASK(level) \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))
#define PT64_LVL_OFFSET_MASK(level) \
	(PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
					    * PT32_LEVEL_BITS))) - 1))
#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "mmutrace.h"

#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
struct kvm_rmap_desc {
	u64 *sptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

struct kvm_shadow_walk_iterator {
	u64 addr;
	hpa_t shadow_addr;
	int level;
	u64 *sptep;
	unsigned index;
};
#define for_each_shadow_entry(_vcpu, _addr, _walker)    \
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))
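/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a typical consumer of for_each_shadow_entry() walks from the shadow root
 * down towards the leaf for @addr while holding mmu_lock, stopping at the
 * first non-present entry.  The real walkers below (__direct_map() and the
 * paging_tmpl.h fault/invlpg paths) follow the same shape.
 */
#if 0	/* example only, never compiled */
static u64 *example_find_leaf_spte(struct kvm_vcpu *vcpu, u64 addr)
{
	struct kvm_shadow_walk_iterator iterator;

	for_each_shadow_entry(vcpu, addr, iterator) {
		if (!is_shadow_present_pte(*iterator.sptep))
			break;				/* hole: no mapping */
		if (is_last_spte(*iterator.sptep, iterator.level))
			return iterator.sptep;		/* leaf spte for addr */
	}
	return NULL;
}
#endif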
typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;
static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;
static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask;	/* mutually exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;
static inline u64 rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}
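/*
 * Example (editor's addition, illustrative only): rsvd_bits(s, e) builds a
 * mask of bits s..e inclusive, e.g. rsvd_bits(36, 51) == 0x000ffff000000000ULL,
 * the shape used elsewhere in the MMU code to flag physical-address bits
 * above a CPU's MAXPHYADDR (36 in this example) as reserved.
 */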
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask)
{
	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
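/*
 * Illustrative sketch (editor's addition, not from this file): a vendor
 * module configures the shadow-PTE bit layout once at init time.  The call
 * below mirrors an EPT-style setup, where the MMU-visible format has no
 * user/accessed/dirty/nx bits and only an executable bit; treat the exact
 * argument values as an example rather than a definitive reference.
 */
#if 0	/* example only, never compiled */
	kvm_mmu_set_mask_ptes(0ull /* user */, 0ull /* accessed */,
			      0ull /* dirty */, 0ull /* nx */,
			      VMX_EPT_EXECUTABLE_MASK /* x */);
#endif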
static bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}
static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.efer & EFER_NX;
}
static int is_shadow_present_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_gpte(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}

static int is_rmap_spte(u64 pte)
{
	return is_shadow_present_pte(pte);
}

static int is_last_spte(u64 pte, int level)
{
	if (level == PT_PAGE_TABLE_LEVEL)
		return 1;
	if (is_large_pte(pte))
		return 1;
	return 0;
}

static pfn_t spte_to_pfn(u64 pte)
{
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

static void __set_spte(u64 *sptep, u64 spte)
{
	set_64bit(sptep, spte);
}

static u64 __xchg_spte(u64 *sptep, u64 new_spte)
{
#ifdef CONFIG_X86_64
	return xchg(sptep, new_spte);
#else
	u64 old_spte;

	do {
		old_spte = *sptep;
	} while (cmpxchg64(sptep, old_spte, new_spte) != old_spte);

	return old_spte;
#endif
}
305 static bool spte_has_volatile_bits(u64 spte
)
307 if (!shadow_accessed_mask
)
310 if (!is_shadow_present_pte(spte
))
313 if ((spte
& shadow_accessed_mask
) &&
314 (!is_writable_pte(spte
) || (spte
& shadow_dirty_mask
)))
320 static bool spte_is_bit_cleared(u64 old_spte
, u64 new_spte
, u64 bit_mask
)
322 return (old_spte
& bit_mask
) && !(new_spte
& bit_mask
);
325 static void update_spte(u64
*sptep
, u64 new_spte
)
327 u64 mask
, old_spte
= *sptep
;
329 WARN_ON(!is_rmap_spte(new_spte
));
331 new_spte
|= old_spte
& shadow_dirty_mask
;
333 mask
= shadow_accessed_mask
;
334 if (is_writable_pte(old_spte
))
335 mask
|= shadow_dirty_mask
;
337 if (!spte_has_volatile_bits(old_spte
) || (new_spte
& mask
) == mask
)
338 __set_spte(sptep
, new_spte
);
340 old_spte
= __xchg_spte(sptep
, new_spte
);
342 if (!shadow_accessed_mask
)
345 if (spte_is_bit_cleared(old_spte
, new_spte
, shadow_accessed_mask
))
346 kvm_set_pfn_accessed(spte_to_pfn(old_spte
));
347 if (spte_is_bit_cleared(old_spte
, new_spte
, shadow_dirty_mask
))
348 kvm_set_pfn_dirty(spte_to_pfn(old_spte
));
351 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache
*cache
,
352 struct kmem_cache
*base_cache
, int min
)
356 if (cache
->nobjs
>= min
)
358 while (cache
->nobjs
< ARRAY_SIZE(cache
->objects
)) {
359 obj
= kmem_cache_zalloc(base_cache
, GFP_KERNEL
);
362 cache
->objects
[cache
->nobjs
++] = obj
;
367 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache
*mc
,
368 struct kmem_cache
*cache
)
371 kmem_cache_free(cache
, mc
->objects
[--mc
->nobjs
]);
374 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache
*cache
,
379 if (cache
->nobjs
>= min
)
381 while (cache
->nobjs
< ARRAY_SIZE(cache
->objects
)) {
382 page
= (void *)__get_free_page(GFP_KERNEL
);
385 cache
->objects
[cache
->nobjs
++] = page
;
390 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache
*mc
)
393 free_page((unsigned long)mc
->objects
[--mc
->nobjs
]);
396 static int mmu_topup_memory_caches(struct kvm_vcpu
*vcpu
)
400 r
= mmu_topup_memory_cache(&vcpu
->arch
.mmu_pte_chain_cache
,
404 r
= mmu_topup_memory_cache(&vcpu
->arch
.mmu_rmap_desc_cache
,
405 rmap_desc_cache
, 4 + PTE_PREFETCH_NUM
);
408 r
= mmu_topup_memory_cache_page(&vcpu
->arch
.mmu_page_cache
, 8);
411 r
= mmu_topup_memory_cache(&vcpu
->arch
.mmu_page_header_cache
,
412 mmu_page_header_cache
, 4);
417 static void mmu_free_memory_caches(struct kvm_vcpu
*vcpu
)
419 mmu_free_memory_cache(&vcpu
->arch
.mmu_pte_chain_cache
, pte_chain_cache
);
420 mmu_free_memory_cache(&vcpu
->arch
.mmu_rmap_desc_cache
, rmap_desc_cache
);
421 mmu_free_memory_cache_page(&vcpu
->arch
.mmu_page_cache
);
422 mmu_free_memory_cache(&vcpu
->arch
.mmu_page_header_cache
,
423 mmu_page_header_cache
);
426 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache
*mc
,
432 p
= mc
->objects
[--mc
->nobjs
];
436 static struct kvm_pte_chain
*mmu_alloc_pte_chain(struct kvm_vcpu
*vcpu
)
438 return mmu_memory_cache_alloc(&vcpu
->arch
.mmu_pte_chain_cache
,
439 sizeof(struct kvm_pte_chain
));
442 static void mmu_free_pte_chain(struct kvm_pte_chain
*pc
)
444 kmem_cache_free(pte_chain_cache
, pc
);
447 static struct kvm_rmap_desc
*mmu_alloc_rmap_desc(struct kvm_vcpu
*vcpu
)
449 return mmu_memory_cache_alloc(&vcpu
->arch
.mmu_rmap_desc_cache
,
450 sizeof(struct kvm_rmap_desc
));
453 static void mmu_free_rmap_desc(struct kvm_rmap_desc
*rd
)
455 kmem_cache_free(rmap_desc_cache
, rd
);
458 static gfn_t
kvm_mmu_page_get_gfn(struct kvm_mmu_page
*sp
, int index
)
460 if (!sp
->role
.direct
)
461 return sp
->gfns
[index
];
463 return sp
->gfn
+ (index
<< ((sp
->role
.level
- 1) * PT64_LEVEL_BITS
));
466 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page
*sp
, int index
, gfn_t gfn
)
469 BUG_ON(gfn
!= kvm_mmu_page_get_gfn(sp
, index
));
471 sp
->gfns
[index
] = gfn
;
/*
 * Return the pointer to the large page information for a given gfn,
 * handling slots that are not large page aligned.
 */
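/*
 * Worked example (editor's addition, illustrative only): at the 2MiB level
 * KVM_HPAGE_GFN_SHIFT() is 9, so for a slot with base_gfn == 0x1234 and
 * gfn == 0x1400 the function returns &slot->lpage_info[0][1], because
 * (0x1400 >> 9) - (0x1234 >> 9) == 0xa - 0x9 == 1.
 */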
478 static struct kvm_lpage_info
*lpage_info_slot(gfn_t gfn
,
479 struct kvm_memory_slot
*slot
,
484 idx
= (gfn
>> KVM_HPAGE_GFN_SHIFT(level
)) -
485 (slot
->base_gfn
>> KVM_HPAGE_GFN_SHIFT(level
));
486 return &slot
->lpage_info
[level
- 2][idx
];
489 static void account_shadowed(struct kvm
*kvm
, gfn_t gfn
)
491 struct kvm_memory_slot
*slot
;
492 struct kvm_lpage_info
*linfo
;
495 slot
= gfn_to_memslot(kvm
, gfn
);
496 for (i
= PT_DIRECTORY_LEVEL
;
497 i
< PT_PAGE_TABLE_LEVEL
+ KVM_NR_PAGE_SIZES
; ++i
) {
498 linfo
= lpage_info_slot(gfn
, slot
, i
);
499 linfo
->write_count
+= 1;
503 static void unaccount_shadowed(struct kvm
*kvm
, gfn_t gfn
)
505 struct kvm_memory_slot
*slot
;
506 struct kvm_lpage_info
*linfo
;
509 slot
= gfn_to_memslot(kvm
, gfn
);
510 for (i
= PT_DIRECTORY_LEVEL
;
511 i
< PT_PAGE_TABLE_LEVEL
+ KVM_NR_PAGE_SIZES
; ++i
) {
512 linfo
= lpage_info_slot(gfn
, slot
, i
);
513 linfo
->write_count
-= 1;
514 WARN_ON(linfo
->write_count
< 0);
518 static int has_wrprotected_page(struct kvm
*kvm
,
522 struct kvm_memory_slot
*slot
;
523 struct kvm_lpage_info
*linfo
;
525 slot
= gfn_to_memslot(kvm
, gfn
);
527 linfo
= lpage_info_slot(gfn
, slot
, level
);
528 return linfo
->write_count
;
534 static int host_mapping_level(struct kvm
*kvm
, gfn_t gfn
)
536 unsigned long page_size
;
539 page_size
= kvm_host_page_size(kvm
, gfn
);
541 for (i
= PT_PAGE_TABLE_LEVEL
;
542 i
< (PT_PAGE_TABLE_LEVEL
+ KVM_NR_PAGE_SIZES
); ++i
) {
543 if (page_size
>= KVM_HPAGE_SIZE(i
))
552 static struct kvm_memory_slot
*
553 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu
*vcpu
, gfn_t gfn
,
556 struct kvm_memory_slot
*slot
;
558 slot
= gfn_to_memslot(vcpu
->kvm
, gfn
);
559 if (!slot
|| slot
->flags
& KVM_MEMSLOT_INVALID
||
560 (no_dirty_log
&& slot
->dirty_bitmap
))
566 static bool mapping_level_dirty_bitmap(struct kvm_vcpu
*vcpu
, gfn_t large_gfn
)
568 return gfn_to_memslot_dirty_bitmap(vcpu
, large_gfn
, true);
571 static int mapping_level(struct kvm_vcpu
*vcpu
, gfn_t large_gfn
)
573 int host_level
, level
, max_level
;
575 host_level
= host_mapping_level(vcpu
->kvm
, large_gfn
);
577 if (host_level
== PT_PAGE_TABLE_LEVEL
)
580 max_level
= kvm_x86_ops
->get_lpage_level() < host_level
?
581 kvm_x86_ops
->get_lpage_level() : host_level
;
583 for (level
= PT_DIRECTORY_LEVEL
; level
<= max_level
; ++level
)
584 if (has_wrprotected_page(vcpu
->kvm
, large_gfn
, level
))
591 * Take gfn and return the reverse mapping to it.
594 static unsigned long *gfn_to_rmap(struct kvm
*kvm
, gfn_t gfn
, int level
)
596 struct kvm_memory_slot
*slot
;
597 struct kvm_lpage_info
*linfo
;
599 slot
= gfn_to_memslot(kvm
, gfn
);
600 if (likely(level
== PT_PAGE_TABLE_LEVEL
))
601 return &slot
->rmap
[gfn
- slot
->base_gfn
];
603 linfo
= lpage_info_slot(gfn
, slot
, level
);
605 return &linfo
->rmap_pde
;
/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
 * that points to page_address(page).
 *
 * If rmapp bit zero is one, (rmapp & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.
 *
 * Returns the number of rmap entries before the spte was added or zero if
 * the spte was not added.
 */
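/*
 * Illustrative helper (editor's addition, not part of the original file):
 * how a reader of *rmapp decodes the tagged pointer described above.  Bit
 * zero selects between the "single spte" and "descriptor list" encodings.
 */
#if 0	/* example only, never compiled */
static struct kvm_rmap_desc *example_rmapp_to_desc(unsigned long *rmapp)
{
	if (!*rmapp)			/* no sptes map this gfn */
		return NULL;
	if (!(*rmapp & 1))		/* *rmapp is a lone u64 *spte */
		return NULL;
	return (struct kvm_rmap_desc *)(*rmapp & ~1ul);
}
#endif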
621 static int rmap_add(struct kvm_vcpu
*vcpu
, u64
*spte
, gfn_t gfn
)
623 struct kvm_mmu_page
*sp
;
624 struct kvm_rmap_desc
*desc
;
625 unsigned long *rmapp
;
628 if (!is_rmap_spte(*spte
))
630 sp
= page_header(__pa(spte
));
631 kvm_mmu_page_set_gfn(sp
, spte
- sp
->spt
, gfn
);
632 rmapp
= gfn_to_rmap(vcpu
->kvm
, gfn
, sp
->role
.level
);
634 rmap_printk("rmap_add: %p %llx 0->1\n", spte
, *spte
);
635 *rmapp
= (unsigned long)spte
;
636 } else if (!(*rmapp
& 1)) {
637 rmap_printk("rmap_add: %p %llx 1->many\n", spte
, *spte
);
638 desc
= mmu_alloc_rmap_desc(vcpu
);
639 desc
->sptes
[0] = (u64
*)*rmapp
;
640 desc
->sptes
[1] = spte
;
641 *rmapp
= (unsigned long)desc
| 1;
644 rmap_printk("rmap_add: %p %llx many->many\n", spte
, *spte
);
645 desc
= (struct kvm_rmap_desc
*)(*rmapp
& ~1ul);
646 while (desc
->sptes
[RMAP_EXT
-1] && desc
->more
) {
650 if (desc
->sptes
[RMAP_EXT
-1]) {
651 desc
->more
= mmu_alloc_rmap_desc(vcpu
);
654 for (i
= 0; desc
->sptes
[i
]; ++i
)
656 desc
->sptes
[i
] = spte
;
661 static void rmap_desc_remove_entry(unsigned long *rmapp
,
662 struct kvm_rmap_desc
*desc
,
664 struct kvm_rmap_desc
*prev_desc
)
668 for (j
= RMAP_EXT
- 1; !desc
->sptes
[j
] && j
> i
; --j
)
670 desc
->sptes
[i
] = desc
->sptes
[j
];
671 desc
->sptes
[j
] = NULL
;
674 if (!prev_desc
&& !desc
->more
)
675 *rmapp
= (unsigned long)desc
->sptes
[0];
678 prev_desc
->more
= desc
->more
;
680 *rmapp
= (unsigned long)desc
->more
| 1;
681 mmu_free_rmap_desc(desc
);
684 static void rmap_remove(struct kvm
*kvm
, u64
*spte
)
686 struct kvm_rmap_desc
*desc
;
687 struct kvm_rmap_desc
*prev_desc
;
688 struct kvm_mmu_page
*sp
;
690 unsigned long *rmapp
;
693 sp
= page_header(__pa(spte
));
694 gfn
= kvm_mmu_page_get_gfn(sp
, spte
- sp
->spt
);
695 rmapp
= gfn_to_rmap(kvm
, gfn
, sp
->role
.level
);
697 printk(KERN_ERR
"rmap_remove: %p 0->BUG\n", spte
);
699 } else if (!(*rmapp
& 1)) {
700 rmap_printk("rmap_remove: %p 1->0\n", spte
);
701 if ((u64
*)*rmapp
!= spte
) {
702 printk(KERN_ERR
"rmap_remove: %p 1->BUG\n", spte
);
707 rmap_printk("rmap_remove: %p many->many\n", spte
);
708 desc
= (struct kvm_rmap_desc
*)(*rmapp
& ~1ul);
711 for (i
= 0; i
< RMAP_EXT
&& desc
->sptes
[i
]; ++i
)
712 if (desc
->sptes
[i
] == spte
) {
713 rmap_desc_remove_entry(rmapp
,
721 pr_err("rmap_remove: %p many->many\n", spte
);
726 static int set_spte_track_bits(u64
*sptep
, u64 new_spte
)
729 u64 old_spte
= *sptep
;
731 if (!spte_has_volatile_bits(old_spte
))
732 __set_spte(sptep
, new_spte
);
734 old_spte
= __xchg_spte(sptep
, new_spte
);
736 if (!is_rmap_spte(old_spte
))
739 pfn
= spte_to_pfn(old_spte
);
740 if (!shadow_accessed_mask
|| old_spte
& shadow_accessed_mask
)
741 kvm_set_pfn_accessed(pfn
);
742 if (!shadow_dirty_mask
|| (old_spte
& shadow_dirty_mask
))
743 kvm_set_pfn_dirty(pfn
);
747 static void drop_spte(struct kvm
*kvm
, u64
*sptep
, u64 new_spte
)
749 if (set_spte_track_bits(sptep
, new_spte
))
750 rmap_remove(kvm
, sptep
);
753 static u64
*rmap_next(struct kvm
*kvm
, unsigned long *rmapp
, u64
*spte
)
755 struct kvm_rmap_desc
*desc
;
761 else if (!(*rmapp
& 1)) {
763 return (u64
*)*rmapp
;
766 desc
= (struct kvm_rmap_desc
*)(*rmapp
& ~1ul);
769 for (i
= 0; i
< RMAP_EXT
&& desc
->sptes
[i
]; ++i
) {
770 if (prev_spte
== spte
)
771 return desc
->sptes
[i
];
772 prev_spte
= desc
->sptes
[i
];
779 static int rmap_write_protect(struct kvm
*kvm
, u64 gfn
)
781 unsigned long *rmapp
;
783 int i
, write_protected
= 0;
785 rmapp
= gfn_to_rmap(kvm
, gfn
, PT_PAGE_TABLE_LEVEL
);
787 spte
= rmap_next(kvm
, rmapp
, NULL
);
790 BUG_ON(!(*spte
& PT_PRESENT_MASK
));
791 rmap_printk("rmap_write_protect: spte %p %llx\n", spte
, *spte
);
792 if (is_writable_pte(*spte
)) {
793 update_spte(spte
, *spte
& ~PT_WRITABLE_MASK
);
796 spte
= rmap_next(kvm
, rmapp
, spte
);
799 /* check for huge page mappings */
800 for (i
= PT_DIRECTORY_LEVEL
;
801 i
< PT_PAGE_TABLE_LEVEL
+ KVM_NR_PAGE_SIZES
; ++i
) {
802 rmapp
= gfn_to_rmap(kvm
, gfn
, i
);
803 spte
= rmap_next(kvm
, rmapp
, NULL
);
806 BUG_ON(!(*spte
& PT_PRESENT_MASK
));
807 BUG_ON((*spte
& (PT_PAGE_SIZE_MASK
|PT_PRESENT_MASK
)) != (PT_PAGE_SIZE_MASK
|PT_PRESENT_MASK
));
808 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte
, *spte
, gfn
);
809 if (is_writable_pte(*spte
)) {
811 shadow_trap_nonpresent_pte
);
816 spte
= rmap_next(kvm
, rmapp
, spte
);
820 return write_protected
;
823 static int kvm_unmap_rmapp(struct kvm
*kvm
, unsigned long *rmapp
,
827 int need_tlb_flush
= 0;
829 while ((spte
= rmap_next(kvm
, rmapp
, NULL
))) {
830 BUG_ON(!(*spte
& PT_PRESENT_MASK
));
831 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte
, *spte
);
832 drop_spte(kvm
, spte
, shadow_trap_nonpresent_pte
);
835 return need_tlb_flush
;
838 static int kvm_set_pte_rmapp(struct kvm
*kvm
, unsigned long *rmapp
,
843 pte_t
*ptep
= (pte_t
*)data
;
846 WARN_ON(pte_huge(*ptep
));
847 new_pfn
= pte_pfn(*ptep
);
848 spte
= rmap_next(kvm
, rmapp
, NULL
);
850 BUG_ON(!is_shadow_present_pte(*spte
));
851 rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte
, *spte
);
853 if (pte_write(*ptep
)) {
854 drop_spte(kvm
, spte
, shadow_trap_nonpresent_pte
);
855 spte
= rmap_next(kvm
, rmapp
, NULL
);
857 new_spte
= *spte
&~ (PT64_BASE_ADDR_MASK
);
858 new_spte
|= (u64
)new_pfn
<< PAGE_SHIFT
;
860 new_spte
&= ~PT_WRITABLE_MASK
;
861 new_spte
&= ~SPTE_HOST_WRITEABLE
;
862 new_spte
&= ~shadow_accessed_mask
;
863 set_spte_track_bits(spte
, new_spte
);
864 spte
= rmap_next(kvm
, rmapp
, spte
);
868 kvm_flush_remote_tlbs(kvm
);
873 static int kvm_handle_hva(struct kvm
*kvm
, unsigned long hva
,
875 int (*handler
)(struct kvm
*kvm
, unsigned long *rmapp
,
881 struct kvm_memslots
*slots
;
883 slots
= kvm_memslots(kvm
);
885 for (i
= 0; i
< slots
->nmemslots
; i
++) {
886 struct kvm_memory_slot
*memslot
= &slots
->memslots
[i
];
887 unsigned long start
= memslot
->userspace_addr
;
890 end
= start
+ (memslot
->npages
<< PAGE_SHIFT
);
891 if (hva
>= start
&& hva
< end
) {
892 gfn_t gfn_offset
= (hva
- start
) >> PAGE_SHIFT
;
893 gfn_t gfn
= memslot
->base_gfn
+ gfn_offset
;
895 ret
= handler(kvm
, &memslot
->rmap
[gfn_offset
], data
);
897 for (j
= 0; j
< KVM_NR_PAGE_SIZES
- 1; ++j
) {
898 struct kvm_lpage_info
*linfo
;
900 linfo
= lpage_info_slot(gfn
, memslot
,
901 PT_DIRECTORY_LEVEL
+ j
);
902 ret
|= handler(kvm
, &linfo
->rmap_pde
, data
);
904 trace_kvm_age_page(hva
, memslot
, ret
);
912 int kvm_unmap_hva(struct kvm
*kvm
, unsigned long hva
)
914 return kvm_handle_hva(kvm
, hva
, 0, kvm_unmap_rmapp
);
917 void kvm_set_spte_hva(struct kvm
*kvm
, unsigned long hva
, pte_t pte
)
919 kvm_handle_hva(kvm
, hva
, (unsigned long)&pte
, kvm_set_pte_rmapp
);
922 static int kvm_age_rmapp(struct kvm
*kvm
, unsigned long *rmapp
,
929 * Emulate the accessed bit for EPT, by checking if this page has
930 * an EPT mapping, and clearing it if it does. On the next access,
931 * a new EPT mapping will be established.
932 * This has some overhead, but not as much as the cost of swapping
933 * out actively used pages or breaking up actively used hugepages.
935 if (!shadow_accessed_mask
)
936 return kvm_unmap_rmapp(kvm
, rmapp
, data
);
938 spte
= rmap_next(kvm
, rmapp
, NULL
);
942 BUG_ON(!(_spte
& PT_PRESENT_MASK
));
943 _young
= _spte
& PT_ACCESSED_MASK
;
946 clear_bit(PT_ACCESSED_SHIFT
, (unsigned long *)spte
);
948 spte
= rmap_next(kvm
, rmapp
, spte
);
953 static int kvm_test_age_rmapp(struct kvm
*kvm
, unsigned long *rmapp
,
960 * If there's no access bit in the secondary pte set by the
961 * hardware it's up to gup-fast/gup to set the access bit in
962 * the primary pte or in the page structure.
964 if (!shadow_accessed_mask
)
967 spte
= rmap_next(kvm
, rmapp
, NULL
);
970 BUG_ON(!(_spte
& PT_PRESENT_MASK
));
971 young
= _spte
& PT_ACCESSED_MASK
;
976 spte
= rmap_next(kvm
, rmapp
, spte
);
982 #define RMAP_RECYCLE_THRESHOLD 1000
984 static void rmap_recycle(struct kvm_vcpu
*vcpu
, u64
*spte
, gfn_t gfn
)
986 unsigned long *rmapp
;
987 struct kvm_mmu_page
*sp
;
989 sp
= page_header(__pa(spte
));
991 rmapp
= gfn_to_rmap(vcpu
->kvm
, gfn
, sp
->role
.level
);
993 kvm_unmap_rmapp(vcpu
->kvm
, rmapp
, 0);
994 kvm_flush_remote_tlbs(vcpu
->kvm
);
997 int kvm_age_hva(struct kvm
*kvm
, unsigned long hva
)
999 return kvm_handle_hva(kvm
, hva
, 0, kvm_age_rmapp
);
1002 int kvm_test_age_hva(struct kvm
*kvm
, unsigned long hva
)
1004 return kvm_handle_hva(kvm
, hva
, 0, kvm_test_age_rmapp
);
1008 static int is_empty_shadow_page(u64
*spt
)
1013 for (pos
= spt
, end
= pos
+ PAGE_SIZE
/ sizeof(u64
); pos
!= end
; pos
++)
1014 if (is_shadow_present_pte(*pos
)) {
1015 printk(KERN_ERR
"%s: %p %llx\n", __func__
,
 * This value is the sum of all of the kvm instances' kvm->arch.n_used_mmu_pages
 * values.  We need a global, aggregate version in order to make the slab
 * shrinker faster.
1029 static inline void kvm_mod_used_mmu_pages(struct kvm
*kvm
, int nr
)
1031 kvm
->arch
.n_used_mmu_pages
+= nr
;
1032 percpu_counter_add(&kvm_total_used_mmu_pages
, nr
);
1035 static void kvm_mmu_free_page(struct kvm
*kvm
, struct kvm_mmu_page
*sp
)
1037 ASSERT(is_empty_shadow_page(sp
->spt
));
1038 hlist_del(&sp
->hash_link
);
1039 list_del(&sp
->link
);
1040 free_page((unsigned long)sp
->spt
);
1041 if (!sp
->role
.direct
)
1042 free_page((unsigned long)sp
->gfns
);
1043 kmem_cache_free(mmu_page_header_cache
, sp
);
1044 kvm_mod_used_mmu_pages(kvm
, -1);
1047 static unsigned kvm_page_table_hashfn(gfn_t gfn
)
1049 return gfn
& ((1 << KVM_MMU_HASH_SHIFT
) - 1);
1052 static struct kvm_mmu_page
*kvm_mmu_alloc_page(struct kvm_vcpu
*vcpu
,
1053 u64
*parent_pte
, int direct
)
1055 struct kvm_mmu_page
*sp
;
1057 sp
= mmu_memory_cache_alloc(&vcpu
->arch
.mmu_page_header_cache
, sizeof *sp
);
1058 sp
->spt
= mmu_memory_cache_alloc(&vcpu
->arch
.mmu_page_cache
, PAGE_SIZE
);
1060 sp
->gfns
= mmu_memory_cache_alloc(&vcpu
->arch
.mmu_page_cache
,
1062 set_page_private(virt_to_page(sp
->spt
), (unsigned long)sp
);
1063 list_add(&sp
->link
, &vcpu
->kvm
->arch
.active_mmu_pages
);
1064 bitmap_zero(sp
->slot_bitmap
, KVM_MEMORY_SLOTS
+ KVM_PRIVATE_MEM_SLOTS
);
1065 sp
->multimapped
= 0;
1066 sp
->parent_pte
= parent_pte
;
1067 kvm_mod_used_mmu_pages(vcpu
->kvm
, +1);
1071 static void mmu_page_add_parent_pte(struct kvm_vcpu
*vcpu
,
1072 struct kvm_mmu_page
*sp
, u64
*parent_pte
)
1074 struct kvm_pte_chain
*pte_chain
;
1075 struct hlist_node
*node
;
1080 if (!sp
->multimapped
) {
1081 u64
*old
= sp
->parent_pte
;
1084 sp
->parent_pte
= parent_pte
;
1087 sp
->multimapped
= 1;
1088 pte_chain
= mmu_alloc_pte_chain(vcpu
);
1089 INIT_HLIST_HEAD(&sp
->parent_ptes
);
1090 hlist_add_head(&pte_chain
->link
, &sp
->parent_ptes
);
1091 pte_chain
->parent_ptes
[0] = old
;
1093 hlist_for_each_entry(pte_chain
, node
, &sp
->parent_ptes
, link
) {
1094 if (pte_chain
->parent_ptes
[NR_PTE_CHAIN_ENTRIES
-1])
1096 for (i
= 0; i
< NR_PTE_CHAIN_ENTRIES
; ++i
)
1097 if (!pte_chain
->parent_ptes
[i
]) {
1098 pte_chain
->parent_ptes
[i
] = parent_pte
;
1102 pte_chain
= mmu_alloc_pte_chain(vcpu
);
1104 hlist_add_head(&pte_chain
->link
, &sp
->parent_ptes
);
1105 pte_chain
->parent_ptes
[0] = parent_pte
;
1108 static void mmu_page_remove_parent_pte(struct kvm_mmu_page
*sp
,
1111 struct kvm_pte_chain
*pte_chain
;
1112 struct hlist_node
*node
;
1115 if (!sp
->multimapped
) {
1116 BUG_ON(sp
->parent_pte
!= parent_pte
);
1117 sp
->parent_pte
= NULL
;
1120 hlist_for_each_entry(pte_chain
, node
, &sp
->parent_ptes
, link
)
1121 for (i
= 0; i
< NR_PTE_CHAIN_ENTRIES
; ++i
) {
1122 if (!pte_chain
->parent_ptes
[i
])
1124 if (pte_chain
->parent_ptes
[i
] != parent_pte
)
1126 while (i
+ 1 < NR_PTE_CHAIN_ENTRIES
1127 && pte_chain
->parent_ptes
[i
+ 1]) {
1128 pte_chain
->parent_ptes
[i
]
1129 = pte_chain
->parent_ptes
[i
+ 1];
1132 pte_chain
->parent_ptes
[i
] = NULL
;
1134 hlist_del(&pte_chain
->link
);
1135 mmu_free_pte_chain(pte_chain
);
1136 if (hlist_empty(&sp
->parent_ptes
)) {
1137 sp
->multimapped
= 0;
1138 sp
->parent_pte
= NULL
;
1146 static void mmu_parent_walk(struct kvm_mmu_page
*sp
, mmu_parent_walk_fn fn
)
1148 struct kvm_pte_chain
*pte_chain
;
1149 struct hlist_node
*node
;
1150 struct kvm_mmu_page
*parent_sp
;
1153 if (!sp
->multimapped
&& sp
->parent_pte
) {
1154 parent_sp
= page_header(__pa(sp
->parent_pte
));
1155 fn(parent_sp
, sp
->parent_pte
);
1159 hlist_for_each_entry(pte_chain
, node
, &sp
->parent_ptes
, link
)
1160 for (i
= 0; i
< NR_PTE_CHAIN_ENTRIES
; ++i
) {
1161 u64
*spte
= pte_chain
->parent_ptes
[i
];
1165 parent_sp
= page_header(__pa(spte
));
1166 fn(parent_sp
, spte
);
1170 static void mark_unsync(struct kvm_mmu_page
*sp
, u64
*spte
);
1171 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page
*sp
)
1173 mmu_parent_walk(sp
, mark_unsync
);
1176 static void mark_unsync(struct kvm_mmu_page
*sp
, u64
*spte
)
1180 index
= spte
- sp
->spt
;
1181 if (__test_and_set_bit(index
, sp
->unsync_child_bitmap
))
1183 if (sp
->unsync_children
++)
1185 kvm_mmu_mark_parents_unsync(sp
);
1188 static void nonpaging_prefetch_page(struct kvm_vcpu
*vcpu
,
1189 struct kvm_mmu_page
*sp
)
1193 for (i
= 0; i
< PT64_ENT_PER_PAGE
; ++i
)
1194 sp
->spt
[i
] = shadow_trap_nonpresent_pte
;
1197 static int nonpaging_sync_page(struct kvm_vcpu
*vcpu
,
1198 struct kvm_mmu_page
*sp
)
1203 static void nonpaging_invlpg(struct kvm_vcpu
*vcpu
, gva_t gva
)
1207 static void nonpaging_update_pte(struct kvm_vcpu
*vcpu
,
1208 struct kvm_mmu_page
*sp
, u64
*spte
,
1214 #define KVM_PAGE_ARRAY_NR 16
1216 struct kvm_mmu_pages
{
1217 struct mmu_page_and_offset
{
1218 struct kvm_mmu_page
*sp
;
1220 } page
[KVM_PAGE_ARRAY_NR
];
#define for_each_unsync_children(bitmap, idx)		\
	for (idx = find_first_bit(bitmap, 512);		\
	     idx < 512;					\
	     idx = find_next_bit(bitmap, 512, idx+1))
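/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the bitmap has one bit per shadow-page entry (PAGE_SIZE / sizeof(u64) ==
 * 512), and a set bit means the child reached through that entry has, or
 * leads to, unsync pages.  A trivial walker looks like this:
 */
#if 0	/* example only, never compiled */
static int example_count_unsync_entries(struct kvm_mmu_page *sp)
{
	int i, n = 0;

	for_each_unsync_children(sp->unsync_child_bitmap, i)
		n++;
	return n;
}
#endif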
1229 static int mmu_pages_add(struct kvm_mmu_pages
*pvec
, struct kvm_mmu_page
*sp
,
1235 for (i
=0; i
< pvec
->nr
; i
++)
1236 if (pvec
->page
[i
].sp
== sp
)
1239 pvec
->page
[pvec
->nr
].sp
= sp
;
1240 pvec
->page
[pvec
->nr
].idx
= idx
;
1242 return (pvec
->nr
== KVM_PAGE_ARRAY_NR
);
1245 static int __mmu_unsync_walk(struct kvm_mmu_page
*sp
,
1246 struct kvm_mmu_pages
*pvec
)
1248 int i
, ret
, nr_unsync_leaf
= 0;
1250 for_each_unsync_children(sp
->unsync_child_bitmap
, i
) {
1251 struct kvm_mmu_page
*child
;
1252 u64 ent
= sp
->spt
[i
];
1254 if (!is_shadow_present_pte(ent
) || is_large_pte(ent
))
1255 goto clear_child_bitmap
;
1257 child
= page_header(ent
& PT64_BASE_ADDR_MASK
);
1259 if (child
->unsync_children
) {
1260 if (mmu_pages_add(pvec
, child
, i
))
1263 ret
= __mmu_unsync_walk(child
, pvec
);
1265 goto clear_child_bitmap
;
1267 nr_unsync_leaf
+= ret
;
1270 } else if (child
->unsync
) {
1272 if (mmu_pages_add(pvec
, child
, i
))
1275 goto clear_child_bitmap
;
1280 __clear_bit(i
, sp
->unsync_child_bitmap
);
1281 sp
->unsync_children
--;
1282 WARN_ON((int)sp
->unsync_children
< 0);
1286 return nr_unsync_leaf
;
1289 static int mmu_unsync_walk(struct kvm_mmu_page
*sp
,
1290 struct kvm_mmu_pages
*pvec
)
1292 if (!sp
->unsync_children
)
1295 mmu_pages_add(pvec
, sp
, 0);
1296 return __mmu_unsync_walk(sp
, pvec
);
1299 static void kvm_unlink_unsync_page(struct kvm
*kvm
, struct kvm_mmu_page
*sp
)
1301 WARN_ON(!sp
->unsync
);
1302 trace_kvm_mmu_sync_page(sp
);
1304 --kvm
->stat
.mmu_unsync
;
1307 static int kvm_mmu_prepare_zap_page(struct kvm
*kvm
, struct kvm_mmu_page
*sp
,
1308 struct list_head
*invalid_list
);
1309 static void kvm_mmu_commit_zap_page(struct kvm
*kvm
,
1310 struct list_head
*invalid_list
);
#define for_each_gfn_sp(kvm, sp, gfn, pos)				\
	hlist_for_each_entry(sp, pos,					\
	 &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
		if ((sp)->gfn != (gfn)) {} else

#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos)		\
	hlist_for_each_entry(sp, pos,					\
	 &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
		if ((sp)->gfn != (gfn) || (sp)->role.direct ||		\
			(sp)->role.invalid) {} else
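/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a typical user of the iterators above, checking under mmu_lock whether
 * any valid indirect shadow page exists for @gfn.
 */
#if 0	/* example only, never compiled */
static bool example_gfn_has_indirect_sp(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node)
		return true;
	return false;
}
#endif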
1323 /* @sp->gfn should be write-protected at the call site */
1324 static int __kvm_sync_page(struct kvm_vcpu
*vcpu
, struct kvm_mmu_page
*sp
,
1325 struct list_head
*invalid_list
, bool clear_unsync
)
1327 if (sp
->role
.cr4_pae
!= !!is_pae(vcpu
)) {
1328 kvm_mmu_prepare_zap_page(vcpu
->kvm
, sp
, invalid_list
);
1333 kvm_unlink_unsync_page(vcpu
->kvm
, sp
);
1335 if (vcpu
->arch
.mmu
.sync_page(vcpu
, sp
)) {
1336 kvm_mmu_prepare_zap_page(vcpu
->kvm
, sp
, invalid_list
);
1340 kvm_mmu_flush_tlb(vcpu
);
1344 static int kvm_sync_page_transient(struct kvm_vcpu
*vcpu
,
1345 struct kvm_mmu_page
*sp
)
1347 LIST_HEAD(invalid_list
);
1350 ret
= __kvm_sync_page(vcpu
, sp
, &invalid_list
, false);
1352 kvm_mmu_commit_zap_page(vcpu
->kvm
, &invalid_list
);
1357 static int kvm_sync_page(struct kvm_vcpu
*vcpu
, struct kvm_mmu_page
*sp
,
1358 struct list_head
*invalid_list
)
1360 return __kvm_sync_page(vcpu
, sp
, invalid_list
, true);
1363 /* @gfn should be write-protected at the call site */
1364 static void kvm_sync_pages(struct kvm_vcpu
*vcpu
, gfn_t gfn
)
1366 struct kvm_mmu_page
*s
;
1367 struct hlist_node
*node
;
1368 LIST_HEAD(invalid_list
);
1371 for_each_gfn_indirect_valid_sp(vcpu
->kvm
, s
, gfn
, node
) {
1375 WARN_ON(s
->role
.level
!= PT_PAGE_TABLE_LEVEL
);
1376 kvm_unlink_unsync_page(vcpu
->kvm
, s
);
1377 if ((s
->role
.cr4_pae
!= !!is_pae(vcpu
)) ||
1378 (vcpu
->arch
.mmu
.sync_page(vcpu
, s
))) {
1379 kvm_mmu_prepare_zap_page(vcpu
->kvm
, s
, &invalid_list
);
1385 kvm_mmu_commit_zap_page(vcpu
->kvm
, &invalid_list
);
1387 kvm_mmu_flush_tlb(vcpu
);
1390 struct mmu_page_path
{
1391 struct kvm_mmu_page
*parent
[PT64_ROOT_LEVEL
-1];
1392 unsigned int idx
[PT64_ROOT_LEVEL
-1];
1395 #define for_each_sp(pvec, sp, parents, i) \
1396 for (i = mmu_pages_next(&pvec, &parents, -1), \
1397 sp = pvec.page[i].sp; \
1398 i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \
1399 i = mmu_pages_next(&pvec, &parents, i))
1401 static int mmu_pages_next(struct kvm_mmu_pages
*pvec
,
1402 struct mmu_page_path
*parents
,
1407 for (n
= i
+1; n
< pvec
->nr
; n
++) {
1408 struct kvm_mmu_page
*sp
= pvec
->page
[n
].sp
;
1410 if (sp
->role
.level
== PT_PAGE_TABLE_LEVEL
) {
1411 parents
->idx
[0] = pvec
->page
[n
].idx
;
1415 parents
->parent
[sp
->role
.level
-2] = sp
;
1416 parents
->idx
[sp
->role
.level
-1] = pvec
->page
[n
].idx
;
1422 static void mmu_pages_clear_parents(struct mmu_page_path
*parents
)
1424 struct kvm_mmu_page
*sp
;
1425 unsigned int level
= 0;
1428 unsigned int idx
= parents
->idx
[level
];
1430 sp
= parents
->parent
[level
];
1434 --sp
->unsync_children
;
1435 WARN_ON((int)sp
->unsync_children
< 0);
1436 __clear_bit(idx
, sp
->unsync_child_bitmap
);
1438 } while (level
< PT64_ROOT_LEVEL
-1 && !sp
->unsync_children
);
1441 static void kvm_mmu_pages_init(struct kvm_mmu_page
*parent
,
1442 struct mmu_page_path
*parents
,
1443 struct kvm_mmu_pages
*pvec
)
1445 parents
->parent
[parent
->role
.level
-1] = NULL
;
1449 static void mmu_sync_children(struct kvm_vcpu
*vcpu
,
1450 struct kvm_mmu_page
*parent
)
1453 struct kvm_mmu_page
*sp
;
1454 struct mmu_page_path parents
;
1455 struct kvm_mmu_pages pages
;
1456 LIST_HEAD(invalid_list
);
1458 kvm_mmu_pages_init(parent
, &parents
, &pages
);
1459 while (mmu_unsync_walk(parent
, &pages
)) {
1462 for_each_sp(pages
, sp
, parents
, i
)
1463 protected |= rmap_write_protect(vcpu
->kvm
, sp
->gfn
);
1466 kvm_flush_remote_tlbs(vcpu
->kvm
);
1468 for_each_sp(pages
, sp
, parents
, i
) {
1469 kvm_sync_page(vcpu
, sp
, &invalid_list
);
1470 mmu_pages_clear_parents(&parents
);
1472 kvm_mmu_commit_zap_page(vcpu
->kvm
, &invalid_list
);
1473 cond_resched_lock(&vcpu
->kvm
->mmu_lock
);
1474 kvm_mmu_pages_init(parent
, &parents
, &pages
);
1478 static struct kvm_mmu_page
*kvm_mmu_get_page(struct kvm_vcpu
*vcpu
,
1486 union kvm_mmu_page_role role
;
1488 struct kvm_mmu_page
*sp
;
1489 struct hlist_node
*node
;
1490 bool need_sync
= false;
1492 role
= vcpu
->arch
.mmu
.base_role
;
1494 role
.direct
= direct
;
1497 role
.access
= access
;
1498 if (!vcpu
->arch
.mmu
.direct_map
1499 && vcpu
->arch
.mmu
.root_level
<= PT32_ROOT_LEVEL
) {
1500 quadrant
= gaddr
>> (PAGE_SHIFT
+ (PT64_PT_BITS
* level
));
1501 quadrant
&= (1 << ((PT32_PT_BITS
- PT64_PT_BITS
) * level
)) - 1;
1502 role
.quadrant
= quadrant
;
1504 for_each_gfn_sp(vcpu
->kvm
, sp
, gfn
, node
) {
1505 if (!need_sync
&& sp
->unsync
)
1508 if (sp
->role
.word
!= role
.word
)
1511 if (sp
->unsync
&& kvm_sync_page_transient(vcpu
, sp
))
1514 mmu_page_add_parent_pte(vcpu
, sp
, parent_pte
);
1515 if (sp
->unsync_children
) {
1516 kvm_make_request(KVM_REQ_MMU_SYNC
, vcpu
);
1517 kvm_mmu_mark_parents_unsync(sp
);
1518 } else if (sp
->unsync
)
1519 kvm_mmu_mark_parents_unsync(sp
);
1521 trace_kvm_mmu_get_page(sp
, false);
1524 ++vcpu
->kvm
->stat
.mmu_cache_miss
;
1525 sp
= kvm_mmu_alloc_page(vcpu
, parent_pte
, direct
);
1530 hlist_add_head(&sp
->hash_link
,
1531 &vcpu
->kvm
->arch
.mmu_page_hash
[kvm_page_table_hashfn(gfn
)]);
1533 if (rmap_write_protect(vcpu
->kvm
, gfn
))
1534 kvm_flush_remote_tlbs(vcpu
->kvm
);
1535 if (level
> PT_PAGE_TABLE_LEVEL
&& need_sync
)
1536 kvm_sync_pages(vcpu
, gfn
);
1538 account_shadowed(vcpu
->kvm
, gfn
);
1540 if (shadow_trap_nonpresent_pte
!= shadow_notrap_nonpresent_pte
)
1541 vcpu
->arch
.mmu
.prefetch_page(vcpu
, sp
);
1543 nonpaging_prefetch_page(vcpu
, sp
);
1544 trace_kvm_mmu_get_page(sp
, true);
1548 static void shadow_walk_init(struct kvm_shadow_walk_iterator
*iterator
,
1549 struct kvm_vcpu
*vcpu
, u64 addr
)
1551 iterator
->addr
= addr
;
1552 iterator
->shadow_addr
= vcpu
->arch
.mmu
.root_hpa
;
1553 iterator
->level
= vcpu
->arch
.mmu
.shadow_root_level
;
1555 if (iterator
->level
== PT64_ROOT_LEVEL
&&
1556 vcpu
->arch
.mmu
.root_level
< PT64_ROOT_LEVEL
&&
1557 !vcpu
->arch
.mmu
.direct_map
)
1560 if (iterator
->level
== PT32E_ROOT_LEVEL
) {
1561 iterator
->shadow_addr
1562 = vcpu
->arch
.mmu
.pae_root
[(addr
>> 30) & 3];
1563 iterator
->shadow_addr
&= PT64_BASE_ADDR_MASK
;
1565 if (!iterator
->shadow_addr
)
1566 iterator
->level
= 0;
1570 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator
*iterator
)
1572 if (iterator
->level
< PT_PAGE_TABLE_LEVEL
)
1575 if (iterator
->level
== PT_PAGE_TABLE_LEVEL
)
1576 if (is_large_pte(*iterator
->sptep
))
1579 iterator
->index
= SHADOW_PT_INDEX(iterator
->addr
, iterator
->level
);
1580 iterator
->sptep
= ((u64
*)__va(iterator
->shadow_addr
)) + iterator
->index
;
1584 static void shadow_walk_next(struct kvm_shadow_walk_iterator
*iterator
)
1586 iterator
->shadow_addr
= *iterator
->sptep
& PT64_BASE_ADDR_MASK
;
1590 static void link_shadow_page(u64
*sptep
, struct kvm_mmu_page
*sp
)
1594 spte
= __pa(sp
->spt
)
1595 | PT_PRESENT_MASK
| PT_ACCESSED_MASK
1596 | PT_WRITABLE_MASK
| PT_USER_MASK
;
1597 __set_spte(sptep
, spte
);
1600 static void drop_large_spte(struct kvm_vcpu
*vcpu
, u64
*sptep
)
1602 if (is_large_pte(*sptep
)) {
1603 drop_spte(vcpu
->kvm
, sptep
, shadow_trap_nonpresent_pte
);
1604 kvm_flush_remote_tlbs(vcpu
->kvm
);
1608 static void validate_direct_spte(struct kvm_vcpu
*vcpu
, u64
*sptep
,
1609 unsigned direct_access
)
1611 if (is_shadow_present_pte(*sptep
) && !is_large_pte(*sptep
)) {
1612 struct kvm_mmu_page
*child
;
	 * For the direct sp, if the guest pte's dirty bit
	 * changed from clean to dirty, it would corrupt the
	 * sp's access: writes would become allowed through a
	 * read-only sp, so we should update the spte at this
	 * point to get a new sp with the correct access.
1621 child
= page_header(*sptep
& PT64_BASE_ADDR_MASK
);
1622 if (child
->role
.access
== direct_access
)
1625 mmu_page_remove_parent_pte(child
, sptep
);
1626 __set_spte(sptep
, shadow_trap_nonpresent_pte
);
1627 kvm_flush_remote_tlbs(vcpu
->kvm
);
1631 static void kvm_mmu_page_unlink_children(struct kvm
*kvm
,
1632 struct kvm_mmu_page
*sp
)
1640 for (i
= 0; i
< PT64_ENT_PER_PAGE
; ++i
) {
1643 if (is_shadow_present_pte(ent
)) {
1644 if (!is_last_spte(ent
, sp
->role
.level
)) {
1645 ent
&= PT64_BASE_ADDR_MASK
;
1646 mmu_page_remove_parent_pte(page_header(ent
),
1649 if (is_large_pte(ent
))
1651 drop_spte(kvm
, &pt
[i
],
1652 shadow_trap_nonpresent_pte
);
1655 pt
[i
] = shadow_trap_nonpresent_pte
;
1659 static void kvm_mmu_put_page(struct kvm_mmu_page
*sp
, u64
*parent_pte
)
1661 mmu_page_remove_parent_pte(sp
, parent_pte
);
1664 static void kvm_mmu_reset_last_pte_updated(struct kvm
*kvm
)
1667 struct kvm_vcpu
*vcpu
;
1669 kvm_for_each_vcpu(i
, vcpu
, kvm
)
1670 vcpu
->arch
.last_pte_updated
= NULL
;
1673 static void kvm_mmu_unlink_parents(struct kvm
*kvm
, struct kvm_mmu_page
*sp
)
1677 while (sp
->multimapped
|| sp
->parent_pte
) {
1678 if (!sp
->multimapped
)
1679 parent_pte
= sp
->parent_pte
;
1681 struct kvm_pte_chain
*chain
;
1683 chain
= container_of(sp
->parent_ptes
.first
,
1684 struct kvm_pte_chain
, link
);
1685 parent_pte
= chain
->parent_ptes
[0];
1687 BUG_ON(!parent_pte
);
1688 kvm_mmu_put_page(sp
, parent_pte
);
1689 __set_spte(parent_pte
, shadow_trap_nonpresent_pte
);
1693 static int mmu_zap_unsync_children(struct kvm
*kvm
,
1694 struct kvm_mmu_page
*parent
,
1695 struct list_head
*invalid_list
)
1698 struct mmu_page_path parents
;
1699 struct kvm_mmu_pages pages
;
1701 if (parent
->role
.level
== PT_PAGE_TABLE_LEVEL
)
1704 kvm_mmu_pages_init(parent
, &parents
, &pages
);
1705 while (mmu_unsync_walk(parent
, &pages
)) {
1706 struct kvm_mmu_page
*sp
;
1708 for_each_sp(pages
, sp
, parents
, i
) {
1709 kvm_mmu_prepare_zap_page(kvm
, sp
, invalid_list
);
1710 mmu_pages_clear_parents(&parents
);
1713 kvm_mmu_pages_init(parent
, &parents
, &pages
);
1719 static int kvm_mmu_prepare_zap_page(struct kvm
*kvm
, struct kvm_mmu_page
*sp
,
1720 struct list_head
*invalid_list
)
1724 trace_kvm_mmu_prepare_zap_page(sp
);
1725 ++kvm
->stat
.mmu_shadow_zapped
;
1726 ret
= mmu_zap_unsync_children(kvm
, sp
, invalid_list
);
1727 kvm_mmu_page_unlink_children(kvm
, sp
);
1728 kvm_mmu_unlink_parents(kvm
, sp
);
1729 if (!sp
->role
.invalid
&& !sp
->role
.direct
)
1730 unaccount_shadowed(kvm
, sp
->gfn
);
1732 kvm_unlink_unsync_page(kvm
, sp
);
1733 if (!sp
->root_count
) {
1736 list_move(&sp
->link
, invalid_list
);
1738 list_move(&sp
->link
, &kvm
->arch
.active_mmu_pages
);
1739 kvm_reload_remote_mmus(kvm
);
1742 sp
->role
.invalid
= 1;
1743 kvm_mmu_reset_last_pte_updated(kvm
);
1747 static void kvm_mmu_commit_zap_page(struct kvm
*kvm
,
1748 struct list_head
*invalid_list
)
1750 struct kvm_mmu_page
*sp
;
1752 if (list_empty(invalid_list
))
1755 kvm_flush_remote_tlbs(kvm
);
1758 sp
= list_first_entry(invalid_list
, struct kvm_mmu_page
, link
);
1759 WARN_ON(!sp
->role
.invalid
|| sp
->root_count
);
1760 kvm_mmu_free_page(kvm
, sp
);
1761 } while (!list_empty(invalid_list
));
 * Changing the number of mmu pages allocated to the vm
 * Note: if goal_nr_mmu_pages is too small, you will get a deadlock
1769 void kvm_mmu_change_mmu_pages(struct kvm
*kvm
, unsigned int goal_nr_mmu_pages
)
1771 LIST_HEAD(invalid_list
);
	 * If we set the number of mmu pages to be smaller than the
	 * number of active pages, we must free some mmu pages before we
	 * can change the value.
1778 if (kvm
->arch
.n_used_mmu_pages
> goal_nr_mmu_pages
) {
1779 while (kvm
->arch
.n_used_mmu_pages
> goal_nr_mmu_pages
&&
1780 !list_empty(&kvm
->arch
.active_mmu_pages
)) {
1781 struct kvm_mmu_page
*page
;
1783 page
= container_of(kvm
->arch
.active_mmu_pages
.prev
,
1784 struct kvm_mmu_page
, link
);
1785 kvm_mmu_prepare_zap_page(kvm
, page
, &invalid_list
);
1786 kvm_mmu_commit_zap_page(kvm
, &invalid_list
);
1788 goal_nr_mmu_pages
= kvm
->arch
.n_used_mmu_pages
;
1791 kvm
->arch
.n_max_mmu_pages
= goal_nr_mmu_pages
;
1794 static int kvm_mmu_unprotect_page(struct kvm
*kvm
, gfn_t gfn
)
1796 struct kvm_mmu_page
*sp
;
1797 struct hlist_node
*node
;
1798 LIST_HEAD(invalid_list
);
1801 pgprintk("%s: looking for gfn %llx\n", __func__
, gfn
);
1804 for_each_gfn_indirect_valid_sp(kvm
, sp
, gfn
, node
) {
1805 pgprintk("%s: gfn %llx role %x\n", __func__
, gfn
,
1808 kvm_mmu_prepare_zap_page(kvm
, sp
, &invalid_list
);
1810 kvm_mmu_commit_zap_page(kvm
, &invalid_list
);
1814 static void mmu_unshadow(struct kvm
*kvm
, gfn_t gfn
)
1816 struct kvm_mmu_page
*sp
;
1817 struct hlist_node
*node
;
1818 LIST_HEAD(invalid_list
);
1820 for_each_gfn_indirect_valid_sp(kvm
, sp
, gfn
, node
) {
1821 pgprintk("%s: zap %llx %x\n",
1822 __func__
, gfn
, sp
->role
.word
);
1823 kvm_mmu_prepare_zap_page(kvm
, sp
, &invalid_list
);
1825 kvm_mmu_commit_zap_page(kvm
, &invalid_list
);
1828 static void page_header_update_slot(struct kvm
*kvm
, void *pte
, gfn_t gfn
)
1830 int slot
= memslot_id(kvm
, gfn
);
1831 struct kvm_mmu_page
*sp
= page_header(__pa(pte
));
1833 __set_bit(slot
, sp
->slot_bitmap
);
1836 static void mmu_convert_notrap(struct kvm_mmu_page
*sp
)
1841 if (shadow_trap_nonpresent_pte
== shadow_notrap_nonpresent_pte
)
1844 for (i
= 0; i
< PT64_ENT_PER_PAGE
; ++i
) {
1845 if (pt
[i
] == shadow_notrap_nonpresent_pte
)
1846 __set_spte(&pt
[i
], shadow_trap_nonpresent_pte
);
1851 * The function is based on mtrr_type_lookup() in
1852 * arch/x86/kernel/cpu/mtrr/generic.c
1854 static int get_mtrr_type(struct mtrr_state_type
*mtrr_state
,
1859 u8 prev_match
, curr_match
;
1860 int num_var_ranges
= KVM_NR_VAR_MTRR
;
1862 if (!mtrr_state
->enabled
)
	/* Make end inclusive, instead of exclusive */
1868 /* Look in fixed ranges. Just return the type as per start */
1869 if (mtrr_state
->have_fixed
&& (start
< 0x100000)) {
1872 if (start
< 0x80000) {
1874 idx
+= (start
>> 16);
1875 return mtrr_state
->fixed_ranges
[idx
];
1876 } else if (start
< 0xC0000) {
1878 idx
+= ((start
- 0x80000) >> 14);
1879 return mtrr_state
->fixed_ranges
[idx
];
1880 } else if (start
< 0x1000000) {
1882 idx
+= ((start
- 0xC0000) >> 12);
1883 return mtrr_state
->fixed_ranges
[idx
];
	 * Look in variable ranges
	 * Look for multiple ranges matching this address and pick the type
	 * as per MTRR precedence
1892 if (!(mtrr_state
->enabled
& 2))
1893 return mtrr_state
->def_type
;
1896 for (i
= 0; i
< num_var_ranges
; ++i
) {
1897 unsigned short start_state
, end_state
;
1899 if (!(mtrr_state
->var_ranges
[i
].mask_lo
& (1 << 11)))
1902 base
= (((u64
)mtrr_state
->var_ranges
[i
].base_hi
) << 32) +
1903 (mtrr_state
->var_ranges
[i
].base_lo
& PAGE_MASK
);
1904 mask
= (((u64
)mtrr_state
->var_ranges
[i
].mask_hi
) << 32) +
1905 (mtrr_state
->var_ranges
[i
].mask_lo
& PAGE_MASK
);
1907 start_state
= ((start
& mask
) == (base
& mask
));
1908 end_state
= ((end
& mask
) == (base
& mask
));
1909 if (start_state
!= end_state
)
1912 if ((start
& mask
) != (base
& mask
))
1915 curr_match
= mtrr_state
->var_ranges
[i
].base_lo
& 0xff;
1916 if (prev_match
== 0xFF) {
1917 prev_match
= curr_match
;
1921 if (prev_match
== MTRR_TYPE_UNCACHABLE
||
1922 curr_match
== MTRR_TYPE_UNCACHABLE
)
1923 return MTRR_TYPE_UNCACHABLE
;
1925 if ((prev_match
== MTRR_TYPE_WRBACK
&&
1926 curr_match
== MTRR_TYPE_WRTHROUGH
) ||
1927 (prev_match
== MTRR_TYPE_WRTHROUGH
&&
1928 curr_match
== MTRR_TYPE_WRBACK
)) {
1929 prev_match
= MTRR_TYPE_WRTHROUGH
;
1930 curr_match
= MTRR_TYPE_WRTHROUGH
;
1933 if (prev_match
!= curr_match
)
1934 return MTRR_TYPE_UNCACHABLE
;
1937 if (prev_match
!= 0xFF)
1940 return mtrr_state
->def_type
;
1943 u8
kvm_get_guest_memory_type(struct kvm_vcpu
*vcpu
, gfn_t gfn
)
1947 mtrr
= get_mtrr_type(&vcpu
->arch
.mtrr_state
, gfn
<< PAGE_SHIFT
,
1948 (gfn
<< PAGE_SHIFT
) + PAGE_SIZE
);
1949 if (mtrr
== 0xfe || mtrr
== 0xff)
1950 mtrr
= MTRR_TYPE_WRBACK
;
1953 EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type
);
1955 static void __kvm_unsync_page(struct kvm_vcpu
*vcpu
, struct kvm_mmu_page
*sp
)
1957 trace_kvm_mmu_unsync_page(sp
);
1958 ++vcpu
->kvm
->stat
.mmu_unsync
;
1961 kvm_mmu_mark_parents_unsync(sp
);
1962 mmu_convert_notrap(sp
);
1965 static void kvm_unsync_pages(struct kvm_vcpu
*vcpu
, gfn_t gfn
)
1967 struct kvm_mmu_page
*s
;
1968 struct hlist_node
*node
;
1970 for_each_gfn_indirect_valid_sp(vcpu
->kvm
, s
, gfn
, node
) {
1973 WARN_ON(s
->role
.level
!= PT_PAGE_TABLE_LEVEL
);
1974 __kvm_unsync_page(vcpu
, s
);
1978 static int mmu_need_write_protect(struct kvm_vcpu
*vcpu
, gfn_t gfn
,
1981 struct kvm_mmu_page
*s
;
1982 struct hlist_node
*node
;
1983 bool need_unsync
= false;
1985 for_each_gfn_indirect_valid_sp(vcpu
->kvm
, s
, gfn
, node
) {
1989 if (s
->role
.level
!= PT_PAGE_TABLE_LEVEL
)
1992 if (!need_unsync
&& !s
->unsync
) {
1999 kvm_unsync_pages(vcpu
, gfn
);
2003 static int set_spte(struct kvm_vcpu
*vcpu
, u64
*sptep
,
2004 unsigned pte_access
, int user_fault
,
2005 int write_fault
, int dirty
, int level
,
2006 gfn_t gfn
, pfn_t pfn
, bool speculative
,
2007 bool can_unsync
, bool host_writable
)
2009 u64 spte
, entry
= *sptep
;
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
2017 spte
= PT_PRESENT_MASK
;
2019 spte
|= shadow_accessed_mask
;
2021 pte_access
&= ~ACC_WRITE_MASK
;
2022 if (pte_access
& ACC_EXEC_MASK
)
2023 spte
|= shadow_x_mask
;
2025 spte
|= shadow_nx_mask
;
2026 if (pte_access
& ACC_USER_MASK
)
2027 spte
|= shadow_user_mask
;
2028 if (level
> PT_PAGE_TABLE_LEVEL
)
2029 spte
|= PT_PAGE_SIZE_MASK
;
2031 spte
|= kvm_x86_ops
->get_mt_mask(vcpu
, gfn
,
2032 kvm_is_mmio_pfn(pfn
));
2035 spte
|= SPTE_HOST_WRITEABLE
;
2037 pte_access
&= ~ACC_WRITE_MASK
;
2039 spte
|= (u64
)pfn
<< PAGE_SHIFT
;
2041 if ((pte_access
& ACC_WRITE_MASK
)
2042 || (!vcpu
->arch
.mmu
.direct_map
&& write_fault
2043 && !is_write_protection(vcpu
) && !user_fault
)) {
2045 if (level
> PT_PAGE_TABLE_LEVEL
&&
2046 has_wrprotected_page(vcpu
->kvm
, gfn
, level
)) {
2048 drop_spte(vcpu
->kvm
, sptep
, shadow_trap_nonpresent_pte
);
2052 spte
|= PT_WRITABLE_MASK
;
2054 if (!vcpu
->arch
.mmu
.direct_map
2055 && !(pte_access
& ACC_WRITE_MASK
))
2056 spte
&= ~PT_USER_MASK
;
	 * Optimization: for pte sync, if the spte was writable the hash
	 * lookup is unnecessary (and expensive). Write protection
	 * is the responsibility of mmu_get_page / kvm_sync_page.
	 * The same reasoning can be applied to dirty page accounting.
2064 if (!can_unsync
&& is_writable_pte(*sptep
))
2067 if (mmu_need_write_protect(vcpu
, gfn
, can_unsync
)) {
2068 pgprintk("%s: found shadow page for %llx, marking ro\n",
2071 pte_access
&= ~ACC_WRITE_MASK
;
2072 if (is_writable_pte(spte
))
2073 spte
&= ~PT_WRITABLE_MASK
;
2077 if (pte_access
& ACC_WRITE_MASK
)
2078 mark_page_dirty(vcpu
->kvm
, gfn
);
2081 update_spte(sptep
, spte
);
2083 * If we overwrite a writable spte with a read-only one we
2084 * should flush remote TLBs. Otherwise rmap_write_protect
2085 * will find a read-only spte, even though the writable spte
2086 * might be cached on a CPU's TLB.
2088 if (is_writable_pte(entry
) && !is_writable_pte(*sptep
))
2089 kvm_flush_remote_tlbs(vcpu
->kvm
);
2094 static void mmu_set_spte(struct kvm_vcpu
*vcpu
, u64
*sptep
,
2095 unsigned pt_access
, unsigned pte_access
,
2096 int user_fault
, int write_fault
, int dirty
,
2097 int *ptwrite
, int level
, gfn_t gfn
,
2098 pfn_t pfn
, bool speculative
,
2101 int was_rmapped
= 0;
2104 pgprintk("%s: spte %llx access %x write_fault %d"
2105 " user_fault %d gfn %llx\n",
2106 __func__
, *sptep
, pt_access
,
2107 write_fault
, user_fault
, gfn
);
2109 if (is_rmap_spte(*sptep
)) {
2111 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2112 * the parent of the now unreachable PTE.
2114 if (level
> PT_PAGE_TABLE_LEVEL
&&
2115 !is_large_pte(*sptep
)) {
2116 struct kvm_mmu_page
*child
;
2119 child
= page_header(pte
& PT64_BASE_ADDR_MASK
);
2120 mmu_page_remove_parent_pte(child
, sptep
);
2121 __set_spte(sptep
, shadow_trap_nonpresent_pte
);
2122 kvm_flush_remote_tlbs(vcpu
->kvm
);
2123 } else if (pfn
!= spte_to_pfn(*sptep
)) {
2124 pgprintk("hfn old %llx new %llx\n",
2125 spte_to_pfn(*sptep
), pfn
);
2126 drop_spte(vcpu
->kvm
, sptep
, shadow_trap_nonpresent_pte
);
2127 kvm_flush_remote_tlbs(vcpu
->kvm
);
2132 if (set_spte(vcpu
, sptep
, pte_access
, user_fault
, write_fault
,
2133 dirty
, level
, gfn
, pfn
, speculative
, true,
2137 kvm_mmu_flush_tlb(vcpu
);
2140 pgprintk("%s: setting spte %llx\n", __func__
, *sptep
);
2141 pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
2142 is_large_pte(*sptep
)? "2MB" : "4kB",
2143 *sptep
& PT_PRESENT_MASK
?"RW":"R", gfn
,
2145 if (!was_rmapped
&& is_large_pte(*sptep
))
2146 ++vcpu
->kvm
->stat
.lpages
;
2148 page_header_update_slot(vcpu
->kvm
, sptep
, gfn
);
2150 rmap_count
= rmap_add(vcpu
, sptep
, gfn
);
2151 if (rmap_count
> RMAP_RECYCLE_THRESHOLD
)
2152 rmap_recycle(vcpu
, sptep
, gfn
);
2154 kvm_release_pfn_clean(pfn
);
2156 vcpu
->arch
.last_pte_updated
= sptep
;
2157 vcpu
->arch
.last_pte_gfn
= gfn
;
2161 static void nonpaging_new_cr3(struct kvm_vcpu
*vcpu
)
2165 static pfn_t
pte_prefetch_gfn_to_pfn(struct kvm_vcpu
*vcpu
, gfn_t gfn
,
2168 struct kvm_memory_slot
*slot
;
2171 slot
= gfn_to_memslot_dirty_bitmap(vcpu
, gfn
, no_dirty_log
);
2174 return page_to_pfn(bad_page
);
2177 hva
= gfn_to_hva_memslot(slot
, gfn
);
2179 return hva_to_pfn_atomic(vcpu
->kvm
, hva
);
2182 static int direct_pte_prefetch_many(struct kvm_vcpu
*vcpu
,
2183 struct kvm_mmu_page
*sp
,
2184 u64
*start
, u64
*end
)
2186 struct page
*pages
[PTE_PREFETCH_NUM
];
2187 unsigned access
= sp
->role
.access
;
2191 gfn
= kvm_mmu_page_get_gfn(sp
, start
- sp
->spt
);
2192 if (!gfn_to_memslot_dirty_bitmap(vcpu
, gfn
, access
& ACC_WRITE_MASK
))
2195 ret
= gfn_to_page_many_atomic(vcpu
->kvm
, gfn
, pages
, end
- start
);
2199 for (i
= 0; i
< ret
; i
++, gfn
++, start
++)
2200 mmu_set_spte(vcpu
, start
, ACC_ALL
,
2201 access
, 0, 0, 1, NULL
,
2202 sp
->role
.level
, gfn
,
2203 page_to_pfn(pages
[i
]), true, true);
2208 static void __direct_pte_prefetch(struct kvm_vcpu
*vcpu
,
2209 struct kvm_mmu_page
*sp
, u64
*sptep
)
2211 u64
*spte
, *start
= NULL
;
2214 WARN_ON(!sp
->role
.direct
);
2216 i
= (sptep
- sp
->spt
) & ~(PTE_PREFETCH_NUM
- 1);
2219 for (i
= 0; i
< PTE_PREFETCH_NUM
; i
++, spte
++) {
2220 if (*spte
!= shadow_trap_nonpresent_pte
|| spte
== sptep
) {
2223 if (direct_pte_prefetch_many(vcpu
, sp
, start
, spte
) < 0)
2231 static void direct_pte_prefetch(struct kvm_vcpu
*vcpu
, u64
*sptep
)
2233 struct kvm_mmu_page
*sp
;
	 * Since there is no accessed bit on EPT, there is no way to
	 * distinguish between actually accessed translations
	 * and prefetched ones, so disable pte prefetch if EPT is
	 * enabled.
2241 if (!shadow_accessed_mask
)
2244 sp
= page_header(__pa(sptep
));
2245 if (sp
->role
.level
> PT_PAGE_TABLE_LEVEL
)
2248 __direct_pte_prefetch(vcpu
, sp
, sptep
);
2251 static int __direct_map(struct kvm_vcpu
*vcpu
, gpa_t v
, int write
,
2252 int map_writable
, int level
, gfn_t gfn
, pfn_t pfn
,
2255 struct kvm_shadow_walk_iterator iterator
;
2256 struct kvm_mmu_page
*sp
;
2260 for_each_shadow_entry(vcpu
, (u64
)gfn
<< PAGE_SHIFT
, iterator
) {
2261 if (iterator
.level
== level
) {
2262 unsigned pte_access
= ACC_ALL
;
2264 mmu_set_spte(vcpu
, iterator
.sptep
, ACC_ALL
, pte_access
,
2265 0, write
, 1, &pt_write
,
2266 level
, gfn
, pfn
, prefault
, map_writable
);
2267 direct_pte_prefetch(vcpu
, iterator
.sptep
);
2268 ++vcpu
->stat
.pf_fixed
;
2272 if (*iterator
.sptep
== shadow_trap_nonpresent_pte
) {
2273 u64 base_addr
= iterator
.addr
;
2275 base_addr
&= PT64_LVL_ADDR_MASK(iterator
.level
);
2276 pseudo_gfn
= base_addr
>> PAGE_SHIFT
;
2277 sp
= kvm_mmu_get_page(vcpu
, pseudo_gfn
, iterator
.addr
,
2279 1, ACC_ALL
, iterator
.sptep
);
2281 pgprintk("nonpaging_map: ENOMEM\n");
2282 kvm_release_pfn_clean(pfn
);
2286 __set_spte(iterator
.sptep
,
2288 | PT_PRESENT_MASK
| PT_WRITABLE_MASK
2289 | shadow_user_mask
| shadow_x_mask
2290 | shadow_accessed_mask
);
2296 static void kvm_send_hwpoison_signal(unsigned long address
, struct task_struct
*tsk
)
2300 info
.si_signo
= SIGBUS
;
2302 info
.si_code
= BUS_MCEERR_AR
;
2303 info
.si_addr
= (void __user
*)address
;
2304 info
.si_addr_lsb
= PAGE_SHIFT
;
2306 send_sig_info(SIGBUS
, &info
, tsk
);
2309 static int kvm_handle_bad_page(struct kvm
*kvm
, gfn_t gfn
, pfn_t pfn
)
2311 kvm_release_pfn_clean(pfn
);
2312 if (is_hwpoison_pfn(pfn
)) {
2313 kvm_send_hwpoison_signal(gfn_to_hva(kvm
, gfn
), current
);
2315 } else if (is_fault_pfn(pfn
))
2321 static void transparent_hugepage_adjust(struct kvm_vcpu
*vcpu
,
2322 gfn_t
*gfnp
, pfn_t
*pfnp
, int *levelp
)
2326 int level
= *levelp
;
	 * Check if it's a transparent hugepage. If this were a
	 * hugetlbfs page, level wouldn't be set to
	 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
	 * here.
2334 if (!is_error_pfn(pfn
) && !kvm_is_mmio_pfn(pfn
) &&
2335 level
== PT_PAGE_TABLE_LEVEL
&&
2336 PageTransCompound(pfn_to_page(pfn
)) &&
2337 !has_wrprotected_page(vcpu
->kvm
, gfn
, PT_DIRECTORY_LEVEL
)) {
	 * mmu_notifier_retry was successful and we hold the
	 * mmu_lock here, so the pmd can't become splitting
	 * from under us, and in turn
	 * __split_huge_page_refcount() can't run from under
	 * us and we can safely transfer the refcount from
	 * PG_tail to PG_head as we switch the pfn from the
	 * tail page to the head page.
2348 *levelp
= level
= PT_DIRECTORY_LEVEL
;
2349 mask
= KVM_PAGES_PER_HPAGE(level
) - 1;
2350 VM_BUG_ON((gfn
& mask
) != (pfn
& mask
));
2354 kvm_release_pfn_clean(pfn
);
2356 if (!get_page_unless_zero(pfn_to_page(pfn
)))
2363 static bool try_async_pf(struct kvm_vcpu
*vcpu
, bool prefault
, gfn_t gfn
,
2364 gva_t gva
, pfn_t
*pfn
, bool write
, bool *writable
);
2366 static int nonpaging_map(struct kvm_vcpu
*vcpu
, gva_t v
, int write
, gfn_t gfn
,
2373 unsigned long mmu_seq
;
2376 force_pt_level
= mapping_level_dirty_bitmap(vcpu
, gfn
);
2377 if (likely(!force_pt_level
)) {
2378 level
= mapping_level(vcpu
, gfn
);
	 * This path builds a PAE page table, so we can map
	 * 2MB pages at most. Therefore check whether the level
	 * is larger than that.
2384 if (level
> PT_DIRECTORY_LEVEL
)
2385 level
= PT_DIRECTORY_LEVEL
;
2387 gfn
&= ~(KVM_PAGES_PER_HPAGE(level
) - 1);
2389 level
= PT_PAGE_TABLE_LEVEL
;
2391 mmu_seq
= vcpu
->kvm
->mmu_notifier_seq
;
2394 if (try_async_pf(vcpu
, prefault
, gfn
, v
, &pfn
, write
, &map_writable
))
2398 if (is_error_pfn(pfn
))
2399 return kvm_handle_bad_page(vcpu
->kvm
, gfn
, pfn
);
2401 spin_lock(&vcpu
->kvm
->mmu_lock
);
2402 if (mmu_notifier_retry(vcpu
, mmu_seq
))
2404 kvm_mmu_free_some_pages(vcpu
);
2405 if (likely(!force_pt_level
))
2406 transparent_hugepage_adjust(vcpu
, &gfn
, &pfn
, &level
);
2407 r
= __direct_map(vcpu
, v
, write
, map_writable
, level
, gfn
, pfn
,
2409 spin_unlock(&vcpu
->kvm
->mmu_lock
);
2415 spin_unlock(&vcpu
->kvm
->mmu_lock
);
2416 kvm_release_pfn_clean(pfn
);
static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	spin_lock(&vcpu->kvm->mmu_lock);
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
	    (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
	     vcpu->arch.mmu.direct_map)) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		--sp->root_count;
		if (!sp->root_count && sp->role.invalid) {
			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
			kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
		}
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
		spin_unlock(&vcpu->kvm->mmu_lock);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			--sp->root_count;
			if (!sp->root_count && sp->role.invalid)
				kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
							 &invalid_list);
		}
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
	}
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
	spin_unlock(&vcpu->kvm->mmu_lock);
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}
static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
	int ret = 0;

	if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		ret = 1;
	}

	return ret;
}
static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	unsigned i;

	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		spin_lock(&vcpu->kvm->mmu_lock);
		kvm_mmu_free_some_pages(vcpu);
		sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL,
				      1, ACC_ALL, NULL);
		++sp->root_count;
		spin_unlock(&vcpu->kvm->mmu_lock);
		vcpu->arch.mmu.root_hpa = __pa(sp->spt);
	} else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
		for (i = 0; i < 4; ++i) {
			hpa_t root = vcpu->arch.mmu.pae_root[i];

			ASSERT(!VALID_PAGE(root));
			spin_lock(&vcpu->kvm->mmu_lock);
			kvm_mmu_free_some_pages(vcpu);
			sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
					      i << 30,
					      PT32_ROOT_LEVEL, 1, ACC_ALL,
					      NULL);
			root = __pa(sp->spt);
			++sp->root_count;
			spin_unlock(&vcpu->kvm->mmu_lock);
			vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
		}
		vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
	} else
		BUG();

	return 0;
}
static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	u64 pdptr, pm_mask;
	gfn_t root_gfn;
	int i;

	root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;

	if (mmu_check_root(vcpu, root_gfn))
		return 1;

	/*
	 * Do we shadow a long mode page table? If so we need to
	 * write-protect the guests page table root.
	 */
	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));

		spin_lock(&vcpu->kvm->mmu_lock);
		kvm_mmu_free_some_pages(vcpu);
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
				      0, ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		spin_unlock(&vcpu->kvm->mmu_lock);
		vcpu->arch.mmu.root_hpa = root;
		return 0;
	}

	/*
	 * We shadow a 32 bit page table. This may be a legacy 2-level
	 * or a PAE 3-level page table. In either case we need to be aware that
	 * the shadow page table may be a PAE or a long mode page table.
	 */
	pm_mask = PT_PRESENT_MASK;
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL)
		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;

	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
			pdptr = kvm_pdptr_read_mmu(vcpu, &vcpu->arch.mmu, i);
			if (!is_present_gpte(pdptr)) {
				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = pdptr >> PAGE_SHIFT;
			if (mmu_check_root(vcpu, root_gfn))
				return 1;
		}
		spin_lock(&vcpu->kvm->mmu_lock);
		kvm_mmu_free_some_pages(vcpu);
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, 0,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		spin_unlock(&vcpu->kvm->mmu_lock);

		vcpu->arch.mmu.pae_root[i] = root | pm_mask;
	}
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);

	/*
	 * If we shadow a 32 bit page table with a long mode page
	 * table we enter this path.
	 */
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		if (vcpu->arch.mmu.lm_root == NULL) {
			/*
			 * The additional page necessary for this is only
			 * allocated on demand.
			 */
			u64 *lm_root;

			lm_root = (void*)get_zeroed_page(GFP_KERNEL);
			if (lm_root == NULL)
				return 1;

			lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;

			vcpu->arch.mmu.lm_root = lm_root;
		}

		vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
	}

	return 0;
}
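/*
 * Layout note for the PAE path above: pae_root[] has four entries, and each
 * shadow root built with gaddr == i << 30 covers one 1GB slice of the guest
 * address space (i = 0..3), mirroring the four PDPTEs of a PAE guest.  When
 * the shadow itself is a long mode table, the single on-demand lm_root page
 * simply points at pae_root, supplying the extra fourth level.
 */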
static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.mmu.direct_map)
		return mmu_alloc_direct_roots(vcpu);
	else
		return mmu_alloc_shadow_roots(vcpu);
}
static void mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (vcpu->arch.mmu.direct_map)
		return;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;

	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;
		sp = page_header(root);
		mmu_sync_children(vcpu, sp);
		trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			mmu_sync_children(vcpu, sp);
		}
	}
	trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
}
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_sync_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
}
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
				  u32 access, struct x86_exception *exception)
{
	if (exception)
		exception->error_code = 0;
	return vaddr;
}

static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
					 u32 access,
					 struct x86_exception *exception)
{
	if (exception)
		exception->error_code = 0;
	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
}
static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code, bool prefault)
{
	gfn_t gfn;
	int r;

	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	gfn = gva >> PAGE_SHIFT;

	return nonpaging_map(vcpu, gva & PAGE_MASK,
			     error_code & PFERR_WRITE_MASK, gfn, prefault);
}
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
{
	struct kvm_arch_async_pf arch;

	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
	arch.gfn = gfn;
	arch.direct_map = vcpu->arch.mmu.direct_map;
	arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);

	return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
}
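/*
 * Token layout, as built above: the low 12 bits carry the vcpu id and the
 * remaining bits carry a per-vcpu sequence number, e.g. sequence 5 on vcpu 3
 * yields (5 << 12) | 3 == 0x5003, which the completion path can later match
 * against outstanding async faults.
 */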
static bool can_do_async_pf(struct kvm_vcpu *vcpu)
{
	if (unlikely(!irqchip_in_kernel(vcpu->kvm) ||
		     kvm_event_needs_reinjection(vcpu)))
		return false;

	return kvm_x86_ops->interrupt_allowed(vcpu);
}
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
			 gva_t gva, pfn_t *pfn, bool write, bool *writable)
{
	bool async;

	*pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable);

	if (!async)
		return false; /* *pfn has correct page already */

	put_page(pfn_to_page(*pfn));

	if (!prefault && can_do_async_pf(vcpu)) {
		trace_kvm_try_async_get_page(gva, gfn);
		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
			trace_kvm_async_pf_doublefault(gva, gfn);
			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
			return true;
		} else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
			return true;
	}

	*pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable);

	return false;
}
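/*
 * Summary of the three outcomes above: (1) the pfn was resolved without
 * faulting, return false and let the caller map it; (2) an async page fault
 * was queued (or a repeated fault on the same gfn forced an APF halt
 * request), return true so the caller backs out without mapping; (3) async
 * PF is not possible, fall back to the synchronous gfn_to_pfn_prot() and
 * return false.
 */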
static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
			  bool prefault)
{
	pfn_t pfn;
	int r;
	int level;
	int force_pt_level;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	unsigned long mmu_seq;
	int write = error_code & PFERR_WRITE_MASK;
	bool map_writable;

	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
	if (likely(!force_pt_level)) {
		level = mapping_level(vcpu, gfn);
		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
	} else
		level = PT_PAGE_TABLE_LEVEL;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
		return 0;

	/* mmio */
	if (is_error_pfn(pfn))
		return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	if (likely(!force_pt_level))
		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
	r = __direct_map(vcpu, gpa, write, map_writable,
			 level, gfn, pfn, prefault);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}
static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
	context->update_pte = nonpaging_update_pte;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	context->direct_map = true;
	context->nx = false;
	return 0;
}

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu));
	mmu_free_roots(vcpu);
}

static unsigned long get_cr3(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr3(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      struct x86_exception *fault)
{
	vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
{
	int bit7;

	bit7 = (gpte >> 7) & 1;
	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
}
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE
static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context,
				  int level)
{
	int maxphyaddr = cpuid_maxphyaddr(vcpu);
	u64 exb_bit_rsvd = 0;

	if (!context->nx)
		exb_bit_rsvd = rsvd_bits(63, 63);

	switch (level) {
	case PT32_ROOT_LEVEL:
		/* no rsvd bits for 2 level 4K page table entries */
		context->rsvd_bits_mask[0][1] = 0;
		context->rsvd_bits_mask[0][0] = 0;
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];

		if (!is_pse(vcpu)) {
			context->rsvd_bits_mask[1][1] = 0;
			break;
		}

		if (is_cpuid_PSE36())
			/* 36bits PSE 4MB page */
			context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
		else
			/* 32 bits PSE 4MB page */
			context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
		break;
	case PT32E_ROOT_LEVEL:
		context->rsvd_bits_mask[0][2] =
			rsvd_bits(maxphyaddr, 63) |
			rsvd_bits(7, 8) | rsvd_bits(1, 2);	/* PDPTE */
		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62);	/* PDE */
		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62);	/* PTE */
		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62) |
			rsvd_bits(13, 20);		/* large page */
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
		break;
	case PT64_ROOT_LEVEL:
		context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
		context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
		context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 29);
		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 20);		/* large page */
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
		break;
	}
}
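/*
 * Worked example for the masks above: with maxphyaddr == 40,
 * rsvd_bits(maxphyaddr, 51) marks bits 40..51 reserved, so a 4-level guest
 * PTE carrying a physical address at or above 1TB triggers a reserved-bit
 * fault.  The [1][...] entries additionally reserve the low bits that must
 * be zero in large-page mappings, e.g. bits 13..20 for a 2MB page.
 */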
static int paging64_init_context_common(struct kvm_vcpu *vcpu,
					struct kvm_mmu *context,
					int level)
{
	context->nx = is_nx(vcpu);

	reset_rsvds_bits_mask(vcpu, context, level);

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->prefetch_page = paging64_prefetch_page;
	context->sync_page = paging64_sync_page;
	context->invlpg = paging64_invlpg;
	context->update_pte = paging64_update_pte;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	context->direct_map = false;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu,
				 struct kvm_mmu *context)
{
	return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu,
				 struct kvm_mmu *context)
{
	context->nx = false;

	reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->prefetch_page = paging32_prefetch_page;
	context->sync_page = paging32_sync_page;
	context->invlpg = paging32_invlpg;
	context->update_pte = paging32_update_pte;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	context->direct_map = false;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
	return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
}
static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = vcpu->arch.walk_mmu;

	context->base_role.word = 0;
	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = tdp_page_fault;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
	context->update_pte = nonpaging_update_pte;
	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
	context->root_hpa = INVALID_PAGE;
	context->direct_map = true;
	context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
	context->get_cr3 = get_cr3;
	context->inject_page_fault = kvm_inject_page_fault;
	context->nx = is_nx(vcpu);

	if (!is_paging(vcpu)) {
		context->nx = false;
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
		context->nx = is_nx(vcpu);
		reset_rsvds_bits_mask(vcpu, context, PT64_ROOT_LEVEL);
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT64_ROOT_LEVEL;
	} else if (is_pae(vcpu)) {
		context->nx = is_nx(vcpu);
		reset_rsvds_bits_mask(vcpu, context, PT32E_ROOT_LEVEL);
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT32E_ROOT_LEVEL;
	} else {
		context->nx = false;
		reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
		context->gva_to_gpa = paging32_gva_to_gpa;
		context->root_level = PT32_ROOT_LEVEL;
	}

	return 0;
}
int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
	int r;

	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	if (!is_paging(vcpu))
		r = nonpaging_init_context(vcpu, context);
	else if (is_long_mode(vcpu))
		r = paging64_init_context(vcpu, context);
	else if (is_pae(vcpu))
		r = paging32E_init_context(vcpu, context);
	else
		r = paging32_init_context(vcpu, context);

	vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
	vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);

	return r;
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);

static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
	int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);

	vcpu->arch.walk_mmu->set_cr3           = kvm_x86_ops->set_cr3;
	vcpu->arch.walk_mmu->get_cr3           = get_cr3;
	vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;

	return r;
}
*vcpu
)
3058 struct kvm_mmu
*g_context
= &vcpu
->arch
.nested_mmu
;
3060 g_context
->get_cr3
= get_cr3
;
3061 g_context
->inject_page_fault
= kvm_inject_page_fault
;
3064 * Note that arch.mmu.gva_to_gpa translates l2_gva to l1_gpa. The
3065 * translation of l2_gpa to l1_gpa addresses is done using the
3066 * arch.nested_mmu.gva_to_gpa function. Basically the gva_to_gpa
3067 * functions between mmu and nested_mmu are swapped.
3069 if (!is_paging(vcpu
)) {
3070 g_context
->nx
= false;
3071 g_context
->root_level
= 0;
3072 g_context
->gva_to_gpa
= nonpaging_gva_to_gpa_nested
;
3073 } else if (is_long_mode(vcpu
)) {
3074 g_context
->nx
= is_nx(vcpu
);
3075 reset_rsvds_bits_mask(vcpu
, g_context
, PT64_ROOT_LEVEL
);
3076 g_context
->root_level
= PT64_ROOT_LEVEL
;
3077 g_context
->gva_to_gpa
= paging64_gva_to_gpa_nested
;
3078 } else if (is_pae(vcpu
)) {
3079 g_context
->nx
= is_nx(vcpu
);
3080 reset_rsvds_bits_mask(vcpu
, g_context
, PT32E_ROOT_LEVEL
);
3081 g_context
->root_level
= PT32E_ROOT_LEVEL
;
3082 g_context
->gva_to_gpa
= paging64_gva_to_gpa_nested
;
3084 g_context
->nx
= false;
3085 reset_rsvds_bits_mask(vcpu
, g_context
, PT32_ROOT_LEVEL
);
3086 g_context
->root_level
= PT32_ROOT_LEVEL
;
3087 g_context
->gva_to_gpa
= paging32_gva_to_gpa_nested
;
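/*
 * Putting the comment above in concrete terms: arch.mmu.gva_to_gpa gives the
 * full L2 gva -> L1 gpa translation (it walks the L2 page tables), while
 * arch.nested_mmu.gva_to_gpa handles only the L2 gpa -> L1 gpa step - which
 * is why the paging*_gva_to_gpa_nested helpers are the ones installed on
 * nested_mmu here.
 */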
static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	if (mmu_is_nested(vcpu))
		return init_kvm_nested_mmu(vcpu);
	else if (tdp_enabled)
		return init_kvm_tdp_mmu(vcpu);
	else
		return init_kvm_softmmu(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
		/* mmu.free() should set root_hpa = INVALID_PAGE */
		vcpu->arch.mmu.free(vcpu);
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	r = mmu_alloc_roots(vcpu);
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_sync_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
	if (r)
		goto out;
	/* set_cr3() should ensure TLB has been flushed */
	vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_unload);
static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (is_last_spte(pte, sp->role.level))
			drop_spte(vcpu->kvm, spte, shadow_trap_nonpresent_pte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	__set_spte(spte, shadow_trap_nonpresent_pte);
	if (is_large_pte(pte))
		--vcpu->kvm->stat.lpages;
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *spte,
				  const void *new)
{
	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
	}

	++vcpu->kvm->stat.mmu_pte_updated;
	vcpu->arch.mmu.update_pte(vcpu, sp, spte, new);
}
static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}
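/*
 * The XOR with PT64_NX_MASK above folds the execute permission into the
 * "permission removed?" test: NX is an inverted permission bit, so flipping
 * it in both values lets (old & ~new & PT64_PERM_MASK) catch an spte that
 * went from executable to non-executable just like one that lost its
 * present, writable or user bit - any of which requires a remote TLB flush.
 */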
static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
				    bool remote_flush, bool local_flush)
{
	if (zap_page)
		return;

	if (remote_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);
	else if (local_flush)
		kvm_mmu_flush_tlb(vcpu);
}

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	return !!(spte && (*spte & shadow_accessed_mask));
}

static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	if (spte
	    && vcpu->arch.last_pte_gfn == gfn
	    && shadow_accessed_mask
	    && !(*spte & shadow_accessed_mask)
	    && is_shadow_present_pte(*spte))
		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
}
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes,
		       bool guest_initiated)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	union kvm_mmu_page_role mask = { .word = 0 };
	struct kvm_mmu_page *sp;
	struct hlist_node *node;
	LIST_HEAD(invalid_list);
	u64 entry, gentry, *spte;
	unsigned pte_size, page_offset, misaligned, quadrant, offset;
	int level, npte, invlpg_counter, r, flooded = 0;
	bool remote_flush, local_flush, zap_page;

	zap_page = remote_flush = local_flush = false;
	offset = offset_in_page(gpa);

	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);

	invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);

	/*
	 * Assume that the pte write on a page table of the same type
	 * as the current vcpu paging mode since we update the sptes only
	 * when they have the same mode.
	 */
	if ((is_pae(vcpu) && bytes == 4) || !new) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		if (is_pae(vcpu)) {
			gpa &= ~(gpa_t)7;
			bytes = 8;
		}
		r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
		if (r)
			gentry = 0;
		new = (const u8 *)&gentry;
	}

	switch (bytes) {
	case 4:
		gentry = *(const u32 *)new;
		break;
	case 8:
		gentry = *(const u64 *)new;
		break;
	default:
		gentry = 0;
		break;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
		gentry = 0;
	kvm_mmu_free_some_pages(vcpu);
	++vcpu->kvm->stat.mmu_pte_write;
	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
	if (guest_initiated) {
		kvm_mmu_access_page(vcpu, gfn);
		if (gfn == vcpu->arch.last_pt_write_gfn
		    && !last_updated_pte_accessed(vcpu)) {
			++vcpu->arch.last_pt_write_count;
			if (vcpu->arch.last_pt_write_count >= 3)
				flooded = 1;
		} else {
			vcpu->arch.last_pt_write_gfn = gfn;
			vcpu->arch.last_pt_write_count = 1;
			vcpu->arch.last_pte_updated = NULL;
		}
	}

	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
		pte_size = sp->role.cr4_pae ? 8 : 4;
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		misaligned |= bytes < 4;
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, sp->role.word);
			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
							       &invalid_list);
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}
		page_offset = offset;
		level = sp->role.level;
		npte = 1;
		if (!sp->role.cr4_pae) {
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
			if (level == PT32_ROOT_LEVEL) {
				page_offset &= ~7; /* kill rounding error */
				page_offset <<= 1;
				npte = 2;
			}
			quadrant = page_offset >> PAGE_SHIFT;
			page_offset &= ~PAGE_MASK;
			if (quadrant != sp->role.quadrant)
				continue;
		}
		local_flush = true;
		spte = &sp->spt[page_offset / sizeof(*spte)];
		while (npte--) {
			entry = *spte;
			mmu_pte_write_zap_pte(vcpu, sp, spte);
			if (gentry &&
			    !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
			      & mask.word))
				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
			if (!remote_flush && need_remote_flush(entry, *spte))
				remote_flush = true;
			++spte;
		}
	}
	mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
	trace_kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
	spin_unlock(&vcpu->kvm->mmu_lock);
}
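/*
 * Worked example of the quadrant math above for a non-PAE guest: a guest
 * writes the 4-byte pte at offset 0x804 (entry 513 of its 1024-entry table).
 * Doubling gives 0x1008, so quadrant == 1 and the offset within that shadow
 * page is 0x8, i.e. spte index 1 - exactly entry 513 - 512 in the second of
 * the two shadow pages that together cover one 32-bit guest page table.
 */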
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;
	int r;

	if (vcpu->arch.mmu.direct_map)
		return 0;

	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

	spin_lock(&vcpu->kvm->mmu_lock);
	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	LIST_HEAD(invalid_list);

	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
	       !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
		struct kvm_mmu_page *sp;

		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
				  struct kvm_mmu_page, link);
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
		++vcpu->kvm->stat.mmu_recycled;
	}
}
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
		       void *insn, int insn_len)
{
	int r;
	enum emulation_result er;

	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
	if (r < 0)
		goto out;

	if (!r) {
		r = 1;
		goto out;
	}

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;

	er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		/* fall through */
	case EMULATE_FAIL:
		return 0;
	default:
		BUG();
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
	vcpu->arch.mmu.invlpg(vcpu, gva);
	kvm_mmu_flush_tlb(vcpu);
	++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);

void kvm_enable_tdp(void)
{
	tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);

void kvm_disable_tdp(void)
{
	tdp_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);
static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	free_page((unsigned long)vcpu->arch.mmu.pae_root);
	if (vcpu->arch.mmu.lm_root != NULL)
		free_page((unsigned long)vcpu->arch.mmu.lm_root);
}
static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		return -ENOMEM;

	vcpu->arch.mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

	return 0;
}
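/*
 * Background for the __GFP_DMA32 above: while the guest runs a 32-bit or
 * PAE paging mode, cr3 holds only a 32-bit base address, and pae_root is
 * what gets handed to set_cr3() in that case, so the page backing it has to
 * come from the low 4GB (the DMA32 zone) even on 64-bit hosts.
 */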
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *sp;

	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, sp->slot_bitmap))
			continue;

		pt = sp->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (!is_shadow_present_pte(pt[i]) ||
			    !is_last_spte(pt[i], sp->role.level))
				continue;

			if (is_large_pte(pt[i])) {
				drop_spte(kvm, &pt[i],
					  shadow_trap_nonpresent_pte);
				--kvm->stat.lpages;
				continue;
			}

			/* avoid RMW */
			if (is_writable_pte(pt[i]))
				update_spte(&pt[i], pt[i] & ~PT_WRITABLE_MASK);
		}
	}
	kvm_flush_remote_tlbs(kvm);
}
void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);

	spin_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
			goto restart;

	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);
}
static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
					       struct list_head *invalid_list)
{
	struct kvm_mmu_page *page;

	page = container_of(kvm->arch.active_mmu_pages.prev,
			    struct kvm_mmu_page, link);
	return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
}
static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	struct kvm *kvm;
	struct kvm *kvm_freed = NULL;

	if (nr_to_scan == 0)
		goto out;

	raw_spin_lock(&kvm_lock);

	list_for_each_entry(kvm, &vm_list, vm_list) {
		int idx, freed_pages;
		LIST_HEAD(invalid_list);

		idx = srcu_read_lock(&kvm->srcu);
		spin_lock(&kvm->mmu_lock);
		if (!kvm_freed && nr_to_scan > 0 &&
		    kvm->arch.n_used_mmu_pages > 0) {
			freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
							      &invalid_list);
			kvm_freed = kvm;
		}
		nr_to_scan--;

		kvm_mmu_commit_zap_page(kvm, &invalid_list);
		spin_unlock(&kvm->mmu_lock);
		srcu_read_unlock(&kvm->srcu, idx);
	}
	if (kvm_freed)
		list_move_tail(&kvm_freed->vm_list, &vm_list);

	raw_spin_unlock(&kvm_lock);

out:
	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}
static struct shrinker mmu_shrinker = {
	.shrink = mmu_shrink,
	.seeks = DEFAULT_SEEKS * 10,
};
static void mmu_destroy_caches(void)
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}
int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
					    0, 0, NULL);
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
					    0, 0, NULL);
	if (!rmap_desc_cache)
		goto nomem;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;

	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0))
		goto nomem;

	register_shrinker(&mmu_shrinker);

	return 0;

nomem:
	mmu_destroy_caches();
	return -ENOMEM;
}
/*
 * Calculate mmu pages needed for kvm.
 */
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	int i;
	unsigned int nr_mmu_pages;
	unsigned int nr_pages = 0;
	struct kvm_memslots *slots;

	slots = kvm_memslots(kvm);

	for (i = 0; i < slots->nmemslots; i++)
		nr_pages += slots->memslots[i].npages;

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}
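/*
 * Rough sizing example for the permille calculation above (assuming the
 * usual KVM_PERMILLE_MMU_PAGES of 20 and KVM_MIN_ALLOC_MMU_PAGES of 64; the
 * exact constants live in the headers): a guest with 1,048,576 memslot
 * pages (4GB) is allotted 1,048,576 * 20 / 1000 == 20,971 shadow pages,
 * while a tiny guest is still guaranteed the minimum.
 */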
static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	if (len > buffer->len)
		return NULL;
	return buffer->ptr;
}

static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	void *ret;

	ret = pv_mmu_peek_buffer(buffer, len);
	if (!ret)
		return ret;
	buffer->ptr += len;
	buffer->len -= len;
	buffer->processed += len;
	return ret;
}
static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
			    gpa_t addr, gpa_t value)
{
	int bytes = 8;
	int r;

	if (!is_long_mode(vcpu) && !is_pae(vcpu))
		bytes = 4;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	if (!emulator_write_phys(vcpu, addr, &value, bytes))
		return -EFAULT;

	return 1;
}

static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	(void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu));
	return 1;
}

static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return 1;
}
static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
			     struct kvm_pv_mmu_op_buffer *buffer)
{
	struct kvm_mmu_op_header *header;

	header = pv_mmu_peek_buffer(buffer, sizeof *header);
	if (!header)
		return 0;
	switch (header->op) {
	case KVM_MMU_OP_WRITE_PTE: {
		struct kvm_mmu_op_write_pte *wpte;

		wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
		if (!wpte)
			return 0;
		return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
					wpte->pte_val);
	}
	case KVM_MMU_OP_FLUSH_TLB: {
		struct kvm_mmu_op_flush_tlb *ftlb;

		ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
		if (!ftlb)
			return 0;
		return kvm_pv_mmu_flush_tlb(vcpu);
	}
	case KVM_MMU_OP_RELEASE_PT: {
		struct kvm_mmu_op_release_pt *rpt;

		rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
		if (!rpt)
			return 0;
		return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
	}
	default:
		return 0;
	}
}
int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
		  gpa_t addr, unsigned long *ret)
{
	int r;
	struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;

	buffer->ptr = buffer->buf;
	buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
	buffer->processed = 0;

	r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
	if (r)
		goto out;

	while (buffer->len) {
		r = kvm_pv_mmu_op_one(vcpu, buffer);
		if (r < 0)
			goto out;
		if (r == 0)
			break;
	}

	r = 1;
out:
	*ret = buffer->processed;
	return r;
}
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
{
	struct kvm_shadow_walk_iterator iterator;
	int nr_sptes = 0;

	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry(vcpu, addr, iterator) {
		sptes[iterator.level-1] = *iterator.sptep;
		nr_sptes++;
		if (!is_shadow_present_pte(*iterator.sptep))
			break;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);

	return nr_sptes;
}
EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}

#ifdef CONFIG_KVM_MMU_AUDIT
#include "mmu_audit.c"
#else
static void mmu_audit_disable(void) { }
#endif

void kvm_mmu_module_exit(void)
{
	mmu_destroy_caches();
	percpu_counter_destroy(&kvm_total_used_mmu_pages);
	unregister_shrinker(&mmu_shrinker);
	mmu_audit_disable();
}