Linux 4.1.18
[linux/fpc-iii.git] / arch/x86/kvm/mmu.c
blob 554e877e0bc4a68cc4c60ddcb8402564b30a6bc2
1 /*
2 * Kernel-based Virtual Machine driver for Linux
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
7 * MMU support
9 * Copyright (C) 2006 Qumranet, Inc.
10 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
12 * Authors:
13 * Yaniv Kamay <yaniv@qumranet.com>
14 * Avi Kivity <avi@qumranet.com>
16 * This work is licensed under the terms of the GNU GPL, version 2. See
17 * the COPYING file in the top-level directory.
21 #include "irq.h"
22 #include "mmu.h"
23 #include "x86.h"
24 #include "kvm_cache_regs.h"
25 #include "cpuid.h"
27 #include <linux/kvm_host.h>
28 #include <linux/types.h>
29 #include <linux/string.h>
30 #include <linux/mm.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/swap.h>
34 #include <linux/hugetlb.h>
35 #include <linux/compiler.h>
36 #include <linux/srcu.h>
37 #include <linux/slab.h>
38 #include <linux/uaccess.h>
40 #include <asm/page.h>
41 #include <asm/cmpxchg.h>
42 #include <asm/io.h>
43 #include <asm/vmx.h>
46 * Setting this variable to true enables Two-Dimensional Paging (TDP),
47 * where the hardware walks 2 page tables:
48 * 1. the guest-virtual to guest-physical
49 * 2. while doing 1. it walks guest-physical to host-physical
50 * If the hardware supports that, we don't need to do shadow paging.
52 bool tdp_enabled = false;
54 enum {
55 AUDIT_PRE_PAGE_FAULT,
56 AUDIT_POST_PAGE_FAULT,
57 AUDIT_PRE_PTE_WRITE,
58 AUDIT_POST_PTE_WRITE,
59 AUDIT_PRE_SYNC,
60 AUDIT_POST_SYNC
63 #undef MMU_DEBUG
65 #ifdef MMU_DEBUG
66 static bool dbg = 0;
67 module_param(dbg, bool, 0644);
69 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
70 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
71 #define MMU_WARN_ON(x) WARN_ON(x)
72 #else
73 #define pgprintk(x...) do { } while (0)
74 #define rmap_printk(x...) do { } while (0)
75 #define MMU_WARN_ON(x) do { } while (0)
76 #endif
78 #define PTE_PREFETCH_NUM 8
80 #define PT_FIRST_AVAIL_BITS_SHIFT 10
81 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
83 #define PT64_LEVEL_BITS 9
85 #define PT64_LEVEL_SHIFT(level) \
86 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
88 #define PT64_INDEX(address, level)\
89 (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
92 #define PT32_LEVEL_BITS 10
94 #define PT32_LEVEL_SHIFT(level) \
95 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
97 #define PT32_LVL_OFFSET_MASK(level) \
98 (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
99 * PT32_LEVEL_BITS))) - 1))
101 #define PT32_INDEX(address, level)\
102 (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
105 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
106 #define PT64_DIR_BASE_ADDR_MASK \
107 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
108 #define PT64_LVL_ADDR_MASK(level) \
109 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
110 * PT64_LEVEL_BITS))) - 1))
111 #define PT64_LVL_OFFSET_MASK(level) \
112 (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
113 * PT64_LEVEL_BITS))) - 1))
115 #define PT32_BASE_ADDR_MASK PAGE_MASK
116 #define PT32_DIR_BASE_ADDR_MASK \
117 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
118 #define PT32_LVL_ADDR_MASK(level) \
119 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
120 * PT32_LEVEL_BITS))) - 1))
122 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
123 | shadow_x_mask | shadow_nx_mask)
125 #define ACC_EXEC_MASK 1
126 #define ACC_WRITE_MASK PT_WRITABLE_MASK
127 #define ACC_USER_MASK PT_USER_MASK
128 #define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
130 #include <trace/events/kvm.h>
132 #define CREATE_TRACE_POINTS
133 #include "mmutrace.h"
135 #define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
136 #define SPTE_MMU_WRITEABLE (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))
138 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
140 /* make pte_list_desc fit well in a cache line */
141 #define PTE_LIST_EXT 3
143 struct pte_list_desc {
144 u64 *sptes[PTE_LIST_EXT];
145 struct pte_list_desc *more;
148 struct kvm_shadow_walk_iterator {
149 u64 addr;
150 hpa_t shadow_addr;
151 u64 *sptep;
152 int level;
153 unsigned index;
156 #define for_each_shadow_entry(_vcpu, _addr, _walker) \
157 for (shadow_walk_init(&(_walker), _vcpu, _addr); \
158 shadow_walk_okay(&(_walker)); \
159 shadow_walk_next(&(_walker)))
161 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \
162 for (shadow_walk_init(&(_walker), _vcpu, _addr); \
163 shadow_walk_okay(&(_walker)) && \
164 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
165 __shadow_walk_next(&(_walker), spte))
167 static struct kmem_cache *pte_list_desc_cache;
168 static struct kmem_cache *mmu_page_header_cache;
169 static struct percpu_counter kvm_total_used_mmu_pages;
171 static u64 __read_mostly shadow_nx_mask;
172 static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
173 static u64 __read_mostly shadow_user_mask;
174 static u64 __read_mostly shadow_accessed_mask;
175 static u64 __read_mostly shadow_dirty_mask;
176 static u64 __read_mostly shadow_mmio_mask;
178 static void mmu_spte_set(u64 *sptep, u64 spte);
179 static void mmu_free_roots(struct kvm_vcpu *vcpu);
181 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
183 shadow_mmio_mask = mmio_mask;
185 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
188 * The low bit of the generation number is always presumed to be zero.
189 * This disables mmio caching during memslot updates. The concept is
190 * similar to a seqcount but instead of retrying the access we just punt
191 * and ignore the cache.
193 * spte bits 3-11 are used as bits 1-9 of the generation number,
194 * and bits 52-61 are used as bits 10-19 of the generation number.
196 #define MMIO_SPTE_GEN_LOW_SHIFT 2
197 #define MMIO_SPTE_GEN_HIGH_SHIFT 52
199 #define MMIO_GEN_SHIFT 20
200 #define MMIO_GEN_LOW_SHIFT 10
201 #define MMIO_GEN_LOW_MASK ((1 << MMIO_GEN_LOW_SHIFT) - 2)
202 #define MMIO_GEN_MASK ((1 << MMIO_GEN_SHIFT) - 1)
204 static u64 generation_mmio_spte_mask(unsigned int gen)
206 u64 mask;
208 WARN_ON(gen & ~MMIO_GEN_MASK);
210 mask = (gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
211 mask |= ((u64)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
212 return mask;
215 static unsigned int get_mmio_spte_generation(u64 spte)
217 unsigned int gen;
219 spte &= ~shadow_mmio_mask;
221 gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK;
222 gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT;
223 return gen;
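/*
 * Illustrative sketch (not part of the original file): how a generation
 * number round-trips through the encoding above.  Bit zero of the
 * generation is presumed to be zero and is dropped by the packing, and
 * the scheme relies on shadow_mmio_mask not overlapping the generation
 * bits.  The helper name below is hypothetical.
 */
static inline bool mmio_gen_roundtrip_ok(unsigned int gen)
{
	u64 spte = generation_mmio_spte_mask(gen & MMIO_GEN_MASK);

	/* unpacking recovers every generation bit except bit zero */
	return get_mmio_spte_generation(spte) ==
	       (gen & MMIO_GEN_MASK & ~1u);
}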
226 static unsigned int kvm_current_mmio_generation(struct kvm *kvm)
228 return kvm_memslots(kvm)->generation & MMIO_GEN_MASK;
231 static void mark_mmio_spte(struct kvm *kvm, u64 *sptep, u64 gfn,
232 unsigned access)
234 unsigned int gen = kvm_current_mmio_generation(kvm);
235 u64 mask = generation_mmio_spte_mask(gen);
237 access &= ACC_WRITE_MASK | ACC_USER_MASK;
238 mask |= shadow_mmio_mask | access | gfn << PAGE_SHIFT;
240 trace_mark_mmio_spte(sptep, gfn, access, gen);
241 mmu_spte_set(sptep, mask);
244 static bool is_mmio_spte(u64 spte)
246 return (spte & shadow_mmio_mask) == shadow_mmio_mask;
249 static gfn_t get_mmio_spte_gfn(u64 spte)
251 u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
252 return (spte & ~mask) >> PAGE_SHIFT;
255 static unsigned get_mmio_spte_access(u64 spte)
257 u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
258 return (spte & ~mask) & ~PAGE_MASK;
261 static bool set_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
262 pfn_t pfn, unsigned access)
264 if (unlikely(is_noslot_pfn(pfn))) {
265 mark_mmio_spte(kvm, sptep, gfn, access);
266 return true;
269 return false;
272 static bool check_mmio_spte(struct kvm *kvm, u64 spte)
274 unsigned int kvm_gen, spte_gen;
276 kvm_gen = kvm_current_mmio_generation(kvm);
277 spte_gen = get_mmio_spte_generation(spte);
279 trace_check_mmio_spte(spte, kvm_gen, spte_gen);
280 return likely(kvm_gen == spte_gen);
283 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
284 u64 dirty_mask, u64 nx_mask, u64 x_mask)
286 shadow_user_mask = user_mask;
287 shadow_accessed_mask = accessed_mask;
288 shadow_dirty_mask = dirty_mask;
289 shadow_nx_mask = nx_mask;
290 shadow_x_mask = x_mask;
292 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
294 static int is_cpuid_PSE36(void)
296 return 1;
299 static int is_nx(struct kvm_vcpu *vcpu)
301 return vcpu->arch.efer & EFER_NX;
304 static int is_shadow_present_pte(u64 pte)
306 return pte & PT_PRESENT_MASK && !is_mmio_spte(pte);
309 static int is_large_pte(u64 pte)
311 return pte & PT_PAGE_SIZE_MASK;
314 static int is_rmap_spte(u64 pte)
316 return is_shadow_present_pte(pte);
319 static int is_last_spte(u64 pte, int level)
321 if (level == PT_PAGE_TABLE_LEVEL)
322 return 1;
323 if (is_large_pte(pte))
324 return 1;
325 return 0;
328 static pfn_t spte_to_pfn(u64 pte)
330 return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
333 static gfn_t pse36_gfn_delta(u32 gpte)
335 int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
337 return (gpte & PT32_DIR_PSE36_MASK) << shift;
340 #ifdef CONFIG_X86_64
341 static void __set_spte(u64 *sptep, u64 spte)
343 *sptep = spte;
346 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
348 *sptep = spte;
351 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
353 return xchg(sptep, spte);
356 static u64 __get_spte_lockless(u64 *sptep)
358 return ACCESS_ONCE(*sptep);
360 #else
361 union split_spte {
362 struct {
363 u32 spte_low;
364 u32 spte_high;
366 u64 spte;
369 static void count_spte_clear(u64 *sptep, u64 spte)
371 struct kvm_mmu_page *sp = page_header(__pa(sptep));
373 if (is_shadow_present_pte(spte))
374 return;
376 /* Ensure the spte is completely set before we increase the count */
377 smp_wmb();
378 sp->clear_spte_count++;
381 static void __set_spte(u64 *sptep, u64 spte)
383 union split_spte *ssptep, sspte;
385 ssptep = (union split_spte *)sptep;
386 sspte = (union split_spte)spte;
388 ssptep->spte_high = sspte.spte_high;
391 * If we map the spte from nonpresent to present, we should store
392 * the high bits first, then set the present bit, so the CPU cannot
393 * fetch this spte while we are setting it.
395 smp_wmb();
397 ssptep->spte_low = sspte.spte_low;
400 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
402 union split_spte *ssptep, sspte;
404 ssptep = (union split_spte *)sptep;
405 sspte = (union split_spte)spte;
407 ssptep->spte_low = sspte.spte_low;
410 * If we map the spte from present to nonpresent, we should clear
411 * the present bit first to avoid the vcpu fetching the old high bits.
413 smp_wmb();
415 ssptep->spte_high = sspte.spte_high;
416 count_spte_clear(sptep, spte);
419 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
421 union split_spte *ssptep, sspte, orig;
423 ssptep = (union split_spte *)sptep;
424 sspte = (union split_spte)spte;
426 /* xchg acts as a barrier before the setting of the high bits */
427 orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
428 orig.spte_high = ssptep->spte_high;
429 ssptep->spte_high = sspte.spte_high;
430 count_spte_clear(sptep, spte);
432 return orig.spte;
436 * The idea of using this lightweight way to get the spte on an x86_32
437 * guest is from gup_get_pte(arch/x86/mm/gup.c).
439 * An spte tlb flush may be pending, because kvm_set_pte_rmapp
440 * coalesces them and we are running outside of the MMU lock. Therefore
441 * we need to protect against in-progress updates of the spte.
443 * Reading the spte while an update is in progress may get the old value
444 * for the high part of the spte. The race is fine for a present->non-present
445 * change (because the high part of the spte is ignored for non-present spte),
446 * but for a present->present change we must reread the spte.
448 * All such changes are done in two steps (present->non-present and
449 * non-present->present), hence it is enough to count the number of
450 * present->non-present updates: if it changed while reading the spte,
451 * we might have hit the race. This is done using clear_spte_count.
453 static u64 __get_spte_lockless(u64 *sptep)
455 struct kvm_mmu_page *sp = page_header(__pa(sptep));
456 union split_spte spte, *orig = (union split_spte *)sptep;
457 int count;
459 retry:
460 count = sp->clear_spte_count;
461 smp_rmb();
463 spte.spte_low = orig->spte_low;
464 smp_rmb();
466 spte.spte_high = orig->spte_high;
467 smp_rmb();
469 if (unlikely(spte.spte_low != orig->spte_low ||
470 count != sp->clear_spte_count))
471 goto retry;
473 return spte.spte;
475 #endif
477 static bool spte_is_locklessly_modifiable(u64 spte)
479 return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
480 (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
483 static bool spte_has_volatile_bits(u64 spte)
486 * Always atomically update the spte if it can be updated
487 * out of mmu-lock; this ensures the dirty bit is not lost,
488 * and it also helps us get a stable is_writable_pte()
489 * so that a needed tlb flush is not missed.
491 if (spte_is_locklessly_modifiable(spte))
492 return true;
494 if (!shadow_accessed_mask)
495 return false;
497 if (!is_shadow_present_pte(spte))
498 return false;
500 if ((spte & shadow_accessed_mask) &&
501 (!is_writable_pte(spte) || (spte & shadow_dirty_mask)))
502 return false;
504 return true;
507 static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
509 return (old_spte & bit_mask) && !(new_spte & bit_mask);
512 static bool spte_is_bit_changed(u64 old_spte, u64 new_spte, u64 bit_mask)
514 return (old_spte & bit_mask) != (new_spte & bit_mask);
517 /* Rules for using mmu_spte_set:
518 * Set the sptep from nonpresent to present.
519 * Note: the sptep being assigned *must* be either not present
520 * or in a state where the hardware will not attempt to update
521 * the spte.
523 static void mmu_spte_set(u64 *sptep, u64 new_spte)
525 WARN_ON(is_shadow_present_pte(*sptep));
526 __set_spte(sptep, new_spte);
529 /* Rules for using mmu_spte_update:
530 * Update the state bits; this means the mapped pfn is not changed.
532 * Whenever we overwrite a writable spte with a read-only one we
533 * should flush remote TLBs. Otherwise rmap_write_protect
534 * will find a read-only spte, even though the writable spte
535 * might still be cached in a CPU's TLB; the return value indicates
536 * this case.
538 static bool mmu_spte_update(u64 *sptep, u64 new_spte)
540 u64 old_spte = *sptep;
541 bool ret = false;
543 WARN_ON(!is_rmap_spte(new_spte));
545 if (!is_shadow_present_pte(old_spte)) {
546 mmu_spte_set(sptep, new_spte);
547 return ret;
550 if (!spte_has_volatile_bits(old_spte))
551 __update_clear_spte_fast(sptep, new_spte);
552 else
553 old_spte = __update_clear_spte_slow(sptep, new_spte);
556 * An spte updated out of mmu-lock is safe, since
557 * we always atomically update it; see the comments in
558 * spte_has_volatile_bits().
560 if (spte_is_locklessly_modifiable(old_spte) &&
561 !is_writable_pte(new_spte))
562 ret = true;
564 if (!shadow_accessed_mask)
565 return ret;
568 * Flush TLB when accessed/dirty bits are changed in the page tables,
569 * to guarantee consistency between TLB and page tables.
571 if (spte_is_bit_changed(old_spte, new_spte,
572 shadow_accessed_mask | shadow_dirty_mask))
573 ret = true;
575 if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
576 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
577 if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
578 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
580 return ret;
584 * Rules for using mmu_spte_clear_track_bits:
585 * It sets the sptep from present to nonpresent, and tracks the
586 * state bits; it is used to clear a last level sptep.
588 static int mmu_spte_clear_track_bits(u64 *sptep)
590 pfn_t pfn;
591 u64 old_spte = *sptep;
593 if (!spte_has_volatile_bits(old_spte))
594 __update_clear_spte_fast(sptep, 0ull);
595 else
596 old_spte = __update_clear_spte_slow(sptep, 0ull);
598 if (!is_rmap_spte(old_spte))
599 return 0;
601 pfn = spte_to_pfn(old_spte);
604 * KVM does not hold a refcount on the page used by the
605 * kvm mmu, so before reclaiming the page we should
606 * unmap it from the mmu first.
608 WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
610 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
611 kvm_set_pfn_accessed(pfn);
612 if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
613 kvm_set_pfn_dirty(pfn);
614 return 1;
618 * Rules for using mmu_spte_clear_no_track:
619 * Directly clear the spte without caring about the state bits of the
620 * sptep; it is used to clear an upper level spte.
622 static void mmu_spte_clear_no_track(u64 *sptep)
624 __update_clear_spte_fast(sptep, 0ull);
627 static u64 mmu_spte_get_lockless(u64 *sptep)
629 return __get_spte_lockless(sptep);
632 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
635 * Prevent page table teardown by making any free-er wait during
636 * kvm_flush_remote_tlbs() IPI to all active vcpus.
638 local_irq_disable();
639 vcpu->mode = READING_SHADOW_PAGE_TABLES;
641 * Make sure a following spte read is not reordered ahead of the write
642 * to vcpu->mode.
644 smp_mb();
647 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
650 * Make sure the write to vcpu->mode is not reordered in front of
651 * reads to sptes. If it is, kvm_mmu_commit_zap_page() can see us
652 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
654 smp_mb();
655 vcpu->mode = OUTSIDE_GUEST_MODE;
656 local_irq_enable();
659 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
660 struct kmem_cache *base_cache, int min)
662 void *obj;
664 if (cache->nobjs >= min)
665 return 0;
666 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
667 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
668 if (!obj)
669 return -ENOMEM;
670 cache->objects[cache->nobjs++] = obj;
672 return 0;
675 static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
677 return cache->nobjs;
680 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
681 struct kmem_cache *cache)
683 while (mc->nobjs)
684 kmem_cache_free(cache, mc->objects[--mc->nobjs]);
687 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
688 int min)
690 void *page;
692 if (cache->nobjs >= min)
693 return 0;
694 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
695 page = (void *)__get_free_page(GFP_KERNEL);
696 if (!page)
697 return -ENOMEM;
698 cache->objects[cache->nobjs++] = page;
700 return 0;
703 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
705 while (mc->nobjs)
706 free_page((unsigned long)mc->objects[--mc->nobjs]);
709 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
711 int r;
713 r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
714 pte_list_desc_cache, 8 + PTE_PREFETCH_NUM);
715 if (r)
716 goto out;
717 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
718 if (r)
719 goto out;
720 r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
721 mmu_page_header_cache, 4);
722 out:
723 return r;
726 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
728 mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
729 pte_list_desc_cache);
730 mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
731 mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
732 mmu_page_header_cache);
735 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
737 void *p;
739 BUG_ON(!mc->nobjs);
740 p = mc->objects[--mc->nobjs];
741 return p;
744 static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
746 return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
749 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
751 kmem_cache_free(pte_list_desc_cache, pte_list_desc);
754 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
756 if (!sp->role.direct)
757 return sp->gfns[index];
759 return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
762 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
764 if (sp->role.direct)
765 BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
766 else
767 sp->gfns[index] = gfn;
771 * Return the pointer to the large page information for a given gfn,
772 * handling slots that are not large page aligned.
774 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
775 struct kvm_memory_slot *slot,
776 int level)
778 unsigned long idx;
780 idx = gfn_to_index(gfn, slot->base_gfn, level);
781 return &slot->arch.lpage_info[level - 2][idx];
784 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
786 struct kvm_memory_slot *slot;
787 struct kvm_lpage_info *linfo;
788 int i;
790 slot = gfn_to_memslot(kvm, gfn);
791 for (i = PT_DIRECTORY_LEVEL;
792 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
793 linfo = lpage_info_slot(gfn, slot, i);
794 linfo->write_count += 1;
796 kvm->arch.indirect_shadow_pages++;
799 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
801 struct kvm_memory_slot *slot;
802 struct kvm_lpage_info *linfo;
803 int i;
805 slot = gfn_to_memslot(kvm, gfn);
806 for (i = PT_DIRECTORY_LEVEL;
807 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
808 linfo = lpage_info_slot(gfn, slot, i);
809 linfo->write_count -= 1;
810 WARN_ON(linfo->write_count < 0);
812 kvm->arch.indirect_shadow_pages--;
815 static int has_wrprotected_page(struct kvm *kvm,
816 gfn_t gfn,
817 int level)
819 struct kvm_memory_slot *slot;
820 struct kvm_lpage_info *linfo;
822 slot = gfn_to_memslot(kvm, gfn);
823 if (slot) {
824 linfo = lpage_info_slot(gfn, slot, level);
825 return linfo->write_count;
828 return 1;
831 static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
833 unsigned long page_size;
834 int i, ret = 0;
836 page_size = kvm_host_page_size(kvm, gfn);
838 for (i = PT_PAGE_TABLE_LEVEL;
839 i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
840 if (page_size >= KVM_HPAGE_SIZE(i))
841 ret = i;
842 else
843 break;
846 return ret;
849 static struct kvm_memory_slot *
850 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
851 bool no_dirty_log)
853 struct kvm_memory_slot *slot;
855 slot = gfn_to_memslot(vcpu->kvm, gfn);
856 if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
857 (no_dirty_log && slot->dirty_bitmap))
858 slot = NULL;
860 return slot;
863 static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
865 return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
868 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
870 int host_level, level, max_level;
872 host_level = host_mapping_level(vcpu->kvm, large_gfn);
874 if (host_level == PT_PAGE_TABLE_LEVEL)
875 return host_level;
877 max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
879 for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
880 if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
881 break;
883 return level - 1;
887 * Pte mapping structures:
889 * If pte_list bit zero is zero, then pte_list points to the spte.
891 * If pte_list bit zero is one, then (pte_list & ~1) points to a struct
892 * pte_list_desc containing more mappings.
894 * Returns the number of pte entries before the spte was added or zero if
895 * the spte was not added.
898 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
899 unsigned long *pte_list)
901 struct pte_list_desc *desc;
902 int i, count = 0;
904 if (!*pte_list) {
905 rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
906 *pte_list = (unsigned long)spte;
907 } else if (!(*pte_list & 1)) {
908 rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
909 desc = mmu_alloc_pte_list_desc(vcpu);
910 desc->sptes[0] = (u64 *)*pte_list;
911 desc->sptes[1] = spte;
912 *pte_list = (unsigned long)desc | 1;
913 ++count;
914 } else {
915 rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
916 desc = (struct pte_list_desc *)(*pte_list & ~1ul);
917 while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
918 desc = desc->more;
919 count += PTE_LIST_EXT;
921 if (desc->sptes[PTE_LIST_EXT-1]) {
922 desc->more = mmu_alloc_pte_list_desc(vcpu);
923 desc = desc->more;
925 for (i = 0; desc->sptes[i]; ++i)
926 ++count;
927 desc->sptes[i] = spte;
929 return count;
932 static void
933 pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc,
934 int i, struct pte_list_desc *prev_desc)
936 int j;
938 for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
940 desc->sptes[i] = desc->sptes[j];
941 desc->sptes[j] = NULL;
942 if (j != 0)
943 return;
944 if (!prev_desc && !desc->more)
945 *pte_list = (unsigned long)desc->sptes[0];
946 else
947 if (prev_desc)
948 prev_desc->more = desc->more;
949 else
950 *pte_list = (unsigned long)desc->more | 1;
951 mmu_free_pte_list_desc(desc);
954 static void pte_list_remove(u64 *spte, unsigned long *pte_list)
956 struct pte_list_desc *desc;
957 struct pte_list_desc *prev_desc;
958 int i;
960 if (!*pte_list) {
961 printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte);
962 BUG();
963 } else if (!(*pte_list & 1)) {
964 rmap_printk("pte_list_remove: %p 1->0\n", spte);
965 if ((u64 *)*pte_list != spte) {
966 printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte);
967 BUG();
969 *pte_list = 0;
970 } else {
971 rmap_printk("pte_list_remove: %p many->many\n", spte);
972 desc = (struct pte_list_desc *)(*pte_list & ~1ul);
973 prev_desc = NULL;
974 while (desc) {
975 for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
976 if (desc->sptes[i] == spte) {
977 pte_list_desc_remove_entry(pte_list,
978 desc, i,
979 prev_desc);
980 return;
982 prev_desc = desc;
983 desc = desc->more;
985 pr_err("pte_list_remove: %p many->many\n", spte);
986 BUG();
990 typedef void (*pte_list_walk_fn) (u64 *spte);
991 static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
993 struct pte_list_desc *desc;
994 int i;
996 if (!*pte_list)
997 return;
999 if (!(*pte_list & 1))
1000 return fn((u64 *)*pte_list);
1002 desc = (struct pte_list_desc *)(*pte_list & ~1ul);
1003 while (desc) {
1004 for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
1005 fn(desc->sptes[i]);
1006 desc = desc->more;
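/*
 * Illustrative sketch (not part of the original file): walking the
 * pte_list encoding documented above pte_list_add().  Bit zero of the
 * word selects between a single spte pointer and a chain of
 * pte_list_desc.  The helper name below is hypothetical; the real
 * walkers (pte_list_walk and the rmap iterator further down) follow
 * the same pattern.
 */
static int pte_list_count_sketch(unsigned long *pte_list)
{
	struct pte_list_desc *desc;
	int i, count = 0;

	if (!*pte_list)
		return 0;

	if (!(*pte_list & 1))
		return 1;	/* a single spte is stored directly */

	desc = (struct pte_list_desc *)(*pte_list & ~1ul);
	while (desc) {
		for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
			++count;
		desc = desc->more;
	}
	return count;
}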
1010 static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
1011 struct kvm_memory_slot *slot)
1013 unsigned long idx;
1015 idx = gfn_to_index(gfn, slot->base_gfn, level);
1016 return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
1020 * Take gfn and return the reverse mapping to it.
1022 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
1024 struct kvm_memory_slot *slot;
1026 slot = gfn_to_memslot(kvm, gfn);
1027 return __gfn_to_rmap(gfn, level, slot);
1030 static bool rmap_can_add(struct kvm_vcpu *vcpu)
1032 struct kvm_mmu_memory_cache *cache;
1034 cache = &vcpu->arch.mmu_pte_list_desc_cache;
1035 return mmu_memory_cache_free_objects(cache);
1038 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1040 struct kvm_mmu_page *sp;
1041 unsigned long *rmapp;
1043 sp = page_header(__pa(spte));
1044 kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
1045 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
1046 return pte_list_add(vcpu, spte, rmapp);
1049 static void rmap_remove(struct kvm *kvm, u64 *spte)
1051 struct kvm_mmu_page *sp;
1052 gfn_t gfn;
1053 unsigned long *rmapp;
1055 sp = page_header(__pa(spte));
1056 gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
1057 rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
1058 pte_list_remove(spte, rmapp);
1062 * Used by the following functions to iterate through the sptes linked by a
1063 * rmap. All fields are private and not assumed to be used outside.
1065 struct rmap_iterator {
1066 /* private fields */
1067 struct pte_list_desc *desc; /* holds the sptep if not NULL */
1068 int pos; /* index of the sptep */
1072 * Iteration must be started by this function. This should also be used after
1073 * removing/dropping sptes from the rmap link because in such cases the
1074 * information in the iterator may not be valid.
1076 * Returns sptep if found, NULL otherwise.
1078 static u64 *rmap_get_first(unsigned long rmap, struct rmap_iterator *iter)
1080 if (!rmap)
1081 return NULL;
1083 if (!(rmap & 1)) {
1084 iter->desc = NULL;
1085 return (u64 *)rmap;
1088 iter->desc = (struct pte_list_desc *)(rmap & ~1ul);
1089 iter->pos = 0;
1090 return iter->desc->sptes[iter->pos];
1094 * Must be used with a valid iterator: e.g. after rmap_get_first().
1096 * Returns sptep if found, NULL otherwise.
1098 static u64 *rmap_get_next(struct rmap_iterator *iter)
1100 if (iter->desc) {
1101 if (iter->pos < PTE_LIST_EXT - 1) {
1102 u64 *sptep;
1104 ++iter->pos;
1105 sptep = iter->desc->sptes[iter->pos];
1106 if (sptep)
1107 return sptep;
1110 iter->desc = iter->desc->more;
1112 if (iter->desc) {
1113 iter->pos = 0;
1114 /* desc->sptes[0] cannot be NULL */
1115 return iter->desc->sptes[iter->pos];
1119 return NULL;
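/*
 * Illustrative sketch (not part of the original file): iterating over
 * every spte in an rmap chain with the iterator above.  The helper name
 * is hypothetical; real users such as __rmap_write_protect() below
 * follow the same pattern.
 */
static int rmap_count_sptes_sketch(unsigned long *rmapp)
{
	u64 *sptep;
	struct rmap_iterator iter;
	int count = 0;

	for (sptep = rmap_get_first(*rmapp, &iter); sptep;
	     sptep = rmap_get_next(&iter))
		++count;

	return count;
}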
1122 static void drop_spte(struct kvm *kvm, u64 *sptep)
1124 if (mmu_spte_clear_track_bits(sptep))
1125 rmap_remove(kvm, sptep);
1129 static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
1131 if (is_large_pte(*sptep)) {
1132 WARN_ON(page_header(__pa(sptep))->role.level ==
1133 PT_PAGE_TABLE_LEVEL);
1134 drop_spte(kvm, sptep);
1135 --kvm->stat.lpages;
1136 return true;
1139 return false;
1142 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1144 if (__drop_large_spte(vcpu->kvm, sptep))
1145 kvm_flush_remote_tlbs(vcpu->kvm);
1149 * Write-protect the specified @sptep; @pt_protect indicates whether the
1150 * spte write-protection is caused by protecting the shadow page table.
1152 * Note: write protection differs between dirty logging and spte
1153 * protection:
1154 * - for dirty logging, the spte can be set to writable at any time if
1155 * its dirty bitmap is properly set.
1156 * - for spte protection, the spte can be writable only after unsync-ing
1157 * the shadow page.
1159 * Return true if the tlb needs to be flushed.
1161 static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect)
1163 u64 spte = *sptep;
1165 if (!is_writable_pte(spte) &&
1166 !(pt_protect && spte_is_locklessly_modifiable(spte)))
1167 return false;
1169 rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
1171 if (pt_protect)
1172 spte &= ~SPTE_MMU_WRITEABLE;
1173 spte = spte & ~PT_WRITABLE_MASK;
1175 return mmu_spte_update(sptep, spte);
1178 static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
1179 bool pt_protect)
1181 u64 *sptep;
1182 struct rmap_iterator iter;
1183 bool flush = false;
1185 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
1186 BUG_ON(!(*sptep & PT_PRESENT_MASK));
1188 flush |= spte_write_protect(kvm, sptep, pt_protect);
1189 sptep = rmap_get_next(&iter);
1192 return flush;
1195 static bool spte_clear_dirty(struct kvm *kvm, u64 *sptep)
1197 u64 spte = *sptep;
1199 rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);
1201 spte &= ~shadow_dirty_mask;
1203 return mmu_spte_update(sptep, spte);
1206 static bool __rmap_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
1208 u64 *sptep;
1209 struct rmap_iterator iter;
1210 bool flush = false;
1212 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
1213 BUG_ON(!(*sptep & PT_PRESENT_MASK));
1215 flush |= spte_clear_dirty(kvm, sptep);
1216 sptep = rmap_get_next(&iter);
1219 return flush;
1222 static bool spte_set_dirty(struct kvm *kvm, u64 *sptep)
1224 u64 spte = *sptep;
1226 rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
1228 spte |= shadow_dirty_mask;
1230 return mmu_spte_update(sptep, spte);
1233 static bool __rmap_set_dirty(struct kvm *kvm, unsigned long *rmapp)
1235 u64 *sptep;
1236 struct rmap_iterator iter;
1237 bool flush = false;
1239 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
1240 BUG_ON(!(*sptep & PT_PRESENT_MASK));
1242 flush |= spte_set_dirty(kvm, sptep);
1243 sptep = rmap_get_next(&iter);
1246 return flush;
1250 * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1251 * @kvm: kvm instance
1252 * @slot: slot to protect
1253 * @gfn_offset: start of the BITS_PER_LONG pages we care about
1254 * @mask: indicates which pages we should protect
1256 * Used when we do not need to care about huge page mappings: e.g. during dirty
1257 * logging we do not have any such mappings.
1259 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1260 struct kvm_memory_slot *slot,
1261 gfn_t gfn_offset, unsigned long mask)
1263 unsigned long *rmapp;
1265 while (mask) {
1266 rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1267 PT_PAGE_TABLE_LEVEL, slot);
1268 __rmap_write_protect(kvm, rmapp, false);
1270 /* clear the first set bit */
1271 mask &= mask - 1;
1276 * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages
1277 * @kvm: kvm instance
1278 * @slot: slot to clear D-bit
1279 * @gfn_offset: start of the BITS_PER_LONG pages we care about
1280 * @mask: indicates which pages we should clear D-bit
1282 * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
1284 void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1285 struct kvm_memory_slot *slot,
1286 gfn_t gfn_offset, unsigned long mask)
1288 unsigned long *rmapp;
1290 while (mask) {
1291 rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1292 PT_PAGE_TABLE_LEVEL, slot);
1293 __rmap_clear_dirty(kvm, rmapp);
1295 /* clear the first set bit */
1296 mask &= mask - 1;
1299 EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);
1302 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1303 * PT level pages.
1305 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1306 * enable dirty logging for them.
1308 * Used when we do not need to care about huge page mappings: e.g. during dirty
1309 * logging we do not have any such mappings.
1311 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1312 struct kvm_memory_slot *slot,
1313 gfn_t gfn_offset, unsigned long mask)
1315 if (kvm_x86_ops->enable_log_dirty_pt_masked)
1316 kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
1317 mask);
1318 else
1319 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1322 static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
1324 struct kvm_memory_slot *slot;
1325 unsigned long *rmapp;
1326 int i;
1327 bool write_protected = false;
1329 slot = gfn_to_memslot(kvm, gfn);
1331 for (i = PT_PAGE_TABLE_LEVEL;
1332 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
1333 rmapp = __gfn_to_rmap(gfn, i, slot);
1334 write_protected |= __rmap_write_protect(kvm, rmapp, true);
1337 return write_protected;
1340 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
1341 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1342 unsigned long data)
1344 u64 *sptep;
1345 struct rmap_iterator iter;
1346 int need_tlb_flush = 0;
1348 while ((sptep = rmap_get_first(*rmapp, &iter))) {
1349 BUG_ON(!(*sptep & PT_PRESENT_MASK));
1350 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx gfn %llx (%d)\n",
1351 sptep, *sptep, gfn, level);
1353 drop_spte(kvm, sptep);
1354 need_tlb_flush = 1;
1357 return need_tlb_flush;
1360 static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
1361 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1362 unsigned long data)
1364 u64 *sptep;
1365 struct rmap_iterator iter;
1366 int need_flush = 0;
1367 u64 new_spte;
1368 pte_t *ptep = (pte_t *)data;
1369 pfn_t new_pfn;
1371 WARN_ON(pte_huge(*ptep));
1372 new_pfn = pte_pfn(*ptep);
1374 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
1375 BUG_ON(!is_shadow_present_pte(*sptep));
1376 rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
1377 sptep, *sptep, gfn, level);
1379 need_flush = 1;
1381 if (pte_write(*ptep)) {
1382 drop_spte(kvm, sptep);
1383 sptep = rmap_get_first(*rmapp, &iter);
1384 } else {
1385 new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
1386 new_spte |= (u64)new_pfn << PAGE_SHIFT;
1388 new_spte &= ~PT_WRITABLE_MASK;
1389 new_spte &= ~SPTE_HOST_WRITEABLE;
1390 new_spte &= ~shadow_accessed_mask;
1392 mmu_spte_clear_track_bits(sptep);
1393 mmu_spte_set(sptep, new_spte);
1394 sptep = rmap_get_next(&iter);
1398 if (need_flush)
1399 kvm_flush_remote_tlbs(kvm);
1401 return 0;
1404 static int kvm_handle_hva_range(struct kvm *kvm,
1405 unsigned long start,
1406 unsigned long end,
1407 unsigned long data,
1408 int (*handler)(struct kvm *kvm,
1409 unsigned long *rmapp,
1410 struct kvm_memory_slot *slot,
1411 gfn_t gfn,
1412 int level,
1413 unsigned long data))
1415 int j;
1416 int ret = 0;
1417 struct kvm_memslots *slots;
1418 struct kvm_memory_slot *memslot;
1420 slots = kvm_memslots(kvm);
1422 kvm_for_each_memslot(memslot, slots) {
1423 unsigned long hva_start, hva_end;
1424 gfn_t gfn_start, gfn_end;
1426 hva_start = max(start, memslot->userspace_addr);
1427 hva_end = min(end, memslot->userspace_addr +
1428 (memslot->npages << PAGE_SHIFT));
1429 if (hva_start >= hva_end)
1430 continue;
1432 * {gfn(page) | page intersects with [hva_start, hva_end)} =
1433 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
1435 gfn_start = hva_to_gfn_memslot(hva_start, memslot);
1436 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
1438 for (j = PT_PAGE_TABLE_LEVEL;
1439 j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) {
1440 unsigned long idx, idx_end;
1441 unsigned long *rmapp;
1442 gfn_t gfn = gfn_start;
1445 * {idx(page_j) | page_j intersects with
1446 * [hva_start, hva_end)} = {idx, idx+1, ..., idx_end}.
1448 idx = gfn_to_index(gfn_start, memslot->base_gfn, j);
1449 idx_end = gfn_to_index(gfn_end - 1, memslot->base_gfn, j);
1451 rmapp = __gfn_to_rmap(gfn_start, j, memslot);
1453 for (; idx <= idx_end;
1454 ++idx, gfn += (1UL << KVM_HPAGE_GFN_SHIFT(j)))
1455 ret |= handler(kvm, rmapp++, memslot,
1456 gfn, j, data);
1460 return ret;
1463 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
1464 unsigned long data,
1465 int (*handler)(struct kvm *kvm, unsigned long *rmapp,
1466 struct kvm_memory_slot *slot,
1467 gfn_t gfn, int level,
1468 unsigned long data))
1470 return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
1473 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
1475 return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
1478 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
1480 return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
1483 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1485 kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
1488 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
1489 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1490 unsigned long data)
1492 u64 *sptep;
1493 struct rmap_iterator uninitialized_var(iter);
1494 int young = 0;
1496 BUG_ON(!shadow_accessed_mask);
1498 for (sptep = rmap_get_first(*rmapp, &iter); sptep;
1499 sptep = rmap_get_next(&iter)) {
1500 BUG_ON(!is_shadow_present_pte(*sptep));
1502 if (*sptep & shadow_accessed_mask) {
1503 young = 1;
1504 clear_bit((ffs(shadow_accessed_mask) - 1),
1505 (unsigned long *)sptep);
1508 trace_kvm_age_page(gfn, level, slot, young);
1509 return young;
1512 static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
1513 struct kvm_memory_slot *slot, gfn_t gfn,
1514 int level, unsigned long data)
1516 u64 *sptep;
1517 struct rmap_iterator iter;
1518 int young = 0;
1521 * If there's no access bit in the secondary pte set by the
1522 * hardware it's up to gup-fast/gup to set the access bit in
1523 * the primary pte or in the page structure.
1525 if (!shadow_accessed_mask)
1526 goto out;
1528 for (sptep = rmap_get_first(*rmapp, &iter); sptep;
1529 sptep = rmap_get_next(&iter)) {
1530 BUG_ON(!is_shadow_present_pte(*sptep));
1532 if (*sptep & shadow_accessed_mask) {
1533 young = 1;
1534 break;
1537 out:
1538 return young;
1541 #define RMAP_RECYCLE_THRESHOLD 1000
1543 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1545 unsigned long *rmapp;
1546 struct kvm_mmu_page *sp;
1548 sp = page_header(__pa(spte));
1550 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
1552 kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0);
1553 kvm_flush_remote_tlbs(vcpu->kvm);
1556 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
1559 * In the absence of EPT Access and Dirty Bits support,
1560 * emulate the accessed bit for EPT by checking if this page has
1561 * an EPT mapping, and clearing it if it does. On the next access,
1562 * a new EPT mapping will be established.
1563 * This has some overhead, but not as much as the cost of swapping
1564 * out actively used pages or breaking up actively used hugepages.
1566 if (!shadow_accessed_mask) {
1568 * We are holding the kvm->mmu_lock, and we are blowing up
1569 * shadow PTEs. MMU notifier consumers need to be kept at bay.
1570 * This is correct as long as we don't decouple the mmu_lock
1571 * protected regions (like invalidate_range_start|end does).
1573 kvm->mmu_notifier_seq++;
1574 return kvm_handle_hva_range(kvm, start, end, 0,
1575 kvm_unmap_rmapp);
1578 return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
1581 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
1583 return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
1586 #ifdef MMU_DEBUG
1587 static int is_empty_shadow_page(u64 *spt)
1589 u64 *pos;
1590 u64 *end;
1592 for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
1593 if (is_shadow_present_pte(*pos)) {
1594 printk(KERN_ERR "%s: %p %llx\n", __func__,
1595 pos, *pos);
1596 return 0;
1598 return 1;
1600 #endif
1603 * This value is the sum of all of the kvm instances'
1604 * kvm->arch.n_used_mmu_pages values. We need a global,
1605 * aggregate version in order to make the slab shrinker
1606 * faster.
1608 static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
1610 kvm->arch.n_used_mmu_pages += nr;
1611 percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1614 static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
1616 MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
1617 hlist_del(&sp->hash_link);
1618 list_del(&sp->link);
1619 free_page((unsigned long)sp->spt);
1620 if (!sp->role.direct)
1621 free_page((unsigned long)sp->gfns);
1622 kmem_cache_free(mmu_page_header_cache, sp);
1625 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1627 return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
1630 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1631 struct kvm_mmu_page *sp, u64 *parent_pte)
1633 if (!parent_pte)
1634 return;
1636 pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
1639 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1640 u64 *parent_pte)
1642 pte_list_remove(parent_pte, &sp->parent_ptes);
1645 static void drop_parent_pte(struct kvm_mmu_page *sp,
1646 u64 *parent_pte)
1648 mmu_page_remove_parent_pte(sp, parent_pte);
1649 mmu_spte_clear_no_track(parent_pte);
1652 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
1653 u64 *parent_pte, int direct)
1655 struct kvm_mmu_page *sp;
1657 sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
1658 sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
1659 if (!direct)
1660 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
1661 set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1664 * The active_mmu_pages list is a FIFO list; do not move the
1665 * page until it is zapped. kvm_zap_obsolete_pages depends on
1666 * this feature. See the comments in kvm_zap_obsolete_pages().
1668 list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
1669 sp->parent_ptes = 0;
1670 mmu_page_add_parent_pte(vcpu, sp, parent_pte);
1671 kvm_mod_used_mmu_pages(vcpu->kvm, +1);
1672 return sp;
1675 static void mark_unsync(u64 *spte);
1676 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1678 pte_list_walk(&sp->parent_ptes, mark_unsync);
1681 static void mark_unsync(u64 *spte)
1683 struct kvm_mmu_page *sp;
1684 unsigned int index;
1686 sp = page_header(__pa(spte));
1687 index = spte - sp->spt;
1688 if (__test_and_set_bit(index, sp->unsync_child_bitmap))
1689 return;
1690 if (sp->unsync_children++)
1691 return;
1692 kvm_mmu_mark_parents_unsync(sp);
1695 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1696 struct kvm_mmu_page *sp)
1698 return 1;
1701 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
1705 static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
1706 struct kvm_mmu_page *sp, u64 *spte,
1707 const void *pte)
1709 WARN_ON(1);
1712 #define KVM_PAGE_ARRAY_NR 16
1714 struct kvm_mmu_pages {
1715 struct mmu_page_and_offset {
1716 struct kvm_mmu_page *sp;
1717 unsigned int idx;
1718 } page[KVM_PAGE_ARRAY_NR];
1719 unsigned int nr;
1722 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1723 int idx)
1725 int i;
1727 if (sp->unsync)
1728 for (i=0; i < pvec->nr; i++)
1729 if (pvec->page[i].sp == sp)
1730 return 0;
1732 pvec->page[pvec->nr].sp = sp;
1733 pvec->page[pvec->nr].idx = idx;
1734 pvec->nr++;
1735 return (pvec->nr == KVM_PAGE_ARRAY_NR);
1738 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1739 struct kvm_mmu_pages *pvec)
1741 int i, ret, nr_unsync_leaf = 0;
1743 for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
1744 struct kvm_mmu_page *child;
1745 u64 ent = sp->spt[i];
1747 if (!is_shadow_present_pte(ent) || is_large_pte(ent))
1748 goto clear_child_bitmap;
1750 child = page_header(ent & PT64_BASE_ADDR_MASK);
1752 if (child->unsync_children) {
1753 if (mmu_pages_add(pvec, child, i))
1754 return -ENOSPC;
1756 ret = __mmu_unsync_walk(child, pvec);
1757 if (!ret)
1758 goto clear_child_bitmap;
1759 else if (ret > 0)
1760 nr_unsync_leaf += ret;
1761 else
1762 return ret;
1763 } else if (child->unsync) {
1764 nr_unsync_leaf++;
1765 if (mmu_pages_add(pvec, child, i))
1766 return -ENOSPC;
1767 } else
1768 goto clear_child_bitmap;
1770 continue;
1772 clear_child_bitmap:
1773 __clear_bit(i, sp->unsync_child_bitmap);
1774 sp->unsync_children--;
1775 WARN_ON((int)sp->unsync_children < 0);
1779 return nr_unsync_leaf;
1782 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1783 struct kvm_mmu_pages *pvec)
1785 if (!sp->unsync_children)
1786 return 0;
1788 mmu_pages_add(pvec, sp, 0);
1789 return __mmu_unsync_walk(sp, pvec);
1792 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1794 WARN_ON(!sp->unsync);
1795 trace_kvm_mmu_sync_page(sp);
1796 sp->unsync = 0;
1797 --kvm->stat.mmu_unsync;
1800 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1801 struct list_head *invalid_list);
1802 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1803 struct list_head *invalid_list);
1806 * NOTE: we should pay more attention to zapped-obsolete pages
1807 * (is_obsolete_sp(sp) && sp->role.invalid) when doing a hash list walk,
1808 * since they have been deleted from active_mmu_pages but can still be
1809 * found in the hash list.
1811 * for_each_gfn_indirect_valid_sp has skipped that kind of page and
1812 * kvm_mmu_get_page(), the only user of for_each_gfn_sp(), has skipped
1813 * all the obsolete pages.
1815 #define for_each_gfn_sp(_kvm, _sp, _gfn) \
1816 hlist_for_each_entry(_sp, \
1817 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
1818 if ((_sp)->gfn != (_gfn)) {} else
1820 #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \
1821 for_each_gfn_sp(_kvm, _sp, _gfn) \
1822 if ((_sp)->role.direct || (_sp)->role.invalid) {} else
1824 /* @sp->gfn should be write-protected at the call site */
1825 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1826 struct list_head *invalid_list, bool clear_unsync)
1828 if (sp->role.cr4_pae != !!is_pae(vcpu)) {
1829 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1830 return 1;
1833 if (clear_unsync)
1834 kvm_unlink_unsync_page(vcpu->kvm, sp);
1836 if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
1837 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1838 return 1;
1841 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1842 return 0;
1845 static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
1846 struct kvm_mmu_page *sp)
1848 LIST_HEAD(invalid_list);
1849 int ret;
1851 ret = __kvm_sync_page(vcpu, sp, &invalid_list, false);
1852 if (ret)
1853 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
1855 return ret;
1858 #ifdef CONFIG_KVM_MMU_AUDIT
1859 #include "mmu_audit.c"
1860 #else
1861 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
1862 static void mmu_audit_disable(void) { }
1863 #endif
1865 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1866 struct list_head *invalid_list)
1868 return __kvm_sync_page(vcpu, sp, invalid_list, true);
1871 /* @gfn should be write-protected at the call site */
1872 static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
1874 struct kvm_mmu_page *s;
1875 LIST_HEAD(invalid_list);
1876 bool flush = false;
1878 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
1879 if (!s->unsync)
1880 continue;
1882 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
1883 kvm_unlink_unsync_page(vcpu->kvm, s);
1884 if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
1885 (vcpu->arch.mmu.sync_page(vcpu, s))) {
1886 kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
1887 continue;
1889 flush = true;
1892 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
1893 if (flush)
1894 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1897 struct mmu_page_path {
1898 struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
1899 unsigned int idx[PT64_ROOT_LEVEL-1];
1902 #define for_each_sp(pvec, sp, parents, i) \
1903 for (i = mmu_pages_next(&pvec, &parents, -1), \
1904 sp = pvec.page[i].sp; \
1905 i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \
1906 i = mmu_pages_next(&pvec, &parents, i))
1908 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1909 struct mmu_page_path *parents,
1910 int i)
1912 int n;
1914 for (n = i+1; n < pvec->nr; n++) {
1915 struct kvm_mmu_page *sp = pvec->page[n].sp;
1917 if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1918 parents->idx[0] = pvec->page[n].idx;
1919 return n;
1922 parents->parent[sp->role.level-2] = sp;
1923 parents->idx[sp->role.level-1] = pvec->page[n].idx;
1926 return n;
1929 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1931 struct kvm_mmu_page *sp;
1932 unsigned int level = 0;
1934 do {
1935 unsigned int idx = parents->idx[level];
1937 sp = parents->parent[level];
1938 if (!sp)
1939 return;
1941 --sp->unsync_children;
1942 WARN_ON((int)sp->unsync_children < 0);
1943 __clear_bit(idx, sp->unsync_child_bitmap);
1944 level++;
1945 } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
1948 static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
1949 struct mmu_page_path *parents,
1950 struct kvm_mmu_pages *pvec)
1952 parents->parent[parent->role.level-1] = NULL;
1953 pvec->nr = 0;
1956 static void mmu_sync_children(struct kvm_vcpu *vcpu,
1957 struct kvm_mmu_page *parent)
1959 int i;
1960 struct kvm_mmu_page *sp;
1961 struct mmu_page_path parents;
1962 struct kvm_mmu_pages pages;
1963 LIST_HEAD(invalid_list);
1965 kvm_mmu_pages_init(parent, &parents, &pages);
1966 while (mmu_unsync_walk(parent, &pages)) {
1967 bool protected = false;
1969 for_each_sp(pages, sp, parents, i)
1970 protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
1972 if (protected)
1973 kvm_flush_remote_tlbs(vcpu->kvm);
1975 for_each_sp(pages, sp, parents, i) {
1976 kvm_sync_page(vcpu, sp, &invalid_list);
1977 mmu_pages_clear_parents(&parents);
1979 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
1980 cond_resched_lock(&vcpu->kvm->mmu_lock);
1981 kvm_mmu_pages_init(parent, &parents, &pages);
1985 static void init_shadow_page_table(struct kvm_mmu_page *sp)
1987 int i;
1989 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1990 sp->spt[i] = 0ull;
1993 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
1995 sp->write_flooding_count = 0;
1998 static void clear_sp_write_flooding_count(u64 *spte)
2000 struct kvm_mmu_page *sp = page_header(__pa(spte));
2002 __clear_sp_write_flooding_count(sp);
2005 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
2007 return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
2010 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
2011 gfn_t gfn,
2012 gva_t gaddr,
2013 unsigned level,
2014 int direct,
2015 unsigned access,
2016 u64 *parent_pte)
2018 union kvm_mmu_page_role role;
2019 unsigned quadrant;
2020 struct kvm_mmu_page *sp;
2021 bool need_sync = false;
2023 role = vcpu->arch.mmu.base_role;
2024 role.level = level;
2025 role.direct = direct;
2026 if (role.direct)
2027 role.cr4_pae = 0;
2028 role.access = access;
2029 if (!vcpu->arch.mmu.direct_map
2030 && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
2031 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
2032 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
2033 role.quadrant = quadrant;
2035 for_each_gfn_sp(vcpu->kvm, sp, gfn) {
2036 if (is_obsolete_sp(vcpu->kvm, sp))
2037 continue;
2039 if (!need_sync && sp->unsync)
2040 need_sync = true;
2042 if (sp->role.word != role.word)
2043 continue;
2045 if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
2046 break;
2048 mmu_page_add_parent_pte(vcpu, sp, parent_pte);
2049 if (sp->unsync_children) {
2050 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2051 kvm_mmu_mark_parents_unsync(sp);
2052 } else if (sp->unsync)
2053 kvm_mmu_mark_parents_unsync(sp);
2055 __clear_sp_write_flooding_count(sp);
2056 trace_kvm_mmu_get_page(sp, false);
2057 return sp;
2059 ++vcpu->kvm->stat.mmu_cache_miss;
2060 sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
2061 if (!sp)
2062 return sp;
2063 sp->gfn = gfn;
2064 sp->role = role;
2065 hlist_add_head(&sp->hash_link,
2066 &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
2067 if (!direct) {
2068 if (rmap_write_protect(vcpu->kvm, gfn))
2069 kvm_flush_remote_tlbs(vcpu->kvm);
2070 if (level > PT_PAGE_TABLE_LEVEL && need_sync)
2071 kvm_sync_pages(vcpu, gfn);
2073 account_shadowed(vcpu->kvm, gfn);
2075 sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
2076 init_shadow_page_table(sp);
2077 trace_kvm_mmu_get_page(sp, true);
2078 return sp;
2081 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2082 struct kvm_vcpu *vcpu, u64 addr)
2084 iterator->addr = addr;
2085 iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
2086 iterator->level = vcpu->arch.mmu.shadow_root_level;
2088 if (iterator->level == PT64_ROOT_LEVEL &&
2089 vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
2090 !vcpu->arch.mmu.direct_map)
2091 --iterator->level;
2093 if (iterator->level == PT32E_ROOT_LEVEL) {
2094 iterator->shadow_addr
2095 = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
2096 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
2097 --iterator->level;
2098 if (!iterator->shadow_addr)
2099 iterator->level = 0;
2103 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2105 if (iterator->level < PT_PAGE_TABLE_LEVEL)
2106 return false;
2108 iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
2109 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2110 return true;
2113 static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2114 u64 spte)
2116 if (is_last_spte(spte, iterator->level)) {
2117 iterator->level = 0;
2118 return;
2121 iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
2122 --iterator->level;
2125 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2127 return __shadow_walk_next(iterator, *iterator->sptep);
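/*
 * Illustrative sketch (not part of the original file): a typical
 * lockless walk of the shadow page table, combining
 * walk_shadow_page_lockless_begin/end with
 * for_each_shadow_entry_lockless.  The helper name is hypothetical;
 * real users elsewhere in this file (e.g. the MMIO spte walker) follow
 * the same pattern.
 */
static u64 get_last_spte_lockless_sketch(struct kvm_vcpu *vcpu, u64 addr)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 spte = 0ull;

	walk_shadow_page_lockless_begin(vcpu);
	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
		if (!is_shadow_present_pte(spte))
			break;
	walk_shadow_page_lockless_end(vcpu);

	return spte;
}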
2130 static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp, bool accessed)
2132 u64 spte;
2134 BUILD_BUG_ON(VMX_EPT_READABLE_MASK != PT_PRESENT_MASK ||
2135 VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2137 spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK |
2138 shadow_user_mask | shadow_x_mask;
2140 if (accessed)
2141 spte |= shadow_accessed_mask;
2143 mmu_spte_set(sptep, spte);
2146 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2147 unsigned direct_access)
2149 if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2150 struct kvm_mmu_page *child;
2153 * For the direct sp, if the guest pte's dirty bit
2154 * changed from clean to dirty, it will corrupt the
2155 * sp's access: it would allow writes through the read-only sp,
2156 * so we should update the spte at this point to get
2157 * a new sp with the correct access.
2159 child = page_header(*sptep & PT64_BASE_ADDR_MASK);
2160 if (child->role.access == direct_access)
2161 return;
2163 drop_parent_pte(child, sptep);
2164 kvm_flush_remote_tlbs(vcpu->kvm);
2168 static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
2169 u64 *spte)
2171 u64 pte;
2172 struct kvm_mmu_page *child;
2174 pte = *spte;
2175 if (is_shadow_present_pte(pte)) {
2176 if (is_last_spte(pte, sp->role.level)) {
2177 drop_spte(kvm, spte);
2178 if (is_large_pte(pte))
2179 --kvm->stat.lpages;
2180 } else {
2181 child = page_header(pte & PT64_BASE_ADDR_MASK);
2182 drop_parent_pte(child, spte);
2184 return true;
2187 if (is_mmio_spte(pte))
2188 mmu_spte_clear_no_track(spte);
2190 return false;
2193 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
2194 struct kvm_mmu_page *sp)
2196 unsigned i;
2198 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2199 mmu_page_zap_pte(kvm, sp, sp->spt + i);
2202 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
2204 mmu_page_remove_parent_pte(sp, parent_pte);
2207 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2209 u64 *sptep;
2210 struct rmap_iterator iter;
2212 while ((sptep = rmap_get_first(sp->parent_ptes, &iter)))
2213 drop_parent_pte(sp, sptep);
2216 static int mmu_zap_unsync_children(struct kvm *kvm,
2217 struct kvm_mmu_page *parent,
2218 struct list_head *invalid_list)
2220 int i, zapped = 0;
2221 struct mmu_page_path parents;
2222 struct kvm_mmu_pages pages;
2224 if (parent->role.level == PT_PAGE_TABLE_LEVEL)
2225 return 0;
2227 kvm_mmu_pages_init(parent, &parents, &pages);
2228 while (mmu_unsync_walk(parent, &pages)) {
2229 struct kvm_mmu_page *sp;
2231 for_each_sp(pages, sp, parents, i) {
2232 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2233 mmu_pages_clear_parents(&parents);
2234 zapped++;
2236 kvm_mmu_pages_init(parent, &parents, &pages);
2239 return zapped;
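/*
 * First half of zapping a shadow page: unlink @sp (and any unsync
 * children) from the paging structures and, unless it is still in use
 * as a root, queue it on @invalid_list.  The TLB flush and the actual
 * freeing are deferred to kvm_mmu_commit_zap_page().  Returns the
 * number of pages zapped.
 */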
2242 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2243 struct list_head *invalid_list)
2245 int ret;
2247 trace_kvm_mmu_prepare_zap_page(sp);
2248 ++kvm->stat.mmu_shadow_zapped;
2249 ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
2250 kvm_mmu_page_unlink_children(kvm, sp);
2251 kvm_mmu_unlink_parents(kvm, sp);
2253 if (!sp->role.invalid && !sp->role.direct)
2254 unaccount_shadowed(kvm, sp->gfn);
2256 if (sp->unsync)
2257 kvm_unlink_unsync_page(kvm, sp);
2258 if (!sp->root_count) {
2259 /* Count self */
2260 ret++;
2261 list_move(&sp->link, invalid_list);
2262 kvm_mod_used_mmu_pages(kvm, -1);
2263 } else {
2264 list_move(&sp->link, &kvm->arch.active_mmu_pages);
2267 * The obsolete pages cannot be used on any vcpus.
2268 * See the comments in kvm_mmu_invalidate_zap_all_pages().
2270 if (!sp->role.invalid && !is_obsolete_sp(kvm, sp))
2271 kvm_reload_remote_mmus(kvm);
2274 sp->role.invalid = 1;
2275 return ret;
2278 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2279 struct list_head *invalid_list)
2281 struct kvm_mmu_page *sp, *nsp;
2283 if (list_empty(invalid_list))
2284 return;
2287 * wmb: make sure everyone sees our modifications to the page tables
2288 * rmb: make sure we see changes to vcpu->mode
2290 smp_mb();
2293 * Wait for all vcpus to exit guest mode and/or lockless shadow
2294 * page table walks.
2296 kvm_flush_remote_tlbs(kvm);
2298 list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2299 WARN_ON(!sp->role.invalid || sp->root_count);
2300 kvm_mmu_free_page(sp);
2304 static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
2305 struct list_head *invalid_list)
2307 struct kvm_mmu_page *sp;
2309 if (list_empty(&kvm->arch.active_mmu_pages))
2310 return false;
2312 sp = list_entry(kvm->arch.active_mmu_pages.prev,
2313 struct kvm_mmu_page, link);
2314 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2316 return true;
2320 * Changing the number of mmu pages allocated to the vm
2321 * Note: if goal_nr_mmu_pages is too small, you will get a deadlock
2323 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
2325 LIST_HEAD(invalid_list);
2327 spin_lock(&kvm->mmu_lock);
2329 if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2330 /* Need to free some mmu pages to achieve the goal. */
2331 while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages)
2332 if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list))
2333 break;
2335 kvm_mmu_commit_zap_page(kvm, &invalid_list);
2336 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2339 kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2341 spin_unlock(&kvm->mmu_lock);
2344 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2346 struct kvm_mmu_page *sp;
2347 LIST_HEAD(invalid_list);
2348 int r;
2350 pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2351 r = 0;
2352 spin_lock(&kvm->mmu_lock);
2353 for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2354 pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2355 sp->role.word);
2356 r = 1;
2357 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2359 kvm_mmu_commit_zap_page(kvm, &invalid_list);
2360 spin_unlock(&kvm->mmu_lock);
2362 return r;
2364 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
2367 * The function is based on mtrr_type_lookup() in
2368 * arch/x86/kernel/cpu/mtrr/generic.c
2370 static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
2371 u64 start, u64 end)
2373 int i;
2374 u64 base, mask;
2375 u8 prev_match, curr_match;
2376 int num_var_ranges = KVM_NR_VAR_MTRR;
2378 if (!mtrr_state->enabled)
2379 return 0xFF;
2381 /* Make end inclusive, instead of exclusive */
2382 end--;
2384 /* Look in fixed ranges. Just return the type as per start */
2385 if (mtrr_state->have_fixed && (start < 0x100000)) {
2386 int idx;
2388 if (start < 0x80000) {
2389 idx = 0;
2390 idx += (start >> 16);
2391 return mtrr_state->fixed_ranges[idx];
2392 } else if (start < 0xC0000) {
2393 idx = 1 * 8;
2394 idx += ((start - 0x80000) >> 14);
2395 return mtrr_state->fixed_ranges[idx];
2396 } else if (start < 0x1000000) {
2397 idx = 3 * 8;
2398 idx += ((start - 0xC0000) >> 12);
2399 return mtrr_state->fixed_ranges[idx];
2404 * Look in variable ranges
2405 * Look for multiple ranges matching this address and pick the type
2406 * as per MTRR precedence
2408 if (!(mtrr_state->enabled & 2))
2409 return mtrr_state->def_type;
2411 prev_match = 0xFF;
2412 for (i = 0; i < num_var_ranges; ++i) {
2413 unsigned short start_state, end_state;
2415 if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
2416 continue;
2418 base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
2419 (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
2420 mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
2421 (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
2423 start_state = ((start & mask) == (base & mask));
2424 end_state = ((end & mask) == (base & mask));
2425 if (start_state != end_state)
2426 return 0xFE;
2428 if ((start & mask) != (base & mask))
2429 continue;
2431 curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
2432 if (prev_match == 0xFF) {
2433 prev_match = curr_match;
2434 continue;
2437 if (prev_match == MTRR_TYPE_UNCACHABLE ||
2438 curr_match == MTRR_TYPE_UNCACHABLE)
2439 return MTRR_TYPE_UNCACHABLE;
2441 if ((prev_match == MTRR_TYPE_WRBACK &&
2442 curr_match == MTRR_TYPE_WRTHROUGH) ||
2443 (prev_match == MTRR_TYPE_WRTHROUGH &&
2444 curr_match == MTRR_TYPE_WRBACK)) {
2445 prev_match = MTRR_TYPE_WRTHROUGH;
2446 curr_match = MTRR_TYPE_WRTHROUGH;
2449 if (prev_match != curr_match)
2450 return MTRR_TYPE_UNCACHABLE;
2453 if (prev_match != 0xFF)
2454 return prev_match;
2456 return mtrr_state->def_type;
2459 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
2461 u8 mtrr;
2463 mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
2464 (gfn << PAGE_SHIFT) + PAGE_SIZE);
2465 if (mtrr == 0xfe || mtrr == 0xff)
2466 mtrr = MTRR_TYPE_WRBACK;
2467 return mtrr;
2469 EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
2471 static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2473 trace_kvm_mmu_unsync_page(sp);
2474 ++vcpu->kvm->stat.mmu_unsync;
2475 sp->unsync = 1;
2477 kvm_mmu_mark_parents_unsync(sp);
2480 static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
2482 struct kvm_mmu_page *s;
2484 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
2485 if (s->unsync)
2486 continue;
2487 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
2488 __kvm_unsync_page(vcpu, s);
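/*
 * Decide whether a writable mapping may be created for @gfn.  Returns 1
 * if an indirect shadow page for the gfn exists that cannot be marked
 * unsync (so the gfn must stay write-protected); otherwise the shadow
 * pages are unsynced and 0 is returned.
 */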
2492 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
2493 bool can_unsync)
2495 struct kvm_mmu_page *s;
2496 bool need_unsync = false;
2498 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
2499 if (!can_unsync)
2500 return 1;
2502 if (s->role.level != PT_PAGE_TABLE_LEVEL)
2503 return 1;
2505 if (!s->unsync)
2506 need_unsync = true;
2508 if (need_unsync)
2509 kvm_unsync_pages(vcpu, gfn);
2510 return 0;
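/*
 * Build and install a shadow pte for @gfn -> @pfn.  Returns nonzero
 * when the gfn had to stay write-protected, in which case the caller
 * treats a write fault as requiring emulation and requests a TLB flush.
 */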
2513 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2514 unsigned pte_access, int level,
2515 gfn_t gfn, pfn_t pfn, bool speculative,
2516 bool can_unsync, bool host_writable)
2518 u64 spte;
2519 int ret = 0;
2521 if (set_mmio_spte(vcpu->kvm, sptep, gfn, pfn, pte_access))
2522 return 0;
2524 spte = PT_PRESENT_MASK;
2525 if (!speculative)
2526 spte |= shadow_accessed_mask;
2528 if (pte_access & ACC_EXEC_MASK)
2529 spte |= shadow_x_mask;
2530 else
2531 spte |= shadow_nx_mask;
2533 if (pte_access & ACC_USER_MASK)
2534 spte |= shadow_user_mask;
2536 if (level > PT_PAGE_TABLE_LEVEL)
2537 spte |= PT_PAGE_SIZE_MASK;
2538 if (tdp_enabled)
2539 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
2540 kvm_is_reserved_pfn(pfn));
2542 if (host_writable)
2543 spte |= SPTE_HOST_WRITEABLE;
2544 else
2545 pte_access &= ~ACC_WRITE_MASK;
2547 spte |= (u64)pfn << PAGE_SHIFT;
2549 if (pte_access & ACC_WRITE_MASK) {
2552 * Another vcpu may create a new sp in the window between
2553 * mapping_level() and acquiring the mmu-lock. We can
2554 * allow the guest to retry the access; the mapping can
2555 * be fixed when the guest refaults.
2557 if (level > PT_PAGE_TABLE_LEVEL &&
2558 has_wrprotected_page(vcpu->kvm, gfn, level))
2559 goto done;
2561 spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
2564 * Optimization: for pte sync, if spte was writable the hash
2565 * lookup is unnecessary (and expensive). Write protection
2566 * is the responsibility of mmu_get_page / kvm_sync_page.
2567 * Same reasoning can be applied to dirty page accounting.
2569 if (!can_unsync && is_writable_pte(*sptep))
2570 goto set_pte;
2572 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
2573 pgprintk("%s: found shadow page for %llx, marking ro\n",
2574 __func__, gfn);
2575 ret = 1;
2576 pte_access &= ~ACC_WRITE_MASK;
2577 spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
2581 if (pte_access & ACC_WRITE_MASK) {
2582 mark_page_dirty(vcpu->kvm, gfn);
2583 spte |= shadow_dirty_mask;
2586 set_pte:
2587 if (mmu_spte_update(sptep, spte))
2588 kvm_flush_remote_tlbs(vcpu->kvm);
2589 done:
2590 return ret;
2593 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2594 unsigned pte_access, int write_fault, int *emulate,
2595 int level, gfn_t gfn, pfn_t pfn, bool speculative,
2596 bool host_writable)
2598 int was_rmapped = 0;
2599 int rmap_count;
2601 pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
2602 *sptep, write_fault, gfn);
2604 if (is_rmap_spte(*sptep)) {
2606 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2607 * the parent of the now unreachable PTE.
2609 if (level > PT_PAGE_TABLE_LEVEL &&
2610 !is_large_pte(*sptep)) {
2611 struct kvm_mmu_page *child;
2612 u64 pte = *sptep;
2614 child = page_header(pte & PT64_BASE_ADDR_MASK);
2615 drop_parent_pte(child, sptep);
2616 kvm_flush_remote_tlbs(vcpu->kvm);
2617 } else if (pfn != spte_to_pfn(*sptep)) {
2618 pgprintk("hfn old %llx new %llx\n",
2619 spte_to_pfn(*sptep), pfn);
2620 drop_spte(vcpu->kvm, sptep);
2621 kvm_flush_remote_tlbs(vcpu->kvm);
2622 } else
2623 was_rmapped = 1;
2626 if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative,
2627 true, host_writable)) {
2628 if (write_fault)
2629 *emulate = 1;
2630 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2633 if (unlikely(is_mmio_spte(*sptep) && emulate))
2634 *emulate = 1;
2636 pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2637 pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
2638 is_large_pte(*sptep)? "2MB" : "4kB",
2639 *sptep & PT_PRESENT_MASK ?"RW":"R", gfn,
2640 *sptep, sptep);
2641 if (!was_rmapped && is_large_pte(*sptep))
2642 ++vcpu->kvm->stat.lpages;
2644 if (is_shadow_present_pte(*sptep)) {
2645 if (!was_rmapped) {
2646 rmap_count = rmap_add(vcpu, sptep, gfn);
2647 if (rmap_count > RMAP_RECYCLE_THRESHOLD)
2648 rmap_recycle(vcpu, sptep, gfn);
2652 kvm_release_pfn_clean(pfn);
2655 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2656 bool no_dirty_log)
2658 struct kvm_memory_slot *slot;
2660 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
2661 if (!slot)
2662 return KVM_PFN_ERR_FAULT;
2664 return gfn_to_pfn_memslot_atomic(slot, gfn);
2667 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2668 struct kvm_mmu_page *sp,
2669 u64 *start, u64 *end)
2671 struct page *pages[PTE_PREFETCH_NUM];
2672 unsigned access = sp->role.access;
2673 int i, ret;
2674 gfn_t gfn;
2676 gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2677 if (!gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK))
2678 return -1;
2680 ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start);
2681 if (ret <= 0)
2682 return -1;
2684 for (i = 0; i < ret; i++, gfn++, start++)
2685 mmu_set_spte(vcpu, start, access, 0, NULL,
2686 sp->role.level, gfn, page_to_pfn(pages[i]),
2687 true, true);
2689 return 0;
2692 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2693 struct kvm_mmu_page *sp, u64 *sptep)
2695 u64 *spte, *start = NULL;
2696 int i;
2698 WARN_ON(!sp->role.direct);
2700 i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
2701 spte = sp->spt + i;
2703 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2704 if (is_shadow_present_pte(*spte) || spte == sptep) {
2705 if (!start)
2706 continue;
2707 if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2708 break;
2709 start = NULL;
2710 } else if (!start)
2711 start = spte;
2715 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2717 struct kvm_mmu_page *sp;
2720 * Since there is no accessed bit on EPT, there is no way to
2721 * distinguish between actually accessed translations
2722 * and prefetched ones, so disable pte prefetch if EPT is
2723 * enabled.
2725 if (!shadow_accessed_mask)
2726 return;
2728 sp = page_header(__pa(sptep));
2729 if (sp->role.level > PT_PAGE_TABLE_LEVEL)
2730 return;
2732 __direct_pte_prefetch(vcpu, sp, sptep);
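/*
 * Fault handler for direct maps: walk the shadow page table for @v,
 * allocating intermediate shadow pages as needed, and install the
 * final spte at @level via mmu_set_spte().  Returns nonzero if the
 * access has to be emulated.
 */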
2735 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
2736 int map_writable, int level, gfn_t gfn, pfn_t pfn,
2737 bool prefault)
2739 struct kvm_shadow_walk_iterator iterator;
2740 struct kvm_mmu_page *sp;
2741 int emulate = 0;
2742 gfn_t pseudo_gfn;
2744 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2745 return 0;
2747 for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
2748 if (iterator.level == level) {
2749 mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
2750 write, &emulate, level, gfn, pfn,
2751 prefault, map_writable);
2752 direct_pte_prefetch(vcpu, iterator.sptep);
2753 ++vcpu->stat.pf_fixed;
2754 break;
2757 drop_large_spte(vcpu, iterator.sptep);
2758 if (!is_shadow_present_pte(*iterator.sptep)) {
2759 u64 base_addr = iterator.addr;
2761 base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
2762 pseudo_gfn = base_addr >> PAGE_SHIFT;
2763 sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
2764 iterator.level - 1,
2765 1, ACC_ALL, iterator.sptep);
2767 link_shadow_page(iterator.sptep, sp, true);
2770 return emulate;
2773 static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
2775 siginfo_t info;
2777 info.si_signo = SIGBUS;
2778 info.si_errno = 0;
2779 info.si_code = BUS_MCEERR_AR;
2780 info.si_addr = (void __user *)address;
2781 info.si_addr_lsb = PAGE_SHIFT;
2783 send_sig_info(SIGBUS, &info, tsk);
2786 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
2789 * Do not cache the mmio info caused by writing the readonly gfn
2790 * into the spte, otherwise a read access on the readonly gfn can
2791 * also cause an mmio page fault and be treated as mmio access.
2792 * Return 1 to tell kvm to emulate it.
2794 if (pfn == KVM_PFN_ERR_RO_FAULT)
2795 return 1;
2797 if (pfn == KVM_PFN_ERR_HWPOISON) {
2798 kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
2799 return 0;
2802 return -EFAULT;
2805 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
2806 gfn_t *gfnp, pfn_t *pfnp, int *levelp)
2808 pfn_t pfn = *pfnp;
2809 gfn_t gfn = *gfnp;
2810 int level = *levelp;
2813 * Check if it's a transparent hugepage. If this would be a
2814 * hugetlbfs page, level wouldn't be set to
2815 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
2816 * here.
2818 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
2819 level == PT_PAGE_TABLE_LEVEL &&
2820 PageTransCompound(pfn_to_page(pfn)) &&
2821 !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
2822 unsigned long mask;
2824 * mmu_notifier_retry was successful and we hold the
2825 * mmu_lock here, so the pmd can't be split
2826 * from under us, and in turn
2827 * __split_huge_page_refcount() can't run from under
2828 * us and we can safely transfer the refcount from
2829 * PG_tail to PG_head as we switch the pfn from tail to
2830 * head.
2832 *levelp = level = PT_DIRECTORY_LEVEL;
2833 mask = KVM_PAGES_PER_HPAGE(level) - 1;
2834 VM_BUG_ON((gfn & mask) != (pfn & mask));
2835 if (pfn & mask) {
2836 gfn &= ~mask;
2837 *gfnp = gfn;
2838 kvm_release_pfn_clean(pfn);
2839 pfn &= ~mask;
2840 kvm_get_pfn(pfn);
2841 *pfnp = pfn;
2846 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
2847 pfn_t pfn, unsigned access, int *ret_val)
2849 bool ret = true;
2851 /* The pfn is invalid, report the error! */
2852 if (unlikely(is_error_pfn(pfn))) {
2853 *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
2854 goto exit;
2857 if (unlikely(is_noslot_pfn(pfn)))
2858 vcpu_cache_mmio_info(vcpu, gva, gfn, access);
2860 ret = false;
2861 exit:
2862 return ret;
2865 static bool page_fault_can_be_fast(u32 error_code)
2868 * Do not fix the mmio spte with invalid generation number which
2869 * needs to be updated by the slow page fault path.
2871 if (unlikely(error_code & PFERR_RSVD_MASK))
2872 return false;
2875 * #PF can be fast only if the shadow page table is present and it
2876 * is caused by write-protect, which means we just need to change the
2877 * W bit of the spte, and that can be done out of mmu-lock.
2879 if (!(error_code & PFERR_PRESENT_MASK) ||
2880 !(error_code & PFERR_WRITE_MASK))
2881 return false;
2883 return true;
2886 static bool
2887 fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
2888 u64 *sptep, u64 spte)
2890 gfn_t gfn;
2892 WARN_ON(!sp->role.direct);
2895 * The gfn of direct spte is stable since it is calculated
2896 * from sp->gfn.
2898 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
2901 * Theoretically we could also set dirty bit (and flush TLB) here in
2902 * order to eliminate unnecessary PML logging. See comments in
2903 * set_spte. But fast_page_fault is very unlikely to happen with PML
2904 * enabled, so we do not do this. This might result in the same GPA
2905 * to be logged in PML buffer again when the write really happens, and
2906 * eventually mark_page_dirty will be called twice for it. But that does no
2907 * harm. This also avoids the TLB flush needed after setting the dirty bit,
2908 * so non-PML cases won't be impacted.
2910 * Compare with set_spte where instead shadow_dirty_mask is set.
2912 if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte)
2913 mark_page_dirty(vcpu->kvm, gfn);
2915 return true;
2919 * Return value:
2920 * - true: let the vcpu access the same address again.
2921 * - false: let the real page fault path fix it.
2923 static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
2924 u32 error_code)
2926 struct kvm_shadow_walk_iterator iterator;
2927 struct kvm_mmu_page *sp;
2928 bool ret = false;
2929 u64 spte = 0ull;
2931 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2932 return false;
2934 if (!page_fault_can_be_fast(error_code))
2935 return false;
2937 walk_shadow_page_lockless_begin(vcpu);
2938 for_each_shadow_entry_lockless(vcpu, gva, iterator, spte)
2939 if (!is_shadow_present_pte(spte) || iterator.level < level)
2940 break;
2943 * If the mapping has been changed, let the vcpu fault on the
2944 * same address again.
2946 if (!is_rmap_spte(spte)) {
2947 ret = true;
2948 goto exit;
2951 sp = page_header(__pa(iterator.sptep));
2952 if (!is_last_spte(spte, sp->role.level))
2953 goto exit;
2956 * Check if it is a spurious fault caused by a lazily flushed TLB.
2958 * Need not check the access of upper level table entries since
2959 * they are always ACC_ALL.
2961 if (is_writable_pte(spte)) {
2962 ret = true;
2963 goto exit;
2967 * Currently, to simplify the code, only the spte write-protected
2968 * by dirty logging can be fast fixed.
2970 if (!spte_is_locklessly_modifiable(spte))
2971 goto exit;
2974 * Do not fix write-permission on the large spte since we only dirty
2975 * the first page into the dirty-bitmap in fast_pf_fix_direct_spte(),
2976 * which means other pages are missed if its slot is dirty-logged.
2978 * Instead, we let the slow page fault path create a normal spte to
2979 * fix the access.
2981 * See the comments in kvm_arch_commit_memory_region().
2983 if (sp->role.level > PT_PAGE_TABLE_LEVEL)
2984 goto exit;
2987 * Currently, fast page fault only works for direct mapping since
2988 * the gfn is not stable for indirect shadow pages.
2989 * See Documentation/virtual/kvm/locking.txt to get more detail.
2991 ret = fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte);
2992 exit:
2993 trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,
2994 spte, ret);
2995 walk_shadow_page_lockless_end(vcpu);
2997 return ret;
3000 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
3001 gva_t gva, pfn_t *pfn, bool write, bool *writable);
3002 static void make_mmu_pages_available(struct kvm_vcpu *vcpu);
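/*
 * Page fault handler used while the guest runs with paging disabled:
 * the faulting address translates 1:1 to a gfn, so resolve the pfn and
 * map it with __direct_map(), honouring the usual fast-path and
 * mmu_notifier retry rules.
 */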
3004 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
3005 gfn_t gfn, bool prefault)
3007 int r;
3008 int level;
3009 int force_pt_level;
3010 pfn_t pfn;
3011 unsigned long mmu_seq;
3012 bool map_writable, write = error_code & PFERR_WRITE_MASK;
3014 force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
3015 if (likely(!force_pt_level)) {
3016 level = mapping_level(vcpu, gfn);
3018 * This path builds a PAE pagetable - so we can map
3019 * 2mb pages at maximum. Therefore check if the level
3020 * is larger than that.
3022 if (level > PT_DIRECTORY_LEVEL)
3023 level = PT_DIRECTORY_LEVEL;
3025 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
3026 } else
3027 level = PT_PAGE_TABLE_LEVEL;
3029 if (fast_page_fault(vcpu, v, level, error_code))
3030 return 0;
3032 mmu_seq = vcpu->kvm->mmu_notifier_seq;
3033 smp_rmb();
3035 if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
3036 return 0;
3038 if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
3039 return r;
3041 spin_lock(&vcpu->kvm->mmu_lock);
3042 if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
3043 goto out_unlock;
3044 make_mmu_pages_available(vcpu);
3045 if (likely(!force_pt_level))
3046 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
3047 r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
3048 prefault);
3049 spin_unlock(&vcpu->kvm->mmu_lock);
3052 return r;
3054 out_unlock:
3055 spin_unlock(&vcpu->kvm->mmu_lock);
3056 kvm_release_pfn_clean(pfn);
3057 return 0;
3061 static void mmu_free_roots(struct kvm_vcpu *vcpu)
3063 int i;
3064 struct kvm_mmu_page *sp;
3065 LIST_HEAD(invalid_list);
3067 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3068 return;
3070 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
3071 (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
3072 vcpu->arch.mmu.direct_map)) {
3073 hpa_t root = vcpu->arch.mmu.root_hpa;
3075 spin_lock(&vcpu->kvm->mmu_lock);
3076 sp = page_header(root);
3077 --sp->root_count;
3078 if (!sp->root_count && sp->role.invalid) {
3079 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
3080 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
3082 spin_unlock(&vcpu->kvm->mmu_lock);
3083 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
3084 return;
3087 spin_lock(&vcpu->kvm->mmu_lock);
3088 for (i = 0; i < 4; ++i) {
3089 hpa_t root = vcpu->arch.mmu.pae_root[i];
3091 if (root) {
3092 root &= PT64_BASE_ADDR_MASK;
3093 sp = page_header(root);
3094 --sp->root_count;
3095 if (!sp->root_count && sp->role.invalid)
3096 kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
3097 &invalid_list);
3099 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
3101 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
3102 spin_unlock(&vcpu->kvm->mmu_lock);
3103 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
3106 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
3108 int ret = 0;
3110 if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
3111 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3112 ret = 1;
3115 return ret;
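/*
 * Allocate the shadow root(s) for a direct-mapped MMU: one top-level
 * shadow page when the shadow root level is PT64_ROOT_LEVEL, or four
 * PAE roots when it is PT32E_ROOT_LEVEL.
 */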
3118 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
3120 struct kvm_mmu_page *sp;
3121 unsigned i;
3123 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
3124 spin_lock(&vcpu->kvm->mmu_lock);
3125 make_mmu_pages_available(vcpu);
3126 sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL,
3127 1, ACC_ALL, NULL);
3128 ++sp->root_count;
3129 spin_unlock(&vcpu->kvm->mmu_lock);
3130 vcpu->arch.mmu.root_hpa = __pa(sp->spt);
3131 } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
3132 for (i = 0; i < 4; ++i) {
3133 hpa_t root = vcpu->arch.mmu.pae_root[i];
3135 MMU_WARN_ON(VALID_PAGE(root));
3136 spin_lock(&vcpu->kvm->mmu_lock);
3137 make_mmu_pages_available(vcpu);
3138 sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
3139 i << 30,
3140 PT32_ROOT_LEVEL, 1, ACC_ALL,
3141 NULL);
3142 root = __pa(sp->spt);
3143 ++sp->root_count;
3144 spin_unlock(&vcpu->kvm->mmu_lock);
3145 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
3147 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
3148 } else
3149 BUG();
3151 return 0;
3154 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3156 struct kvm_mmu_page *sp;
3157 u64 pdptr, pm_mask;
3158 gfn_t root_gfn;
3159 int i;
3161 root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
3163 if (mmu_check_root(vcpu, root_gfn))
3164 return 1;
3167 * Do we shadow a long mode page table? If so we need to
3168 * write-protect the guest's page table root.
3170 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
3171 hpa_t root = vcpu->arch.mmu.root_hpa;
3173 MMU_WARN_ON(VALID_PAGE(root));
3175 spin_lock(&vcpu->kvm->mmu_lock);
3176 make_mmu_pages_available(vcpu);
3177 sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
3178 0, ACC_ALL, NULL);
3179 root = __pa(sp->spt);
3180 ++sp->root_count;
3181 spin_unlock(&vcpu->kvm->mmu_lock);
3182 vcpu->arch.mmu.root_hpa = root;
3183 return 0;
3187 * We shadow a 32 bit page table. This may be a legacy 2-level
3188 * or a PAE 3-level page table. In either case we need to be aware that
3189 * the shadow page table may be a PAE or a long mode page table.
3191 pm_mask = PT_PRESENT_MASK;
3192 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL)
3193 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
3195 for (i = 0; i < 4; ++i) {
3196 hpa_t root = vcpu->arch.mmu.pae_root[i];
3198 MMU_WARN_ON(VALID_PAGE(root));
3199 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
3200 pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
3201 if (!is_present_gpte(pdptr)) {
3202 vcpu->arch.mmu.pae_root[i] = 0;
3203 continue;
3205 root_gfn = pdptr >> PAGE_SHIFT;
3206 if (mmu_check_root(vcpu, root_gfn))
3207 return 1;
3209 spin_lock(&vcpu->kvm->mmu_lock);
3210 make_mmu_pages_available(vcpu);
3211 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
3212 PT32_ROOT_LEVEL, 0,
3213 ACC_ALL, NULL);
3214 root = __pa(sp->spt);
3215 ++sp->root_count;
3216 spin_unlock(&vcpu->kvm->mmu_lock);
3218 vcpu->arch.mmu.pae_root[i] = root | pm_mask;
3220 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
3223 * If we shadow a 32 bit page table with a long mode page
3224 * table we enter this path.
3226 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
3227 if (vcpu->arch.mmu.lm_root == NULL) {
3229 * The additional page necessary for this is only
3230 * allocated on demand.
3233 u64 *lm_root;
3235 lm_root = (void*)get_zeroed_page(GFP_KERNEL);
3236 if (lm_root == NULL)
3237 return 1;
3239 lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;
3241 vcpu->arch.mmu.lm_root = lm_root;
3244 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
3247 return 0;
3250 static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
3252 if (vcpu->arch.mmu.direct_map)
3253 return mmu_alloc_direct_roots(vcpu);
3254 else
3255 return mmu_alloc_shadow_roots(vcpu);
3258 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
3260 int i;
3261 struct kvm_mmu_page *sp;
3263 if (vcpu->arch.mmu.direct_map)
3264 return;
3266 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3267 return;
3269 vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3270 kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3271 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
3272 hpa_t root = vcpu->arch.mmu.root_hpa;
3273 sp = page_header(root);
3274 mmu_sync_children(vcpu, sp);
3275 kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3276 return;
3278 for (i = 0; i < 4; ++i) {
3279 hpa_t root = vcpu->arch.mmu.pae_root[i];
3281 if (root && VALID_PAGE(root)) {
3282 root &= PT64_BASE_ADDR_MASK;
3283 sp = page_header(root);
3284 mmu_sync_children(vcpu, sp);
3287 kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3290 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3292 spin_lock(&vcpu->kvm->mmu_lock);
3293 mmu_sync_roots(vcpu);
3294 spin_unlock(&vcpu->kvm->mmu_lock);
3296 EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);
3298 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
3299 u32 access, struct x86_exception *exception)
3301 if (exception)
3302 exception->error_code = 0;
3303 return vaddr;
3306 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
3307 u32 access,
3308 struct x86_exception *exception)
3310 if (exception)
3311 exception->error_code = 0;
3312 return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3315 static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3317 if (direct)
3318 return vcpu_match_mmio_gpa(vcpu, addr);
3320 return vcpu_match_mmio_gva(vcpu, addr);
3323 static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
3325 struct kvm_shadow_walk_iterator iterator;
3326 u64 spte = 0ull;
3328 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3329 return spte;
3331 walk_shadow_page_lockless_begin(vcpu);
3332 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
3333 if (!is_shadow_present_pte(spte))
3334 break;
3335 walk_shadow_page_lockless_end(vcpu);
3337 return spte;
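/*
 * Handle a fault caused by a reserved-bit (MMIO) spte.  Returns
 * RET_MMIO_PF_EMULATE when the access should be emulated as MMIO,
 * RET_MMIO_PF_INVALID when the cached generation is stale and the
 * normal fault path must handle it, or RET_MMIO_PF_RETRY to simply
 * re-execute the access.
 */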
3340 int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3342 u64 spte;
3344 if (quickly_check_mmio_pf(vcpu, addr, direct))
3345 return RET_MMIO_PF_EMULATE;
3347 spte = walk_shadow_page_get_mmio_spte(vcpu, addr);
3349 if (is_mmio_spte(spte)) {
3350 gfn_t gfn = get_mmio_spte_gfn(spte);
3351 unsigned access = get_mmio_spte_access(spte);
3353 if (!check_mmio_spte(vcpu->kvm, spte))
3354 return RET_MMIO_PF_INVALID;
3356 if (direct)
3357 addr = 0;
3359 trace_handle_mmio_page_fault(addr, gfn, access);
3360 vcpu_cache_mmio_info(vcpu, addr, gfn, access);
3361 return RET_MMIO_PF_EMULATE;
3365 * If the page table is zapped by other cpus, let the CPU fault again on
3366 * the address.
3368 return RET_MMIO_PF_RETRY;
3370 EXPORT_SYMBOL_GPL(handle_mmio_page_fault_common);
3372 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr,
3373 u32 error_code, bool direct)
3375 int ret;
3377 ret = handle_mmio_page_fault_common(vcpu, addr, direct);
3378 WARN_ON(ret == RET_MMIO_PF_BUG);
3379 return ret;
3382 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
3383 u32 error_code, bool prefault)
3385 gfn_t gfn;
3386 int r;
3388 pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
3390 if (unlikely(error_code & PFERR_RSVD_MASK)) {
3391 r = handle_mmio_page_fault(vcpu, gva, error_code, true);
3393 if (likely(r != RET_MMIO_PF_INVALID))
3394 return r;
3397 r = mmu_topup_memory_caches(vcpu);
3398 if (r)
3399 return r;
3401 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
3403 gfn = gva >> PAGE_SHIFT;
3405 return nonpaging_map(vcpu, gva & PAGE_MASK,
3406 error_code, gfn, prefault);
3409 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
3411 struct kvm_arch_async_pf arch;
3413 arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
3414 arch.gfn = gfn;
3415 arch.direct_map = vcpu->arch.mmu.direct_map;
3416 arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
3418 return kvm_setup_async_pf(vcpu, gva, gfn_to_hva(vcpu->kvm, gfn), &arch);
3421 static bool can_do_async_pf(struct kvm_vcpu *vcpu)
3423 if (unlikely(!irqchip_in_kernel(vcpu->kvm) ||
3424 kvm_event_needs_reinjection(vcpu)))
3425 return false;
3427 return kvm_x86_ops->interrupt_allowed(vcpu);
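/*
 * Try to resolve @gfn to a pfn without sleeping.  If the page is not
 * immediately available, either arrange for an asynchronous page fault
 * (or halt the vcpu until the page arrives) and return true, or fall
 * back to a blocking gfn_to_pfn_prot() and return false.
 */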
3430 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
3431 gva_t gva, pfn_t *pfn, bool write, bool *writable)
3433 bool async;
3435 *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable);
3437 if (!async)
3438 return false; /* *pfn has correct page already */
3440 if (!prefault && can_do_async_pf(vcpu)) {
3441 trace_kvm_try_async_get_page(gva, gfn);
3442 if (kvm_find_async_pf_gfn(vcpu, gfn)) {
3443 trace_kvm_async_pf_doublefault(gva, gfn);
3444 kvm_make_request(KVM_REQ_APF_HALT, vcpu);
3445 return true;
3446 } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
3447 return true;
3450 *pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable);
3452 return false;
3455 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
3456 bool prefault)
3458 pfn_t pfn;
3459 int r;
3460 int level;
3461 int force_pt_level;
3462 gfn_t gfn = gpa >> PAGE_SHIFT;
3463 unsigned long mmu_seq;
3464 int write = error_code & PFERR_WRITE_MASK;
3465 bool map_writable;
3467 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
3469 if (unlikely(error_code & PFERR_RSVD_MASK)) {
3470 r = handle_mmio_page_fault(vcpu, gpa, error_code, true);
3472 if (likely(r != RET_MMIO_PF_INVALID))
3473 return r;
3476 r = mmu_topup_memory_caches(vcpu);
3477 if (r)
3478 return r;
3480 force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
3481 if (likely(!force_pt_level)) {
3482 level = mapping_level(vcpu, gfn);
3483 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
3484 } else
3485 level = PT_PAGE_TABLE_LEVEL;
3487 if (fast_page_fault(vcpu, gpa, level, error_code))
3488 return 0;
3490 mmu_seq = vcpu->kvm->mmu_notifier_seq;
3491 smp_rmb();
3493 if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
3494 return 0;
3496 if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
3497 return r;
3499 spin_lock(&vcpu->kvm->mmu_lock);
3500 if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
3501 goto out_unlock;
3502 make_mmu_pages_available(vcpu);
3503 if (likely(!force_pt_level))
3504 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
3505 r = __direct_map(vcpu, gpa, write, map_writable,
3506 level, gfn, pfn, prefault);
3507 spin_unlock(&vcpu->kvm->mmu_lock);
3509 return r;
3511 out_unlock:
3512 spin_unlock(&vcpu->kvm->mmu_lock);
3513 kvm_release_pfn_clean(pfn);
3514 return 0;
3517 static void nonpaging_init_context(struct kvm_vcpu *vcpu,
3518 struct kvm_mmu *context)
3520 context->page_fault = nonpaging_page_fault;
3521 context->gva_to_gpa = nonpaging_gva_to_gpa;
3522 context->sync_page = nonpaging_sync_page;
3523 context->invlpg = nonpaging_invlpg;
3524 context->update_pte = nonpaging_update_pte;
3525 context->root_level = 0;
3526 context->shadow_root_level = PT32E_ROOT_LEVEL;
3527 context->root_hpa = INVALID_PAGE;
3528 context->direct_map = true;
3529 context->nx = false;
3532 void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu)
3534 mmu_free_roots(vcpu);
3537 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
3539 return kvm_read_cr3(vcpu);
3542 static void inject_page_fault(struct kvm_vcpu *vcpu,
3543 struct x86_exception *fault)
3545 vcpu->arch.mmu.inject_page_fault(vcpu, fault);
3548 static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
3549 unsigned access, int *nr_present)
3551 if (unlikely(is_mmio_spte(*sptep))) {
3552 if (gfn != get_mmio_spte_gfn(*sptep)) {
3553 mmu_spte_clear_no_track(sptep);
3554 return true;
3557 (*nr_present)++;
3558 mark_mmio_spte(kvm, sptep, gfn, access);
3559 return true;
3562 return false;
3565 static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte)
3567 unsigned index;
3569 index = level - 1;
3570 index |= (gpte & PT_PAGE_SIZE_MASK) >> (PT_PAGE_SIZE_SHIFT - 2);
3571 return mmu->last_pte_bitmap & (1 << index);
3574 #define PTTYPE_EPT 18 /* arbitrary */
3575 #define PTTYPE PTTYPE_EPT
3576 #include "paging_tmpl.h"
3577 #undef PTTYPE
3579 #define PTTYPE 64
3580 #include "paging_tmpl.h"
3581 #undef PTTYPE
3583 #define PTTYPE 32
3584 #include "paging_tmpl.h"
3585 #undef PTTYPE
3587 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
3588 struct kvm_mmu *context)
3590 int maxphyaddr = cpuid_maxphyaddr(vcpu);
3591 u64 exb_bit_rsvd = 0;
3592 u64 gbpages_bit_rsvd = 0;
3593 u64 nonleaf_bit8_rsvd = 0;
3595 context->bad_mt_xwr = 0;
3597 if (!context->nx)
3598 exb_bit_rsvd = rsvd_bits(63, 63);
3599 if (!guest_cpuid_has_gbpages(vcpu))
3600 gbpages_bit_rsvd = rsvd_bits(7, 7);
3603 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
3604 * leaf entries) on AMD CPUs only.
3606 if (guest_cpuid_is_amd(vcpu))
3607 nonleaf_bit8_rsvd = rsvd_bits(8, 8);
3609 switch (context->root_level) {
3610 case PT32_ROOT_LEVEL:
3611 /* no rsvd bits for 2 level 4K page table entries */
3612 context->rsvd_bits_mask[0][1] = 0;
3613 context->rsvd_bits_mask[0][0] = 0;
3614 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
3616 if (!is_pse(vcpu)) {
3617 context->rsvd_bits_mask[1][1] = 0;
3618 break;
3621 if (is_cpuid_PSE36())
3622 /* 36bits PSE 4MB page */
3623 context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
3624 else
3625 /* 32 bits PSE 4MB page */
3626 context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
3627 break;
3628 case PT32E_ROOT_LEVEL:
3629 context->rsvd_bits_mask[0][2] =
3630 rsvd_bits(maxphyaddr, 63) |
3631 rsvd_bits(5, 8) | rsvd_bits(1, 2); /* PDPTE */
3632 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
3633 rsvd_bits(maxphyaddr, 62); /* PDE */
3634 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
3635 rsvd_bits(maxphyaddr, 62); /* PTE */
3636 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
3637 rsvd_bits(maxphyaddr, 62) |
3638 rsvd_bits(13, 20); /* large page */
3639 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
3640 break;
3641 case PT64_ROOT_LEVEL:
3642 context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
3643 nonleaf_bit8_rsvd | rsvd_bits(7, 7) | rsvd_bits(maxphyaddr, 51);
3644 context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
3645 nonleaf_bit8_rsvd | gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51);
3646 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
3647 rsvd_bits(maxphyaddr, 51);
3648 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
3649 rsvd_bits(maxphyaddr, 51);
3650 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
3651 context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
3652 gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) |
3653 rsvd_bits(13, 29);
3654 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
3655 rsvd_bits(maxphyaddr, 51) |
3656 rsvd_bits(13, 20); /* large page */
3657 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
3658 break;
3662 static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
3663 struct kvm_mmu *context, bool execonly)
3665 int maxphyaddr = cpuid_maxphyaddr(vcpu);
3666 int pte;
3668 context->rsvd_bits_mask[0][3] =
3669 rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
3670 context->rsvd_bits_mask[0][2] =
3671 rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
3672 context->rsvd_bits_mask[0][1] =
3673 rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
3674 context->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51);
3676 /* large page */
3677 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
3678 context->rsvd_bits_mask[1][2] =
3679 rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29);
3680 context->rsvd_bits_mask[1][1] =
3681 rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20);
3682 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
3684 for (pte = 0; pte < 64; pte++) {
3685 int rwx_bits = pte & 7;
3686 int mt = pte >> 3;
3687 if (mt == 0x2 || mt == 0x3 || mt == 0x7 ||
3688 rwx_bits == 0x2 || rwx_bits == 0x6 ||
3689 (rwx_bits == 0x4 && !execonly))
3690 context->bad_mt_xwr |= (1ull << pte);
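/*
 * Precompute, for every page fault error code (the byte index) and
 * every combination of pte access bits (the bit index), whether the
 * access faults.  This lets permission_fault() reduce a permission
 * check to a single table lookup.
 */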
3694 static void update_permission_bitmask(struct kvm_vcpu *vcpu,
3695 struct kvm_mmu *mmu, bool ept)
3697 unsigned bit, byte, pfec;
3698 u8 map;
3699 bool fault, x, w, u, wf, uf, ff, smapf, cr4_smap, cr4_smep, smap = 0;
3701 cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
3702 cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
3703 for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
3704 pfec = byte << 1;
3705 map = 0;
3706 wf = pfec & PFERR_WRITE_MASK;
3707 uf = pfec & PFERR_USER_MASK;
3708 ff = pfec & PFERR_FETCH_MASK;
3710 * PFERR_RSVD_MASK bit is set in PFEC if the access is not
3711 * subject to SMAP restrictions, and cleared otherwise. The
3712 * bit is only meaningful if the SMAP bit is set in CR4.
3714 smapf = !(pfec & PFERR_RSVD_MASK);
3715 for (bit = 0; bit < 8; ++bit) {
3716 x = bit & ACC_EXEC_MASK;
3717 w = bit & ACC_WRITE_MASK;
3718 u = bit & ACC_USER_MASK;
3720 if (!ept) {
3721 /* Not really needed: !nx will cause pte.nx to fault */
3722 x |= !mmu->nx;
3723 /* Allow supervisor writes if !cr0.wp */
3724 w |= !is_write_protection(vcpu) && !uf;
3725 /* Disallow supervisor fetches of user code if cr4.smep */
3726 x &= !(cr4_smep && u && !uf);
3729 * SMAP:kernel-mode data accesses from user-mode
3730 * mappings should fault. A fault is considered
3731 * as a SMAP violation if all of the following
3732 * conditions are true:
3733 * - X86_CR4_SMAP is set in CR4
3734 * - A user page is accessed
3735 * - Page fault in kernel mode
3736 * - if CPL = 3 or X86_EFLAGS_AC is clear
3738 * Here, we cover the first three conditions.
3739 * The fourth is computed dynamically in
3740 * permission_fault() and is in smapf.
3742 * Also, SMAP does not affect instruction
3743 * fetches, add the !ff check here to make it
3744 * clearer.
3746 smap = cr4_smap && u && !uf && !ff;
3747 } else
3748 /* Not really needed: no U/S accesses on ept */
3749 u = 1;
3751 fault = (ff && !x) || (uf && !u) || (wf && !w) ||
3752 (smapf && smap);
3753 map |= fault << bit;
3755 mmu->permissions[byte] = map;
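/*
 * Precompute which (level, PS bit) combinations denote a leaf guest
 * pte, so that is_last_gpte() above becomes a simple bitmap test.
 */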
3759 static void update_last_pte_bitmap(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
3761 u8 map;
3762 unsigned level, root_level = mmu->root_level;
3763 const unsigned ps_set_index = 1 << 2; /* bit 2 of index: ps */
3765 if (root_level == PT32E_ROOT_LEVEL)
3766 --root_level;
3767 /* PT_PAGE_TABLE_LEVEL always terminates */
3768 map = 1 | (1 << ps_set_index);
3769 for (level = PT_DIRECTORY_LEVEL; level <= root_level; ++level) {
3770 if (level <= PT_PDPE_LEVEL
3771 && (mmu->root_level >= PT32E_ROOT_LEVEL || is_pse(vcpu)))
3772 map |= 1 << (ps_set_index | (level - 1));
3774 mmu->last_pte_bitmap = map;
3777 static void paging64_init_context_common(struct kvm_vcpu *vcpu,
3778 struct kvm_mmu *context,
3779 int level)
3781 context->nx = is_nx(vcpu);
3782 context->root_level = level;
3784 reset_rsvds_bits_mask(vcpu, context);
3785 update_permission_bitmask(vcpu, context, false);
3786 update_last_pte_bitmap(vcpu, context);
3788 MMU_WARN_ON(!is_pae(vcpu));
3789 context->page_fault = paging64_page_fault;
3790 context->gva_to_gpa = paging64_gva_to_gpa;
3791 context->sync_page = paging64_sync_page;
3792 context->invlpg = paging64_invlpg;
3793 context->update_pte = paging64_update_pte;
3794 context->shadow_root_level = level;
3795 context->root_hpa = INVALID_PAGE;
3796 context->direct_map = false;
3799 static void paging64_init_context(struct kvm_vcpu *vcpu,
3800 struct kvm_mmu *context)
3802 paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
3805 static void paging32_init_context(struct kvm_vcpu *vcpu,
3806 struct kvm_mmu *context)
3808 context->nx = false;
3809 context->root_level = PT32_ROOT_LEVEL;
3811 reset_rsvds_bits_mask(vcpu, context);
3812 update_permission_bitmask(vcpu, context, false);
3813 update_last_pte_bitmap(vcpu, context);
3815 context->page_fault = paging32_page_fault;
3816 context->gva_to_gpa = paging32_gva_to_gpa;
3817 context->sync_page = paging32_sync_page;
3818 context->invlpg = paging32_invlpg;
3819 context->update_pte = paging32_update_pte;
3820 context->shadow_root_level = PT32E_ROOT_LEVEL;
3821 context->root_hpa = INVALID_PAGE;
3822 context->direct_map = false;
3825 static void paging32E_init_context(struct kvm_vcpu *vcpu,
3826 struct kvm_mmu *context)
3828 paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
3831 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
3833 struct kvm_mmu *context = &vcpu->arch.mmu;
3835 context->base_role.word = 0;
3836 context->page_fault = tdp_page_fault;
3837 context->sync_page = nonpaging_sync_page;
3838 context->invlpg = nonpaging_invlpg;
3839 context->update_pte = nonpaging_update_pte;
3840 context->shadow_root_level = kvm_x86_ops->get_tdp_level();
3841 context->root_hpa = INVALID_PAGE;
3842 context->direct_map = true;
3843 context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
3844 context->get_cr3 = get_cr3;
3845 context->get_pdptr = kvm_pdptr_read;
3846 context->inject_page_fault = kvm_inject_page_fault;
3848 if (!is_paging(vcpu)) {
3849 context->nx = false;
3850 context->gva_to_gpa = nonpaging_gva_to_gpa;
3851 context->root_level = 0;
3852 } else if (is_long_mode(vcpu)) {
3853 context->nx = is_nx(vcpu);
3854 context->root_level = PT64_ROOT_LEVEL;
3855 reset_rsvds_bits_mask(vcpu, context);
3856 context->gva_to_gpa = paging64_gva_to_gpa;
3857 } else if (is_pae(vcpu)) {
3858 context->nx = is_nx(vcpu);
3859 context->root_level = PT32E_ROOT_LEVEL;
3860 reset_rsvds_bits_mask(vcpu, context);
3861 context->gva_to_gpa = paging64_gva_to_gpa;
3862 } else {
3863 context->nx = false;
3864 context->root_level = PT32_ROOT_LEVEL;
3865 reset_rsvds_bits_mask(vcpu, context);
3866 context->gva_to_gpa = paging32_gva_to_gpa;
3869 update_permission_bitmask(vcpu, context, false);
3870 update_last_pte_bitmap(vcpu, context);
3873 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
3875 bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
3876 bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
3877 struct kvm_mmu *context = &vcpu->arch.mmu;
3879 MMU_WARN_ON(VALID_PAGE(context->root_hpa));
3881 if (!is_paging(vcpu))
3882 nonpaging_init_context(vcpu, context);
3883 else if (is_long_mode(vcpu))
3884 paging64_init_context(vcpu, context);
3885 else if (is_pae(vcpu))
3886 paging32E_init_context(vcpu, context);
3887 else
3888 paging32_init_context(vcpu, context);
3890 context->base_role.nxe = is_nx(vcpu);
3891 context->base_role.cr4_pae = !!is_pae(vcpu);
3892 context->base_role.cr0_wp = is_write_protection(vcpu);
3893 context->base_role.smep_andnot_wp
3894 = smep && !is_write_protection(vcpu);
3895 context->base_role.smap_andnot_wp
3896 = smap && !is_write_protection(vcpu);
3898 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
3900 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
3902 struct kvm_mmu *context = &vcpu->arch.mmu;
3904 MMU_WARN_ON(VALID_PAGE(context->root_hpa));
3906 context->shadow_root_level = kvm_x86_ops->get_tdp_level();
3908 context->nx = true;
3909 context->page_fault = ept_page_fault;
3910 context->gva_to_gpa = ept_gva_to_gpa;
3911 context->sync_page = ept_sync_page;
3912 context->invlpg = ept_invlpg;
3913 context->update_pte = ept_update_pte;
3914 context->root_level = context->shadow_root_level;
3915 context->root_hpa = INVALID_PAGE;
3916 context->direct_map = false;
3918 update_permission_bitmask(vcpu, context, true);
3919 reset_rsvds_bits_mask_ept(vcpu, context, execonly);
3921 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
3923 static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
3925 struct kvm_mmu *context = &vcpu->arch.mmu;
3927 kvm_init_shadow_mmu(vcpu);
3928 context->set_cr3 = kvm_x86_ops->set_cr3;
3929 context->get_cr3 = get_cr3;
3930 context->get_pdptr = kvm_pdptr_read;
3931 context->inject_page_fault = kvm_inject_page_fault;
3934 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
3936 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
3938 g_context->get_cr3 = get_cr3;
3939 g_context->get_pdptr = kvm_pdptr_read;
3940 g_context->inject_page_fault = kvm_inject_page_fault;
3943 * Note that arch.mmu.gva_to_gpa translates l2_gva to l1_gpa. The
3944 * translation of l2_gpa to l1_gpa addresses is done using the
3945 * arch.nested_mmu.gva_to_gpa function. Basically the gva_to_gpa
3946 * functions between mmu and nested_mmu are swapped.
3948 if (!is_paging(vcpu)) {
3949 g_context->nx = false;
3950 g_context->root_level = 0;
3951 g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
3952 } else if (is_long_mode(vcpu)) {
3953 g_context->nx = is_nx(vcpu);
3954 g_context->root_level = PT64_ROOT_LEVEL;
3955 reset_rsvds_bits_mask(vcpu, g_context);
3956 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
3957 } else if (is_pae(vcpu)) {
3958 g_context->nx = is_nx(vcpu);
3959 g_context->root_level = PT32E_ROOT_LEVEL;
3960 reset_rsvds_bits_mask(vcpu, g_context);
3961 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
3962 } else {
3963 g_context->nx = false;
3964 g_context->root_level = PT32_ROOT_LEVEL;
3965 reset_rsvds_bits_mask(vcpu, g_context);
3966 g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
3969 update_permission_bitmask(vcpu, g_context, false);
3970 update_last_pte_bitmap(vcpu, g_context);
3973 static void init_kvm_mmu(struct kvm_vcpu *vcpu)
3975 if (mmu_is_nested(vcpu))
3976 init_kvm_nested_mmu(vcpu);
3977 else if (tdp_enabled)
3978 init_kvm_tdp_mmu(vcpu);
3979 else
3980 init_kvm_softmmu(vcpu);
3983 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
3985 kvm_mmu_unload(vcpu);
3986 init_kvm_mmu(vcpu);
3988 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
3990 int kvm_mmu_load(struct kvm_vcpu *vcpu)
3992 int r;
3994 r = mmu_topup_memory_caches(vcpu);
3995 if (r)
3996 goto out;
3997 r = mmu_alloc_roots(vcpu);
3998 kvm_mmu_sync_roots(vcpu);
3999 if (r)
4000 goto out;
4001 /* set_cr3() should ensure TLB has been flushed */
4002 vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
4003 out:
4004 return r;
4006 EXPORT_SYMBOL_GPL(kvm_mmu_load);
4008 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
4010 mmu_free_roots(vcpu);
4011 WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
4013 EXPORT_SYMBOL_GPL(kvm_mmu_unload);
4015 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
4016 struct kvm_mmu_page *sp, u64 *spte,
4017 const void *new)
4019 if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
4020 ++vcpu->kvm->stat.mmu_pde_zapped;
4021 return;
4024 ++vcpu->kvm->stat.mmu_pte_updated;
4025 vcpu->arch.mmu.update_pte(vcpu, sp, spte, new);
4028 static bool need_remote_flush(u64 old, u64 new)
4030 if (!is_shadow_present_pte(old))
4031 return false;
4032 if (!is_shadow_present_pte(new))
4033 return true;
4034 if ((old ^ new) & PT64_BASE_ADDR_MASK)
4035 return true;
4036 old ^= shadow_nx_mask;
4037 new ^= shadow_nx_mask;
4038 return (old & ~new & PT64_PERM_MASK) != 0;
4041 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
4042 bool remote_flush, bool local_flush)
4044 if (zap_page)
4045 return;
4047 if (remote_flush)
4048 kvm_flush_remote_tlbs(vcpu->kvm);
4049 else if (local_flush)
4050 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4053 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
4054 const u8 *new, int *bytes)
4056 u64 gentry;
4057 int r;
4060 * Assume that the pte write is on a page table of the same type
4061 * as the current vcpu paging mode, since we update the sptes only
4062 * when they have the same mode.
4064 if (is_pae(vcpu) && *bytes == 4) {
4065 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
4066 *gpa &= ~(gpa_t)7;
4067 *bytes = 8;
4068 r = kvm_read_guest(vcpu->kvm, *gpa, &gentry, 8);
4069 if (r)
4070 gentry = 0;
4071 new = (const u8 *)&gentry;
4074 switch (*bytes) {
4075 case 4:
4076 gentry = *(const u32 *)new;
4077 break;
4078 case 8:
4079 gentry = *(const u64 *)new;
4080 break;
4081 default:
4082 gentry = 0;
4083 break;
4086 return gentry;
4090 * If we're seeing too many writes to a page, it may no longer be a page table,
4091 * or we may be forking, in which case it is better to unmap the page.
4093 static bool detect_write_flooding(struct kvm_mmu_page *sp)
4096 * Skip write-flooding detection for the sp whose level is 1, because
4097 * it can become unsync, and then the guest page is not write-protected.
4099 if (sp->role.level == PT_PAGE_TABLE_LEVEL)
4100 return false;
4102 return ++sp->write_flooding_count >= 3;
4106 * Misaligned accesses are too much trouble to fix up; also, they usually
4107 * indicate a page is not used as a page table.
4109 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
4110 int bytes)
4112 unsigned offset, pte_size, misaligned;
4114 pgprintk("misaligned: gpa %llx bytes %d role %x\n",
4115 gpa, bytes, sp->role.word);
4117 offset = offset_in_page(gpa);
4118 pte_size = sp->role.cr4_pae ? 8 : 4;
4121 * Sometimes, the OS only writes the last byte to update status
4122 * bits; for example, in Linux the andb instruction is used in clear_bit().
4124 if (!(offset & (pte_size - 1)) && bytes == 1)
4125 return false;
4127 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
4128 misaligned |= bytes < 4;
4130 return misaligned;
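/*
 * Map a written gpa back to the spte(s) that shadow it: returns a
 * pointer into sp->spt plus the count via @nspte, or NULL when the
 * write lands in a quadrant this sp does not shadow.
 */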
4133 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
4135 unsigned page_offset, quadrant;
4136 u64 *spte;
4137 int level;
4139 page_offset = offset_in_page(gpa);
4140 level = sp->role.level;
4141 *nspte = 1;
4142 if (!sp->role.cr4_pae) {
4143 page_offset <<= 1; /* 32->64 */
4145 * A 32-bit pde maps 4MB while the shadow pdes map
4146 * only 2MB. So we need to double the offset again
4147 * and zap two pdes instead of one.
4149 if (level == PT32_ROOT_LEVEL) {
4150 page_offset &= ~7; /* kill rounding error */
4151 page_offset <<= 1;
4152 *nspte = 2;
4154 quadrant = page_offset >> PAGE_SHIFT;
4155 page_offset &= ~PAGE_MASK;
4156 if (quadrant != sp->role.quadrant)
4157 return NULL;
4160 spte = &sp->spt[page_offset / sizeof(*spte)];
4161 return spte;
4164 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
4165 const u8 *new, int bytes)
4167 gfn_t gfn = gpa >> PAGE_SHIFT;
4168 struct kvm_mmu_page *sp;
4169 LIST_HEAD(invalid_list);
4170 u64 entry, gentry, *spte;
4171 int npte;
4172 bool remote_flush, local_flush, zap_page;
4173 union kvm_mmu_page_role mask = { };
4175 mask.cr0_wp = 1;
4176 mask.cr4_pae = 1;
4177 mask.nxe = 1;
4178 mask.smep_andnot_wp = 1;
4179 mask.smap_andnot_wp = 1;
4182 * If we don't have indirect shadow pages, it means no page is
4183 * write-protected, so we can exit simply.
4185 if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
4186 return;
4188 zap_page = remote_flush = local_flush = false;
4190 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
4192 gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);
4195 * No need to care whether the memory allocation is successful
4196 * or not, since pte prefetch is skipped if it does not have
4197 * enough objects in the cache.
4199 mmu_topup_memory_caches(vcpu);
4201 spin_lock(&vcpu->kvm->mmu_lock);
4202 ++vcpu->kvm->stat.mmu_pte_write;
4203 kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
4205 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
4206 if (detect_write_misaligned(sp, gpa, bytes) ||
4207 detect_write_flooding(sp)) {
4208 zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
4209 &invalid_list);
4210 ++vcpu->kvm->stat.mmu_flooded;
4211 continue;
4214 spte = get_written_sptes(sp, gpa, &npte);
4215 if (!spte)
4216 continue;
4218 local_flush = true;
4219 while (npte--) {
4220 entry = *spte;
4221 mmu_page_zap_pte(vcpu->kvm, sp, spte);
4222 if (gentry &&
4223 !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
4224 & mask.word) && rmap_can_add(vcpu))
4225 mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
4226 if (need_remote_flush(entry, *spte))
4227 remote_flush = true;
4228 ++spte;
4231 mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
4232 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
4233 kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
4234 spin_unlock(&vcpu->kvm->mmu_lock);
4237 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
4239 gpa_t gpa;
4240 int r;
4242 if (vcpu->arch.mmu.direct_map)
4243 return 0;
4245 gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
4247 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
4249 return r;
4251 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
4253 static void make_mmu_pages_available(struct kvm_vcpu *vcpu)
4255 LIST_HEAD(invalid_list);
4257 if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
4258 return;
4260 while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
4261 if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
4262 break;
4264 ++vcpu->kvm->stat.mmu_recycled;
4265 }
4266 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
4267 }
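/*
 * Check whether the faulting address matches the cached MMIO access:
 * compared as a gpa when using TDP or a nested MMU, and as a gva
 * otherwise.
 */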
4269 static bool is_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t addr)
4271 if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu))
4272 return vcpu_match_mmio_gpa(vcpu, addr);
4274 return vcpu_match_mmio_gva(vcpu, addr);
4277 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
4278 void *insn, int insn_len)
4280 int r, emulation_type = EMULTYPE_RETRY;
4281 enum emulation_result er;
4283 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
4284 if (r < 0)
4285 goto out;
4287 if (!r) {
4288 r = 1;
4289 goto out;
4292 if (is_mmio_page_fault(vcpu, cr2))
4293 emulation_type = 0;
4295 er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
4297 switch (er) {
4298 case EMULATE_DONE:
4299 return 1;
4300 case EMULATE_USER_EXIT:
4301 ++vcpu->stat.mmio_exits;
4302 /* fall through */
4303 case EMULATE_FAIL:
4304 return 0;
4305 default:
4306 BUG();
4308 out:
4309 return r;
4311 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
4313 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
4315 vcpu->arch.mmu.invlpg(vcpu, gva);
4316 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4317 ++vcpu->stat.invlpg;
4319 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
4321 void kvm_enable_tdp(void)
4323 tdp_enabled = true;
4325 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
4327 void kvm_disable_tdp(void)
4329 tdp_enabled = false;
4331 EXPORT_SYMBOL_GPL(kvm_disable_tdp);
4333 static void free_mmu_pages(struct kvm_vcpu *vcpu)
4335 free_page((unsigned long)vcpu->arch.mmu.pae_root);
4336 if (vcpu->arch.mmu.lm_root != NULL)
4337 free_page((unsigned long)vcpu->arch.mmu.lm_root);
4340 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
4342 struct page *page;
4343 int i;
4345 /*
4346 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
4347 * Therefore we need to allocate shadow page tables in the first
4348 * 4GB of memory, which happens to fit the DMA32 zone.
4349 */
4350 page = alloc_page(GFP_KERNEL | __GFP_DMA32);
4351 if (!page)
4352 return -ENOMEM;
4354 vcpu->arch.mmu.pae_root = page_address(page);
4355 for (i = 0; i < 4; ++i)
4356 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
4358 return 0;
4361 int kvm_mmu_create(struct kvm_vcpu *vcpu)
4363 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
4364 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
4365 vcpu->arch.mmu.translate_gpa = translate_gpa;
4366 vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
4368 return alloc_mmu_pages(vcpu);
4371 void kvm_mmu_setup(struct kvm_vcpu *vcpu)
4373 MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
4375 init_kvm_mmu(vcpu);
4376 }
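/*
 * Write-protect every spte, at every page size, that maps this memslot,
 * so that subsequent guest writes to the slot fault and can be tracked.
 */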
4378 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
4379 struct kvm_memory_slot *memslot)
4381 gfn_t last_gfn;
4382 int i;
4383 bool flush = false;
4385 last_gfn = memslot->base_gfn + memslot->npages - 1;
4387 spin_lock(&kvm->mmu_lock);
4389 for (i = PT_PAGE_TABLE_LEVEL;
4390 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
4391 unsigned long *rmapp;
4392 unsigned long last_index, index;
4394 rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
4395 last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
4397 for (index = 0; index <= last_index; ++index, ++rmapp) {
4398 if (*rmapp)
4399 flush |= __rmap_write_protect(kvm, rmapp,
4400 false);
4402 if (need_resched() || spin_needbreak(&kvm->mmu_lock))
4403 cond_resched_lock(&kvm->mmu_lock);
4407 spin_unlock(&kvm->mmu_lock);
4409 /*
4410 * kvm_mmu_slot_remove_write_access() and kvm_vm_ioctl_get_dirty_log(),
4411 * which flush the TLB outside of mmu_lock, must be serialized by
4412 * kvm->slots_lock; otherwise a needed TLB flush could be missed.
4413 */
4414 lockdep_assert_held(&kvm->slots_lock);
4416 /*
4417 * We can flush all the TLBs out of the mmu lock without TLB
4418 * corruption, since we only change sptes from writable to read-only.
4419 * The only case that needs care is changing an spte from present to
4420 * present (changing an spte from present to nonpresent flushes all
4421 * the TLBs immediately); in other words, the only path that matters
4422 * is mmu_spte_update(), which checks
4423 * SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE instead of
4424 * PT_WRITABLE_MASK and therefore no longer depends on
4425 * PT_WRITABLE_MASK.
4426 */
4427 if (flush)
4428 kvm_flush_remote_tlbs(kvm);
4429 }
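/*
 * Drop 4K sptes that map pfns belonging to a transparent huge page, so
 * that the next fault on the gfn can be mapped with a large page again.
 */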
4431 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
4432 unsigned long *rmapp)
4434 u64 *sptep;
4435 struct rmap_iterator iter;
4436 int need_tlb_flush = 0;
4437 pfn_t pfn;
4438 struct kvm_mmu_page *sp;
4440 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
4441 BUG_ON(!(*sptep & PT_PRESENT_MASK));
4443 sp = page_header(__pa(sptep));
4444 pfn = spte_to_pfn(*sptep);
4446 /*
4447 * We cannot do huge page mapping for indirect shadow pages,
4448 * which are found on the last rmap (level = 1) when not using
4449 * tdp; such shadow pages are synced with the guest page table,
4450 * and the guest page table uses 4K mappings whenever the
4451 * indirect sp has level = 1.
4452 */
4453 if (sp->role.direct &&
4454 !kvm_is_reserved_pfn(pfn) &&
4455 PageTransCompound(pfn_to_page(pfn))) {
4456 drop_spte(kvm, sptep);
4457 sptep = rmap_get_first(*rmapp, &iter);
4458 need_tlb_flush = 1;
4459 } else
4460 sptep = rmap_get_next(&iter);
4463 return need_tlb_flush;
4466 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
4467 struct kvm_memory_slot *memslot)
4469 bool flush = false;
4470 unsigned long *rmapp;
4471 unsigned long last_index, index;
4473 spin_lock(&kvm->mmu_lock);
4475 rmapp = memslot->arch.rmap[0];
4476 last_index = gfn_to_index(memslot->base_gfn + memslot->npages - 1,
4477 memslot->base_gfn, PT_PAGE_TABLE_LEVEL);
4479 for (index = 0; index <= last_index; ++index, ++rmapp) {
4480 if (*rmapp)
4481 flush |= kvm_mmu_zap_collapsible_spte(kvm, rmapp);
4483 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
4484 if (flush) {
4485 kvm_flush_remote_tlbs(kvm);
4486 flush = false;
4488 cond_resched_lock(&kvm->mmu_lock);
4492 if (flush)
4493 kvm_flush_remote_tlbs(kvm);
4495 spin_unlock(&kvm->mmu_lock);
4496 }
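/*
 * Clear the dirty status of every leaf (4K) spte that maps this
 * memslot; only used for dirty logging (see the comment below about
 * flushing TLBs outside of mmu_lock).
 */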
4498 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
4499 struct kvm_memory_slot *memslot)
4501 gfn_t last_gfn;
4502 unsigned long *rmapp;
4503 unsigned long last_index, index;
4504 bool flush = false;
4506 last_gfn = memslot->base_gfn + memslot->npages - 1;
4508 spin_lock(&kvm->mmu_lock);
4510 rmapp = memslot->arch.rmap[PT_PAGE_TABLE_LEVEL - 1];
4511 last_index = gfn_to_index(last_gfn, memslot->base_gfn,
4512 PT_PAGE_TABLE_LEVEL);
4514 for (index = 0; index <= last_index; ++index, ++rmapp) {
4515 if (*rmapp)
4516 flush |= __rmap_clear_dirty(kvm, rmapp);
4518 if (need_resched() || spin_needbreak(&kvm->mmu_lock))
4519 cond_resched_lock(&kvm->mmu_lock);
4522 spin_unlock(&kvm->mmu_lock);
4524 lockdep_assert_held(&kvm->slots_lock);
4526 /*
4527 * It's also safe to flush TLBs out of mmu lock here as currently this
4528 * function is only used for dirty logging, in which case flushing the
4529 * TLB out of mmu lock also guarantees that no dirty pages will be
4530 * lost in the dirty_bitmap.
4531 */
4532 if (flush)
4533 kvm_flush_remote_tlbs(kvm);
4535 EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
4537 void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
4538 struct kvm_memory_slot *memslot)
4540 gfn_t last_gfn;
4541 int i;
4542 bool flush = false;
4544 last_gfn = memslot->base_gfn + memslot->npages - 1;
4546 spin_lock(&kvm->mmu_lock);
4548 for (i = PT_PAGE_TABLE_LEVEL + 1; /* skip rmap for 4K page */
4549 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
4550 unsigned long *rmapp;
4551 unsigned long last_index, index;
4553 rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
4554 last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
4556 for (index = 0; index <= last_index; ++index, ++rmapp) {
4557 if (*rmapp)
4558 flush |= __rmap_write_protect(kvm, rmapp,
4559 false);
4561 if (need_resched() || spin_needbreak(&kvm->mmu_lock))
4562 cond_resched_lock(&kvm->mmu_lock);
4565 spin_unlock(&kvm->mmu_lock);
4567 /* see kvm_mmu_slot_remove_write_access */
4568 lockdep_assert_held(&kvm->slots_lock);
4570 if (flush)
4571 kvm_flush_remote_tlbs(kvm);
4573 EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
4575 void kvm_mmu_slot_set_dirty(struct kvm *kvm,
4576 struct kvm_memory_slot *memslot)
4578 gfn_t last_gfn;
4579 int i;
4580 bool flush = false;
4582 last_gfn = memslot->base_gfn + memslot->npages - 1;
4584 spin_lock(&kvm->mmu_lock);
4586 for (i = PT_PAGE_TABLE_LEVEL;
4587 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
4588 unsigned long *rmapp;
4589 unsigned long last_index, index;
4591 rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
4592 last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
4594 for (index = 0; index <= last_index; ++index, ++rmapp) {
4595 if (*rmapp)
4596 flush |= __rmap_set_dirty(kvm, rmapp);
4598 if (need_resched() || spin_needbreak(&kvm->mmu_lock))
4599 cond_resched_lock(&kvm->mmu_lock);
4603 spin_unlock(&kvm->mmu_lock);
4605 lockdep_assert_held(&kvm->slots_lock);
4607 /* see kvm_mmu_slot_leaf_clear_dirty */
4608 if (flush)
4609 kvm_flush_remote_tlbs(kvm);
4611 EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
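/*
 * Number of obsolete pages to zap in kvm_zap_obsolete_pages() before
 * allowing a cond_resched_lock() break on mmu_lock.
 */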
4613 #define BATCH_ZAP_PAGES 10
4614 static void kvm_zap_obsolete_pages(struct kvm *kvm)
4616 struct kvm_mmu_page *sp, *node;
4617 int batch = 0;
4619 restart:
4620 list_for_each_entry_safe_reverse(sp, node,
4621 &kvm->arch.active_mmu_pages, link) {
4622 int ret;
4624 /*
4625 * No obsolete page exists before a newly created page, since
4626 * active_mmu_pages is a FIFO list.
4627 */
4628 if (!is_obsolete_sp(kvm, sp))
4629 break;
4631 /*
4632 * Since we walk the list in reverse and invalid pages are moved
4633 * to its head, skipping invalid pages lets us avoid walking the
4634 * list forever.
4635 */
4636 if (sp->role.invalid)
4637 continue;
4639 /*
4640 * No need to flush the TLB since we only zap sps with an invalid
4641 * generation number.
4642 */
4643 if (batch >= BATCH_ZAP_PAGES &&
4644 cond_resched_lock(&kvm->mmu_lock)) {
4645 batch = 0;
4646 goto restart;
4649 ret = kvm_mmu_prepare_zap_page(kvm, sp,
4650 &kvm->arch.zapped_obsolete_pages);
4651 batch += ret;
4653 if (ret)
4654 goto restart;
4655 }
4657 /*
4658 * We should flush the TLB before freeing the page tables, since a
4659 * lockless walk may still be using them.
4660 */
4661 kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
4662 }
4664 /*
4665 * Fast-invalidate all shadow pages, using a lock-break technique
4666 * to zap the obsolete pages.
4667 *
4668 * This is required when a memslot is being deleted or the VM is being
4669 * destroyed; in those cases we must ensure that the KVM MMU no longer
4670 * uses any resource of the slot being deleted (or of any slot) after
4671 * this function returns.
4672 */
4673 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
4675 spin_lock(&kvm->mmu_lock);
4676 trace_kvm_mmu_invalidate_zap_all_pages(kvm);
4677 kvm->arch.mmu_valid_gen++;
4679 /*
4680 * Notify all vcpus to reload their shadow page tables and flush
4681 * their TLBs. All vcpus will then switch to the new shadow page
4682 * table with the new mmu_valid_gen.
4683 *
4684 * Note: we must do this under the protection of mmu_lock;
4685 * otherwise a vcpu could purge a shadow page but miss the TLB
4686 * flush.
4687 */
4688 kvm_reload_remote_mmus(kvm);
4690 kvm_zap_obsolete_pages(kvm);
4691 spin_unlock(&kvm->mmu_lock);
4694 static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
4696 return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
4699 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
4700 {
4701 /*
4702 * The very rare case: if the generation number has wrapped around,
4703 * zap all shadow pages.
4704 */
4705 if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
4706 printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
4707 kvm_mmu_invalidate_zap_all_pages(kvm);
4708 }
4709 }
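/*
 * Shrinker callback: free shadow pages from at most one VM per
 * invocation, preferring VMs that already have zapped obsolete pages
 * waiting to be committed.
 */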
4711 static unsigned long
4712 mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
4714 struct kvm *kvm;
4715 int nr_to_scan = sc->nr_to_scan;
4716 unsigned long freed = 0;
4718 spin_lock(&kvm_lock);
4720 list_for_each_entry(kvm, &vm_list, vm_list) {
4721 int idx;
4722 LIST_HEAD(invalid_list);
4724 /*
4725 * Never scan more than sc->nr_to_scan VM instances.
4726 * In practice we will not hit this condition, since we do not try
4727 * to shrink more than one VM and it is very unlikely to see
4728 * !n_used_mmu_pages so many times.
4729 */
4730 if (!nr_to_scan--)
4731 break;
4732 /*
4733 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
4734 * here. We may skip a VM instance erroneously, but we do not
4735 * want to shrink a VM that has only just started to populate
4736 * its MMU anyway.
4737 */
4738 if (!kvm->arch.n_used_mmu_pages &&
4739 !kvm_has_zapped_obsolete_pages(kvm))
4740 continue;
4742 idx = srcu_read_lock(&kvm->srcu);
4743 spin_lock(&kvm->mmu_lock);
4745 if (kvm_has_zapped_obsolete_pages(kvm)) {
4746 kvm_mmu_commit_zap_page(kvm,
4747 &kvm->arch.zapped_obsolete_pages);
4748 goto unlock;
4751 if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
4752 freed++;
4753 kvm_mmu_commit_zap_page(kvm, &invalid_list);
4755 unlock:
4756 spin_unlock(&kvm->mmu_lock);
4757 srcu_read_unlock(&kvm->srcu, idx);
4759 /*
4760 * unfair on small ones
4761 * per-vm shrinkers cry out
4762 * sadness comes quickly
4763 */
4764 list_move_tail(&kvm->vm_list, &vm_list);
4765 break;
4768 spin_unlock(&kvm_lock);
4769 return freed;
4772 static unsigned long
4773 mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
4775 return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
4778 static struct shrinker mmu_shrinker = {
4779 .count_objects = mmu_shrink_count,
4780 .scan_objects = mmu_shrink_scan,
4781 .seeks = DEFAULT_SEEKS * 10,
4784 static void mmu_destroy_caches(void)
4786 if (pte_list_desc_cache)
4787 kmem_cache_destroy(pte_list_desc_cache);
4788 if (mmu_page_header_cache)
4789 kmem_cache_destroy(mmu_page_header_cache);
4792 int kvm_mmu_module_init(void)
4794 pte_list_desc_cache = kmem_cache_create("pte_list_desc",
4795 sizeof(struct pte_list_desc),
4796 0, 0, NULL);
4797 if (!pte_list_desc_cache)
4798 goto nomem;
4800 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
4801 sizeof(struct kvm_mmu_page),
4802 0, 0, NULL);
4803 if (!mmu_page_header_cache)
4804 goto nomem;
4806 if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
4807 goto nomem;
4809 register_shrinker(&mmu_shrinker);
4811 return 0;
4813 nomem:
4814 mmu_destroy_caches();
4815 return -ENOMEM;
4816 }
4818 /*
4819 * Calculate the number of mmu pages needed for the kvm.
4820 */
4821 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
4823 unsigned int nr_mmu_pages;
4824 unsigned int nr_pages = 0;
4825 struct kvm_memslots *slots;
4826 struct kvm_memory_slot *memslot;
4828 slots = kvm_memslots(kvm);
4830 kvm_for_each_memslot(memslot, slots)
4831 nr_pages += memslot->npages;
4833 nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
4834 nr_mmu_pages = max(nr_mmu_pages,
4835 (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
4837 return nr_mmu_pages;
4838 }
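/*
 * Walk the shadow page table for @addr locklessly, recording the spte
 * found at each level in @sptes; returns the number of sptes recorded.
 */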
4840 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
4842 struct kvm_shadow_walk_iterator iterator;
4843 u64 spte;
4844 int nr_sptes = 0;
4846 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
4847 return nr_sptes;
4849 walk_shadow_page_lockless_begin(vcpu);
4850 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
4851 sptes[iterator.level-1] = spte;
4852 nr_sptes++;
4853 if (!is_shadow_present_pte(spte))
4854 break;
4856 walk_shadow_page_lockless_end(vcpu);
4858 return nr_sptes;
4860 EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
4862 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
4864 kvm_mmu_unload(vcpu);
4865 free_mmu_pages(vcpu);
4866 mmu_free_memory_caches(vcpu);
4869 void kvm_mmu_module_exit(void)
4871 mmu_destroy_caches();
4872 percpu_counter_destroy(&kvm_total_used_mmu_pages);
4873 unregister_shrinker(&mmu_shrinker);
4874 mmu_audit_disable();