/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
 * for which pages need to be cached.
 */
#if defined(__PAGETABLE_PMD_FOLDED)
#define KVM_MMU_CACHE_MIN_PAGES 1
#else
#define KVM_MMU_CACHE_MIN_PAGES 2
#endif

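/*
 * Page tables for GPA mappings are allocated from a small per-VCPU cache of
 * preallocated pages (struct kvm_mmu_memory_cache), which is topped up before
 * the mmu_lock spinlock is taken so that no page allocation is needed while
 * the lock is held. With the PMD level folded only a PTE table may need
 * allocating below the PGD; otherwise both a PMD and a PTE table may be
 * needed, hence the minimum of 1 or 2 cached pages.
 */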
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];

	return p;
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

/**
 * kvm_pgd_init() - Initialise KVM GPA page directory.
 * @page:	Pointer to page directory (PGD) for KVM GPA.
 *
 * Initialise a KVM GPA page directory with pointers to the invalid table, i.e.
 * representing no mappings. This is similar to pgd_init(), however it
 * initialises all the page directory pointers, not just the ones corresponding
 * to the userland address space (since it is for the guest physical address
 * space rather than a virtual address space).
 */
static void kvm_pgd_init(void *page)
{
	unsigned long *p, *end;
	unsigned long entry;

#ifdef __PAGETABLE_PMD_FOLDED
	entry = (unsigned long)invalid_pte_table;
#else
	entry = (unsigned long)invalid_pmd_table;
#endif

	p = (unsigned long *)page;
	end = p + PTRS_PER_PGD;

	do {
		p[0] = entry;
		p[1] = entry;
		p[2] = entry;
		p[3] = entry;
		p[4] = entry;
		p += 8;
		p[-3] = entry;
		p[-2] = entry;
		p[-1] = entry;
	} while (p != end);
}

/**
 * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory.
 *
 * Allocate a blank KVM GPA page directory (PGD) for representing guest
 * physical to host physical page mappings.
 *
 * Returns:	Pointer to new KVM GPA page directory.
 *		NULL on allocation failure.
 */
pgd_t *kvm_pgd_alloc(void)
{
	pgd_t *ret;

	ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER);
	if (ret)
		kvm_pgd_init(ret);

	return ret;
}

/**
 * kvm_mips_walk_pgd() - Walk page table with optional allocation.
 * @pgd:	Page directory pointer.
 * @addr:	Address to index page table using.
 * @cache:	MMU page cache to allocate new page tables from, or NULL.
 *
 * Walk the page tables pointed to by @pgd to find the PTE corresponding to the
 * address @addr. If page tables don't exist for @addr, they will be created
 * from the MMU cache if @cache is not NULL.
 *
 * Returns:	Pointer to pte_t corresponding to @addr.
 *		NULL if a page table doesn't exist for @addr and !@cache.
 *		NULL if a page table allocation failed.
 */
static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
				unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pgd += pgd_index(addr);
	if (pgd_none(*pgd)) {
		/* Not used on MIPS yet */
		BUG();
		return NULL;
	}
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new_pmd;

		if (!cache)
			return NULL;
		new_pmd = mmu_memory_cache_alloc(cache);
		pmd_init((unsigned long)new_pmd,
			 (unsigned long)invalid_pte_table);
		pud_populate(NULL, pud, new_pmd);
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		pte_t *new_pte;

		if (!cache)
			return NULL;
		new_pte = mmu_memory_cache_alloc(cache);
		clear_page(new_pte);
		pmd_populate_kernel(NULL, pmd, new_pte);
	}
	return pte_offset(pmd, addr);
}

/* Caller must hold kvm->mmu_lock */
static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
				   struct kvm_mmu_memory_cache *cache,
				   unsigned long addr)
{
	return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
}

/*
 * kvm_mips_flush_gpa_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest physical address space from the VM's GPA page tables.
 */

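/*
 * Each level reports whether its entire index range was covered (i_min == 0
 * and i_max == PTRS_PER_* - 1). When it was, the caller may clear the entry
 * one level up and free the now unreferenced table page.
 */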
static bool kvm_mips_flush_gpa_pte(pte_t *pte, unsigned long start_gpa,
				   unsigned long end_gpa)
{
	int i_min = __pte_offset(start_gpa);
	int i_max = __pte_offset(end_gpa);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
	int i;

	for (i = i_min; i <= i_max; ++i) {
		if (!pte_present(pte[i]))
			continue;

		set_pte(pte + i, __pte(0));
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
				   unsigned long end_gpa)
{
	pte_t *pte;
	unsigned long end = ~0ul;
	int i_min = __pmd_offset(start_gpa);
	int i_max = __pmd_offset(end_gpa);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
		if (!pmd_present(pmd[i]))
			continue;

		pte = pte_offset(pmd + i, 0);
		if (i == i_max)
			end = end_gpa;

		if (kvm_mips_flush_gpa_pte(pte, start_gpa, end)) {
			pmd_clear(pmd + i);
			pte_free_kernel(NULL, pte);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pud(pud_t *pud, unsigned long start_gpa,
				   unsigned long end_gpa)
{
	pmd_t *pmd;
	unsigned long end = ~0ul;
	int i_min = __pud_offset(start_gpa);
	int i_max = __pud_offset(end_gpa);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
		if (!pud_present(pud[i]))
			continue;

		pmd = pmd_offset(pud + i, 0);
		if (i == i_max)
			end = end_gpa;

		if (kvm_mips_flush_gpa_pmd(pmd, start_gpa, end)) {
			pud_clear(pud + i);
			pmd_free(NULL, pmd);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa,
				   unsigned long end_gpa)
{
	pud_t *pud;
	unsigned long end = ~0ul;
	int i_min = pgd_index(start_gpa);
	int i_max = pgd_index(end_gpa);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
		if (!pgd_present(pgd[i]))
			continue;

		pud = pud_offset(pgd + i, 0);
		if (i == i_max)
			end = end_gpa;

		if (kvm_mips_flush_gpa_pud(pud, start_gpa, end)) {
			pgd_clear(pgd + i);
			pud_free(NULL, pud);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

/**
 * kvm_mips_flush_gpa_pt() - Flush a range of guest physical addresses.
 * @kvm:	KVM pointer.
 * @start_gfn:	Guest frame number of first page in GPA range to flush.
 * @end_gfn:	Guest frame number of last page in GPA range to flush.
 *
 * Flushes a range of GPA mappings from the GPA page tables.
 *
 * The caller must hold the @kvm->mmu_lock spinlock.
 *
 * Returns:	Whether it's safe to remove the top level page directory because
 *		all lower levels have been removed.
 */
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
{
	return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd,
				      start_gfn << PAGE_SHIFT,
				      end_gfn << PAGE_SHIFT);
}

#define BUILD_PTE_RANGE_OP(name, op) \
static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start, \
				 unsigned long end) \
{ \
	int ret = 0; \
	int i_min = __pte_offset(start); \
	int i_max = __pte_offset(end); \
	int i; \
	pte_t old, new; \
 \
	for (i = i_min; i <= i_max; ++i) { \
		if (!pte_present(pte[i])) \
			continue; \
 \
		old = pte[i]; \
		new = op(old); \
		if (pte_val(new) == pte_val(old)) \
			continue; \
		set_pte(pte + i, new); \
		ret = 1; \
	} \
	return ret; \
} \
 \
/* returns true if anything was done */ \
static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start, \
				 unsigned long end) \
{ \
	int ret = 0; \
	pte_t *pte; \
	unsigned long cur_end = ~0ul; \
	int i_min = __pmd_offset(start); \
	int i_max = __pmd_offset(end); \
	int i; \
 \
	for (i = i_min; i <= i_max; ++i, start = 0) { \
		if (!pmd_present(pmd[i])) \
			continue; \
 \
		pte = pte_offset(pmd + i, 0); \
		if (i == i_max) \
			cur_end = end; \
 \
		ret |= kvm_mips_##name##_pte(pte, start, cur_end); \
	} \
	return ret; \
} \
 \
static int kvm_mips_##name##_pud(pud_t *pud, unsigned long start, \
				 unsigned long end) \
{ \
	int ret = 0; \
	pmd_t *pmd; \
	unsigned long cur_end = ~0ul; \
	int i_min = __pud_offset(start); \
	int i_max = __pud_offset(end); \
	int i; \
 \
	for (i = i_min; i <= i_max; ++i, start = 0) { \
		if (!pud_present(pud[i])) \
			continue; \
 \
		pmd = pmd_offset(pud + i, 0); \
		if (i == i_max) \
			cur_end = end; \
 \
		ret |= kvm_mips_##name##_pmd(pmd, start, cur_end); \
	} \
	return ret; \
} \
 \
static int kvm_mips_##name##_pgd(pgd_t *pgd, unsigned long start, \
				 unsigned long end) \
{ \
	int ret = 0; \
	pud_t *pud; \
	unsigned long cur_end = ~0ul; \
	int i_min = pgd_index(start); \
	int i_max = pgd_index(end); \
	int i; \
 \
	for (i = i_min; i <= i_max; ++i, start = 0) { \
		if (!pgd_present(pgd[i])) \
			continue; \
 \
		pud = pud_offset(pgd + i, 0); \
		if (i == i_max) \
			cur_end = end; \
 \
		ret |= kvm_mips_##name##_pud(pud, start, cur_end); \
	} \
	return ret; \
}

/*
 * kvm_mips_mkclean_gpa_pt.
 * Mark a range of guest physical address space clean (writes fault) in the VM's
 * GPA page table to allow dirty page tracking.
 */

BUILD_PTE_RANGE_OP(mkclean, pte_mkclean)

/**
 * kvm_mips_mkclean_gpa_pt() - Make a range of guest physical addresses clean.
 * @kvm:	KVM pointer.
 * @start_gfn:	Guest frame number of first page in GPA range to flush.
 * @end_gfn:	Guest frame number of last page in GPA range to flush.
 *
 * Make a range of GPA mappings clean so that guest writes will fault and
 * trigger dirty page logging.
 *
 * The caller must hold the @kvm->mmu_lock spinlock.
 *
 * Returns:	Whether any GPA mappings were modified, which would require
 *		derived mappings (GVA page tables & TLB entries) to be
 *		recalculated.
 */
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
{
	return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd,
				    start_gfn << PAGE_SHIFT,
				    end_gfn << PAGE_SHIFT);
}

/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
 * @kvm:	The KVM pointer
 * @slot:	The memory slot associated with mask
 * @gfn_offset:	The gfn offset in memory slot
 * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
 *		slot to be write protected
 *
 * Walks the bits set in @mask and write protects the associated PTEs. Caller
 * must acquire @kvm->mmu_lock.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		gfn_t gfn_offset, unsigned long mask)
{
	gfn_t base_gfn = slot->base_gfn + gfn_offset;
	gfn_t start = base_gfn + __ffs(mask);
	gfn_t end = base_gfn + __fls(mask);

	kvm_mips_mkclean_gpa_pt(kvm, start, end);
}

/*
 * kvm_mips_mkold_gpa_pt.
 * Mark a range of guest physical address space old (all accesses fault) in the
 * VM's GPA page table to allow detection of commonly used pages.
 */

BUILD_PTE_RANGE_OP(mkold, pte_mkold)

static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
				 gfn_t end_gfn)
{
	return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd,
				  start_gfn << PAGE_SHIFT,
				  end_gfn << PAGE_SHIFT);
}

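/*
 * handle_hva_to_gpa() - Apply a handler over a range of host virtual addresses.
 *
 * Iterates over the memslots, clips [start, end) against each slot's HVA
 * range, converts the overlap into a GFN range and calls @handler on it. The
 * handler return values are OR'd together and returned to the caller.
 */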
static int handle_hva_to_gpa(struct kvm *kvm,
			     unsigned long start,
			     unsigned long end,
			     int (*handler)(struct kvm *kvm, gfn_t gfn,
					    gfn_t gfn_end,
					    struct kvm_memory_slot *memslot,
					    void *data),
			     void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int ret = 0;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		ret |= handler(kvm, gfn, gfn_end, memslot, data);
	}

	return ret;
}

static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
				 struct kvm_memory_slot *memslot, void *data)
{
	kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
	return 1;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	unsigned long end = hva + PAGE_SIZE;

	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);

	kvm_mips_callbacks->flush_shadow_all(kvm);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);

	kvm_mips_callbacks->flush_shadow_all(kvm);
	return 0;
}

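/*
 * kvm_set_spte_handler() - Mirror a changed host PTE into the GPA page table.
 *
 * Propagates the new host PTE passed in @data into the GPA page table for
 * @gfn, degrading it according to the memslot flags (clean for dirty logging,
 * write protected for read-only slots). Returns whether derived mappings
 * (GVA page tables and TLBs) need to be flushed.
 */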
static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
				struct kvm_memory_slot *memslot, void *data)
{
	gpa_t gpa = gfn << PAGE_SHIFT;
	pte_t hva_pte = *(pte_t *)data;
	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
	pte_t old_pte;

	if (!gpa_pte)
		return 0;

	/* Mapping may need adjusting depending on memslot flags */
	old_pte = *gpa_pte;
	if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
		hva_pte = pte_mkclean(hva_pte);
	else if (memslot->flags & KVM_MEM_READONLY)
		hva_pte = pte_wrprotect(hva_pte);

	set_pte(gpa_pte, hva_pte);

	/* Replacing an absent or old page doesn't need flushes */
	if (!pte_present(old_pte) || !pte_young(old_pte))
		return 0;

	/* Pages swapped, aged, moved, or cleaned require flushes */
	return !pte_present(hva_pte) ||
	       !pte_young(hva_pte) ||
	       pte_pfn(old_pte) != pte_pfn(hva_pte) ||
	       (pte_dirty(old_pte) && !pte_dirty(hva_pte));
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	int ret;

	ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
	if (ret)
		kvm_mips_callbacks->flush_shadow_all(kvm);
}

static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
			       struct kvm_memory_slot *memslot, void *data)
{
	return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
}

static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
				    struct kvm_memory_slot *memslot, void *data)
{
	gpa_t gpa = gfn << PAGE_SHIFT;
	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);

	if (!gpa_pte)
		return 0;
	return pte_young(*gpa_pte);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
}

/**
 * _kvm_mips_map_page_fast() - Fast path GPA fault handler.
 * @vcpu:		VCPU pointer.
 * @gpa:		Guest physical address of fault.
 * @write_fault:	Whether the fault was due to a write.
 * @out_entry:		New PTE for @gpa (written on success unless NULL).
 * @out_buddy:		New PTE for @gpa's buddy (written on success unless
 *			NULL).
 *
 * Perform fast path GPA fault handling, doing all that can be done without
 * calling into KVM. This handles marking old pages young (for idle page
 * tracking), and dirtying of clean pages (for dirty page logging).
 *
 * Returns:	0 on success, in which case we can update derived mappings and
 *		resume guest execution.
 *		-EFAULT on failure due to absent GPA mapping or write to
 *		read-only page, in which case KVM must be consulted.
 */
static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
				   bool write_fault,
				   pte_t *out_entry, pte_t *out_buddy)
{
	struct kvm *kvm = vcpu->kvm;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	pte_t *ptep;
	kvm_pfn_t pfn = 0;	/* silence bogus GCC warning */
	bool pfn_valid = false;
	int ret = 0;

	spin_lock(&kvm->mmu_lock);

	/* Fast path - just check GPA page table for an existing entry */
	ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
	if (!ptep || !pte_present(*ptep)) {
		ret = -EFAULT;
		goto out;
	}

	/* Track access to pages marked old */
	if (!pte_young(*ptep)) {
		set_pte(ptep, pte_mkyoung(*ptep));
		pfn = pte_pfn(*ptep);
		pfn_valid = true;
		/* call kvm_set_pfn_accessed() after unlock */
	}
	if (write_fault && !pte_dirty(*ptep)) {
		if (!pte_write(*ptep)) {
			ret = -EFAULT;
			goto out;
		}

		/* Track dirtying of writeable pages */
		set_pte(ptep, pte_mkdirty(*ptep));
		pfn = pte_pfn(*ptep);
		mark_page_dirty(kvm, gfn);
		kvm_set_pfn_dirty(pfn);
	}

	if (out_entry)
		*out_entry = *ptep;
	if (out_buddy)
		*out_buddy = *ptep_buddy(ptep);

out:
	spin_unlock(&kvm->mmu_lock);
	if (pfn_valid)
		kvm_set_pfn_accessed(pfn);
	return ret;
}

/**
 * kvm_mips_map_page() - Map a guest physical page.
 * @vcpu:		VCPU pointer.
 * @gpa:		Guest physical address of fault.
 * @write_fault:	Whether the fault was due to a write.
 * @out_entry:		New PTE for @gpa (written on success unless NULL).
 * @out_buddy:		New PTE for @gpa's buddy (written on success unless
 *			NULL).
 *
 * Handle GPA faults by creating a new GPA mapping (or updating an existing
 * one).
 *
 * This takes care of marking pages young or dirty (idle/dirty page tracking),
 * asking KVM for the corresponding PFN, and creating a mapping in the GPA page
 * tables. Derived mappings (GVA page tables and TLBs) must be handled by the
 * caller.
 *
 * Returns:	0 on success, in which case the caller may use the @out_entry
 *		and @out_buddy PTEs to update derived mappings and resume guest
 *		execution.
 *		-EFAULT if there is no memory region at @gpa or a write was
 *		attempted to a read-only memory region. This is usually handled
 *		as an MMIO access.
 */
static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
			     bool write_fault,
			     pte_t *out_entry, pte_t *out_buddy)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int srcu_idx, err;
	kvm_pfn_t pfn;
	pte_t *ptep, entry, old_pte;
	bool writeable;
	unsigned long prot_bits;
	unsigned long mmu_seq;

	/* Try the fast path to handle old / clean pages */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry,
				      out_buddy);
	if (!err)
		goto out;

	/* We need a minimum of cached pages ready for page table creation */
	err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
				     KVM_NR_MEM_OBJS);
	if (err)
		goto out;

retry:
	/*
	 * Used to check for invalidations in progress, of the pfn that is
	 * returned by pfn_to_pfn_prot below.
	 */
	mmu_seq = kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
	 * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
	 * risk the page we get a reference to getting unmapped before we have a
	 * chance to grab the mmu_lock without mmu_notifier_retry() noticing.
	 *
	 * This smp_rmb() pairs with the effective smp_wmb() of the combination
	 * of the pte_unmap_unlock() after the PTE is zapped, and the
	 * spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
	 * mmu_notifier_seq is incremented.
	 */
	smp_rmb();

	/* Slow path - ask KVM core whether we can access this GPA */
	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
	if (is_error_noslot_pfn(pfn)) {
		err = -EFAULT;
		goto out;
	}

	spin_lock(&kvm->mmu_lock);
	/* Check if an invalidation has taken place since we got pfn */
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		/*
		 * This can happen when mappings are changed asynchronously, but
		 * also synchronously if a COW is triggered by
		 * gfn_to_pfn_prot().
		 */
		spin_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		goto retry;
	}

	/* Ensure page tables are allocated */
	ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);

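	/*
	 * Build the new PTE. _PAGE_WRITE is set whenever the host mapping is
	 * writeable, but the dirty bits (__WRITEABLE) are only set for an
	 * actual write fault, so the guest's first write to a clean page
	 * still faults and can be caught for dirty page logging.
	 */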
	prot_bits = _PAGE_PRESENT | __READABLE | _page_cachable_default;
	if (writeable) {
		prot_bits |= _PAGE_WRITE;
		if (write_fault) {
			prot_bits |= __WRITEABLE;
			mark_page_dirty(kvm, gfn);
			kvm_set_pfn_dirty(pfn);
		}
	}
	entry = pfn_pte(pfn, __pgprot(prot_bits));

	/* Write the PTE */
	old_pte = *ptep;
	set_pte(ptep, entry);

	err = 0;
	if (out_entry)
		*out_entry = *ptep;
	if (out_buddy)
		*out_buddy = *ptep_buddy(ptep);

	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	kvm_set_pfn_accessed(pfn);
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

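/*
 * kvm_trap_emul_pte_for_gva() - Find or create a GVA page table entry.
 *
 * Walks the guest kernel or guest user GVA page tables (depending on the
 * current guest mode) for @addr, allocating intermediate tables from the
 * VCPU's MMU page cache as needed. Returns NULL if the cache cannot be
 * topped up.
 */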
static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
					unsigned long addr)
{
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	pgd_t *pgdp;
	int ret;

	/* We need a minimum of cached pages ready for page table creation */
	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
				     KVM_NR_MEM_OBJS);
	if (ret)
		return NULL;

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		pgdp = vcpu->arch.guest_kernel_mm.pgd;
	else
		pgdp = vcpu->arch.guest_user_mm.pgd;

	return kvm_mips_walk_pgd(pgdp, memcache, addr);
}

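/*
 * GVA page table entries are managed in pairs (the even/odd pages of a TLB
 * entry), so invalidation rounds @addr down to an even page boundary and
 * clears both halves of the pair, in the guest kernel GVA tables and, when
 * @user is set, in the guest user GVA tables as well.
 */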
void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
				  bool user)
{
	pgd_t *pgdp;
	pte_t *ptep;

	addr &= PAGE_MASK << 1;

	pgdp = vcpu->arch.guest_kernel_mm.pgd;
	ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
	if (ptep) {
		ptep[0] = pfn_pte(0, __pgprot(0));
		ptep[1] = pfn_pte(0, __pgprot(0));
	}

	if (user) {
		pgdp = vcpu->arch.guest_user_mm.pgd;
		ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
		if (ptep) {
			ptep[0] = pfn_pte(0, __pgprot(0));
			ptep[1] = pfn_pte(0, __pgprot(0));
		}
	}
}

/*
 * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest virtual address space from the VM's GVA page tables.
 */

static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva,
				   unsigned long end_gva)
{
	int i_min = __pte_offset(start_gva);
	int i_max = __pte_offset(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
	int i;

	/*
	 * There's no freeing to do, so there's no point clearing individual
	 * entries unless only part of the last level page table needs flushing.
	 */
	if (safe_to_remove)
		return true;

	for (i = i_min; i <= i_max; ++i) {
		if (!pte_present(pte[i]))
			continue;

		set_pte(pte + i, __pte(0));
	}
	return false;
}

static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
				   unsigned long end_gva)
{
	pte_t *pte;
	unsigned long end = ~0ul;
	int i_min = __pmd_offset(start_gva);
	int i_max = __pmd_offset(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pmd_present(pmd[i]))
			continue;

		pte = pte_offset(pmd + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pte(pte, start_gva, end)) {
			pmd_clear(pmd + i);
			pte_free_kernel(NULL, pte);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva,
				   unsigned long end_gva)
{
	pmd_t *pmd;
	unsigned long end = ~0ul;
	int i_min = __pud_offset(start_gva);
	int i_max = __pud_offset(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pud_present(pud[i]))
			continue;

		pmd = pmd_offset(pud + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) {
			pud_clear(pud + i);
			pmd_free(NULL, pmd);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva,
				   unsigned long end_gva)
{
	pud_t *pud;
	unsigned long end = ~0ul;
	int i_min = pgd_index(start_gva);
	int i_max = pgd_index(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pgd_present(pgd[i]))
			continue;

		pud = pud_offset(pgd + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pud(pud, start_gva, end)) {
			pgd_clear(pgd + i);
			pud_free(NULL, pud);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags)
{
	if (flags & KMF_GPA) {
		/* all of guest virtual address space could be affected */
		if (flags & KMF_KERN)
			/* useg, kseg0, seg2/3 */
			kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff);
		else
			/* useg */
			kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);
	} else {
		/* useg */
		kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);

		/* kseg2/3 */
		if (flags & KMF_KERN)
			kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff);
	}
}

static pte_t kvm_mips_gpa_pte_to_gva_unmapped(pte_t pte)
{
	/*
	 * Don't leak writeable but clean entries from GPA page tables. We don't
	 * want the normal Linux tlbmod handler to handle dirtying when KVM
	 * accesses guest memory.
	 */
	if (!pte_dirty(pte))
		pte = pte_wrprotect(pte);

	return pte;
}

static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo)
{
	/* Guest EntryLo overrides host EntryLo */
	if (!(entrylo & ENTRYLO_D))
		pte = pte_mkclean(pte);

	return kvm_mips_gpa_pte_to_gva_unmapped(pte);
}

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
				      struct kvm_vcpu *vcpu,
				      bool write_fault)
{
	int ret;

	ret = kvm_mips_map_page(vcpu, badvaddr, write_fault, NULL, NULL);
	if (ret)
		return ret;

	/* Invalidate this entry in the TLB */
	return kvm_vz_host_tlb_inv(vcpu, badvaddr);
}
#endif

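/*
 * KSEG0 is an unmapped segment from the guest's point of view, so a fault
 * here is resolved by translating the address directly to a GPA and copying
 * the even/odd pair of GPA PTEs into the guest kernel's GVA page tables.
 */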
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu,
				    bool write_fault)
{
	unsigned long gpa;
	pte_t pte_gpa[2], *ptep_gva;
	int idx;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	/* Get the GPA page table entry */
	gpa = KVM_GUEST_CPHYSADDR(badvaddr);
	idx = (badvaddr >> PAGE_SHIFT) & 1;
	if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx],
			      &pte_gpa[!idx]) < 0)
		return -1;

	/* Get the GVA page table entry */
	ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, badvaddr & ~PAGE_SIZE);
	if (!ptep_gva) {
		kvm_err("No ptep for gva %lx\n", badvaddr);
		return -1;
	}

	/* Copy a pair of entries from GPA page table to GVA page table */
	ptep_gva[0] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[0]);
	ptep_gva[1] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[1]);

	/* Invalidate this entry in the TLB, guest kernel ASID only */
	kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
	return 0;
}

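/*
 * Mapped segment faults are resolved from the guest TLB entry covering @gva:
 * the GPA behind the relevant EntryLo is mapped, the buddy entry is picked up
 * if it is valid, and the resulting pair is copied into the GVA page tables
 * with the guest's EntryLo dirty bit honoured.
 */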
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long gva,
					 bool write_fault)
{
	struct kvm *kvm = vcpu->kvm;
	long tlb_lo[2];
	pte_t pte_gpa[2], *ptep_buddy, *ptep_gva;
	unsigned int idx = TLB_LO_IDX(*tlb, gva);
	bool kernel = KVM_GUEST_KERNEL_MODE(vcpu);

	tlb_lo[0] = tlb->tlb_lo[0];
	tlb_lo[1] = tlb->tlb_lo[1];

	/*
	 * The commpage address must not be mapped to anything else if the guest
	 * TLB contains entries nearby, or commpage accesses will break.
	 */
	if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1)))
		tlb_lo[TLB_LO_IDX(*tlb, KVM_GUEST_COMMPAGE_ADDR)] = 0;

	/* Get the GPA page table entry */
	if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo[idx]),
			      write_fault, &pte_gpa[idx], NULL) < 0)
		return -1;

	/* And its GVA buddy's GPA page table entry if it also exists */
	pte_gpa[!idx] = pfn_pte(0, __pgprot(0));
	if (tlb_lo[!idx] & ENTRYLO_V) {
		spin_lock(&kvm->mmu_lock);
		ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL,
					mips3_tlbpfn_to_paddr(tlb_lo[!idx]));
		if (ptep_buddy)
			pte_gpa[!idx] = *ptep_buddy;
		spin_unlock(&kvm->mmu_lock);
	}

	/* Get the GVA page table entry pair */
	ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE);
	if (!ptep_gva) {
		kvm_err("No ptep for gva %lx\n", gva);
		return -1;
	}

	/* Copy a pair of entries from GPA page table to GVA page table */
	ptep_gva[0] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[0], tlb_lo[0]);
	ptep_gva[1] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[1], tlb_lo[1]);

	/* Invalidate this entry in the TLB, current guest mode ASID only */
	kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo[0], tlb->tlb_lo[1]);

	return 0;
}

int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn;
	pte_t *ptep;

	ptep = kvm_trap_emul_pte_for_gva(vcpu, badvaddr);
	if (!ptep) {
		kvm_err("No ptep for commpage %lx\n", badvaddr);
		return -1;
	}

	pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
	/* Also set valid and dirty, so refill handler doesn't have to */
	*ptep = pte_mkyoung(pte_mkdirty(pfn_pte(pfn, PAGE_SHARED)));

	/* Invalidate this entry in the TLB, guest kernel ASID only */
	kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
	return 0;
}

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	local_irq_save(flags);

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_load(vcpu, cpu);

	local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_put(vcpu, cpu);

	local_irq_restore(flags);
}

/**
 * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault.
 * @vcpu:	Virtual CPU.
 * @gva:	Guest virtual address to be accessed.
 * @write:	True if write attempted (must be dirtied and made writable).
 *
 * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and
 * dirtying the page if @write so that guest instructions can be modified.
 *
 * Returns:	KVM_MIPS_MAPPED on success.
 *		KVM_MIPS_GVA if bad guest virtual address.
 *		KVM_MIPS_GPA if bad guest physical address.
 *		KVM_MIPS_TLB if guest TLB not present.
 *		KVM_MIPS_TLBINV if guest TLB present but not valid.
 *		KVM_MIPS_TLBMOD if guest TLB read only.
 */
enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
						   unsigned long gva,
						   bool write)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb *tlb;
	int index;

	if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) {
		if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0)
			return KVM_MIPS_GPA;
	} else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) ||
		   KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) {
		/* Address should be in the guest TLB */
		index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) |
			  (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID));
		if (index < 0)
			return KVM_MIPS_TLB;
		tlb = &vcpu->arch.guest_tlb[index];

		/* Entry should be valid, and dirty for writes */
		if (!TLB_IS_VALID(*tlb, gva))
			return KVM_MIPS_TLBINV;
		if (write && !TLB_IS_DIRTY(*tlb, gva))
			return KVM_MIPS_TLBMOD;

		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write))
			return KVM_MIPS_GPA;
	} else {
		return KVM_MIPS_GVA;
	}

	return KVM_MIPS_MAPPED;
}

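/*
 * Fetch a guest instruction by reading it through the GVA mapping. If the
 * read faults (for example because the GVA mapping was just invalidated),
 * the fault is handled with kvm_trap_emul_gva_fault() and the access is
 * retried.
 */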
int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	int err;

	if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ),
		 "Expect BadInstr/BadInstrP registers to be used with VZ\n"))
		return -EINVAL;

retry:
	kvm_trap_emul_gva_lockless_begin(vcpu);
	err = get_user(*out, opc);
	kvm_trap_emul_gva_lockless_end(vcpu);

	if (unlikely(err)) {
		/*
		 * Try to handle the fault, maybe we just raced with a GVA
		 * invalidation.
		 */
		err = kvm_trap_emul_gva_fault(vcpu, (unsigned long)opc,
					      false);
		if (unlikely(err)) {
			kvm_err("%s: illegal address: %p\n",
				__func__, opc);
			return -EFAULT;
		}

		/* Hopefully it'll work now */