/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
 * for which pages need to be cached.
 */
#if defined(__PAGETABLE_PMD_FOLDED)
#define KVM_MMU_CACHE_MIN_PAGES 1
#else
#define KVM_MMU_CACHE_MIN_PAGES 2
#endif

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  int min, int max)
{
        void *page;

        BUG_ON(max > KVM_NR_MEM_OBJS);
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < max) {
                page = (void *)__get_free_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = page;
        }
        return 0;
}

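/*
 * Illustrative usage sketch (not code from this file): callers top up the
 * cache before taking kvm->mmu_lock and then allocate from it with the lock
 * held, as kvm_mips_map_page() does further down:
 *
 *      err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
 *                                   KVM_NR_MEM_OBJS);
 *      if (err)
 *              goto out;
 *      spin_lock(&kvm->mmu_lock);
 *      ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);
 */
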
static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
        void *p;

        BUG_ON(!mc || !mc->nobjs);
        p = mc->objects[--mc->nobjs];
        return p;
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

/**
 * kvm_pgd_init() - Initialise KVM GPA page directory.
 * @page:	Pointer to page directory (PGD) for KVM GPA.
 *
 * Initialise a KVM GPA page directory with pointers to the invalid table, i.e.
 * representing no mappings. This is similar to pgd_init(), however it
 * initialises all the page directory pointers, not just the ones corresponding
 * to the userland address space (since it is for the guest physical address
 * space rather than a virtual address space).
 */
static void kvm_pgd_init(void *page)
{
        unsigned long *p, *end;
        unsigned long entry;

#ifdef __PAGETABLE_PMD_FOLDED
        entry = (unsigned long)invalid_pte_table;
#else
        entry = (unsigned long)invalid_pmd_table;
#endif

        p = (unsigned long *)page;
        end = p + PTRS_PER_PGD;

        do {
                p[0] = entry;
                p[1] = entry;
                p[2] = entry;
                p[3] = entry;
                p[4] = entry;
                p += 8;
                p[-3] = entry;
                p[-2] = entry;
                p[-1] = entry;
        } while (p != end);
}

/**
 * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory.
 *
 * Allocate a blank KVM GPA page directory (PGD) for representing guest
 * physical to host physical page mappings.
 *
 * Returns:	Pointer to new KVM GPA page directory.
 *		NULL on allocation failure.
 */
pgd_t *kvm_pgd_alloc(void)
{
        pgd_t *ret;

        ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER);
        if (ret)
                kvm_pgd_init(ret);

        return ret;
}

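/*
 * Illustrative sketch (assumption about the caller, not code from this file):
 * the GPA page directory allocated here is typically installed at VM creation
 * time, roughly:
 *
 *      kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
 *      if (!kvm->arch.gpa_mm.pgd)
 *              return -ENOMEM;
 */
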
/**
 * kvm_mips_walk_pgd() - Walk page table with optional allocation.
 * @pgd:	Page directory pointer.
 * @addr:	Address to index page table using.
 * @cache:	MMU page cache to allocate new page tables from, or NULL.
 *
 * Walk the page tables pointed to by @pgd to find the PTE corresponding to the
 * address @addr. If page tables don't exist for @addr, they will be created
 * from the MMU cache if @cache is not NULL.
 *
 * Returns:	Pointer to pte_t corresponding to @addr.
 *		NULL if a page table doesn't exist for @addr and !@cache.
 *		NULL if a page table allocation failed.
 */
static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
                                unsigned long addr)
{
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        pgd += pgd_index(addr);
        if (pgd_none(*pgd)) {
                /* Not used on MIPS yet */
                BUG();
                return NULL;
        }
        p4d = p4d_offset(pgd, addr);
        pud = pud_offset(p4d, addr);
        if (pud_none(*pud)) {
                pmd_t *new_pmd;

                if (!cache)
                        return NULL;
                new_pmd = mmu_memory_cache_alloc(cache);
                pmd_init((unsigned long)new_pmd,
                         (unsigned long)invalid_pte_table);
                pud_populate(NULL, pud, new_pmd);
        }
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd)) {
                pte_t *new_pte;

                if (!cache)
                        return NULL;
                new_pte = mmu_memory_cache_alloc(cache);
                clear_page(new_pte);
                pmd_populate_kernel(NULL, pmd, new_pte);
        }
        return pte_offset(pmd, addr);
}

/* Caller must hold kvm->mmu_lock */
static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
                                   struct kvm_mmu_memory_cache *cache,
                                   unsigned long addr)
{
        return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
}

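/*
 * Illustrative sketch (not code from this file): a NULL cache gives a
 * lookup-only walk, while a topped-up cache lets missing intermediate tables
 * be allocated:
 *
 *      ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);      (fast path, no alloc)
 *      ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);  (may allocate tables)
 */
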
/*
 * kvm_mips_flush_gpa_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest physical address space from the VM's GPA page tables.
 */

static bool kvm_mips_flush_gpa_pte(pte_t *pte, unsigned long start_gpa,
                                   unsigned long end_gpa)
{
        int i_min = __pte_offset(start_gpa);
        int i_max = __pte_offset(end_gpa);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
        int i;

        for (i = i_min; i <= i_max; ++i) {
                if (!pte_present(pte[i]))
                        continue;

                set_pte(pte + i, __pte(0));
        }
        return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
                                   unsigned long end_gpa)
{
        pte_t *pte;
        unsigned long end = ~0ul;
        int i_min = pmd_index(start_gpa);
        int i_max = pmd_index(end_gpa);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
        int i;

        for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
                if (!pmd_present(pmd[i]))
                        continue;

                pte = pte_offset(pmd + i, 0);
                if (i == i_max)
                        end = end_gpa;

                if (kvm_mips_flush_gpa_pte(pte, start_gpa, end)) {
                        pmd_clear(pmd + i);
                        pte_free_kernel(NULL, pte);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pud(pud_t *pud, unsigned long start_gpa,
                                   unsigned long end_gpa)
{
        pmd_t *pmd;
        unsigned long end = ~0ul;
        int i_min = pud_index(start_gpa);
        int i_max = pud_index(end_gpa);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
        int i;

        for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
                if (!pud_present(pud[i]))
                        continue;

                pmd = pmd_offset(pud + i, 0);
                if (i == i_max)
                        end = end_gpa;

                if (kvm_mips_flush_gpa_pmd(pmd, start_gpa, end)) {
                        pud_clear(pud + i);
                        pmd_free(NULL, pmd);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa,
                                   unsigned long end_gpa)
{
        p4d_t *p4d;
        pud_t *pud;
        unsigned long end = ~0ul;
        int i_min = pgd_index(start_gpa);
        int i_max = pgd_index(end_gpa);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
        int i;

        for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
                if (!pgd_present(pgd[i]))
                        continue;

                p4d = p4d_offset(pgd, 0);
                pud = pud_offset(p4d + i, 0);
                if (i == i_max)
                        end = end_gpa;

                if (kvm_mips_flush_gpa_pud(pud, start_gpa, end)) {
                        pgd_clear(pgd + i);
                        pud_free(NULL, pud);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}

/**
 * kvm_mips_flush_gpa_pt() - Flush a range of guest physical addresses.
 * @kvm:	KVM pointer.
 * @start_gfn:	Guest frame number of first page in GPA range to flush.
 * @end_gfn:	Guest frame number of last page in GPA range to flush.
 *
 * Flushes a range of GPA mappings from the GPA page tables.
 *
 * The caller must hold the @kvm->mmu_lock spinlock.
 *
 * Returns:	Whether it's safe to remove the top level page directory because
 *		all lower levels have been removed.
 */
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
{
        return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd,
                                      start_gfn << PAGE_SHIFT,
                                      end_gfn << PAGE_SHIFT);
}

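/*
 * Illustrative sketch (not code from this file): the MMU notifier unmap
 * handler below drops a whole gfn range with a single call:
 *
 *      kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
 */
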
#define BUILD_PTE_RANGE_OP(name, op) \
static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start, \
                                 unsigned long end) \
{ \
        int ret = 0; \
        int i_min = __pte_offset(start); \
        int i_max = __pte_offset(end); \
        int i; \
        pte_t old, new; \
 \
        for (i = i_min; i <= i_max; ++i) { \
                if (!pte_present(pte[i])) \
                        continue; \
 \
                old = pte[i]; \
                new = op(old); \
                if (pte_val(new) == pte_val(old)) \
                        continue; \
                set_pte(pte + i, new); \
                ret = 1; \
        } \
        return ret; \
} \
 \
/* returns true if anything was done */ \
static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start, \
                                 unsigned long end) \
{ \
        int ret = 0; \
        pte_t *pte; \
        unsigned long cur_end = ~0ul; \
        int i_min = pmd_index(start); \
        int i_max = pmd_index(end); \
        int i; \
 \
        for (i = i_min; i <= i_max; ++i, start = 0) { \
                if (!pmd_present(pmd[i])) \
                        continue; \
 \
                pte = pte_offset(pmd + i, 0); \
                if (i == i_max) \
                        cur_end = end; \
 \
                ret |= kvm_mips_##name##_pte(pte, start, cur_end); \
        } \
        return ret; \
} \
 \
static int kvm_mips_##name##_pud(pud_t *pud, unsigned long start, \
                                 unsigned long end) \
{ \
        int ret = 0; \
        pmd_t *pmd; \
        unsigned long cur_end = ~0ul; \
        int i_min = pud_index(start); \
        int i_max = pud_index(end); \
        int i; \
 \
        for (i = i_min; i <= i_max; ++i, start = 0) { \
                if (!pud_present(pud[i])) \
                        continue; \
 \
                pmd = pmd_offset(pud + i, 0); \
                if (i == i_max) \
                        cur_end = end; \
 \
                ret |= kvm_mips_##name##_pmd(pmd, start, cur_end); \
        } \
        return ret; \
} \
 \
static int kvm_mips_##name##_pgd(pgd_t *pgd, unsigned long start, \
                                 unsigned long end) \
{ \
        int ret = 0; \
        p4d_t *p4d; \
        pud_t *pud; \
        unsigned long cur_end = ~0ul; \
        int i_min = pgd_index(start); \
        int i_max = pgd_index(end); \
        int i; \
 \
        for (i = i_min; i <= i_max; ++i, start = 0) { \
                if (!pgd_present(pgd[i])) \
                        continue; \
 \
                p4d = p4d_offset(pgd, 0); \
                pud = pud_offset(p4d + i, 0); \
                if (i == i_max) \
                        cur_end = end; \
 \
                ret |= kvm_mips_##name##_pud(pud, start, cur_end); \
        } \
        return ret; \
}

/*
 * kvm_mips_mkclean_gpa_pt.
 * Mark a range of guest physical address space clean (writes fault) in the
 * VM's GPA page table to allow dirty page tracking.
 */
BUILD_PTE_RANGE_OP(mkclean, pte_mkclean)

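/*
 * The instantiation above generates kvm_mips_mkclean_pte(), _pmd(), _pud() and
 * _pgd(); only the top-level helper is called directly, as in
 * kvm_mips_mkclean_gpa_pt() below:
 *
 *      ret = kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd,
 *                                 start_gfn << PAGE_SHIFT,
 *                                 end_gfn << PAGE_SHIFT);
 */
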
/**
 * kvm_mips_mkclean_gpa_pt() - Make a range of guest physical addresses clean.
 * @kvm:	KVM pointer.
 * @start_gfn:	Guest frame number of first page in GPA range to flush.
 * @end_gfn:	Guest frame number of last page in GPA range to flush.
 *
 * Make a range of GPA mappings clean so that guest writes will fault and
 * trigger dirty page logging.
 *
 * The caller must hold the @kvm->mmu_lock spinlock.
 *
 * Returns:	Whether any GPA mappings were modified, which would require
 *		derived mappings (GVA page tables & TLB entries) to be
 *		invalidated.
 */
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
{
        return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd,
                                    start_gfn << PAGE_SHIFT,
                                    end_gfn << PAGE_SHIFT);
}

/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
 * @kvm:	The KVM pointer
 * @slot:	The memory slot associated with mask
 * @gfn_offset:	The gfn offset in memory slot
 * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
 *		slot to be write protected
 *
 * Walks the bits set in @mask and write protects the associated PTEs. The
 * caller must acquire @kvm->mmu_lock.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                struct kvm_memory_slot *slot,
                gfn_t gfn_offset, unsigned long mask)
{
        gfn_t base_gfn = slot->base_gfn + gfn_offset;
        gfn_t start = base_gfn + __ffs(mask);
        gfn_t end = base_gfn + __fls(mask);

        kvm_mips_mkclean_gpa_pt(kvm, start, end);
}

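/*
 * Worked example (illustrative only): with gfn_offset == 0 and mask == 0x0c
 * (bits 2 and 3 set), __ffs(mask) == 2 and __fls(mask) == 3, so GFNs
 * slot->base_gfn + 2 through slot->base_gfn + 3 are made clean.
 */
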
/*
 * kvm_mips_mkold_gpa_pt.
 * Mark a range of guest physical address space old (all accesses fault) in the
 * VM's GPA page table to allow detection of commonly used pages.
 */
BUILD_PTE_RANGE_OP(mkold, pte_mkold)

static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
                                 gfn_t end_gfn)
{
        return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd,
                                  start_gfn << PAGE_SHIFT,
                                  end_gfn << PAGE_SHIFT);
}

static int handle_hva_to_gpa(struct kvm *kvm,
                             unsigned long start,
                             unsigned long end,
                             int (*handler)(struct kvm *kvm, gfn_t gfn,
                                            gfn_t gfn_end,
                                            struct kvm_memory_slot *memslot,
                                            void *data),
                             void *data)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int ret = 0;

        slots = kvm_memslots(kvm);

        /* we only care about the pages that the guest sees */
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                        (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;

                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

                ret |= handler(kvm, gfn, gfn_end, memslot, data);
        }

        return ret;
}

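/*
 * Illustrative sketch (not code from this file): each handler sees the gfn
 * range clamped to one memslot, so the callers below simply pass a handler
 * function and optional data, e.g.:
 *
 *      handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
 */
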
static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
                                 struct kvm_memory_slot *memslot, void *data)
{
        kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
        return 1;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
        handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);

        kvm_mips_callbacks->flush_shadow_all(kvm);
        return 0;
}

static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
                                struct kvm_memory_slot *memslot, void *data)
{
        gpa_t gpa = gfn << PAGE_SHIFT;
        pte_t hva_pte = *(pte_t *)data;
        pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
        pte_t old_pte;

        if (!gpa_pte)
                return 0;

        /* Mapping may need adjusting depending on memslot flags */
        old_pte = *gpa_pte;
        if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
                hva_pte = pte_mkclean(hva_pte);
        else if (memslot->flags & KVM_MEM_READONLY)
                hva_pte = pte_wrprotect(hva_pte);

        set_pte(gpa_pte, hva_pte);

        /* Replacing an absent or old page doesn't need flushes */
        if (!pte_present(old_pte) || !pte_young(old_pte))
                return 0;

        /* Pages swapped, aged, moved, or cleaned require flushes */
        return !pte_present(hva_pte) ||
               !pte_young(hva_pte) ||
               pte_pfn(old_pte) != pte_pfn(hva_pte) ||
               (pte_dirty(old_pte) && !pte_dirty(hva_pte));
}

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        unsigned long end = hva + PAGE_SIZE;
        int ret;

        ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
        if (ret)
                kvm_mips_callbacks->flush_shadow_all(kvm);
        return 0;
}

static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
                               struct kvm_memory_slot *memslot, void *data)
{
        return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
}

static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
                                    struct kvm_memory_slot *memslot, void *data)
{
        gpa_t gpa = gfn << PAGE_SHIFT;
        pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);

        if (!gpa_pte)
                return 0;
        return pte_young(*gpa_pte);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
        return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
}

/**
 * _kvm_mips_map_page_fast() - Fast path GPA fault handler.
 * @vcpu:		VCPU pointer.
 * @gpa:		Guest physical address of fault.
 * @write_fault:	Whether the fault was due to a write.
 * @out_entry:		New PTE for @gpa (written on success unless NULL).
 * @out_buddy:		New PTE for @gpa's buddy (written on success unless
 *			NULL).
 *
 * Perform fast path GPA fault handling, doing all that can be done without
 * calling into KVM. This handles marking old pages young (for idle page
 * tracking), and dirtying of clean pages (for dirty page logging).
 *
 * Returns:	0 on success, in which case we can update derived mappings and
 *		resume guest execution.
 *		-EFAULT on failure due to absent GPA mapping or write to
 *		read-only page, in which case KVM must be consulted.
 */
static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
                                   bool write_fault,
                                   pte_t *out_entry, pte_t *out_buddy)
{
        struct kvm *kvm = vcpu->kvm;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        pte_t *ptep;
        kvm_pfn_t pfn = 0;      /* silence bogus GCC warning */
        bool pfn_valid = false;
        int ret = 0;

        spin_lock(&kvm->mmu_lock);

        /* Fast path - just check GPA page table for an existing entry */
        ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
        if (!ptep || !pte_present(*ptep)) {
                ret = -EFAULT;
                goto out;
        }

        /* Track access to pages marked old */
        if (!pte_young(*ptep)) {
                set_pte(ptep, pte_mkyoung(*ptep));
                pfn = pte_pfn(*ptep);
                pfn_valid = true;
                /* call kvm_set_pfn_accessed() after unlock */
        }
        if (write_fault && !pte_dirty(*ptep)) {
                if (!pte_write(*ptep)) {
                        ret = -EFAULT;
                        goto out;
                }

                /* Track dirtying of writeable pages */
                set_pte(ptep, pte_mkdirty(*ptep));
                pfn = pte_pfn(*ptep);
                mark_page_dirty(kvm, gfn);
                kvm_set_pfn_dirty(pfn);
        }

        if (out_entry)
                *out_entry = *ptep;
        if (out_buddy)
                *out_buddy = *ptep_buddy(ptep);

out:
        spin_unlock(&kvm->mmu_lock);
        if (pfn_valid)
                kvm_set_pfn_accessed(pfn);
        return ret;
}

/**
 * kvm_mips_map_page() - Map a guest physical page.
 * @vcpu:		VCPU pointer.
 * @gpa:		Guest physical address of fault.
 * @write_fault:	Whether the fault was due to a write.
 * @out_entry:		New PTE for @gpa (written on success unless NULL).
 * @out_buddy:		New PTE for @gpa's buddy (written on success unless
 *			NULL).
 *
 * Handle GPA faults by creating a new GPA mapping (or updating an existing
 * one).
 *
 * This takes care of marking pages young or dirty (idle/dirty page tracking),
 * asking KVM for the corresponding PFN, and creating a mapping in the GPA page
 * tables. Derived mappings (GVA page tables and TLBs) must be handled by the
 * caller.
 *
 * Returns:	0 on success, in which case the caller may use the @out_entry
 *		and @out_buddy PTEs to update derived mappings and resume guest
 *		execution.
 *		-EFAULT if there is no memory region at @gpa or a write was
 *		attempted to a read-only memory region. This is usually handled
 *		as an MMIO access.
 */
static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
                             bool write_fault,
                             pte_t *out_entry, pte_t *out_buddy)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int srcu_idx, err;
        kvm_pfn_t pfn;
        pte_t *ptep, entry, old_pte;
        bool writeable;
        unsigned long prot_bits;
        unsigned long mmu_seq;

        /* Try the fast path to handle old / clean pages */
        srcu_idx = srcu_read_lock(&kvm->srcu);
        err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry,
                                      out_buddy);
        if (!err)
                goto out;

        /* We need a minimum of cached pages ready for page table creation */
        err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
                                     KVM_NR_MEM_OBJS);
        if (err)
                goto out;

retry:
        /*
         * Used to check for invalidations in progress, of the pfn that is
         * returned by gfn_to_pfn_prot below.
         */
        mmu_seq = kvm->mmu_notifier_seq;
        /*
         * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
         * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
         * risk the page we get a reference to getting unmapped before we have a
         * chance to grab the mmu_lock without mmu_notifier_retry() noticing.
         *
         * This smp_rmb() pairs with the effective smp_wmb() of the combination
         * of the pte_unmap_unlock() after the PTE is zapped, and the
         * spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
         * mmu_notifier_seq is incremented.
         */
        smp_rmb();

        /* Slow path - ask KVM core whether we can access this GPA */
        pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
        if (is_error_noslot_pfn(pfn)) {
                err = -EFAULT;
                goto out;
        }

        spin_lock(&kvm->mmu_lock);
        /* Check if an invalidation has taken place since we got pfn */
        if (mmu_notifier_retry(kvm, mmu_seq)) {
                /*
                 * This can happen when mappings are changed asynchronously, but
                 * also synchronously if a COW is triggered by
                 * gfn_to_pfn_prot().
                 */
                spin_unlock(&kvm->mmu_lock);
                kvm_release_pfn_clean(pfn);
                goto retry;
        }

        /* Ensure page tables are allocated */
        ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);

        /* Set up the PTE */
        prot_bits = _PAGE_PRESENT | __READABLE | _page_cachable_default;
        if (writeable) {
                prot_bits |= _PAGE_WRITE;
                if (write_fault) {
                        prot_bits |= __WRITEABLE;
                        mark_page_dirty(kvm, gfn);
                        kvm_set_pfn_dirty(pfn);
                }
        }
        entry = pfn_pte(pfn, __pgprot(prot_bits));

        /* Write the PTE */
        old_pte = *ptep;
        set_pte(ptep, entry);

        err = 0;
        if (out_entry)
                *out_entry = *ptep;
        if (out_buddy)
                *out_buddy = *ptep_buddy(ptep);

        spin_unlock(&kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        kvm_set_pfn_accessed(pfn);
out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return err;
}

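/*
 * Illustrative sketch (not code from this file): trap & emulate callers pass
 * @out_entry/@out_buddy so the even/odd GPA PTE pair can be copied into the
 * GVA page tables, as kvm_mips_handle_kseg0_tlb_fault() does below:
 *
 *      err = kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx],
 *                              &pte_gpa[!idx]);
 */
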
static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
                                        unsigned long addr)
{
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
        pgd_t *pgdp;
        int ret;

        /* We need a minimum of cached pages ready for page table creation */
        ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
                                     KVM_NR_MEM_OBJS);
        if (ret)
                return NULL;

        if (KVM_GUEST_KERNEL_MODE(vcpu))
                pgdp = vcpu->arch.guest_kernel_mm.pgd;
        else
                pgdp = vcpu->arch.guest_user_mm.pgd;

        return kvm_mips_walk_pgd(pgdp, memcache, addr);
}

void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
                                  bool user)
{
        pgd_t *pgdp;
        pte_t *ptep;

        addr &= PAGE_MASK << 1;

        pgdp = vcpu->arch.guest_kernel_mm.pgd;
        ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
        if (ptep) {
                ptep[0] = pfn_pte(0, __pgprot(0));
                ptep[1] = pfn_pte(0, __pgprot(0));
        }

        if (user) {
                pgdp = vcpu->arch.guest_user_mm.pgd;
                ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
                if (ptep) {
                        ptep[0] = pfn_pte(0, __pgprot(0));
                        ptep[1] = pfn_pte(0, __pgprot(0));
                }
        }
}

/*
 * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest virtual address space from the VM's GVA page tables.
 */

static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva,
                                   unsigned long end_gva)
{
        int i_min = __pte_offset(start_gva);
        int i_max = __pte_offset(end_gva);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
        int i;

        /*
         * There's no freeing to do, so there's no point clearing individual
         * entries unless only part of the last level page table needs flushing.
         */
        if (safe_to_remove)
                return true;

        for (i = i_min; i <= i_max; ++i) {
                if (!pte_present(pte[i]))
                        continue;

                set_pte(pte + i, __pte(0));
        }
        return safe_to_remove;
}

static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
                                   unsigned long end_gva)
{
        pte_t *pte;
        unsigned long end = ~0ul;
        int i_min = pmd_index(start_gva);
        int i_max = pmd_index(end_gva);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
        int i;

        for (i = i_min; i <= i_max; ++i, start_gva = 0) {
                if (!pmd_present(pmd[i]))
                        continue;

                pte = pte_offset(pmd + i, 0);
                if (i == i_max)
                        end = end_gva;

                if (kvm_mips_flush_gva_pte(pte, start_gva, end)) {
                        pmd_clear(pmd + i);
                        pte_free_kernel(NULL, pte);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}

static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva,
                                   unsigned long end_gva)
{
        pmd_t *pmd;
        unsigned long end = ~0ul;
        int i_min = pud_index(start_gva);
        int i_max = pud_index(end_gva);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
        int i;

        for (i = i_min; i <= i_max; ++i, start_gva = 0) {
                if (!pud_present(pud[i]))
                        continue;

                pmd = pmd_offset(pud + i, 0);
                if (i == i_max)
                        end = end_gva;

                if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) {
                        pud_clear(pud + i);
                        pmd_free(NULL, pmd);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}

static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva,
                                   unsigned long end_gva)
{
        p4d_t *p4d;
        pud_t *pud;
        unsigned long end = ~0ul;
        int i_min = pgd_index(start_gva);
        int i_max = pgd_index(end_gva);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
        int i;

        for (i = i_min; i <= i_max; ++i, start_gva = 0) {
                if (!pgd_present(pgd[i]))
                        continue;

                p4d = p4d_offset(pgd, 0);
                pud = pud_offset(p4d + i, 0);
                if (i == i_max)
                        end = end_gva;

                if (kvm_mips_flush_gva_pud(pud, start_gva, end)) {
                        pgd_clear(pgd + i);
                        pud_free(NULL, pud);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}

void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags)
{
        if (flags & KMF_GPA) {
                /* all of guest virtual address space could be affected */
                if (flags & KMF_KERN)
                        /* useg, kseg0, seg2/3 */
                        kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff);
                else
                        /* useg */
                        kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);
        } else {
                /* useg */
                kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);

                /* kseg2/3 */
                if (flags & KMF_KERN)
                        kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff);
        }
}

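/*
 * Worked example (a reading of the code above, not additional code): with both
 * KMF_GPA and KMF_KERN set, GVAs 0x00000000-0x7fffffff (useg, kseg0 and
 * kseg2/3) are flushed; without KMF_GPA, useg (0x00000000-0x3fffffff) is
 * always flushed and kseg2/3 (0x60000000-0x7fffffff) only when KMF_KERN is
 * set.
 */
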
static pte_t kvm_mips_gpa_pte_to_gva_unmapped(pte_t pte)
{
        /*
         * Don't leak writeable but clean entries from GPA page tables. We don't
         * want the normal Linux tlbmod handler to handle dirtying when KVM
         * accesses guest memory.
         */
        if (!pte_dirty(pte))
                pte = pte_wrprotect(pte);

        return pte;
}

static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo)
{
        /* Guest EntryLo overrides host EntryLo */
        if (!(entrylo & ENTRYLO_D))
                pte = pte_mkclean(pte);

        return kvm_mips_gpa_pte_to_gva_unmapped(pte);
}

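/*
 * Illustrative sketch (interpretation, not code from this file): if the guest
 * TLB EntryLo D (dirty/write-enable) bit is clear, the derived GVA PTE is made
 * clean and therefore write protected by kvm_mips_gpa_pte_to_gva_unmapped(),
 * so a guest write faults and can be handled by KVM instead of the normal
 * tlbmod path, e.g.:
 *
 *      ptep_gva[0] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[0], tlb_lo[0]);
 */
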
#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
                                      struct kvm_vcpu *vcpu,
                                      bool write_fault)
{
        int ret;

        ret = kvm_mips_map_page(vcpu, badvaddr, write_fault, NULL, NULL);
        if (ret)
                return ret;

        /* Invalidate this entry in the TLB */
        return kvm_vz_host_tlb_inv(vcpu, badvaddr);
}
#endif

/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
                                    struct kvm_vcpu *vcpu,
                                    bool write_fault)
{
        unsigned long gpa;
        pte_t pte_gpa[2], *ptep_gva;
        int idx;

        if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
                kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }

        /* Get the GPA page table entry */
        gpa = KVM_GUEST_CPHYSADDR(badvaddr);
        idx = (badvaddr >> PAGE_SHIFT) & 1;
        if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx],
                              &pte_gpa[!idx]) < 0)
                return -1;

        /* Get the GVA page table entry */
        ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, badvaddr & ~PAGE_SIZE);
        if (!ptep_gva) {
                kvm_err("No ptep for gva %lx\n", badvaddr);
                return -1;
        }

        /* Copy a pair of entries from GPA page table to GVA page table */
        ptep_gva[0] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[0]);
        ptep_gva[1] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[1]);

        /* Invalidate this entry in the TLB, guest kernel ASID only */
        kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
        return 0;
}

int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
                                         struct kvm_mips_tlb *tlb,
                                         unsigned long gva,
                                         bool write_fault)
{
        struct kvm *kvm = vcpu->kvm;
        long tlb_lo[2];
        pte_t pte_gpa[2], *ptep_buddy, *ptep_gva;
        unsigned int idx = TLB_LO_IDX(*tlb, gva);
        bool kernel = KVM_GUEST_KERNEL_MODE(vcpu);

        tlb_lo[0] = tlb->tlb_lo[0];
        tlb_lo[1] = tlb->tlb_lo[1];

        /*
         * The commpage address must not be mapped to anything else if the guest
         * TLB contains entries nearby, or commpage accesses will break.
         */
        if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1)))
                tlb_lo[TLB_LO_IDX(*tlb, KVM_GUEST_COMMPAGE_ADDR)] = 0;

        /* Get the GPA page table entry */
        if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo[idx]),
                              write_fault, &pte_gpa[idx], NULL) < 0)
                return -1;

        /* And its GVA buddy's GPA page table entry if it also exists */
        pte_gpa[!idx] = pfn_pte(0, __pgprot(0));
        if (tlb_lo[!idx] & ENTRYLO_V) {
                spin_lock(&kvm->mmu_lock);
                ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL,
                                        mips3_tlbpfn_to_paddr(tlb_lo[!idx]));
                if (ptep_buddy)
                        pte_gpa[!idx] = *ptep_buddy;
                spin_unlock(&kvm->mmu_lock);
        }

        /* Get the GVA page table entry pair */
        ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE);
        if (!ptep_gva) {
                kvm_err("No ptep for gva %lx\n", gva);
                return -1;
        }

        /* Copy a pair of entries from GPA page table to GVA page table */
        ptep_gva[0] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[0], tlb_lo[0]);
        ptep_gva[1] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[1], tlb_lo[1]);

        /* Invalidate this entry in the TLB, current guest mode ASID only */
        kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel);

        kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
                  tlb->tlb_lo[0], tlb->tlb_lo[1]);

        return 0;
}

int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
                                       struct kvm_vcpu *vcpu)
{
        kvm_pfn_t pfn;
        pte_t *ptep;

        ptep = kvm_trap_emul_pte_for_gva(vcpu, badvaddr);
        if (!ptep) {
                kvm_err("No ptep for commpage %lx\n", badvaddr);
                return -1;
        }

        pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
        /* Also set valid and dirty, so refill handler doesn't have to */
        *ptep = pte_mkyoung(pte_mkdirty(pfn_pte(pfn, PAGE_SHARED)));

        /* Invalidate this entry in the TLB, guest kernel ASID only */
        kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
        return 0;
}

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
        if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
                hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        unsigned long flags;

        kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

        local_irq_save(flags);

        vcpu->cpu = cpu;
        if (vcpu->arch.last_sched_cpu != cpu) {
                kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
                          vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
                /*
                 * Migrate the timer interrupt to the current CPU so that it
                 * always interrupts the guest and synchronously triggers a
                 * guest timer interrupt.
                 */
                kvm_mips_migrate_count(vcpu);
        }

        /* restore guest state to registers */
        kvm_mips_callbacks->vcpu_load(vcpu, cpu);

        local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        int cpu;

        local_irq_save(flags);

        cpu = smp_processor_id();
        vcpu->arch.last_sched_cpu = cpu;
        vcpu->cpu = -1;

        /* save guest state in registers */
        kvm_mips_callbacks->vcpu_put(vcpu, cpu);

        local_irq_restore(flags);
}

/**
 * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault.
 * @vcpu:	Virtual CPU.
 * @gva:	Guest virtual address to be accessed.
 * @write:	True if write attempted (must be dirtied and made writable).
 *
 * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and
 * dirtying the page if @write so that guest instructions can be modified.
 *
 * Returns:	KVM_MIPS_MAPPED on success.
 *		KVM_MIPS_GVA if bad guest virtual address.
 *		KVM_MIPS_GPA if bad guest physical address.
 *		KVM_MIPS_TLB if guest TLB not present.
 *		KVM_MIPS_TLBINV if guest TLB present but not valid.
 *		KVM_MIPS_TLBMOD if guest TLB read only.
 */
enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
                                                   unsigned long gva,
                                                   bool write)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb *tlb;
        int index;

        if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) {
                if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0)
                        return KVM_MIPS_GPA;
        } else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) ||
                   KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) {
                /* Address should be in the guest TLB */
                index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) |
                          (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID));
                if (index < 0)
                        return KVM_MIPS_TLB;
                tlb = &vcpu->arch.guest_tlb[index];

                /* Entry should be valid, and dirty for writes */
                if (!TLB_IS_VALID(*tlb, gva))
                        return KVM_MIPS_TLBINV;
                if (write && !TLB_IS_DIRTY(*tlb, gva))
                        return KVM_MIPS_TLBMOD;

                if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write))
                        return KVM_MIPS_GPA;
        } else {
                return KVM_MIPS_GVA;
        }

        return KVM_MIPS_MAPPED;
}

int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
        int err;

        if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ),
                 "Expect BadInstr/BadInstrP registers to be used with VZ\n"))
                return -EINVAL;

retry:
        kvm_trap_emul_gva_lockless_begin(vcpu);
        err = get_user(*out, opc);
        kvm_trap_emul_gva_lockless_end(vcpu);

        if (unlikely(err)) {
                /*
                 * Try to handle the fault, maybe we just raced with a GVA
                 * invalidation.
                 */
                err = kvm_trap_emul_gva_fault(vcpu, (unsigned long)opc,
                                              false);
                if (unlikely(err)) {
                        kvm_err("%s: illegal address: %p\n",
                                __func__, opc);
                        return -EFAULT;
                }

                /* Hopefully it'll work now */
                goto retry;
        }
        return 0;
}