// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/debugfs.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/ultravisor.h>
#include <asm/kvm_book3s_uvmem.h>
/*
 * Supported radix tree geometry.
 * Like p9, we support either 5 or 9 bits at the first (lowest) level,
 * for a page size of 64k or 4k.
 */
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
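/*
 * When the thread is in hypervisor state, the top two bits of an effective
 * address select a "quadrant": quadrant 0 is the current process, quadrants
 * 1 and 2 are guest translations selected by the values in LPIDR/PIDR, and
 * quadrant 3 is the hypervisor's own kernel space.  The helpers below load
 * the guest's LPID/PID and then access guest memory through quadrant 1
 * (or quadrant 2 when pid == 0).
 */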
unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
					      gva_t eaddr, void *to, void *from,
					      unsigned long n)
{
	int uninitialized_var(old_pid), old_lpid;
	unsigned long quadrant, ret = n;
	bool is_load = !!to;

	/* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
	if (kvmhv_on_pseries())
		return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
					  __pa(to), __pa(from), n);

	quadrant = 1;
	if (!pid)
		quadrant = 2;
	if (is_load)
		from = (void *) (eaddr | (quadrant << 62));
	else
		to = (void *) (eaddr | (quadrant << 62));

	preempt_disable();

	/* switch the lpid first to avoid running host with unallocated pid */
	old_lpid = mfspr(SPRN_LPID);
	if (old_lpid != lpid)
		mtspr(SPRN_LPID, lpid);
	if (quadrant == 1) {
		old_pid = mfspr(SPRN_PID);
		if (old_pid != pid)
			mtspr(SPRN_PID, pid);
	}
	isync();

	if (is_load)
		ret = probe_user_read(to, (const void __user *)from, n);
	else
		ret = probe_user_write((void __user *)to, from, n);

	/* switch the pid first to avoid running host with unallocated pid */
	if (quadrant == 1 && pid != old_pid)
		mtspr(SPRN_PID, old_pid);
	if (lpid != old_lpid)
		mtspr(SPRN_LPID, old_lpid);
	isync();

	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(__kvmhv_copy_tofrom_guest_radix);
static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
					  void *to, void *from, unsigned long n)
{
	int lpid = vcpu->kvm->arch.lpid;
	int pid = vcpu->arch.pid;

	/* This would cause a data segment intr so don't allow the access */
	if (eaddr & (0x3FFUL << 52))
		return -EINVAL;

	/* Should we be using the nested lpid */
	if (vcpu->arch.nested)
		lpid = vcpu->arch.nested->shadow_lpid;

	/* If accessing quadrant 3 then pid is expected to be 0 */
	if (((eaddr >> 62) & 0x3) == 0x3)
		pid = 0;

	eaddr &= ~(0xFFFUL << 52);

	return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n);
}
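/*
 * If only part of the buffer could be copied in, the uncopied tail of the
 * destination is cleared so the caller never sees stale data.
 */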
long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
				 unsigned long n)
{
	long ret;

	ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n);
	if (ret > 0)
		memset(to + (n - ret), 0, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmhv_copy_from_guest_radix);

long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
			       unsigned long n)
{
	return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
}
EXPORT_SYMBOL_GPL(kvmhv_copy_to_guest_radix);
int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
			       struct kvmppc_pte *gpte, u64 root,
			       u64 *pte_ret_p)
{
	struct kvm *kvm = vcpu->kvm;
	int ret, level, ps;
	unsigned long rts, bits, offset, index;
	u64 pte, base, gpa;
	__be64 rpte;

	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
		((root & RTS2_MASK) >> RTS2_SHIFT);
	bits = root & RPDS_MASK;
	base = root & RPDB_MASK;

	offset = rts + 31;

	/* Current implementations only support 52-bit space */
	if (offset != 52)
		return -EINVAL;

	/* Walk each level of the radix tree */
	for (level = 3; level >= 0; --level) {
		u64 addr;
		/* Check a valid size */
		if (level && bits != p9_supported_radix_bits[level])
			return -EINVAL;
		if (level == 0 && !(bits == 5 || bits == 9))
			return -EINVAL;
		offset -= bits;
		index = (eaddr >> offset) & ((1UL << bits) - 1);
		/* Check that low bits of page table base are zero */
		if (base & ((1UL << (bits + 3)) - 1))
			return -EINVAL;
		/* Read the entry from guest memory */
		addr = base + (index * sizeof(rpte));
		ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
		if (ret) {
			if (pte_ret_p)
				*pte_ret_p = addr;
			return ret;
		}
		pte = __be64_to_cpu(rpte);
		if (!(pte & _PAGE_PRESENT))
			return -ENOENT;
		/* Check if a leaf entry */
		if (pte & _PAGE_PTE)
			break;
		/* Get ready to walk the next level */
		base = pte & RPDB_MASK;
		bits = pte & RPDS_MASK;
	}

	/* Need a leaf at lowest level; 512GB pages not supported */
	if (level < 0 || level == 3)
		return -EINVAL;

	/* We found a valid leaf PTE */
	/* Offset is now log base 2 of the page size */
	gpa = pte & 0x01fffffffffff000ul;
	if (gpa & ((1ul << offset) - 1))
		return -EINVAL;
	gpa |= eaddr & ((1ul << offset) - 1);
	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
		if (offset == mmu_psize_defs[ps].shift)
			break;
	gpte->page_size = ps;
	gpte->page_shift = offset;

	gpte->eaddr = eaddr;
	gpte->raddr = gpa;

	/* Work out permissions */
	gpte->may_read = !!(pte & _PAGE_READ);
	gpte->may_write = !!(pte & _PAGE_WRITE);
	gpte->may_execute = !!(pte & _PAGE_EXEC);

	gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);

	if (pte_ret_p)
		*pte_ret_p = pte;

	return 0;
}
/*
 * Used to walk a partition or process table radix tree in guest memory.
 * Note: we exploit the fact that a partition table and a process
 * table have the same layout, a partition-scoped page table and a
 * process-scoped page table have the same layout, and the 2nd
 * doubleword of a partition table entry has the same layout as
 * the PTCR register.
 */
int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
				     struct kvmppc_pte *gpte, u64 table,
				     int table_index, u64 *pte_ret_p)
{
	struct kvm *kvm = vcpu->kvm;
	int ret;
	unsigned long size, ptbl, root;
	struct prtb_entry entry;

	if ((table & PRTS_MASK) > 24)
		return -EINVAL;
	size = 1ul << ((table & PRTS_MASK) + 12);

	/* Is the table big enough to contain this entry? */
	if ((table_index * sizeof(entry)) >= size)
		return -EINVAL;

	/* Read the table to find the root of the radix tree */
	ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
	ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
	if (ret)
		return ret;

	/* Root is stored in the first double word */
	root = be64_to_cpu(entry.prtb0);

	return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p);
}
int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			   struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	u32 pid;
	u64 pte;
	int ret;

	/* Work out effective PID */
	switch (eaddr >> 62) {
	case 0:
		pid = vcpu->arch.pid;
		break;
	case 3:
		pid = 0;
		break;
	default:
		return -EINVAL;
	}

	ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte,
				vcpu->kvm->arch.process_table, pid, &pte);
	if (ret)
		return ret;

	/* Check privilege (applies only to process scoped translations) */
	if (kvmppc_get_msr(vcpu) & MSR_PR) {
		if (pte & _PAGE_PRIVILEGED) {
			gpte->may_read = 0;
			gpte->may_write = 0;
			gpte->may_execute = 0;
		}
	} else {
		if (!(pte & _PAGE_PRIVILEGED)) {
			/* Check AMR/IAMR to see if strict mode is in force */
			if (vcpu->arch.amr & (1ul << 62))
				gpte->may_read = 0;
			if (vcpu->arch.amr & (1ul << 63))
				gpte->may_write = 0;
			if (vcpu->arch.iamr & (1ul << 62))
				gpte->may_execute = 0;
		}
	}

	return 0;
}
void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
			     unsigned int pshift, unsigned int lpid)
{
	unsigned long psize = PAGE_SIZE;
	int psi;
	long rc;
	unsigned long rb;

	if (pshift)
		psize = 1UL << pshift;
	else
		pshift = PAGE_SHIFT;

	addr &= ~(psize - 1);

	if (!kvmhv_on_pseries()) {
		radix__flush_tlb_lpid_page(lpid, addr, psize);
		return;
	}

	psi = shift_to_mmu_psize(pshift);
	rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
				lpid, rb);
	if (rc)
		pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}
static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
{
	long rc;

	if (!kvmhv_on_pseries()) {
		radix__flush_pwc_lpid(lpid);
		return;
	}

	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
				lpid, TLBIEL_INVAL_SET_LPID);
	if (rc)
		pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
}
static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
					     unsigned long clr, unsigned long set,
					     unsigned long addr, unsigned int shift)
{
	return __radix_pte_update(ptep, clr, set);
}

void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
}

static struct kmem_cache *kvm_pte_cache;
static struct kmem_cache *kvm_pmd_cache;

static pte_t *kvmppc_pte_alloc(void)
{
	return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
}

static void kvmppc_pte_free(pte_t *ptep)
{
	kmem_cache_free(kvm_pte_cache, ptep);
}

static pmd_t *kvmppc_pmd_alloc(void)
{
	return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
}

static void kvmppc_pmd_free(pmd_t *pmdp)
{
	kmem_cache_free(kvm_pmd_cache, pmdp);
}
/* Called with kvm->mmu_lock held */
void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
		      unsigned int shift,
		      const struct kvm_memory_slot *memslot,
		      unsigned int lpid)
{
	unsigned long old;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	unsigned long page_size = PAGE_SIZE;
	unsigned long hpa;

	old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
	kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);

	/* The following only applies to L1 entries */
	if (lpid != kvm->arch.lpid)
		return;

	if (!memslot) {
		memslot = gfn_to_memslot(kvm, gfn);
		if (!memslot)
			return;
	}
	if (shift) { /* 1GB or 2MB page */
		page_size = 1ul << shift;
		if (shift == PMD_SHIFT)
			kvm->stat.num_2M_pages--;
		else if (shift == PUD_SHIFT)
			kvm->stat.num_1G_pages--;
	}

	gpa &= ~(page_size - 1);
	hpa = old & PTE_RPN_MASK;
	kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);

	if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
		kvmppc_update_dirty_map(memslot, gfn, page_size);
}
/*
 * kvmppc_free_p?d are used to free existing page tables, and recursively
 * descend and clear and free children.
 * Callers are responsible for flushing the PWC.
 *
 * When page tables are being unmapped/freed as part of the page fault path
 * (full == false), ptes are not expected. There is code to unmap them
 * and emit a warning if encountered, but there may already be data
 * corruption due to the unexpected mappings.
 */
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
				  unsigned int lpid)
{
	if (full) {
		memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
	} else {
		pte_t *p = pte;
		unsigned long it;

		for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
			if (pte_val(*p) == 0)
				continue;
			WARN_ON_ONCE(1);
			kvmppc_unmap_pte(kvm, p,
					 pte_pfn(*p) << PAGE_SHIFT,
					 PAGE_SHIFT, NULL, lpid);
		}
	}

	kvmppc_pte_free(pte);
}
static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
				  unsigned int lpid)
{
	unsigned long im;
	pmd_t *p = pmd;

	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
		if (!pmd_present(*p))
			continue;
		if (pmd_is_leaf(*p)) {
			if (full) {
				pmd_clear(p);
			} else {
				WARN_ON_ONCE(1);
				kvmppc_unmap_pte(kvm, (pte_t *)p,
						 pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
						 PMD_SHIFT, NULL, lpid);
			}
		} else {
			pte_t *pte;

			pte = pte_offset_map(p, 0);
			kvmppc_unmap_free_pte(kvm, pte, full, lpid);
			pmd_clear(p);
		}
	}
	kvmppc_pmd_free(pmd);
}
static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
				  unsigned int lpid)
{
	unsigned long iu;
	pud_t *p = pud;

	for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
		if (!pud_present(*p))
			continue;
		if (pud_is_leaf(*p)) {
			pud_clear(p);
		} else {
			pmd_t *pmd;

			pmd = pmd_offset(p, 0);
			kvmppc_unmap_free_pmd(kvm, pmd, true, lpid);
			pud_clear(p);
		}
	}
	pud_free(kvm->mm, pud);
}
void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
{
	unsigned long ig;

	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
		pud_t *pud;

		if (!pgd_present(*pgd))
			continue;
		pud = pud_offset(pgd, 0);
		kvmppc_unmap_free_pud(kvm, pud, lpid);
		pgd_clear(pgd);
	}
}

void kvmppc_free_radix(struct kvm *kvm)
{
	if (kvm->arch.pgtable) {
		kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable,
					  kvm->arch.lpid);
		pgd_free(kvm->mm, kvm->arch.pgtable);
		kvm->arch.pgtable = NULL;
	}
}
static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
					      unsigned long gpa, unsigned int lpid)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);

	/*
	 * Clearing the pmd entry then flushing the PWC ensures that the pte
	 * page will no longer be cached by the MMU, so it can be freed
	 * without flushing the PWC again.
	 */
	pmd_clear(pmd);
	kvmppc_radix_flush_pwc(kvm, lpid);

	kvmppc_unmap_free_pte(kvm, pte, false, lpid);
}

static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
					      unsigned long gpa, unsigned int lpid)
{
	pmd_t *pmd = pmd_offset(pud, 0);

	/*
	 * Clearing the pud entry then flushing the PWC ensures that the pmd
	 * page and any children pte pages will no longer be cached by the
	 * MMU, so they can be freed without flushing the PWC again.
	 */
	pud_clear(pud);
	kvmppc_radix_flush_pwc(kvm, lpid);

	kvmppc_unmap_free_pmd(kvm, pmd, false, lpid);
}
/*
 * There are a number of bits which may differ between different faults to
 * the same partition-scoped entry. The RC bits change in the course of
 * cleaning and aging, and the write bit can change: either the access could
 * have been upgraded, or a read fault could happen concurrently with a
 * write fault that sets those bits first.
 */
#define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))
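/*
 * level says where the new leaf goes: 0 installs a normal PTE at the lowest
 * level, 1 a 2MB leaf at the PMD level, and 2 a 1GB leaf at the PUD level.
 */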
int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
		      unsigned long gpa, unsigned int level,
		      unsigned long mmu_seq, unsigned int lpid,
		      unsigned long *rmapp, struct rmap_nested **n_rmap)
{
	pgd_t *pgd;
	pud_t *pud, *new_pud = NULL;
	pmd_t *pmd, *new_pmd = NULL;
	pte_t *ptep, *new_ptep = NULL;
	int ret;

	/* Traverse the guest's 2nd-level tree, allocate new levels needed */
	pgd = pgtable + pgd_index(gpa);
	pud = NULL;
	if (pgd_present(*pgd))
		pud = pud_offset(pgd, gpa);
	else
		new_pud = pud_alloc_one(kvm->mm, gpa);

	pmd = NULL;
	if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
		pmd = pmd_offset(pud, gpa);
	else if (level <= 1)
		new_pmd = kvmppc_pmd_alloc();

	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
		new_ptep = kvmppc_pte_alloc();

	/* Check if we might have been invalidated; let the guest retry if so */
	spin_lock(&kvm->mmu_lock);
	ret = -EAGAIN;
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	/* Now traverse again under the lock and change the tree */
	ret = -ENOMEM;
	if (pgd_none(*pgd)) {
		if (!new_pud)
			goto out_unlock;
		pgd_populate(kvm->mm, pgd, new_pud);
		new_pud = NULL;
	}
	pud = pud_offset(pgd, gpa);
	if (pud_is_leaf(*pud)) {
		unsigned long hgpa = gpa & PUD_MASK;

		/* Check if we raced and someone else has set the same thing */
		if (level == 2) {
			if (pud_raw(*pud) == pte_raw(pte)) {
				ret = 0;
				goto out_unlock;
			}
			/* Valid 1GB page here already, add our extra bits */
			WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) &
				     PTE_BITS_MUST_MATCH);
			kvmppc_radix_update_pte(kvm, (pte_t *)pud,
						0, pte_val(pte), hgpa, PUD_SHIFT);
			ret = 0;
			goto out_unlock;
		}
		/*
		 * If we raced with another CPU which has just put
		 * a 1GB pte in after we saw a pmd page, try again.
		 */
		if (!new_pmd) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Valid 1GB page here already, remove it */
		kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL,
				 lpid);
	}
	if (level == 2) {
		if (!pud_none(*pud)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.
			 */
			kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
		if (rmapp && n_rmap)
			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
		ret = 0;
		goto out_unlock;
	}
	if (pud_none(*pud)) {
		if (!new_pmd)
			goto out_unlock;
		pud_populate(kvm->mm, pud, new_pmd);
		new_pmd = NULL;
	}
	pmd = pmd_offset(pud, gpa);
	if (pmd_is_leaf(*pmd)) {
		unsigned long lgpa = gpa & PMD_MASK;

		/* Check if we raced and someone else has set the same thing */
		if (level == 1) {
			if (pmd_raw(*pmd) == pte_raw(pte)) {
				ret = 0;
				goto out_unlock;
			}
			/* Valid 2MB page here already, add our extra bits */
			WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
				     PTE_BITS_MUST_MATCH);
			kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
						0, pte_val(pte), lgpa, PMD_SHIFT);
			ret = 0;
			goto out_unlock;
		}

		/*
		 * If we raced with another CPU which has just put
		 * a 2MB pte in after we saw a pte page, try again.
		 */
		if (!new_ptep) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Valid 2MB page here already, remove it */
		kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL,
				 lpid);
	}
	if (level == 1) {
		if (!pmd_none(*pmd)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.
			 */
			kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
		if (rmapp && n_rmap)
			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
		ret = 0;
		goto out_unlock;
	}
	if (pmd_none(*pmd)) {
		if (!new_ptep)
			goto out_unlock;
		pmd_populate(kvm->mm, pmd, new_ptep);
		new_ptep = NULL;
	}
	ptep = pte_offset_kernel(pmd, gpa);
	if (pte_present(*ptep)) {
		/* Check if someone else set the same thing */
		if (pte_raw(*ptep) == pte_raw(pte)) {
			ret = 0;
			goto out_unlock;
		}
		/* Valid page here already, add our extra bits */
		WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) &
			     PTE_BITS_MUST_MATCH);
		kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
		ret = 0;
		goto out_unlock;
	}
	kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
	if (rmapp && n_rmap)
		kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
	ret = 0;

 out_unlock:
	spin_unlock(&kvm->mmu_lock);
	if (new_pud)
		pud_free(kvm->mm, new_pud);
	if (new_pmd)
		kvmppc_pmd_free(new_pmd);
	if (new_ptep)
		kvmppc_pte_free(new_ptep);
	return ret;
}
bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing,
			     unsigned long gpa, unsigned int lpid)
{
	unsigned long pgflags;
	unsigned int shift;
	pte_t *ptep;

	/*
	 * Need to set an R or C bit in the 2nd-level tables;
	 * since we are just helping out the hardware here,
	 * it is sufficient to do what the hardware does.
	 */
	pgflags = _PAGE_ACCESSED;
	if (writing)
		pgflags |= _PAGE_DIRTY;
	/*
	 * We are walking the secondary (partition-scoped) page table here.
	 * We can do this without disabling irq because the Linux MM
	 * subsystem doesn't do THP splits and collapses on this tree.
	 */
	ptep = __find_linux_pte(pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) {
		kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
		return true;
	}
	return false;
}
int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
				   unsigned long gpa,
				   struct kvm_memory_slot *memslot,
				   bool writing, bool kvm_ro,
				   pte_t *inserted_pte, unsigned int *levelp)
{
	struct kvm *kvm = vcpu->kvm;
	struct page *page = NULL;
	unsigned long mmu_seq;
	unsigned long hva, gfn = gpa >> PAGE_SHIFT;
	bool upgrade_write = false;
	bool *upgrade_p = &upgrade_write;
	pte_t pte, *ptep;
	unsigned int shift, level;
	int ret;
	bool large_enable;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/*
	 * Do a fast check first, since __gfn_to_pfn_memslot doesn't
	 * do it with !atomic && !async, which is how we call it.
	 * We always ask for write permission since the common case
	 * is that the page is writable.
	 */
	hva = gfn_to_hva_memslot(memslot, gfn);
	if (!kvm_ro && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
		upgrade_write = true;
	} else {
		unsigned long pfn;

		/* Call KVM generic code to do the slow-path check */
		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
					   writing, upgrade_p);
		if (is_error_noslot_pfn(pfn))
			return -EFAULT;
		page = NULL;
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageReserved(page))
				page = NULL;
		}
	}

	/*
	 * Read the PTE from the process' radix tree and use that
	 * so we get the shift and attribute bits.
	 */
	local_irq_disable();
	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
	/*
	 * If the PTE disappeared temporarily due to a THP
	 * collapse, just return and let the guest try again.
	 */
	if (!ptep) {
		local_irq_enable();
		if (page)
			put_page(page);
		return RESUME_GUEST;
	}
	pte = *ptep;
	local_irq_enable();

	/* If we're logging dirty pages, always map single pages */
	large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);

	/* Get pte level from shift/size */
	if (large_enable && shift == PUD_SHIFT &&
	    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
	    (hva & (PUD_SIZE - PAGE_SIZE))) {
		level = 2;
	} else if (large_enable && shift == PMD_SHIFT &&
		   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
		   (hva & (PMD_SIZE - PAGE_SIZE))) {
		level = 1;
	} else {
		level = 0;
		if (shift > PAGE_SHIFT) {
			/*
			 * If the pte maps more than one page, bring over
			 * bits from the virtual address to get the real
			 * address of the specific single page we want.
			 */
			unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
			pte = __pte(pte_val(pte) | (hva & rpnmask));
		}
	}

	pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
	if (writing || upgrade_write) {
		if (pte_val(pte) & _PAGE_WRITE)
			pte = __pte(pte_val(pte) | _PAGE_DIRTY);
	} else {
		pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
	}

	/* Allocate space in the tree and write the PTE */
	ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
				mmu_seq, kvm->arch.lpid, NULL, NULL);
	if (inserted_pte)
		*inserted_pte = pte;
	if (levelp)
		*levelp = level;

	if (page) {
		if (!ret && (pte_val(pte) & _PAGE_WRITE))
			set_page_dirty_lock(page);
		put_page(page);
	}

	/* Increment number of large pages if we (successfully) inserted one */
	if (!ret) {
		if (level == 1)
			kvm->stat.num_2M_pages++;
		else if (level == 2)
			kvm->stat.num_1G_pages++;
	}

	return ret;
}
int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	bool kvm_ro = false;

	/* Check for unusual errors */
	if (dsisr & DSISR_UNSUPP_MMU) {
		pr_err("KVM: Got unsupported MMU fault\n");
		return -EFAULT;
	}
	if (dsisr & DSISR_BADACCESS) {
		/* Reflect to the guest as DSI */
		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
		return RESUME_GUEST;
	}

	/* Translate the logical address */
	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
	gpa &= ~0xF000000000000000ul;
	gfn = gpa >> PAGE_SHIFT;
	if (!(dsisr & DSISR_PRTABLE_FAULT))
		gpa |= ea & 0xfff;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return kvmppc_send_page_to_uv(kvm, gfn);

	/* Get the corresponding memslot */
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
			     DSISR_SET_RC)) {
			/*
			 * Bad address in guest page table tree, or other
			 * unusual error - reflect it to the guest as DSI.
			 */
			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
			return RESUME_GUEST;
		}
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
	}

	if (memslot->flags & KVM_MEM_READONLY) {
		if (writing) {
			/* give the guest a DSI */
			kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
						       DSISR_PROTFAULT);
			return RESUME_GUEST;
		}
		kvm_ro = true;
	}

	/* Failed to set the reference/change bits */
	if (dsisr & DSISR_SET_RC) {
		spin_lock(&kvm->mmu_lock);
		if (kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable,
					    writing, gpa, kvm->arch.lpid))
			dsisr &= ~DSISR_SET_RC;
		spin_unlock(&kvm->mmu_lock);

		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
			       DSISR_PROTFAULT | DSISR_SET_RC)))
			return RESUME_GUEST;
	}

	/* Try to insert a pte */
	ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
					     kvm_ro, NULL, NULL);

	if (ret == 0 || ret == -EAGAIN)
		ret = RESUME_GUEST;
	return ret;
}
/* Called with kvm->mmu_lock held */
int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		    unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
		uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
		return 0;
	}

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep))
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
				 kvm->arch.lpid);
	return 0;
}
/* Called with kvm->mmu_lock held */
int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		  unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;
	unsigned long old, *rmapp;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ref;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
					      gpa, shift);
		/* XXX need to flush tlb here? */
		/* Also clear bit in ptes in shadow pgtable for nested guests */
		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
					       old & PTE_RPN_MASK,
					       1UL << shift);
		ref = 1;
	}
	return ref;
}
/* Called with kvm->mmu_lock held */
int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		       unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ref;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep))
		ref = 1;
	return ref;
}
/* Returns the number of PAGE_SIZE pages that are dirty */
static int kvm_radix_test_clear_dirty(struct kvm *kvm,
				      struct kvm_memory_slot *memslot, int pagenum)
{
	unsigned long gfn = memslot->base_gfn + pagenum;
	unsigned long gpa = gfn << PAGE_SHIFT;
	pte_t *ptep;
	unsigned int shift;
	int ret = 0;
	unsigned long old, *rmapp;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ret;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
		ret = 1;
		if (shift)
			ret = 1 << (shift - PAGE_SHIFT);
		spin_lock(&kvm->mmu_lock);
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
					      gpa, shift);
		kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
		/* Also clear bit in ptes in shadow pgtable for nested guests */
		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0,
					       old & PTE_RPN_MASK,
					       1UL << shift);
		spin_unlock(&kvm->mmu_lock);
	}
	return ret;
}
long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
				   struct kvm_memory_slot *memslot, unsigned long *map)
{
	unsigned long i, j;
	int npages;

	for (i = 0; i < memslot->npages; i = j) {
		npages = kvm_radix_test_clear_dirty(kvm, memslot, i);

		/*
		 * Note that if npages > 0 then i must be a multiple of npages,
		 * since huge pages are only used to back the guest at guest
		 * real addresses that are a multiple of their size.
		 * Since we have at most one PTE covering any given guest
		 * real address, if npages > 1 we can skip to i + npages.
		 */
		j = i + 1;
		if (npages) {
			set_dirty_bits(map, i, npages);
			j = i + npages;
		}
	}
	return 0;
}
void kvmppc_radix_flush_memslot(struct kvm *kvm,
				const struct kvm_memory_slot *memslot)
{
	unsigned long n;
	pte_t *ptep;
	unsigned long gpa;
	unsigned int shift;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
		kvmppc_uvmem_drop_pages(memslot, kvm, true);

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return;

	gpa = memslot->base_gfn << PAGE_SHIFT;
	spin_lock(&kvm->mmu_lock);
	for (n = memslot->npages; n; --n) {
		ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
		if (ptep && pte_present(*ptep))
			kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
					 kvm->arch.lpid);
		gpa += PAGE_SIZE;
	}
	spin_unlock(&kvm->mmu_lock);
}
static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
				 int psize, int *indexp)
{
	if (!mmu_psize_defs[psize].shift)
		return;
	info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
		(mmu_psize_defs[psize].ap << 29);
	++(*indexp);
}

int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
{
	int i;

	if (!radix_enabled())
		return -EINVAL;
	memset(info, 0, sizeof(*info));

	/* 4k page size */
	info->geometries[0].page_shift = 12;
	info->geometries[0].level_bits[0] = 9;
	for (i = 1; i < 4; ++i)
		info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
	/* 64k page size */
	info->geometries[1].page_shift = 16;
	for (i = 0; i < 4; ++i)
		info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];

	i = 0;
	add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);

	return 0;
}
int kvmppc_init_vm_radix(struct kvm *kvm)
{
	kvm->arch.pgtable = pgd_alloc(kvm->mm);
	if (!kvm->arch.pgtable)
		return -ENOMEM;
	return 0;
}

static void pte_ctor(void *addr)
{
	memset(addr, 0, RADIX_PTE_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, RADIX_PMD_TABLE_SIZE);
}
struct debugfs_radix_state {
	struct kvm	*kvm;
	struct mutex	mutex;
	unsigned long	gpa;
	int		lpid;
	int		chars_left;
	int		buf_index;
	char		buf[128];
	u8		hdr;
};

static int debugfs_radix_open(struct inode *inode, struct file *file)
{
	struct kvm *kvm = inode->i_private;
	struct debugfs_radix_state *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	kvm_get_kvm(kvm);
	p->kvm = kvm;
	mutex_init(&p->mutex);
	file->private_data = p;

	return nonseekable_open(inode, file);
}

static int debugfs_radix_release(struct inode *inode, struct file *file)
{
	struct debugfs_radix_state *p = file->private_data;

	kvm_put_kvm(p->kvm);
	kfree(p);
	return 0;
}
static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
				  size_t len, loff_t *ppos)
{
	struct debugfs_radix_state *p = file->private_data;
	ssize_t ret, r;
	unsigned long n;
	struct kvm *kvm;
	unsigned long gpa;
	pgd_t *pgt;
	struct kvm_nested_guest *nested;
	pgd_t pgd, *pgdp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ptep;
	int shift;
	unsigned long pte;

	kvm = p->kvm;
	if (!kvm_is_radix(kvm))
		return 0;

	ret = mutex_lock_interruptible(&p->mutex);
	if (ret)
		return ret;

	if (p->chars_left) {
		n = p->chars_left;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf + p->buf_index, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index += n;
		buf += n;
		len -= n;
		ret = n;
		if (r) {
			if (!n)
				ret = -EFAULT;
			goto out;
		}
	}

	gpa = p->gpa;
	nested = NULL;
	pgt = NULL;
	while (len != 0 && p->lpid >= 0) {
		if (gpa >= RADIX_PGTABLE_RANGE) {
			gpa = 0;
			pgt = NULL;
			if (nested) {
				kvmhv_put_nested(nested);
				nested = NULL;
			}
			p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid);
			p->hdr = 0;
			if (p->lpid < 0)
				break;
		}
		if (!pgt) {
			if (p->lpid == 0) {
				pgt = kvm->arch.pgtable;
			} else {
				nested = kvmhv_get_nested(kvm, p->lpid, false);
				if (!nested) {
					gpa = RADIX_PGTABLE_RANGE;
					continue;
				}
				pgt = nested->shadow_pgtable;
			}
		}
		n = 0;
		if (!p->hdr) {
			if (p->lpid > 0)
				n = scnprintf(p->buf, sizeof(p->buf),
					      "\nNested LPID %d: ", p->lpid);
			n += scnprintf(p->buf + n, sizeof(p->buf) - n,
				       "pgdir: %lx\n", (unsigned long)pgt);
			p->hdr = 1;
			goto copy;
		}

		pgdp = pgt + pgd_index(gpa);
		pgd = READ_ONCE(*pgdp);
		if (!(pgd_val(pgd) & _PAGE_PRESENT)) {
			gpa = (gpa & PGDIR_MASK) + PGDIR_SIZE;
			continue;
		}

		pudp = pud_offset(&pgd, gpa);
		pud = READ_ONCE(*pudp);
		if (!(pud_val(pud) & _PAGE_PRESENT)) {
			gpa = (gpa & PUD_MASK) + PUD_SIZE;
			continue;
		}
		if (pud_val(pud) & _PAGE_PTE) {
			pte = pud_val(pud);
			shift = PUD_SHIFT;
			goto leaf;
		}

		pmdp = pmd_offset(&pud, gpa);
		pmd = READ_ONCE(*pmdp);
		if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
			gpa = (gpa & PMD_MASK) + PMD_SIZE;
			continue;
		}
		if (pmd_val(pmd) & _PAGE_PTE) {
			pte = pmd_val(pmd);
			shift = PMD_SHIFT;
			goto leaf;
		}

		ptep = pte_offset_kernel(&pmd, gpa);
		pte = pte_val(READ_ONCE(*ptep));
		if (!(pte & _PAGE_PRESENT)) {
			gpa += PAGE_SIZE;
			continue;
		}
		shift = PAGE_SHIFT;
	leaf:
		n = scnprintf(p->buf, sizeof(p->buf),
			      " %lx: %lx %d\n", gpa, pte, shift);
		gpa += 1ul << shift;
	copy:
		p->chars_left = n;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index = n;
		buf += n;
		len -= n;
		ret += n;
		if (r) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
	}
	p->gpa = gpa;
	if (nested)
		kvmhv_put_nested(nested);

 out:
	mutex_unlock(&p->mutex);
	return ret;
}
static ssize_t debugfs_radix_write(struct file *file, const char __user *buf,
				   size_t len, loff_t *ppos)
{
	return -EACCES;
}

static const struct file_operations debugfs_radix_fops = {
	.owner	 = THIS_MODULE,
	.open	 = debugfs_radix_open,
	.release = debugfs_radix_release,
	.read	 = debugfs_radix_read,
	.write	 = debugfs_radix_write,
	.llseek	 = generic_file_llseek,
};

void kvmhv_radix_debugfs_init(struct kvm *kvm)
{
	kvm->arch.radix_dentry = debugfs_create_file("radix", 0400,
						     kvm->arch.debugfs_dir, kvm,
						     &debugfs_radix_fops);
}
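/*
 * The PTE/PMD caches below are created with the table size used as the
 * alignment as well, so each page-table fragment is naturally aligned, as
 * the radix hardware expects for page-table bases.
 */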
int kvmppc_radix_init(void)
{
	unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;

	kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
	if (!kvm_pte_cache)
		return -ENOMEM;

	size = sizeof(void *) << RADIX_PMD_INDEX_SIZE;

	kvm_pmd_cache = kmem_cache_create("kvm-pmd", size, size, 0, pmd_ctor);
	if (!kvm_pmd_cache) {
		kmem_cache_destroy(kvm_pte_cache);
		return -ENOMEM;
	}

	return 0;
}

void kvmppc_radix_exit(void)
{
	kmem_cache_destroy(kvm_pte_cache);
	kmem_cache_destroy(kvm_pmd_cache);
}