// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/debugfs.h>
#include <linux/pgtable.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/ultravisor.h>
#include <asm/kvm_book3s_uvmem.h>
/*
 * Supported radix tree geometry.
 * Like p9, we support either 5 or 9 bits at the first (lowest) level,
 * for a page size of 64k or 4k.
 */
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
					      gva_t eaddr, void *to, void *from,
					      unsigned long n)
{
	int old_pid, old_lpid;
	unsigned long quadrant, ret = n;

	/* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
	if (kvmhv_on_pseries())
		return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
					  (to != NULL) ? __pa(to) : 0,
					  (from != NULL) ? __pa(from) : 0, n);

	from = (void *) (eaddr | (quadrant << 62));
	to = (void *) (eaddr | (quadrant << 62));
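	/*
	 * Note: the effective addresses built above put the quadrant number in
	 * the top two address bits, so the copy below is translated with the
	 * guest's LPID (and, for quadrant 1, the guest's PID) instead of the
	 * host's own context.
	 */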
	/* switch the lpid first to avoid running host with unallocated pid */
	old_lpid = mfspr(SPRN_LPID);
	mtspr(SPRN_LPID, lpid);
	old_pid = mfspr(SPRN_PID);

	ret = copy_from_user_nofault(to, (const void __user *)from, n);
	ret = copy_to_user_nofault((void __user *)to, from, n);

	/* switch the pid first to avoid running host with unallocated pid */
	if (quadrant == 1 && pid != old_pid)
		mtspr(SPRN_PID, old_pid);
	mtspr(SPRN_LPID, old_lpid);

	return ret;
}
EXPORT_SYMBOL_GPL(__kvmhv_copy_tofrom_guest_radix);
static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
					  void *to, void *from, unsigned long n)
{
	int lpid = vcpu->kvm->arch.lpid;
	int pid = vcpu->arch.pid;

	/* This would cause a data segment intr so don't allow the access */
	if (eaddr & (0x3FFUL << 52))
		return -EINVAL;

	/* Should we be using the nested lpid */
	if (vcpu->arch.nested)
		lpid = vcpu->arch.nested->shadow_lpid;

	/* If accessing quadrant 3 then pid is expected to be 0 */
	if (((eaddr >> 62) & 0x3) == 0x3)
		pid = 0;

	eaddr &= ~(0xFFFUL << 52);

	return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n);
}

long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
				 unsigned long n)
{
	long ret;

	ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n);
	if (ret > 0)
		memset(to + (n - ret), 0, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmhv_copy_from_guest_radix);
long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
			       unsigned long n)
{
	return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
}
EXPORT_SYMBOL_GPL(kvmhv_copy_to_guest_radix);
int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
			       struct kvmppc_pte *gpte, u64 root,
			       u64 *pte_ret_p)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long rts, bits, offset, index;

	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
		((root & RTS2_MASK) >> RTS2_SHIFT);
	bits = root & RPDS_MASK;
	base = root & RPDB_MASK;
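	/*
	 * The root doubleword packs the tree geometry: RTS (split across two
	 * fields) gives the size of the translated address space, RPDS the
	 * number of bits resolved at the top level, and RPDB the real address
	 * of the top-level page directory.
	 */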
	offset = rts + 31;

	/* Current implementations only support 52-bit space */
	if (offset != 52)
		return -EINVAL;

	/* Walk each level of the radix tree */
	for (level = 3; level >= 0; --level) {
		/* Check a valid size */
		if (level && bits != p9_supported_radix_bits[level])
			return -EINVAL;
		if (level == 0 && !(bits == 5 || bits == 9))
			return -EINVAL;
		offset -= bits;
		index = (eaddr >> offset) & ((1UL << bits) - 1);
		/* Check that low bits of page table base are zero */
		if (base & ((1UL << (bits + 3)) - 1))
			return -EINVAL;
		/* Read the entry from guest memory */
		addr = base + (index * sizeof(rpte));
		vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
		ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
		pte = __be64_to_cpu(rpte);
		if (!(pte & _PAGE_PRESENT))
			return -ENOENT;
		/* Check if a leaf entry */
		if (pte & _PAGE_PTE)
			break;
		/* Get ready to walk the next level */
		base = pte & RPDB_MASK;
		bits = pte & RPDS_MASK;
	}

	/* Need a leaf at lowest level; 512GB pages not supported */
	if (level < 0 || level == 3)
		return -EINVAL;

	/* We found a valid leaf PTE */
	/* Offset is now log base 2 of the page size */
	gpa = pte & 0x01fffffffffff000ul;
	if (gpa & ((1ul << offset) - 1))
		return -EINVAL;
	gpa |= eaddr & ((1ul << offset) - 1);
	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
		if (offset == mmu_psize_defs[ps].shift)
			break;
	gpte->page_size = ps;
	gpte->page_shift = offset;

	/* Work out permissions */
	gpte->may_read = !!(pte & _PAGE_READ);
	gpte->may_write = !!(pte & _PAGE_WRITE);
	gpte->may_execute = !!(pte & _PAGE_EXEC);

	gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);

	return 0;
}
/*
 * Used to walk a partition or process table radix tree in guest memory
 * Note: We exploit the fact that a partition table and a process
 * table have the same layout, a partition-scoped page table and a
 * process-scoped page table have the same layout, and the 2nd
 * doubleword of a partition table entry has the same layout as
 * the PTCR register.
 */
int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
				     struct kvmppc_pte *gpte, u64 table,
				     int table_index, u64 *pte_ret_p)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long size, ptbl, root;
	struct prtb_entry entry;

	if ((table & PRTS_MASK) > 24)
		return -EINVAL;
	size = 1ul << ((table & PRTS_MASK) + 12);

	/* Is the table big enough to contain this entry? */
	if ((table_index * sizeof(entry)) >= size)
		return -EINVAL;

	/* Read the table to find the root of the radix tree */
	ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
	ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
	if (ret)
		return ret;

	/* Root is stored in the first double word */
	root = be64_to_cpu(entry.prtb0);

	return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p);
}
int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			   struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	/* Work out effective PID */
	switch (eaddr >> 62) {
	case 0:
		pid = vcpu->arch.pid;
		break;
	}

	ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte,
				vcpu->kvm->arch.process_table, pid, &pte);
	if (ret)
		return ret;

	/* Check privilege (applies only to process scoped translations) */
	if (kvmppc_get_msr(vcpu) & MSR_PR) {
		if (pte & _PAGE_PRIVILEGED) {
			gpte->may_execute = 0;
		}
	} else {
		if (!(pte & _PAGE_PRIVILEGED)) {
			/* Check AMR/IAMR to see if strict mode is in force */
			if (vcpu->arch.amr & (1ul << 62))
				gpte->may_read = 0;
			if (vcpu->arch.amr & (1ul << 63))
				gpte->may_write = 0;
			if (vcpu->arch.iamr & (1ul << 62))
				gpte->may_execute = 0;
		}
	}

	return 0;
}
void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
			     unsigned int pshift, unsigned int lpid)
{
	unsigned long psize = PAGE_SIZE;

	if (pshift)
		psize = 1UL << pshift;

	addr &= ~(psize - 1);

	if (!kvmhv_on_pseries()) {
		radix__flush_tlb_lpid_page(lpid, addr, psize);
		return;
	}

	psi = shift_to_mmu_psize(pshift);
	rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
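	/*
	 * rb combines the page-aligned effective address with the
	 * actual-page-size (AP) encoding expected by the TLB-invalidate
	 * hcall issued below.
	 */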
	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
				lpid, rb);
	if (rc)
		pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}
static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
{
	if (!kvmhv_on_pseries()) {
		radix__flush_pwc_lpid(lpid);
		return;
	}

	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
				lpid, TLBIEL_INVAL_SET_LPID);
	if (rc)
		pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
}
static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
					     unsigned long clr, unsigned long set,
					     unsigned long addr, unsigned int shift)
{
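	/*
	 * Atomically clear the @clr bits and set the @set bits in *ptep,
	 * returning the previous PTE value; @addr and @shift are not used
	 * in this path.
	 */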
	return __radix_pte_update(ptep, clr, set);
}
static void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
				    pte_t *ptep, pte_t pte)
{
	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
}
static struct kmem_cache *kvm_pte_cache;
static struct kmem_cache *kvm_pmd_cache;
static pte_t *kvmppc_pte_alloc(void)
{
	pte_t *pte;

	pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
	/* pmd_populate() will only reference _pa(pte). */
	kmemleak_ignore(pte);

	return pte;
}

static void kvmppc_pte_free(pte_t *ptep)
{
	kmem_cache_free(kvm_pte_cache, ptep);
}
static pmd_t *kvmppc_pmd_alloc(void)
{
	pmd_t *pmd;

	pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
	/* pud_populate() will only reference _pa(pmd). */
	kmemleak_ignore(pmd);

	return pmd;
}

static void kvmppc_pmd_free(pmd_t *pmdp)
{
	kmem_cache_free(kvm_pmd_cache, pmdp);
}
/* Called with kvm->mmu_lock held */
void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
		      unsigned int shift,
		      const struct kvm_memory_slot *memslot,
		      unsigned int lpid)
{
	unsigned long gfn = gpa >> PAGE_SHIFT;
	unsigned long page_size = PAGE_SIZE;

	old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
	kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);

	/* The following only applies to L1 entries */
	if (lpid != kvm->arch.lpid)
		return;

	if (!memslot)
		memslot = gfn_to_memslot(kvm, gfn);

	if (shift) {	/* 1GB or 2MB page */
		page_size = 1ul << shift;
		if (shift == PMD_SHIFT)
			kvm->stat.num_2M_pages--;
		else if (shift == PUD_SHIFT)
			kvm->stat.num_1G_pages--;
	}

	gpa &= ~(page_size - 1);
	hpa = old & PTE_RPN_MASK;
	kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);

	if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
		kvmppc_update_dirty_map(memslot, gfn, page_size);
}
/*
 * kvmppc_free_p?d are used to free existing page tables, and recursively
 * descend and clear and free children.
 * Callers are responsible for flushing the PWC.
 *
 * When page tables are being unmapped/freed as part of page fault path
 * (full == false), valid ptes are generally not expected; however, there
 * is one situation where they arise, which is when dirty page logging is
 * turned off for a memslot while the VM is running.  The new memslot
 * becomes visible to page faults before the memslot commit function
 * gets to flush the memslot, which can lead to a 2MB page mapping being
 * installed for a guest physical address where there are already 64kB
 * (or 4kB) mappings (of sub-pages of the same 2MB page).
 */
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
				  unsigned int lpid)
{
	if (full) {
		memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE);
	} else {
		pte_t *p = pte;
		unsigned long it;

		for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
			if (pte_val(*p) == 0)
				continue;
			kvmppc_unmap_pte(kvm, p,
					 pte_pfn(*p) << PAGE_SHIFT,
					 PAGE_SHIFT, NULL, lpid);
		}
	}

	kvmppc_pte_free(pte);
}
static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
				  unsigned int lpid)
{
	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
		if (!pmd_present(*p))
			continue;
		if (pmd_is_leaf(*p)) {
			kvmppc_unmap_pte(kvm, (pte_t *)p,
					 pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
					 PMD_SHIFT, NULL, lpid);
		} else {
			pte = pte_offset_map(p, 0);
			kvmppc_unmap_free_pte(kvm, pte, full, lpid);
		}
	}
	kvmppc_pmd_free(pmd);
}
static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
				  unsigned int lpid)
{
	for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
		if (!pud_present(*p))
			continue;
		if (pud_is_leaf(*p)) {
			pud_clear(p);
		} else {
			pmd = pmd_offset(p, 0);
			kvmppc_unmap_free_pmd(kvm, pmd, true, lpid);
		}
	}
	pud_free(kvm->mm, pud);
}
void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
{
	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
		p4d_t *p4d = p4d_offset(pgd, 0);

		if (!p4d_present(*p4d))
			continue;
		pud = pud_offset(p4d, 0);
		kvmppc_unmap_free_pud(kvm, pud, lpid);
	}
}
void kvmppc_free_radix(struct kvm *kvm)
{
	if (kvm->arch.pgtable) {
		kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable,
					  kvm->arch.lpid);
		pgd_free(kvm->mm, kvm->arch.pgtable);
		kvm->arch.pgtable = NULL;
	}
}
static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
					      unsigned long gpa, unsigned int lpid)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);

	/*
	 * Clearing the pmd entry then flushing the PWC ensures that the pte
	 * page will no longer be cached by the MMU, so it can be freed
	 * without flushing the PWC again.
	 */
	pmd_clear(pmd);
	kvmppc_radix_flush_pwc(kvm, lpid);

	kvmppc_unmap_free_pte(kvm, pte, false, lpid);
}
static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
					      unsigned long gpa, unsigned int lpid)
{
	pmd_t *pmd = pmd_offset(pud, 0);

	/*
	 * Clearing the pud entry then flushing the PWC ensures that the pmd
	 * page and any children pte pages will no longer be cached by the
	 * MMU, so they can be freed without flushing the PWC again.
	 */
	pud_clear(pud);
	kvmppc_radix_flush_pwc(kvm, lpid);

	kvmppc_unmap_free_pmd(kvm, pmd, false, lpid);
}
/*
 * There are a number of bits which may differ between different faults to
 * the same partition scope entry. The RC bits can change in the course of
 * cleaning and aging, and the write bit can change: either the access could
 * have been upgraded, or a read fault could happen concurrently with a
 * write fault that sets those bits first.
 */
#define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))
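/*
 * In other words, everything except the write, dirty (C) and accessed (R)
 * bits must be identical when two faults race to install the same entry.
 */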
int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
		      unsigned long gpa, unsigned int level,
		      unsigned long mmu_seq, unsigned int lpid,
		      unsigned long *rmapp, struct rmap_nested **n_rmap)
{
	pud_t *pud, *new_pud = NULL;
	pmd_t *pmd, *new_pmd = NULL;
	pte_t *ptep, *new_ptep = NULL;

	/* Traverse the guest's 2nd-level tree, allocate new levels needed */
	pgd = pgtable + pgd_index(gpa);
	p4d = p4d_offset(pgd, gpa);
	pud = NULL;
	if (p4d_present(*p4d))
		pud = pud_offset(p4d, gpa);
	else
		new_pud = pud_alloc_one(kvm->mm, gpa);

	pmd = NULL;
	if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
		pmd = pmd_offset(pud, gpa);
	else if (level <= 1)
		new_pmd = kvmppc_pmd_alloc();

	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
		new_ptep = kvmppc_pte_alloc();
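	/*
	 * Any new table levels we might need are allocated above, before
	 * kvm->mmu_lock is taken below, so we never have to sleep while
	 * holding the lock; unused allocations are freed again at the end.
	 */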
	/* Check if we might have been invalidated; let the guest retry if so */
	spin_lock(&kvm->mmu_lock);
	ret = -EAGAIN;
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	/* Now traverse again under the lock and change the tree */
	ret = -ENOMEM;
	if (p4d_none(*p4d)) {
		if (!new_pud)
			goto out_unlock;
		p4d_populate(kvm->mm, p4d, new_pud);
		new_pud = NULL;
	}
	pud = pud_offset(p4d, gpa);
	if (pud_is_leaf(*pud)) {
		unsigned long hgpa = gpa & PUD_MASK;

		/* Check if we raced and someone else has set the same thing */
		if (level == 2) {
			if (pud_raw(*pud) == pte_raw(pte)) {
				ret = 0;
				goto out_unlock;
			}
			/* Valid 1GB page here already, add our extra bits */
			WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) &
				     PTE_BITS_MUST_MATCH);
			kvmppc_radix_update_pte(kvm, (pte_t *)pud,
						0, pte_val(pte), hgpa, PUD_SHIFT);
			ret = 0;
			goto out_unlock;
		}
		/*
		 * If we raced with another CPU which has just put
		 * a 1GB pte in after we saw a pmd page, try again.
		 */
		if (!new_pmd) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Valid 1GB page here already, remove it */
		kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL,
				 lpid);
	}
	if (level == 2) {
		if (!pud_none(*pud)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.
			 */
			kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
		if (rmapp && n_rmap)
			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
		ret = 0;
		goto out_unlock;
	}
	if (pud_none(*pud)) {
		if (!new_pmd)
			goto out_unlock;
		pud_populate(kvm->mm, pud, new_pmd);
		new_pmd = NULL;
	}
	pmd = pmd_offset(pud, gpa);
	if (pmd_is_leaf(*pmd)) {
		unsigned long lgpa = gpa & PMD_MASK;

		/* Check if we raced and someone else has set the same thing */
		if (level == 1) {
			if (pmd_raw(*pmd) == pte_raw(pte)) {
				ret = 0;
				goto out_unlock;
			}
			/* Valid 2MB page here already, add our extra bits */
			WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
				     PTE_BITS_MUST_MATCH);
			kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
						0, pte_val(pte), lgpa, PMD_SHIFT);
			ret = 0;
			goto out_unlock;
		}
		/*
		 * If we raced with another CPU which has just put
		 * a 2MB pte in after we saw a pte page, try again.
		 */
		if (!new_ptep) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Valid 2MB page here already, remove it */
		kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL,
				 lpid);
	}
	if (level == 1) {
		if (!pmd_none(*pmd)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.
			 */
			kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
		if (rmapp && n_rmap)
			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
		ret = 0;
		goto out_unlock;
	}
	if (pmd_none(*pmd)) {
		if (!new_ptep)
			goto out_unlock;
		pmd_populate(kvm->mm, pmd, new_ptep);
		new_ptep = NULL;
	}
	ptep = pte_offset_kernel(pmd, gpa);
	if (pte_present(*ptep)) {
		/* Check if someone else set the same thing */
		if (pte_raw(*ptep) == pte_raw(pte)) {
			ret = 0;
			goto out_unlock;
		}
		/* Valid page here already, add our extra bits */
		WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) &
			     PTE_BITS_MUST_MATCH);
		kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
		ret = 0;
		goto out_unlock;
	}
	kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
	if (rmapp && n_rmap)
		kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
	ret = 0;

 out_unlock:
	spin_unlock(&kvm->mmu_lock);
	if (new_pud)
		pud_free(kvm->mm, new_pud);
	if (new_pmd)
		kvmppc_pmd_free(new_pmd);
	if (new_ptep)
		kvmppc_pte_free(new_ptep);
	return ret;
}
bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,
			     unsigned long gpa, unsigned int lpid)
{
	unsigned long pgflags;

	/*
	 * Need to set an R or C bit in the 2nd-level tables;
	 * since we are just helping out the hardware here,
	 * it is sufficient to do what the hardware does.
	 */
	pgflags = _PAGE_ACCESSED;
	if (writing)
		pgflags |= _PAGE_DIRTY;

	if (nested)
		ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	else
		ptep = find_kvm_secondary_pte(kvm, gpa, &shift);

	if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) {
		kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
		return true;
	}
	return false;
}
int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
				   unsigned long gpa,
				   struct kvm_memory_slot *memslot,
				   bool writing, bool kvm_ro,
				   pte_t *inserted_pte, unsigned int *levelp)
{
	struct kvm *kvm = vcpu->kvm;
	struct page *page = NULL;
	unsigned long mmu_seq;
	unsigned long hva, gfn = gpa >> PAGE_SHIFT;
	bool upgrade_write = false;
	bool *upgrade_p = &upgrade_write;
	unsigned int shift, level;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;

	/*
	 * Do a fast check first, since __gfn_to_pfn_memslot doesn't
	 * do it with !atomic && !async, which is how we call it.
	 * We always ask for write permission since the common case
	 * is that the page is writable.
	 */
	hva = gfn_to_hva_memslot(memslot, gfn);
	if (!kvm_ro && get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
		upgrade_write = true;
	} else {
		/* Call KVM generic code to do the slow-path check */
		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
					   writing, upgrade_p);
		if (is_error_noslot_pfn(pfn))
			return -EFAULT;
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageReserved(page))
				page = NULL;
		}
	}

	/*
	 * Read the PTE from the process' radix tree and use that
	 * so we get the shift and attribute bits.
	 */
	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
	pte = __pte(0);
	if (ptep)
		pte = READ_ONCE(*ptep);
	spin_unlock(&kvm->mmu_lock);
	/*
	 * If the PTE disappeared temporarily due to a THP
	 * collapse, just return and let the guest try again.
	 */
	if (!pte_present(pte)) {
		if (page)
			put_page(page);
		return RESUME_GUEST;
	}

	/* If we're logging dirty pages, always map single pages */
	large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);

	/* Get pte level from shift/size */
	if (large_enable && shift == PUD_SHIFT &&
	    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
	    (hva & (PUD_SIZE - PAGE_SIZE))) {
		level = 2;
	} else if (large_enable && shift == PMD_SHIFT &&
		   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
		   (hva & (PMD_SIZE - PAGE_SIZE))) {
		level = 1;
	} else {
		level = 0;
		if (shift > PAGE_SHIFT) {
			/*
			 * If the pte maps more than one page, bring over
			 * bits from the virtual address to get the real
			 * address of the specific single page we want.
			 */
			unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
			pte = __pte(pte_val(pte) | (hva & rpnmask));
		}
	}

	pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
	if (writing || upgrade_write) {
		if (pte_val(pte) & _PAGE_WRITE)
			pte = __pte(pte_val(pte) | _PAGE_DIRTY);
	} else {
		pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
	}
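	/*
	 * Note: when write access could not be obtained or upgraded above,
	 * the PTE is inserted read-only and clean, so a later store will
	 * fault again and upgrade the mapping at that point.
	 */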
	/* Allocate space in the tree and write the PTE */
	ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
				mmu_seq, kvm->arch.lpid, NULL, NULL);

	if (!ret && (pte_val(pte) & _PAGE_WRITE))
		set_page_dirty_lock(page);

	/* Increment number of large pages if we (successfully) inserted one */
	if (level == 1)
		kvm->stat.num_2M_pages++;
	else if (level == 2)
		kvm->stat.num_1G_pages++;

	return ret;
}
int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
				   unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	bool kvm_ro = false;

	/* Check for unusual errors */
	if (dsisr & DSISR_UNSUPP_MMU) {
		pr_err("KVM: Got unsupported MMU fault\n");
		return -EFAULT;
	}
	if (dsisr & DSISR_BADACCESS) {
		/* Reflect to the guest as DSI */
		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
		return RESUME_GUEST;
	}

	/* Translate the logical address */
	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
	gpa &= ~0xF000000000000000ul;
	gfn = gpa >> PAGE_SHIFT;
	if (!(dsisr & DSISR_PRTABLE_FAULT))
		gpa |= ea & 0xfff;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return kvmppc_send_page_to_uv(kvm, gfn);

	/* Get the corresponding memslot */
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
			     DSISR_SET_RC)) {
			/*
			 * Bad address in guest page table tree, or other
			 * unusual error - reflect it to the guest as DSI.
			 */
			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
			return RESUME_GUEST;
		}
		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
	}

	if (memslot->flags & KVM_MEM_READONLY) {
		if (writing) {
			/* give the guest a DSI */
			kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
						       DSISR_PROTFAULT);
			return RESUME_GUEST;
		}
		kvm_ro = true;
	}

	/* Failed to set the reference/change bits */
	if (dsisr & DSISR_SET_RC) {
		spin_lock(&kvm->mmu_lock);
		if (kvmppc_hv_handle_set_rc(kvm, false, writing,
					    gpa, kvm->arch.lpid))
			dsisr &= ~DSISR_SET_RC;
		spin_unlock(&kvm->mmu_lock);

		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
			       DSISR_PROTFAULT | DSISR_SET_RC)))
			return RESUME_GUEST;
	}

	/* Try to insert a pte */
	ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
					     kvm_ro, NULL, NULL);

	if (ret == 0 || ret == -EAGAIN)
		ret = RESUME_GUEST;
	return ret;
}
/* Called with kvm->mmu_lock held */
int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		    unsigned long gfn)
{
	unsigned long gpa = gfn << PAGE_SHIFT;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
		uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
		return 0;
	}

	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (ptep && pte_present(*ptep))
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
				 kvm->arch.lpid);
	return 0;
}
/* Called with kvm->mmu_lock held */
int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		  unsigned long gfn)
{
	unsigned long gpa = gfn << PAGE_SHIFT;
	int ref = 0;
	unsigned long old, *rmapp;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ref;

	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
					      gpa, shift);
		/* XXX need to flush tlb here? */
		/* Also clear bit in ptes in shadow pgtable for nested guests */
		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
					       old & PTE_RPN_MASK,
					       1UL << shift);
		ref = 1;
	}
	return ref;
}
/* Called with kvm->mmu_lock held */
int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		       unsigned long gfn)
{
	unsigned long gpa = gfn << PAGE_SHIFT;
	int ref = 0;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ref;

	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep))
		ref = 1;
	return ref;
}
/* Returns the number of PAGE_SIZE pages that are dirty */
static int kvm_radix_test_clear_dirty(struct kvm *kvm,
				      struct kvm_memory_slot *memslot, int pagenum)
{
	unsigned long gfn = memslot->base_gfn + pagenum;
	unsigned long gpa = gfn << PAGE_SHIFT;
	int ret = 0;
	unsigned long old, *rmapp;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ret;

	/*
	 * For performance reasons we don't hold kvm->mmu_lock while walking the
	 * partition scoped table.
	 */
	ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift);

	pte = READ_ONCE(*ptep);
	if (pte_present(pte) && pte_dirty(pte)) {
		spin_lock(&kvm->mmu_lock);
		/*
		 * Recheck the pte again
		 */
		if (pte_val(pte) != pte_val(*ptep)) {
			/*
			 * We have KVM_MEM_LOG_DIRTY_PAGES enabled. Hence we can
			 * only find PAGE_SIZE pte entries here. We can continue
			 * to use the pte addr returned by the above page table
			 * walk.
			 */
			if (!pte_present(*ptep) || !pte_dirty(*ptep)) {
				spin_unlock(&kvm->mmu_lock);
				return 0;
			}
		}

		ret = 1;
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
					      gpa, shift);
		kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
		/* Also clear bit in ptes in shadow pgtable for nested guests */
		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0,
					       old & PTE_RPN_MASK,
					       1UL << shift);
		spin_unlock(&kvm->mmu_lock);
	}
	return ret;
}
long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
				   struct kvm_memory_slot *memslot, unsigned long *map)
{
	unsigned long i, j;
	int npages;

	for (i = 0; i < memslot->npages; i = j) {
		npages = kvm_radix_test_clear_dirty(kvm, memslot, i);

		/*
		 * Note that if npages > 0 then i must be a multiple of npages,
		 * since huge pages are only used to back the guest at guest
		 * real addresses that are a multiple of their size.
		 * Since we have at most one PTE covering any given guest
		 * real address, if npages > 1 we can skip to i + npages.
		 */
		j = i + 1;
		if (npages) {
			set_dirty_bits(map, i, npages);
			j = i + npages;
		}
	}
	return 0;
}
void kvmppc_radix_flush_memslot(struct kvm *kvm,
				const struct kvm_memory_slot *memslot)
{
	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
		kvmppc_uvmem_drop_pages(memslot, kvm, true);

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return;

	gpa = memslot->base_gfn << PAGE_SHIFT;
	spin_lock(&kvm->mmu_lock);
	for (n = memslot->npages; n; --n) {
		ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
		if (ptep && pte_present(*ptep))
			kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
					 kvm->arch.lpid);
		gpa += PAGE_SIZE;
	}

	/*
	 * Increase the mmu notifier sequence number to prevent any page
	 * fault that read the memslot earlier from writing a PTE.
	 */
	kvm->mmu_notifier_seq++;
	spin_unlock(&kvm->mmu_lock);
}
static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
				 int psize, int *indexp)
{
	if (!mmu_psize_defs[psize].shift)
		return;
	info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
		(mmu_psize_defs[psize].ap << 29);
	++(*indexp);
}
int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
{
	int i;

	if (!radix_enabled())
		return -EINVAL;
	memset(info, 0, sizeof(*info));

	/* 4k page size */
	info->geometries[0].page_shift = 12;
	info->geometries[0].level_bits[0] = 9;
	for (i = 1; i < 4; ++i)
		info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
	/* 64k page size */
	info->geometries[1].page_shift = 16;
	for (i = 0; i < 4; ++i)
		info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];

	i = 0;
	add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);

	return 0;
}
int kvmppc_init_vm_radix(struct kvm *kvm)
{
	kvm->arch.pgtable = pgd_alloc(kvm->mm);
	if (!kvm->arch.pgtable)
		return -ENOMEM;
	return 0;
}
static void pte_ctor(void *addr)
{
	memset(addr, 0, RADIX_PTE_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, RADIX_PMD_TABLE_SIZE);
}
struct debugfs_radix_state {
static int debugfs_radix_open(struct inode *inode, struct file *file)
{
	struct kvm *kvm = inode->i_private;
	struct debugfs_radix_state *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	kvm_get_kvm(kvm);
	p->kvm = kvm;
	mutex_init(&p->mutex);
	file->private_data = p;

	return nonseekable_open(inode, file);
}
static int debugfs_radix_release(struct inode *inode, struct file *file)
{
	struct debugfs_radix_state *p = file->private_data;

	kvm_put_kvm(p->kvm);
	kfree(p);
	return 0;
}
static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
				  size_t len, loff_t *ppos)
{
	struct debugfs_radix_state *p = file->private_data;
	struct kvm_nested_guest *nested;

	if (!kvm_is_radix(kvm))
		return 0;

	ret = mutex_lock_interruptible(&p->mutex);
	if (ret)
		return ret;

	if (p->chars_left) {
		r = copy_to_user(buf, p->buf + p->buf_index, n);
	}

	while (len != 0 && p->lpid >= 0) {
		if (gpa >= RADIX_PGTABLE_RANGE) {
			kvmhv_put_nested(nested);
			p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid);
		}

		if (p->lpid == 0) {
			pgt = kvm->arch.pgtable;
		} else {
			nested = kvmhv_get_nested(kvm, p->lpid, false);
			if (!nested) {
				gpa = RADIX_PGTABLE_RANGE;
				continue;
			}
			pgt = nested->shadow_pgtable;
		}

		n = scnprintf(p->buf, sizeof(p->buf),
			      "\nNested LPID %d: ", p->lpid);
		n += scnprintf(p->buf + n, sizeof(p->buf) - n,
			       "pgdir: %lx\n", (unsigned long)pgt);

		pgdp = pgt + pgd_index(gpa);
		p4dp = p4d_offset(pgdp, gpa);
		p4d = READ_ONCE(*p4dp);
		if (!(p4d_val(p4d) & _PAGE_PRESENT)) {
			gpa = (gpa & P4D_MASK) + P4D_SIZE;
			continue;
		}

		pudp = pud_offset(&p4d, gpa);
		pud = READ_ONCE(*pudp);
		if (!(pud_val(pud) & _PAGE_PRESENT)) {
			gpa = (gpa & PUD_MASK) + PUD_SIZE;
			continue;
		}
		if (pud_val(pud) & _PAGE_PTE) {
			pte = pud_val(pud);
			shift = PUD_SHIFT;
			goto leaf;
		}

		pmdp = pmd_offset(&pud, gpa);
		pmd = READ_ONCE(*pmdp);
		if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
			gpa = (gpa & PMD_MASK) + PMD_SIZE;
			continue;
		}
		if (pmd_val(pmd) & _PAGE_PTE) {
			pte = pmd_val(pmd);
			shift = PMD_SHIFT;
			goto leaf;
		}

		ptep = pte_offset_kernel(&pmd, gpa);
		pte = pte_val(READ_ONCE(*ptep));
		if (!(pte & _PAGE_PRESENT)) {
			gpa += PAGE_SIZE;
			continue;
		}
		shift = PAGE_SHIFT;

	leaf:
		n = scnprintf(p->buf, sizeof(p->buf),
			      " %lx: %lx %d\n", gpa, pte, shift);
		gpa += 1ul << shift;

		r = copy_to_user(buf, p->buf, n);
	}

	kvmhv_put_nested(nested);

	mutex_unlock(&p->mutex);
	return ret;
}
static ssize_t debugfs_radix_write(struct file *file, const char __user *buf,
				   size_t len, loff_t *ppos)
{
	return -EACCES;
}
static const struct file_operations debugfs_radix_fops = {
	.owner	 = THIS_MODULE,
	.open	 = debugfs_radix_open,
	.release = debugfs_radix_release,
	.read	 = debugfs_radix_read,
	.write	 = debugfs_radix_write,
	.llseek	 = generic_file_llseek,
};
void kvmhv_radix_debugfs_init(struct kvm *kvm)
{
	debugfs_create_file("radix", 0400, kvm->arch.debugfs_dir, kvm,
			    &debugfs_radix_fops);
}
int kvmppc_radix_init(void)
{
	unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;

	kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
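	/*
	 * Note: the table size doubles as the slab alignment above, so each
	 * PTE-level table stays naturally aligned, as the radix MMU expects
	 * for page-directory bases.
	 */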
	if (!kvm_pte_cache)
		return -ENOMEM;

	size = sizeof(void *) << RADIX_PMD_INDEX_SIZE;

	kvm_pmd_cache = kmem_cache_create("kvm-pmd", size, size, 0, pmd_ctor);
	if (!kvm_pmd_cache) {
		kmem_cache_destroy(kvm_pte_cache);
		return -ENOMEM;
	}

	return 0;
}
void kvmppc_radix_exit(void)
{
	kmem_cache_destroy(kvm_pte_cache);
	kmem_cache_destroy(kvm_pmd_cache);
}