// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/debugfs.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/ultravisor.h>
#include <asm/kvm_book3s_uvmem.h>
/*
 * Supported radix tree geometry.
 * Like p9, we support either 5 or 9 bits at the first (lowest) level,
 * for a page size of 64k or 4k.
 */
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
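/*
 * Index 0 is the lowest (PTE) level and index 3 the root level, so a
 * 52-bit effective address decomposes as 13 + 9 + 9 + 9 index bits over
 * a 4k page (12 bits), or 13 + 9 + 9 + 5 index bits over a 64k page
 * (16 bits).
 */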
unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
					      gva_t eaddr, void *to, void *from,
					      unsigned long n)
{
	int uninitialized_var(old_pid), old_lpid;
	unsigned long quadrant, ret = n;
	bool is_load = !!to;

	/* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
	if (kvmhv_on_pseries())
		return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
					  __pa(to), __pa(from), n);

	quadrant = 1;
	if (!pid)
		quadrant = 2;
	if (is_load)
		from = (void *) (eaddr | (quadrant << 62));
	else
		to = (void *) (eaddr | (quadrant << 62));

	preempt_disable();

	/* switch the lpid first to avoid running host with unallocated pid */
	old_lpid = mfspr(SPRN_LPID);
	if (old_lpid != lpid)
		mtspr(SPRN_LPID, lpid);
	if (quadrant == 1) {
		old_pid = mfspr(SPRN_PID);
		if (old_pid != pid)
			mtspr(SPRN_PID, pid);
	}
	isync();

	if (is_load)
		ret = raw_copy_from_user(to, from, n);
	else
		ret = raw_copy_to_user(to, from, n);

	/* switch the pid first to avoid running host with unallocated pid */
	if (quadrant == 1 && pid != old_pid)
		mtspr(SPRN_PID, old_pid);
	if (lpid != old_lpid)
		mtspr(SPRN_LPID, old_lpid);
	isync();

	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(__kvmhv_copy_tofrom_guest_radix);
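/*
 * The helper above relies on ISA 3.0 "quadrant" addressing: with the
 * guest's LPID (and, for quadrant 1, the guest's PID) loaded, effective
 * addresses whose top two bits are 0b01 or 0b10 are translated through
 * the guest's tables, so an ordinary raw_copy_{from,to}_user() reaches
 * guest memory directly.
 */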
static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
					  void *to, void *from, unsigned long n)
{
	int lpid = vcpu->kvm->arch.lpid;
	int pid = vcpu->arch.pid;

	/* This would cause a data segment intr so don't allow the access */
	if (eaddr & (0x3FFUL << 52))
		return -EINVAL;

	/* Should we be using the nested lpid */
	if (vcpu->arch.nested)
		lpid = vcpu->arch.nested->shadow_lpid;

	/* If accessing quadrant 3 then pid is expected to be 0 */
	if (((eaddr >> 62) & 0x3) == 0x3)
		pid = 0;

	eaddr &= ~(0xFFFUL << 52);

	return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n);
}
long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
				 unsigned long n)
{
	long ret;

	ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n);
	if (ret > 0)
		memset(to + (n - ret), 0, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmhv_copy_from_guest_radix);
long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
			       unsigned long n)
{
	return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
}
EXPORT_SYMBOL_GPL(kvmhv_copy_to_guest_radix);
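/*
 * kvmppc_mmu_walk_radix_tree() below walks a guest radix tree by hand:
 * the root doubleword supplies the address-space size (RTS), the size of
 * the top level (RPDS) and the page-directory base (RPDB), and the loop
 * descends from level 3 (root) to level 0, stopping early when it hits a
 * leaf (_PAGE_PTE) entry, which is how 2MB and 1GB pages are represented.
 */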
int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
			       struct kvmppc_pte *gpte, u64 root,
			       u64 *pte_ret_p)
{
	struct kvm *kvm = vcpu->kvm;
	int ret, level, ps;
	unsigned long rts, bits, offset, index;
	u64 pte, base, gpa;
	__be64 rpte;

	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
		((root & RTS2_MASK) >> RTS2_SHIFT);
	bits = root & RPDS_MASK;
	base = root & RPDB_MASK;

	offset = rts + 31;

	/* Current implementations only support 52-bit space */
	if (offset != 52)
		return -EINVAL;

	/* Walk each level of the radix tree */
	for (level = 3; level >= 0; --level) {
		u64 addr;

		/* Check a valid size */
		if (level && bits != p9_supported_radix_bits[level])
			return -EINVAL;
		if (level == 0 && !(bits == 5 || bits == 9))
			return -EINVAL;
		offset -= bits;
		index = (eaddr >> offset) & ((1UL << bits) - 1);
		/* Check that low bits of page table base are zero */
		if (base & ((1UL << (bits + 3)) - 1))
			return -EINVAL;
		/* Read the entry from guest memory */
		addr = base + (index * sizeof(rpte));
		ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
		if (ret) {
			if (pte_ret_p)
				*pte_ret_p = addr;
			return ret;
		}
		pte = __be64_to_cpu(rpte);
		if (!(pte & _PAGE_PRESENT))
			return -ENOENT;
		/* Check if a leaf entry */
		if (pte & _PAGE_PTE)
			break;
		/* Get ready to walk the next level */
		base = pte & RPDB_MASK;
		bits = pte & RPDS_MASK;
	}

	/* Need a leaf at lowest level; 512GB pages not supported */
	if (level < 0 || level == 3)
		return -EINVAL;

	/* We found a valid leaf PTE */
	/* Offset is now log base 2 of the page size */
	gpa = pte & 0x01fffffffffff000ul;
	if (gpa & ((1ul << offset) - 1))
		return -EINVAL;
	gpa |= eaddr & ((1ul << offset) - 1);
	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
		if (offset == mmu_psize_defs[ps].shift)
			break;
	gpte->page_size = ps;
	gpte->page_shift = offset;

	gpte->eaddr = eaddr;
	gpte->raddr = gpa;

	/* Work out permissions */
	gpte->may_read = !!(pte & _PAGE_READ);
	gpte->may_write = !!(pte & _PAGE_WRITE);
	gpte->may_execute = !!(pte & _PAGE_EXEC);

	gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);

	if (pte_ret_p)
		*pte_ret_p = pte;

	return 0;
}
/*
 * Used to walk a partition or process table radix tree in guest memory
 * Note: We exploit the fact that a partition table and a process
 * table have the same layout, a partition-scoped page table and a
 * process-scoped page table have the same layout, and the 2nd
 * doubleword of a partition table entry has the same layout as
 * the PTCR register.
 */
int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
				     struct kvmppc_pte *gpte, u64 table,
				     int table_index, u64 *pte_ret_p)
{
	struct kvm *kvm = vcpu->kvm;
	int ret;
	unsigned long size, ptbl, root;
	struct prtb_entry entry;

	if ((table & PRTS_MASK) > 24)
		return -EINVAL;
	size = 1ul << ((table & PRTS_MASK) + 12);

	/* Is the table big enough to contain this entry? */
	if ((table_index * sizeof(entry)) >= size)
		return -EINVAL;

	/* Read the table to find the root of the radix tree */
	ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
	ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
	if (ret)
		return ret;

	/* Root is stored in the first double word */
	root = be64_to_cpu(entry.prtb0);

	return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p);
}
int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			   struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	u32 pid;
	u64 pte;
	int ret;

	/* Work out effective PID */
	switch (eaddr >> 62) {
	case 0:
		pid = vcpu->arch.pid;
		break;
	case 3:
		pid = 0;
		break;
	default:
		return -EINVAL;
	}

	ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte,
				vcpu->kvm->arch.process_table, pid, &pte);
	if (ret)
		return ret;

	/* Check privilege (applies only to process scoped translations) */
	if (kvmppc_get_msr(vcpu) & MSR_PR) {
		if (pte & _PAGE_PRIVILEGED) {
			gpte->may_read = 0;
			gpte->may_write = 0;
			gpte->may_execute = 0;
		}
	} else {
		if (!(pte & _PAGE_PRIVILEGED)) {
			/* Check AMR/IAMR to see if strict mode is in force */
			if (vcpu->arch.amr & (1ul << 62))
				gpte->may_read = 0;
			if (vcpu->arch.amr & (1ul << 63))
				gpte->may_write = 0;
			if (vcpu->arch.iamr & (1ul << 62))
				gpte->may_execute = 0;
		}
	}

	return 0;
}
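/*
 * The AMR/IAMR checks above look only at the two most-significant bits,
 * i.e. storage key 0: bit 62 is taken as the read-disable bit, bit 63 as
 * the write-disable bit, and IAMR bit 62 as the execute-disable bit.
 */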
void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
			     unsigned int pshift, unsigned int lpid)
{
	unsigned long psize = PAGE_SIZE;
	unsigned long rb;
	int psi;
	long rc;

	if (pshift)
		psize = 1UL << pshift;
	else
		pshift = PAGE_SHIFT;

	addr &= ~(psize - 1);

	if (!kvmhv_on_pseries()) {
		radix__flush_tlb_lpid_page(lpid, addr, psize);
		return;
	}

	psi = shift_to_mmu_psize(pshift);
	rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
				lpid, rb);
	if (rc)
		pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}
static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
{
	long rc;

	if (!kvmhv_on_pseries()) {
		radix__flush_pwc_lpid(lpid);
		return;
	}

	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
				lpid, TLBIEL_INVAL_SET_LPID);
	if (rc)
		pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
}
static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
					     unsigned long clr, unsigned long set,
					     unsigned long addr, unsigned int shift)
{
	return __radix_pte_update(ptep, clr, set);
}
void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
}
static struct kmem_cache *kvm_pte_cache;
static struct kmem_cache *kvm_pmd_cache;

static pte_t *kvmppc_pte_alloc(void)
{
	return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
}

static void kvmppc_pte_free(pte_t *ptep)
{
	kmem_cache_free(kvm_pte_cache, ptep);
}

static pmd_t *kvmppc_pmd_alloc(void)
{
	return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
}

static void kvmppc_pmd_free(pmd_t *pmdp)
{
	kmem_cache_free(kvm_pmd_cache, pmdp);
}
/* Called with kvm->mmu_lock held */
void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
		      unsigned int shift,
		      const struct kvm_memory_slot *memslot,
		      unsigned int lpid)
{
	unsigned long old;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	unsigned long page_size = PAGE_SIZE;
	unsigned long hpa;

	old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
	kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);

	/* The following only applies to L1 entries */
	if (lpid != kvm->arch.lpid)
		return;

	if (!memslot)
		memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot)
		return;

	if (shift) { /* 1GB or 2MB page */
		page_size = 1ul << shift;
		if (shift == PMD_SHIFT)
			kvm->stat.num_2M_pages--;
		else if (shift == PUD_SHIFT)
			kvm->stat.num_1G_pages--;
	}

	gpa &= ~(page_size - 1);
	hpa = old & PTE_RPN_MASK;
	kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);

	if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
		kvmppc_update_dirty_map(memslot, gfn, page_size);
}
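/*
 * Note that the PTE above is torn down and the TLB invalidated before
 * any dirty/reference state is harvested from the old value; the page
 * statistics and dirty-map updates apply only to the L1 guest's own
 * tree, while shadow (nested) trees bail out early.
 */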
/*
 * kvmppc_free_p?d are used to free existing page tables, and recursively
 * descend and clear and free children.
 * Callers are responsible for flushing the PWC.
 *
 * When page tables are being unmapped/freed as part of page fault path
 * (full == false), ptes are not expected. There is code to unmap them
 * and emit a warning if encountered, but there may already be data
 * corruption due to the unexpected mappings.
 */
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
				  unsigned int lpid)
{
	if (full) {
		memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
	} else {
		pte_t *p = pte;
		unsigned long it;

		for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
			if (pte_val(*p) == 0)
				continue;
			WARN_ON_ONCE(1);
			kvmppc_unmap_pte(kvm, p,
					 pte_pfn(*p) << PAGE_SHIFT,
					 PAGE_SHIFT, NULL, lpid);
		}
	}

	kvmppc_pte_free(pte);
}
static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
				  unsigned int lpid)
{
	unsigned long im;
	pmd_t *p = pmd;

	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
		if (!pmd_present(*p))
			continue;
		if (pmd_is_leaf(*p)) {
			if (full) {
				pmd_clear(p);
			} else {
				WARN_ON_ONCE(1);
				kvmppc_unmap_pte(kvm, (pte_t *)p,
					 pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
					 PMD_SHIFT, NULL, lpid);
			}
		} else {
			pte_t *pte;

			pte = pte_offset_map(p, 0);
			kvmppc_unmap_free_pte(kvm, pte, full, lpid);
			pmd_clear(p);
		}
	}
	kvmppc_pmd_free(pmd);
}
static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
				  unsigned int lpid)
{
	unsigned long iu;
	pud_t *p = pud;

	for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
		if (!pud_present(*p))
			continue;
		if (pud_is_leaf(*p)) {
			pud_clear(p);
		} else {
			pmd_t *pmd;

			pmd = pmd_offset(p, 0);
			kvmppc_unmap_free_pmd(kvm, pmd, true, lpid);
			pud_clear(p);
		}
	}
	pud_free(kvm->mm, pud);
}
void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
{
	unsigned long ig;

	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
		pud_t *pud;

		if (!pgd_present(*pgd))
			continue;
		pud = pud_offset(pgd, 0);
		kvmppc_unmap_free_pud(kvm, pud, lpid);
		pgd_clear(pgd);
	}
}
void kvmppc_free_radix(struct kvm *kvm)
{
	if (kvm->arch.pgtable) {
		kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable,
					  kvm->arch.lpid);
		pgd_free(kvm->mm, kvm->arch.pgtable);
		kvm->arch.pgtable = NULL;
	}
}
static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
					      unsigned long gpa, unsigned int lpid)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);

	/*
	 * Clearing the pmd entry then flushing the PWC ensures that the pte
	 * page no longer be cached by the MMU, so can be freed without
	 * flushing the PWC again.
	 */
	pmd_clear(pmd);
	kvmppc_radix_flush_pwc(kvm, lpid);

	kvmppc_unmap_free_pte(kvm, pte, false, lpid);
}
static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
					      unsigned long gpa, unsigned int lpid)
{
	pmd_t *pmd = pmd_offset(pud, 0);

	/*
	 * Clearing the pud entry then flushing the PWC ensures that the pmd
	 * page and any children pte pages will no longer be cached by the MMU,
	 * so can be freed without flushing the PWC again.
	 */
	pud_clear(pud);
	kvmppc_radix_flush_pwc(kvm, lpid);

	kvmppc_unmap_free_pmd(kvm, pmd, false, lpid);
}
/*
 * There are a number of bits which may differ between different faults to
 * the same partition scope entry. RC bits, in the course of cleaning and
 * aging. And the write bit can change, either the access could have been
 * upgraded, or a read fault could happen concurrently with a write fault
 * that sets those bits first.
 */
#define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))
int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
		      unsigned long gpa, unsigned int level,
		      unsigned long mmu_seq, unsigned int lpid,
		      unsigned long *rmapp, struct rmap_nested **n_rmap)
{
	pgd_t *pgd;
	pud_t *pud, *new_pud = NULL;
	pmd_t *pmd, *new_pmd = NULL;
	pte_t *ptep, *new_ptep = NULL;
	int ret;

	/* Traverse the guest's 2nd-level tree, allocate new levels needed */
	pgd = pgtable + pgd_index(gpa);
	pud = NULL;
	if (pgd_present(*pgd))
		pud = pud_offset(pgd, gpa);
	else
		new_pud = pud_alloc_one(kvm->mm, gpa);

	pmd = NULL;
	if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
		pmd = pmd_offset(pud, gpa);
	else if (level <= 1)
		new_pmd = kvmppc_pmd_alloc();

	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
		new_ptep = kvmppc_pte_alloc();

	/* Check if we might have been invalidated; let the guest retry if so */
	spin_lock(&kvm->mmu_lock);
	ret = -EAGAIN;
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	/* Now traverse again under the lock and change the tree */
	ret = -ENOMEM;
	if (pgd_none(*pgd)) {
		if (!new_pud)
			goto out_unlock;
		pgd_populate(kvm->mm, pgd, new_pud);
		new_pud = NULL;
	}
	pud = pud_offset(pgd, gpa);
	if (pud_is_leaf(*pud)) {
		unsigned long hgpa = gpa & PUD_MASK;

		/* Check if we raced and someone else has set the same thing */
		if (level == 2) {
			if (pud_raw(*pud) == pte_raw(pte)) {
				ret = 0;
				goto out_unlock;
			}
			/* Valid 1GB page here already, add our extra bits */
			WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) &
				     PTE_BITS_MUST_MATCH);
			kvmppc_radix_update_pte(kvm, (pte_t *)pud,
						0, pte_val(pte), hgpa, PUD_SHIFT);
			ret = 0;
			goto out_unlock;
		}
		/*
		 * If we raced with another CPU which has just put
		 * a 1GB pte in after we saw a pmd page, try again.
		 */
		if (!new_pmd) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Valid 1GB page here already, remove it */
		kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL,
				 lpid);
	}
	if (level == 2) {
		if (!pud_none(*pud)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.
			 */
			kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
		if (rmapp && n_rmap)
			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
		ret = 0;
		goto out_unlock;
	}
	if (pud_none(*pud)) {
		if (!new_pmd)
			goto out_unlock;
		pud_populate(kvm->mm, pud, new_pmd);
		new_pmd = NULL;
	}
	pmd = pmd_offset(pud, gpa);
	if (pmd_is_leaf(*pmd)) {
		unsigned long lgpa = gpa & PMD_MASK;

		/* Check if we raced and someone else has set the same thing */
		if (level == 1) {
			if (pmd_raw(*pmd) == pte_raw(pte)) {
				ret = 0;
				goto out_unlock;
			}
			/* Valid 2MB page here already, add our extra bits */
			WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
				     PTE_BITS_MUST_MATCH);
			kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
						0, pte_val(pte), lgpa, PMD_SHIFT);
			ret = 0;
			goto out_unlock;
		}
		/*
		 * If we raced with another CPU which has just put
		 * a 2MB pte in after we saw a pte page, try again.
		 */
		if (!new_ptep) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Valid 2MB page here already, remove it */
		kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL,
				 lpid);
	}
	if (level == 1) {
		if (!pmd_none(*pmd)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.
			 */
			kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
		if (rmapp && n_rmap)
			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
		ret = 0;
		goto out_unlock;
	}
	if (pmd_none(*pmd)) {
		if (!new_ptep)
			goto out_unlock;
		pmd_populate(kvm->mm, pmd, new_ptep);
		new_ptep = NULL;
	}
	ptep = pte_offset_kernel(pmd, gpa);
	if (pte_present(*ptep)) {
		/* Check if someone else set the same thing */
		if (pte_raw(*ptep) == pte_raw(pte)) {
			ret = 0;
			goto out_unlock;
		}
		/* Valid page here already, add our extra bits */
		WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) &
			     PTE_BITS_MUST_MATCH);
		kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
		ret = 0;
		goto out_unlock;
	}
	kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
	if (rmapp && n_rmap)
		kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
	ret = 0;

 out_unlock:
	spin_unlock(&kvm->mmu_lock);
	if (new_pud)
		pud_free(kvm->mm, new_pud);
	if (new_pmd)
		kvmppc_pmd_free(new_pmd);
	if (new_ptep)
		kvmppc_pte_free(new_ptep);
	return ret;
}
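/*
 * kvmppc_create_pte() allocates any page-table pages it might need
 * before taking kvm->mmu_lock, re-checks mmu_notifier_retry() under the
 * lock, and frees whatever it did not end up using on the way out; this
 * keeps the critical section free of sleeping allocations.
 */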
bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing,
			     unsigned long gpa, unsigned int lpid)
{
	unsigned long pgflags;
	unsigned int shift;
	pte_t *ptep;

	/*
	 * Need to set an R or C bit in the 2nd-level tables;
	 * since we are just helping out the hardware here,
	 * it is sufficient to do what the hardware does.
	 */
	pgflags = _PAGE_ACCESSED;
	if (writing)
		pgflags |= _PAGE_DIRTY;
	/*
	 * We are walking the secondary (partition-scoped) page table here.
	 * We can do this without disabling irq because the Linux MM
	 * subsystem doesn't do THP splits and collapses on this tree.
	 */
	ptep = __find_linux_pte(pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) {
		kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
		return true;
	}
	return false;
}
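/*
 * kvmppc_hv_handle_set_rc() takes the page table and LPID explicitly
 * rather than using kvm->arch.pgtable directly so that the nested-guest
 * fault path can reuse it on a shadow page table.
 */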
int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
				   unsigned long gpa,
				   struct kvm_memory_slot *memslot,
				   bool writing, bool kvm_ro,
				   pte_t *inserted_pte, unsigned int *levelp)
{
	struct kvm *kvm = vcpu->kvm;
	struct page *page = NULL;
	unsigned long mmu_seq;
	unsigned long hva, gfn = gpa >> PAGE_SHIFT;
	bool upgrade_write = false;
	bool *upgrade_p = &upgrade_write;
	pte_t pte, *ptep;
	unsigned int shift, level;
	bool large_enable;
	int ret;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/*
	 * Do a fast check first, since __gfn_to_pfn_memslot doesn't
	 * do it with !atomic && !async, which is how we call it.
	 * We always ask for write permission since the common case
	 * is that the page is writable.
	 */
	hva = gfn_to_hva_memslot(memslot, gfn);
	if (!kvm_ro && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
		upgrade_write = true;
	} else {
		unsigned long pfn;

		/* Call KVM generic code to do the slow-path check */
		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
					   writing, upgrade_p);
		if (is_error_noslot_pfn(pfn))
			return -EFAULT;
		page = NULL;
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageReserved(page))
				page = NULL;
		}
	}

	/*
	 * Read the PTE from the process' radix tree and use that
	 * so we get the shift and attribute bits.
	 */
	local_irq_disable();
	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
	/*
	 * If the PTE disappeared temporarily due to a THP
	 * collapse, just return and let the guest try again.
	 */
	if (!ptep) {
		local_irq_enable();
		if (page)
			put_page(page);
		return RESUME_GUEST;
	}
	pte = *ptep;
	local_irq_enable();

	/* If we're logging dirty pages, always map single pages */
	large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);

	/* Get pte level from shift/size */
	if (large_enable && shift == PUD_SHIFT &&
	    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
	    (hva & (PUD_SIZE - PAGE_SIZE))) {
		level = 2;
	} else if (large_enable && shift == PMD_SHIFT &&
		   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
		   (hva & (PMD_SIZE - PAGE_SIZE))) {
		level = 1;
	} else {
		level = 0;
		if (shift > PAGE_SHIFT) {
			/*
			 * If the pte maps more than one page, bring over
			 * bits from the virtual address to get the real
			 * address of the specific single page we want.
			 */
			unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
			pte = __pte(pte_val(pte) | (hva & rpnmask));
		}
	}

	pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
	if (writing || upgrade_write) {
		if (pte_val(pte) & _PAGE_WRITE)
			pte = __pte(pte_val(pte) | _PAGE_DIRTY);
	} else {
		pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
	}

	/* Allocate space in the tree and write the PTE */
	ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
				mmu_seq, kvm->arch.lpid, NULL, NULL);
	if (inserted_pte)
		*inserted_pte = pte;
	if (levelp)
		*levelp = level;

	if (page) {
		if (!ret && (pte_val(pte) & _PAGE_WRITE))
			set_page_dirty_lock(page);
		put_page(page);
	}

	/* Increment number of large pages if we (successfully) inserted one */
	if (!ret) {
		if (level == 1)
			kvm->stat.num_2M_pages++;
		else if (level == 2)
			kvm->stat.num_1G_pages++;
	}

	return ret;
}
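/*
 * The fast __get_user_pages_fast() probe above is only an optimisation:
 * if it succeeds, the mapping is known writable and the guest PTE can be
 * installed with write permission immediately; otherwise the generic
 * slow path decides. Dirty-page logging additionally forces the mapping
 * down to single pages so dirty tracking stays precise.
 */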
int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	bool kvm_ro = false;

	/* Check for unusual errors */
	if (dsisr & DSISR_UNSUPP_MMU) {
		pr_err("KVM: Got unsupported MMU fault\n");
		return -EFAULT;
	}
	if (dsisr & DSISR_BADACCESS) {
		/* Reflect to the guest as DSI */
		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
		return RESUME_GUEST;
	}

	/* Translate the logical address */
	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
	gpa &= ~0xF000000000000000ul;
	gfn = gpa >> PAGE_SHIFT;
	if (!(dsisr & DSISR_PRTABLE_FAULT))
		gpa |= ea & 0xfff;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return kvmppc_send_page_to_uv(kvm, gfn);

	/* Get the corresponding memslot */
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
			     DSISR_SET_RC)) {
			/*
			 * Bad address in guest page table tree, or other
			 * unusual error - reflect it to the guest as DSI.
			 */
			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
			return RESUME_GUEST;
		}
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
	}

	if (memslot->flags & KVM_MEM_READONLY) {
		if (writing) {
			/* give the guest a DSI */
			kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
						       DSISR_PROTFAULT);
			return RESUME_GUEST;
		}
		kvm_ro = true;
	}

	/* Failed to set the reference/change bits */
	if (dsisr & DSISR_SET_RC) {
		spin_lock(&kvm->mmu_lock);
		if (kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable,
					    writing, gpa, kvm->arch.lpid))
			dsisr &= ~DSISR_SET_RC;
		spin_unlock(&kvm->mmu_lock);

		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
			       DSISR_PROTFAULT | DSISR_SET_RC)))
			return RESUME_GUEST;
	}

	/* Try to insert a pte */
	ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
					     kvm_ro, NULL, NULL);

	if (ret == 0 || ret == -EAGAIN)
		ret = RESUME_GUEST;
	return ret;
}
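/*
 * A DSISR_SET_RC fault can often be resolved just by setting the R/C
 * bits in the existing partition-scoped PTE; only if other fault bits
 * remain does the code above fall through to instantiating a new PTE.
 */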
/* Called with kvm->mmu_lock held */
int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		    unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
		uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
		return 0;
	}

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep))
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
				 kvm->arch.lpid);
	return 0;
}
/* Called with kvm->mmu_lock held */
int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		  unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;
	unsigned long old, *rmapp;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ref;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
					      gpa, shift);
		/* XXX need to flush tlb here? */
		/* Also clear bit in ptes in shadow pgtable for nested guests */
		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
					       old & PTE_RPN_MASK,
					       1UL << shift);
		ref = 1;
	}
	return ref;
}
/* Called with kvm->mmu_lock held */
int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		       unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ref;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep))
		ref = 1;
	return ref;
}
/* Returns the number of PAGE_SIZE pages that are dirty */
static int kvm_radix_test_clear_dirty(struct kvm *kvm,
				struct kvm_memory_slot *memslot, int pagenum)
{
	unsigned long gfn = memslot->base_gfn + pagenum;
	unsigned long gpa = gfn << PAGE_SHIFT;
	pte_t *ptep;
	unsigned int shift;
	int ret = 0;
	unsigned long old, *rmapp;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ret;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
		ret = 1;
		if (shift)
			ret = 1 << (shift - PAGE_SHIFT);
		spin_lock(&kvm->mmu_lock);
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
					      gpa, shift);
		kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
		/* Also clear bit in ptes in shadow pgtable for nested guests */
		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0,
					       old & PTE_RPN_MASK,
					       1UL << shift);
		spin_unlock(&kvm->mmu_lock);
	}
	return ret;
}
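/*
 * kvm_radix_test_clear_dirty() reports dirtiness in units of PAGE_SIZE,
 * so a dirty 2MB mapping with a 64k base page size returns 32 and the
 * dirty-log loop below can skip ahead by that many pages at once.
 */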
long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map)
{
	unsigned long i, j;
	int npages;

	for (i = 0; i < memslot->npages; i = j) {
		npages = kvm_radix_test_clear_dirty(kvm, memslot, i);

		/*
		 * Note that if npages > 0 then i must be a multiple of npages,
		 * since huge pages are only used to back the guest at guest
		 * real addresses that are a multiple of their size.
		 * Since we have at most one PTE covering any given guest
		 * real address, if npages > 1 we can skip to i + npages.
		 */
		j = i + 1;
		if (npages) {
			set_dirty_bits(map, i, npages);
			j = i + npages;
		}
	}
	return 0;
}
void kvmppc_radix_flush_memslot(struct kvm *kvm,
				const struct kvm_memory_slot *memslot)
{
	unsigned long n;
	pte_t *ptep;
	unsigned long gpa;
	unsigned int shift;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
		kvmppc_uvmem_drop_pages(memslot, kvm, true);

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return;

	gpa = memslot->base_gfn << PAGE_SHIFT;
	spin_lock(&kvm->mmu_lock);
	for (n = memslot->npages; n; --n) {
		ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
		if (ptep && pte_present(*ptep))
			kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
					 kvm->arch.lpid);
		gpa += PAGE_SIZE;
	}
	spin_unlock(&kvm->mmu_lock);
}
static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
				 int psize, int *indexp)
{
	if (!mmu_psize_defs[psize].shift)
		return;
	info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
		(mmu_psize_defs[psize].ap << 29);
	++(*indexp);
}
int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
{
	int i;

	if (!radix_enabled())
		return -EINVAL;
	memset(info, 0, sizeof(*info));

	/* 4k page size */
	info->geometries[0].page_shift = 12;
	info->geometries[0].level_bits[0] = 9;
	for (i = 1; i < 4; ++i)
		info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
	/* 64k page size */
	info->geometries[1].page_shift = 16;
	for (i = 0; i < 4; ++i)
		info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];

	i = 0;
	add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);

	return 0;
}
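/*
 * Each AP encoding above packs the page shift in the low bits and the
 * hardware "actual page size" field shifted up by 29, matching the
 * format userspace expects from the KVM_PPC_GET_RMMU_INFO ioctl.
 */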
int kvmppc_init_vm_radix(struct kvm *kvm)
{
	kvm->arch.pgtable = pgd_alloc(kvm->mm);
	if (!kvm->arch.pgtable)
		return -ENOMEM;
	return 0;
}
static void pte_ctor(void *addr)
{
	memset(addr, 0, RADIX_PTE_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, RADIX_PMD_TABLE_SIZE);
}
struct debugfs_radix_state {
	struct kvm	*kvm;
	struct mutex	mutex;
	unsigned long	gpa;
	int		lpid;
	int		chars_left;
	int		buf_index;
	char		buf[128];
	u8		hdr;
};
static int debugfs_radix_open(struct inode *inode, struct file *file)
{
	struct kvm *kvm = inode->i_private;
	struct debugfs_radix_state *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	kvm_get_kvm(kvm);
	p->kvm = kvm;
	mutex_init(&p->mutex);
	file->private_data = p;

	return nonseekable_open(inode, file);
}
static int debugfs_radix_release(struct inode *inode, struct file *file)
{
	struct debugfs_radix_state *p = file->private_data;

	kvm_put_kvm(p->kvm);
	kfree(p);
	return 0;
}
static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
				  size_t len, loff_t *ppos)
{
	struct debugfs_radix_state *p = file->private_data;
	ssize_t ret, r;
	unsigned long n;
	struct kvm *kvm;
	unsigned long gpa;
	pgd_t *pgt;
	struct kvm_nested_guest *nested;
	pgd_t pgd, *pgdp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ptep;
	int shift;
	unsigned long pte;

	kvm = p->kvm;
	if (!kvm_is_radix(kvm))
		return 0;

	ret = mutex_lock_interruptible(&p->mutex);
	if (ret)
		return ret;

	if (p->chars_left) {
		n = p->chars_left;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf + p->buf_index, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index += n;
		buf += n;
		len -= n;
		ret = n;
		if (r) {
			if (!n)
				ret = -EFAULT;
			goto out;
		}
	}

	gpa = p->gpa;
	nested = NULL;
	pgt = NULL;
	while (len != 0 && p->lpid >= 0) {
		if (gpa >= RADIX_PGTABLE_RANGE) {
			gpa = 0;
			pgt = NULL;
			if (nested) {
				kvmhv_put_nested(nested);
				nested = NULL;
			}
			p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid);
			p->hdr = 0;
			if (p->lpid < 0)
				break;
		}
		if (!pgt) {
			if (p->lpid == 0) {
				pgt = kvm->arch.pgtable;
			} else {
				nested = kvmhv_get_nested(kvm, p->lpid, false);
				if (!nested) {
					gpa = RADIX_PGTABLE_RANGE;
					continue;
				}
				pgt = nested->shadow_pgtable;
			}
		}
		n = 0;
		if (!p->hdr) {
			if (p->lpid > 0)
				n = scnprintf(p->buf, sizeof(p->buf),
					      "\nNested LPID %d: ", p->lpid);
			n += scnprintf(p->buf + n, sizeof(p->buf) - n,
				      "pgdir: %lx\n", (unsigned long)pgt);
			p->hdr = 1;
			goto copy;
		}

		pgdp = pgt + pgd_index(gpa);
		pgd = READ_ONCE(*pgdp);
		if (!(pgd_val(pgd) & _PAGE_PRESENT)) {
			gpa = (gpa & PGDIR_MASK) + PGDIR_SIZE;
			continue;
		}

		pudp = pud_offset(&pgd, gpa);
		pud = READ_ONCE(*pudp);
		if (!(pud_val(pud) & _PAGE_PRESENT)) {
			gpa = (gpa & PUD_MASK) + PUD_SIZE;
			continue;
		}
		if (pud_val(pud) & _PAGE_PTE) {
			pte = pud_val(pud);
			shift = PUD_SHIFT;
			goto leaf;
		}

		pmdp = pmd_offset(&pud, gpa);
		pmd = READ_ONCE(*pmdp);
		if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
			gpa = (gpa & PMD_MASK) + PMD_SIZE;
			continue;
		}
		if (pmd_val(pmd) & _PAGE_PTE) {
			pte = pmd_val(pmd);
			shift = PMD_SHIFT;
			goto leaf;
		}

		ptep = pte_offset_kernel(&pmd, gpa);
		pte = pte_val(READ_ONCE(*ptep));
		if (!(pte & _PAGE_PRESENT)) {
			gpa += PAGE_SIZE;
			continue;
		}
		shift = PAGE_SHIFT;
	leaf:
		n = scnprintf(p->buf, sizeof(p->buf),
			      " %lx: %lx %d\n", gpa, pte, shift);
		gpa += 1ul << shift;
	copy:
		p->chars_left = n;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index = n;
		buf += n;
		len -= n;
		ret += n;
		if (r) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
	}
	p->gpa = gpa;
	if (nested)
		kvmhv_put_nested(nested);

 out:
	mutex_unlock(&p->mutex);
	return ret;
}
static ssize_t debugfs_radix_write(struct file *file, const char __user *buf,
				   size_t len, loff_t *ppos)
{
	return -EACCES;
}
static const struct file_operations debugfs_radix_fops = {
	.owner	 = THIS_MODULE,
	.open	 = debugfs_radix_open,
	.release = debugfs_radix_release,
	.read	 = debugfs_radix_read,
	.write	 = debugfs_radix_write,
	.llseek	 = generic_file_llseek,
};
void kvmhv_radix_debugfs_init(struct kvm *kvm)
{
	kvm->arch.radix_dentry = debugfs_create_file("radix", 0400,
						     kvm->arch.debugfs_dir, kvm,
						     &debugfs_radix_fops);
}
int kvmppc_radix_init(void)
{
	unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;

	kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
	if (!kvm_pte_cache)
		return -ENOMEM;

	size = sizeof(void *) << RADIX_PMD_INDEX_SIZE;

	kvm_pmd_cache = kmem_cache_create("kvm-pmd", size, size, 0, pmd_ctor);
	if (!kvm_pmd_cache) {
		kmem_cache_destroy(kvm_pte_cache);
		return -ENOMEM;
	}

	return 0;
}
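/*
 * Both caches are created with their alignment equal to their size, so
 * every PTE/PMD page handed out is naturally aligned and its low bits
 * are zero, as the hardware requires of the RPDB field in the parent
 * entry.
 */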
void kvmppc_radix_exit(void)
{
	kmem_cache_destroy(kvm_pte_cache);
	kmem_cache_destroy(kvm_pmd_cache);
}