// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/stringify.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition) ({                           \
        static bool __section(.data.unlikely) __warned;         \
        int __ret_warn_once = !!(condition);                    \
                                                                \
        if (unlikely(__ret_warn_once && !__warned)) {           \
                __warned = true;                                \
                pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",      \
                                __stringify(condition),         \
                                __func__, __LINE__);            \
        }                                                       \
        unlikely(__ret_warn_once);                              \
})

#else

#define WARN_ON_ONCE_RM(condition) ({                           \
        int __ret_warn_on = !!(condition);                      \
        unlikely(__ret_warn_on);                                \
})

#endif

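/*
 * Note: like WARN_ON_ONCE(), this macro evaluates to the condition so it can
 * gate an early bail-out, e.g. "if (WARN_ON_ONCE_RM(!rmap)) return H_TOO_HARD;"
 * below. It stays real-mode safe by printing via pr_err() only and skipping
 * dump_stack().
 */
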
/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 *          mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
                unsigned long liobn)
{
        struct kvmppc_spapr_tce_table *stt;

        list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
                if (stt->liobn == liobn)
                        return stt;

        return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

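/*
 * Note: the lockless list walk above is what makes this callable from real
 * mode, where taking host locks is not an option; it presumes entries are
 * only added to and removed from kvm->arch.spapr_tce_tables with RCU-style
 * publication on the table creation/release paths.
 */
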
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
                unsigned long *ua, unsigned long **prmap)
{
        unsigned long gfn = tce >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot;

        memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
        if (!memslot)
                return -EINVAL;

        *ua = __gfn_to_hva_memslot(memslot, gfn) |
                (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

        if (prmap)
                *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];

        return 0;
}

/*
 * Validates a TCE address.
 * At the moment only the flags and the page mask are validated.
 * As the host kernel does not access those addresses (it just puts them
 * into the table and user space is supposed to process them), we can skip
 * checking other things (such as whether the TCE is a guest RAM address or
 * whether the page was actually allocated).
 */
static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long tce)
{
        unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
        enum dma_data_direction dir = iommu_tce_direction(tce);
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long ua = 0;

        /* Allow userspace to poison TCE table */
        if (dir == DMA_NONE)
                return H_SUCCESS;

        if (iommu_tce_check_gpa(stt->page_shift, gpa))
                return H_PARAMETER;

        if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL))
                return H_TOO_HARD;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long hpa = 0;
                struct mm_iommu_table_group_mem_t *mem;
                long shift = stit->tbl->it_page_shift;

                mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
                if (!mem)
                        return H_TOO_HARD;

                if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))
                        return H_TOO_HARD;
        }

        return H_SUCCESS;
}

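/*
 * A worked example of the TCE layout assumed above (bit values per
 * asm/tce.h): tce = 0x4567A003 carries gpa 0x4567A000 with TCE_PCI_READ
 * (0x1) and TCE_PCI_WRITE (0x2) both set, which iommu_tce_direction()
 * reports as DMA_BIDIRECTIONAL; a tce with neither bit set is the
 * "poison" DMA_NONE case accepted unchecked above.
 */
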
/* Note on the use of page_address() in real mode,
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))), a purely arithmetic
 * operation that does not access the page struct.
 *
 * Theoretically page_address() could be defined differently
 * but either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
        return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Cannot fail, so kvmppc_rm_tce_validate must be called before it.
 */
static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
                unsigned long idx, unsigned long tce)
{
        struct page *page;
        u64 *tbl;

        idx -= stt->offset;
        page = stt->pages[idx / TCES_PER_PAGE];
        /*
         * page must not be NULL in real mode,
         * kvmppc_rm_ioba_validate() must have taken care of this.
         */
        WARN_ON_ONCE_RM(!page);
        tbl = kvmppc_page_address(page);

        tbl[idx % TCES_PER_PAGE] = tce;
}

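/*
 * A worked example of the indexing above, assuming 64K host pages so
 * TCES_PER_PAGE = PAGE_SIZE / sizeof(u64) = 8192: after the offset
 * adjustment, idx = 10000 stores into stt->pages[1] at slot 1808.
 */
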
/*
 * TCE pages are allocated lazily by the virtual mode kvmppc_tce_put();
 * that allocation cannot be done in real mode.
 * Check if kvmppc_rm_tce_put() can succeed in real mode, i.e. a TCE page is
 * allocated or not required (when clearing a tce entry).
 */
static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long ioba, unsigned long npages, bool clearing)
{
        unsigned long i, idx, sttpage, sttpages;
        unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages);

        if (ret)
                return ret;
        /*
         * clearing==true says kvmppc_rm_tce_put won't be allocating pages
         * for empty tces.
         */
        if (clearing)
                return H_SUCCESS;

        idx = (ioba >> stt->page_shift) - stt->offset;
        sttpage = idx / TCES_PER_PAGE;
        sttpages = _ALIGN_UP(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
                        TCES_PER_PAGE;
        for (i = sttpage; i < sttpage + sttpages; ++i)
                if (!stt->pages[i])
                        return H_TOO_HARD;

        return H_SUCCESS;
}

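/*
 * A worked example, again assuming 64K host pages so TCES_PER_PAGE = 8192:
 * idx = 8000 with npages = 400 gives sttpage = 0 and
 * sttpages = _ALIGN_UP(8000 + 400, 8192) / 8192 = 2, so backing pages 0
 * and 1 must both already be allocated for real mode to proceed.
 */
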
static long iommu_tce_xchg_no_kill_rm(struct mm_struct *mm,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long *hpa,
                enum dma_data_direction *direction)
{
        long ret;

        ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true);

        if (!ret && ((*direction == DMA_FROM_DEVICE) ||
                        (*direction == DMA_BIDIRECTIONAL))) {
                __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
                /*
                 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
                 * calling this, so we still get here a valid UA.
                 */
                if (pua && *pua)
                        mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
        }

        return ret;
}

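/*
 * Note on the no-kill/kill split: xchg_no_kill() updates a TCE but leaves the
 * hardware TCE cache invalidation to a later iommu_tce_kill_rm() call, which
 * lets the H_PUT_TCE_INDIRECT/H_STUFF_TCE handlers below batch a single
 * invalidation over a whole range of entries.
 */
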
extern void iommu_tce_kill_rm(struct iommu_table *tbl,
                unsigned long entry, unsigned long pages)
{
        if (tbl->it_ops->tce_kill)
                tbl->it_ops->tce_kill(tbl, entry, pages, true);
}

static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long hpa = 0;
        enum dma_data_direction dir = DMA_NONE;

        iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
}

static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        const unsigned long pgsize = 1ULL << tbl->it_page_shift;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
        if (!mem)
                return H_TOO_HARD;

        mm_iommu_mapped_dec(mem);

        *pua = cpu_to_be64(0);

        return H_SUCCESS;
}

static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        enum dma_data_direction dir = DMA_NONE;
        unsigned long hpa = 0;
        long ret;

        if (iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir))
                /*
                 * real mode xchg can fail if struct page crosses
                 * a page boundary
                 */
                return H_TOO_HARD;

        if (dir == DMA_NONE)
                return H_SUCCESS;

        ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
        if (ret)
                iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);

        return ret;
}

static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long i, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0; i < subpages; ++i) {
                ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}

static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        long ret;
        unsigned long hpa = 0;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
        struct mm_iommu_table_group_mem_t *mem;

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
        if (!mem)
                return H_TOO_HARD;

        if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
                        &hpa)))
                return H_TOO_HARD;

        if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
                return H_TOO_HARD;

        ret = iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
        if (ret) {
                mm_iommu_mapped_dec(mem);
                /*
                 * real mode xchg can fail if struct page crosses
                 * a page boundary
                 */
                return H_TOO_HARD;
        }

        if (dir != DMA_NONE)
                kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

        *pua = cpu_to_be64(ua);

        return 0;
}

static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        unsigned long i, pgoff, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0, pgoff = 0; i < subpages;
                        ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

                ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
                                io_entry + i, ua + pgoff, dir);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}

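/*
 * A worked example of the fan-out above: a 16MB guest IOMMU page
 * (stt->page_shift = 24) backed by a 64K hardware table (it_page_shift = 16)
 * gives subpages = 1 << (24 - 16) = 256, so one guest TCE is mapped as 256
 * consecutive hardware TCEs starting at io_entry = entry * 256.
 */
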
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                unsigned long ioba, unsigned long tce)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret;
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long entry, ua = 0;
        enum dma_data_direction dir;

        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /*             liobn, ioba, tce); */

        /* For radix, we might be in virtual mode, so punt */
        if (kvm_is_radix(vcpu->kvm))
                return H_TOO_HARD;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_rm_ioba_validate(stt, ioba, 1, tce == 0);
        if (ret != H_SUCCESS)
                return ret;

        ret = kvmppc_rm_tce_validate(stt, tce);
        if (ret != H_SUCCESS)
                return ret;

        dir = iommu_tce_direction(tce);
        if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
                return H_PARAMETER;

        entry = ioba >> stt->page_shift;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                if (dir == DMA_NONE)
                        ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry);
                else
                        ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry, ua, dir);

                iommu_tce_kill_rm(stit->tbl, entry, 1);

                if (ret != H_SUCCESS) {
                        kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
                        return ret;
                }
        }

        kvmppc_rm_tce_put(stt, entry, tce);

        return H_SUCCESS;
}

static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
                unsigned long ua, unsigned long *phpa)
{
        pte_t *ptep, pte;
        unsigned shift = 0;

        /*
         * Called in real mode with MSR_EE = 0. We are safe here.
         * It is ok to do the lookup with arch.pgdir here, because
         * we are doing this on secondary cpus and current task there
         * is not the hypervisor. Also this is safe against THP in the
         * host, because an IPI to primary thread will wait for the secondary
         * to exit which will again result in the below page table walk
         * to finish.
         */
        ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
        if (!ptep || !pte_present(*ptep))
                return -ENXIO;
        pte = *ptep;

        if (!shift)
                shift = PAGE_SHIFT;

        /* Avoid handling anything potentially complicated in realmode */
        if (shift > PAGE_SHIFT)
                return -EAGAIN;

        if (!pte_young(pte))
                return -EAGAIN;

        *phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
                        (ua & ~PAGE_MASK);

        return 0;
}

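/*
 * Note: refusing huge mappings (shift > PAGE_SHIFT) and pages without the
 * referenced bit set (!pte_young()) keeps anything that might require
 * updating PTEs out of real mode; the resulting error makes the caller
 * return H_TOO_HARD so the hcall is retried in virtual mode.
 */
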
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret = H_SUCCESS;
        unsigned long tces, entry, ua = 0;
        unsigned long *rmap = NULL;
        bool prereg = false;
        struct kvmppc_spapr_tce_iommu_table *stit;

        /* For radix, we might be in virtual mode, so punt */
        if (kvm_is_radix(vcpu->kvm))
                return H_TOO_HARD;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        entry = ioba >> stt->page_shift;
        /*
         * The spec says that the maximum size of the list is 512 TCEs,
         * so the whole table addressed resides in a 4K page.
         */
        if (npages > 512)
                return H_PARAMETER;

        if (tce_list & (SZ_4K - 1))
                return H_PARAMETER;

        ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false);
        if (ret != H_SUCCESS)
                return ret;

        if (mm_iommu_preregistered(vcpu->kvm->mm)) {
                /*
                 * We get here if guest memory was pre-registered, which
                 * is normally the VFIO case, and gpa->hpa translation does
                 * not depend on the HPT.
                 */
                struct mm_iommu_table_group_mem_t *mem;

                if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
                        return H_TOO_HARD;

                mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
                if (mem)
                        prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
                                        IOMMU_PAGE_SHIFT_4K, &tces) == 0;
        }

        if (!prereg) {
                /*
                 * This is usually a case of a guest with emulated devices only
                 * when the TCE list is not in preregistered memory.
                 * We do not require memory to be preregistered in this case,
                 * so lock rmap and do __find_linux_pte_or_hugepte().
                 */
                if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
                        return H_TOO_HARD;

                rmap = (void *) vmalloc_to_phys(rmap);
                if (WARN_ON_ONCE_RM(!rmap))
                        return H_TOO_HARD;

                /*
                 * Synchronize with the MMU notifier callbacks in
                 * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
                 * While we have the rmap lock, code running on other CPUs
                 * cannot finish unmapping the host real page that backs
                 * this guest real page, so we are OK to access the host
                 * real page.
                 */
                lock_rmap(rmap);
                if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
        }

        for (i = 0; i < npages; ++i) {
                unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

                ret = kvmppc_rm_tce_validate(stt, tce);
                if (ret != H_SUCCESS)
                        goto unlock_exit;
        }

        for (i = 0; i < npages; ++i) {
                unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

                ua = 0;
                if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
                        ret = H_PARAMETER;
                        goto invalidate_exit;
                }

                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry + i, ua,
                                        iommu_tce_direction(tce));

                        if (ret != H_SUCCESS) {
                                kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
                                                entry);
                                goto invalidate_exit;
                        }
                }

                kvmppc_rm_tce_put(stt, entry + i, tce);
        }

invalidate_exit:
        list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
                iommu_tce_kill_rm(stit->tbl, entry, npages);

unlock_exit:
        if (rmap)
                unlock_rmap(rmap);

        return ret;
}

long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret;
        struct kvmppc_spapr_tce_iommu_table *stit;

        /* For radix, we might be in virtual mode, so punt */
        if (kvm_is_radix(vcpu->kvm))
                return H_TOO_HARD;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0);
        if (ret != H_SUCCESS)
                return ret;

        /* Check permission bits only, to allow userspace to poison TCEs for debug */
        if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
                return H_PARAMETER;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long entry = ioba >> stt->page_shift;

                for (i = 0; i < npages; ++i) {
                        ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry + i);

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                goto invalidate_exit;

                        WARN_ON_ONCE_RM(1);
                        kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
                }
        }

        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);

invalidate_exit:
        list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
                iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);

        return ret;
}

/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret;
        unsigned long idx;
        struct page *page;
        u64 *tbl;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        idx = (ioba >> stt->page_shift) - stt->offset;
        page = stt->pages[idx / TCES_PER_PAGE];
        if (!page) {
                vcpu->arch.regs.gpr[4] = 0;
                return H_SUCCESS;
        }
        tbl = (u64 *)page_address(page);

        vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* KVM_BOOK3S_HV_POSSIBLE */