arch/powerpc/kvm/book3s_64_vio_hv.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/stringify.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>
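
/*
 * Real-mode-safe variant of WARN_ON_ONCE(): it only reports via pr_err() and
 * dump_stack(), presumably because the trap-based machinery behind the
 * generic WARN_ON_ONCE() cannot be relied upon while running in real mode.
 */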
#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(".data.unlikely") __warned;	\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
		dump_stack();					\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 * mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
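/*
 * Converts a guest physical TCE address to a host userspace address via the
 * raw memslot array, which is safe to walk in real mode. The page offset is
 * preserved while the TCE_PCI_READ/WRITE permission bits are masked out.
 */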
static long kvmppc_rm_tce_to_ua(struct kvm *kvm,
		unsigned long tce, unsigned long *ua)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

	return 0;
}

/*
 * Validates TCE address.
 * At the moment flags and page mask are validated.
 * As the host kernel does not access those addresses (just puts them
 * to the table and user space is supposed to process them), we can skip
 * checking other things (such as TCE is a guest RAM address or the page
 * was actually allocated).
 */
static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua))
		return H_TOO_HARD;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem)
			return H_TOO_HARD;

		if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))
			return H_TOO_HARD;
	}

	return H_SUCCESS;
}

/* Note on the use of page_address() in real mode:
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address(),
 * which returns __va(PFN_PHYS(page_to_pfn(page))); this is an
 * arithmetic operation and does not access the page struct.
 *
 * Theoretically page_address() could be defined differently, but
 * either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL would have to be
 * enabled. WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real mode on HV KVM and virtual
 * mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values to the table and expects user space to convert them.
 * Cannot fail so kvmppc_rm_tce_validate must be called before it.
 */
static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	/*
	 * page must not be NULL in real mode,
	 * kvmppc_rm_ioba_validate() must have taken care of this.
	 */
	WARN_ON_ONCE_RM(!page);
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}

/*
 * TCE pages are allocated on demand by the virtual mode handler, which
 * kvmppc_rm_tce_put() cannot do in real mode.
 * Check if kvmppc_rm_tce_put() can succeed in real mode, i.e. a TCE page is
 * already allocated or not required (when clearing a tce entry).
 */
static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long ioba, unsigned long npages, bool clearing)
{
	unsigned long i, idx, sttpage, sttpages;
	unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages);

	if (ret)
		return ret;
	/*
	 * clearing==true says kvmppc_rm_tce_put won't be allocating pages
	 * for empty tces.
	 */
	if (clearing)
		return H_SUCCESS;

	idx = (ioba >> stt->page_shift) - stt->offset;
	sttpage = idx / TCES_PER_PAGE;
	sttpages = ALIGN(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
			TCES_PER_PAGE;
	for (i = sttpage; i < sttpage + sttpages; ++i)
		if (!stt->pages[i])
			return H_TOO_HARD;

	return H_SUCCESS;
}
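
/*
 * Exchanges a TCE in the hardware table without issuing the IOMMU TLB
 * invalidation (that is done separately via iommu_tce_kill_rm()). If the
 * previous entry allowed the device to write to memory, the backing page is
 * marked dirty through the cached userspace address.
 */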
static long iommu_tce_xchg_no_kill_rm(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;

	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL))) {
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
		/*
		 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
		 * calling this, so the UA we see here is still valid.
		 */
		if (pua && *pua)
			mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
	}

	return ret;
}
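
/* Flush the IOMMU TLB for a range of entries, if the table provides a hook. */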
static void iommu_tce_kill_rm(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	if (tbl->it_ops->tce_kill)
		tbl->it_ops->tce_kill(tbl, entry, pages, true);
}
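
/*
 * Resets a hardware TCE to no access. The return value is intentionally
 * ignored: this is only used on error paths where nothing more can be done.
 */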
static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
}
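
/*
 * Drops the "mapped" reference on the preregistered memory region backing an
 * entry and clears the cached userspace address.
 */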
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}
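
/*
 * Clears a single hardware TCE and, if it was mapped, releases the reference
 * taken on the preregistered memory at map time.
 */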
static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}
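
/*
 * Unmaps every IOMMU subpage covered by one guest TCE (the guest IOMMU page
 * size may be larger than the host IOMMU page size).
 */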
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}
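
/*
 * Maps a single hardware TCE: translates the userspace address via the
 * preregistered memory list, takes a "mapped" reference, programs the HPA
 * into the table and caches the UA for later unmapping and dirty tracking.
 */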
static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
			&hpa)))
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}
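
/*
 * Maps all IOMMU subpages covered by one guest TCE, stepping the userspace
 * address by the host IOMMU page size.
 */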
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}
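
/*
 * Real mode handler for the H_PUT_TCE hypercall: validates the request,
 * updates the hardware IOMMU tables attached to this LIOBN and finally
 * records the guest TCE value via kvmppc_rm_tce_put() for user space and
 * emulated devices.
 */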
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	liobn, ioba, tce); */

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_rm_ioba_validate(stt, ioba, 1, tce == 0);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_rm_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry, ua, dir);

		iommu_tce_kill_rm(stit->tbl, entry, 1);

		if (ret != H_SUCCESS) {
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
			return ret;
		}
	}

	kvmppc_rm_tce_put(stt, entry, tce);

	return H_SUCCESS;
}
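
/*
 * Translates a userspace address to a host physical address by walking the
 * host page table in real mode, bailing out with -EAGAIN on anything (huge
 * pages, not-recently-accessed PTEs) that is too complicated to handle here.
 */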
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	/*
	 * Called in real mode with MSR_EE = 0. We are safe here.
	 * It is ok to do the lookup with arch.pgdir here, because
	 * we are doing this on secondary cpus and the current task there
	 * is not the hypervisor. Also this is safe against THP in the
	 * host, because an IPI to the primary thread will wait for the
	 * secondary to exit, which in turn lets the page table walk
	 * below finish.
	 */
	/*
	 * An rmap lock would not make this safe either: it only ensures that
	 * hash page table entries are removed with the rmap lock held. After
	 * that the mmu notifier returns and we go ahead and remove PTEs from
	 * the QEMU page table.
	 */
	ptep = find_kvm_host_pte(vcpu->kvm, mmu_seq, ua, &shift);
	if (!ptep)
		return -ENXIO;

	pte = READ_ONCE(*ptep);
	if (!pte_present(pte))
		return -ENXIO;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}
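
/*
 * Real mode handler for H_PUT_TCE_INDIRECT: reads a list of up to 512 TCEs
 * from guest memory, validates them all first and then maps each one into
 * the hardware IOMMU tables and the KVM-maintained table.
 */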
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long mmu_seq;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	/*
	 * Used to check for invalidations in progress.
	 */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs,
	 * so the whole list fits within a 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which is
		 * normally the VFIO case, and the gpa->hpa translation does
		 * not depend on the HPT.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
					IOMMU_PAGE_SHIFT_4K, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually the case of a guest with emulated devices
		 * only, when the TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case,
		 * so take the mmu_lock and walk the host page table.
		 */
		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
			return H_TOO_HARD;

		arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
		if (kvmppc_rm_ua_to_hpa(vcpu, mmu_seq, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_rm_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ua = 0;
		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
			ret = H_PARAMETER;
			goto invalidate_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
						entry);
				goto invalidate_exit;
			}
		}

		kvmppc_rm_tce_put(stt, entry + i, tce);
	}

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill_rm(stit->tbl, entry, npages);

unlock_exit:
	if (!prereg)
		arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
	return ret;
}
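
/*
 * Real mode handler for H_STUFF_TCE: sets npages consecutive entries to the
 * same value, which must carry no permission bits, i.e. it can only clear
 * (or poison) mappings.
 */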
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only, to allow userspace to poison TCEs for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto invalidate_exit;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);

	return ret;
}

/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	if (!page) {
		vcpu->arch.regs.gpr[4] = 0;
		return H_SUCCESS;
	}
	tbl = (u64 *)page_address(page);

	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* KVM_BOOK3S_HV_POSSIBLE */