/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(.data.unlikely) __warned;		\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif
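
/*
 * Note: WARN_ON_ONCE_RM() exists because the stock WARN machinery (bug
 * table, program check handling) is not safe to invoke from real mode;
 * this variant only pr_err()s the first time the condition fires.
 */
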
#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))
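/* A TCE is a u64, so a 4K host page holds 512 TCEs and a 64K page 8192. */
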
/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 *          mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);
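
/*
 * Note: the lockless walk in kvmppc_find_table() is safe even though real
 * mode cannot take locks, because TCE tables are only removed from
 * kvm->arch.spapr_tce_tables after an RCU grace period (see
 * book3s_64_vio.c), so an entry observed here remains valid for the
 * duration of the hcall.
 */
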
/*
 * Validates TCE address.
 * At the moment flags and page mask are validated.
 * As the host kernel does not access those addresses (just puts them
 * to the table and user space is supposed to process them), we can skip
 * checking other things (such as TCE is a guest RAM address or the page
 * was actually allocated).
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_validate);

/* Note on the use of page_address() in real mode,
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))), which is an arithmetic
 * operation and does not access the page struct.
 *
 * Theoretically page_address() could be defined differently,
 * but either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values to the table and expects user space to convert them.
 * Called in both real and virtual modes.
 * Cannot fail, so kvmppc_tce_validate must be called before it.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_put);
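
/*
 * Note: idx is an absolute TCE index within the DMA window; after the
 * window offset is subtracted, idx / TCES_PER_PAGE selects the backing
 * page and idx % TCES_PER_PAGE the slot within that page.
 */
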
long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
		unsigned long *ua, unsigned long **prmap)
{
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (prmap)
		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
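
/*
 * Note: the userspace address built above is the hva of the gfn plus the
 * offset of gpa within its page; TCE_PCI_READ/TCE_PCI_WRITE are masked
 * out along with the page bits so stray permission bits in a raw TCE
 * cannot leak into the address. The optional prmap returns the memslot
 * rmap entry so that real-mode callers can lock it against concurrent
 * MMU notifier invalidation.
 */
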
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
}
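
/*
 * Note: kvmppc_rm_clear_tce() is last-resort cleanup used when a map or
 * unmap attempt fails unexpectedly: it swaps an empty (DMA_NONE) entry
 * into the hardware table and ignores the result, so the table is not
 * left pointing at a half-mapped page.
 */
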
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	pua = (void *) vmalloc_to_phys(pua);
	if (WARN_ON_ONCE_RM(!pua))
		return H_HARDWARE;

	mem = mm_iommu_lookup_rm(kvm->mm, *pua, pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = 0;

	return H_SUCCESS;
}
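
/*
 * Note: it_userspace lives in vmalloc space, which real mode cannot
 * address through the kernel linear mapping; hence the vmalloc_to_phys()
 * translation above before dereferencing the entry.
 */
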
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);

	return ret;
}
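
/*
 * Note: if dropping the pre-registered memory refcount fails, the second
 * iommu_tce_xchg_rm() above restores the original entry, leaving the
 * table unchanged so the hcall can be safely retried in virtual mode.
 */
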
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
		return H_HARDWARE;

	pua = (void *) vmalloc_to_phys(pua);
	if (WARN_ON_ONCE_RM(!pua))
		return H_HARDWARE;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_CLOSED;

	ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = ua;

	return 0;
}
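
/*
 * Note the ordering above: mm_iommu_mapped_inc() pins the pre-registered
 * region before the hardware entry is exchanged, and is undone if the
 * exchange fails; when the exchange replaced a still-valid entry
 * (dir != DMA_NONE on return), the reference held by the old entry is
 * dropped via kvmppc_rm_tce_iommu_mapped_dec().
 */
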
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
					stit->tbl, entry, ua, dir);

		if (ret == H_SUCCESS)
			continue;

		if (ret == H_TOO_HARD)
			return ret;

		WARN_ON_ONCE_RM(1);
		kvmppc_rm_clear_tce(stit->tbl, entry);
	}

	kvmppc_tce_put(stt, entry, tce);

	return H_SUCCESS;
}
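
/*
 * Note: H_TOO_HARD is never seen by the guest; it tells the HV exit path
 * to rerun the same hcall in virtual mode, where locks, vmalloc and page
 * faults are all available.
 */
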
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	/*
	 * Called in real mode with MSR_EE = 0. We are safe here.
	 * It is ok to do the lookup with arch.pgdir here, because
	 * we are doing this on secondary cpus and current task there
	 * is not the hypervisor. Also this is safe against THP in the
	 * host, because an IPI to primary thread will wait for the secondary
	 * to exit which will again result in the below page table walk
	 * to finish.
	 */
	ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
	if (!ptep || !pte_present(*ptep))
		return -ENXIO;
	pte = *ptep;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}
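
/*
 * Note: at the point of the *phpa computation above, shift is exactly
 * PAGE_SHIFT (larger page sizes were already punted with -EAGAIN), so the
 * result is the page frame address ORed with the in-page offset of ua.
 * A nonzero return makes the caller fall back (H_TOO_HARD) to the
 * virtual-mode handler instead of faulting in real mode.
 */
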
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long *rmap = NULL;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs
	 * so the whole table addressed resides in a 4K page
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which
		 * is normally the VFIO case, and gpa->hpa translation does not
		 * depend on hpt.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually a case of a guest with emulated devices only
		 * when TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case
		 * so lock rmap and do __find_linux_pte_or_hugepte().
		 */
		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
			return H_TOO_HARD;

		rmap = (void *) vmalloc_to_phys(rmap);
		if (WARN_ON_ONCE_RM(!rmap))
			return H_HARDWARE;

		/*
		 * Synchronize with the MMU notifier callbacks in
		 * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
		 * While we have the rmap lock, code running on other CPUs
		 * cannot finish unmapping the host real page that backs
		 * this guest real page, so we are OK to access the host
		 * real page.
		 */
		lock_rmap(rmap);
		if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;

		ua = 0;
		if (kvmppc_gpa_to_ua(vcpu->kvm,
				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
				&ua, NULL)) {
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto unlock_exit;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(stit->tbl, entry);
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	if (rmap)
		unlock_rmap(rmap);

	return ret;
}
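
/*
 * Note: 512 TCEs * sizeof(u64) = 4096 bytes, so a 4K-aligned TCE list
 * (both checked above) can never cross a page boundary; a single
 * gpa->hpa translation of tce_list is therefore enough to read the whole
 * list in real mode.
 */
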
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only to allow userspace to poison TCEs for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stit->tbl->it_page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(stit->tbl, entry);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}
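
/*
 * Note: each hardware table derives its starting entry from its own
 * it_page_shift because a backing iommu_table may use a different page
 * size than the guest-visible window; the emulated table update at the
 * end uses the guest view (stt->page_shift).
 */
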
/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = (u64 *)page_address(page);

	vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);
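
/*
 * Note: per the PAPR hcall ABI, H_GET_TCE returns the TCE value in R4,
 * which is why kvmppc_h_get_tce() stores it in vcpu->arch.gpr[4].
 */
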
#endif /* KVM_BOOK3S_HV_POSSIBLE */