// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 * Copyright Gavin Shan, IBM Corporation 2014.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>

#include <asm/iommu.h>
#include <asm/mmu_context.h>
#define DRIVER_VERSION  "0.1"
#define DRIVER_AUTHOR   "aik@ozlabs.ru"
#define DRIVER_DESC     "VFIO IOMMU SPAPR TCE"
static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);
/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */
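/*
 * Illustrative sketch (an assumption about a typical caller, not part of this
 * driver): a QEMU-like userspace would drive this module through the standard
 * VFIO container/group ioctls from <linux/vfio.h>, roughly as follows
 * (error handling omitted, group number hypothetical):
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *	int group = open("/dev/vfio/26", O_RDWR);
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);  // or _v2_
 *	ioctl(container, VFIO_IOMMU_ENABLE);    // v1 only, charges locked_vm
 *
 *	// VFIO_IOMMU_SPAPR_TCE_GET_INFO / VFIO_IOMMU_MAP_DMA follow and are
 *	// handled by tce_iommu_ioctl() below.
 */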
struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};
/*
 * A container needs to remember which preregistered region it has
 * referenced to do proper cleanup at the userspace process exit.
 */
struct tce_iommu_prereg {
	struct list_head next;
	struct mm_iommu_table_group_mem_t *mem;
};
/*
 * The container descriptor supports only a single group per container.
 * Required by the API as the container is not supplied with the IOMMU group
 * at the moment of initialization.
 */
struct tce_container {
	struct mutex lock;
	bool v2;
	bool enabled;
	bool def_window_pending;
	unsigned long locked_pages;
	struct mm_struct *mm;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
	struct list_head prereg_list;
};
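/*
 * Binds the container to the mm of the first task that preregisters memory or
 * enables the container; later ioctl callers must come from the same mm (see
 * the sanity check in tce_iommu_ioctl()). The reference taken with mmgrab()
 * is dropped in tce_iommu_release().
 */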
static long tce_iommu_mm_set(struct tce_container *container)
{
	if (container->mm) {
		if (container->mm == current->mm)
			return 0;
		return -EPERM;
	}
	BUG_ON(!current->mm);
	container->mm = current->mm;
	mmgrab(container->mm);

	return 0;
}
static long tce_iommu_prereg_free(struct tce_container *container,
		struct tce_iommu_prereg *tcemem)
{
	long ret;

	ret = mm_iommu_put(container->mm, tcemem->mem);
	if (ret)
		return ret;

	list_del(&tcemem->next);
	kfree(tcemem);

	return 0;
}
static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;
	struct tce_iommu_prereg *tcemem;
	bool found = false;
	long ret;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_get(container->mm, vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	list_for_each_entry(tcemem, &container->prereg_list, next) {
		if (tcemem->mem == mem) {
			found = true;
			break;
		}
	}

	if (!found)
		ret = -ENOENT;
	else
		ret = tce_iommu_prereg_free(container, tcemem);

	mm_iommu_put(container->mm, mem);

	return ret;
}
static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	struct tce_iommu_prereg *tcemem;
	unsigned long entries = size >> PAGE_SHIFT;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	mem = mm_iommu_get(container->mm, vaddr, entries);
	if (mem) {
		list_for_each_entry(tcemem, &container->prereg_list, next) {
			if (tcemem->mem == mem) {
				ret = -EBUSY;
				goto put_exit;
			}
		}
	} else {
		ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
		if (ret)
			return ret;
	}

	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
	if (!tcemem) {
		ret = -ENOMEM;
		goto put_exit;
	}

	tcemem->mem = mem;
	list_add(&tcemem->next, &container->prereg_list);

	container->enabled = true;

	return 0;

put_exit:
	mm_iommu_put(container->mm, mem);
	return ret;
}
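/*
 * Illustrative sketch (an assumption about a typical v2 caller, not part of
 * this driver): preregistration pins guest RAM once so the map/unmap paths do
 * not have to touch page pins on every TCE update.
 *
 *	struct vfio_iommu_spapr_register_memory reg = {
 *		.argsz = sizeof(reg),
 *		.flags = 0,
 *		.vaddr = (__u64)(uintptr_t)guest_ram,	// page aligned
 *		.size  = guest_ram_size,		// page aligned
 *	};
 *	ioctl(container, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
 *	...
 *	ioctl(container, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
 */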
static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
		unsigned int it_page_shift)
{
	struct page *page;
	unsigned long size = 0;

	if (mm_iommu_is_devmem(mm, hpa, it_page_shift, &size))
		return size == (1UL << it_page_shift);

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk that it should.
	 */
	return page_shift(compound_head(page)) >= it_page_shift;
}
static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}
static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}
static int tce_iommu_find_free_table(struct tce_container *container)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (!container->tables[i])
			return i;
	}

	return -ENOSPC;
}
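/*
 * Worked example of the accounting done in tce_iommu_enable() below
 * (illustrative numbers, not taken from this file): with a 2GB default 32-bit
 * window and 64K system pages, locked = 0x80000000 >> 16 = 32768 pages are
 * charged against the caller's RLIMIT_MEMLOCK up front, regardless of how
 * many TCEs ever get programmed.
 */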
static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap.  For powerpc, the map unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult to impossible
	 * to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled.  The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also we don't have a nice way to fail on H_PUT_TCE due to ulimits,
	 * that would effectively kill the guest at random points, much better
	 * enforcing the limit based on the max that the guest can map.
	 *
	 * Unfortunately at the moment it counts whole tables, no matter how
	 * much memory the guest has. I.e. for 4GB guest and 4 IOMMU groups
	 * each with 2GB DMA window, 8GB will be counted here. The reason for
	 * this is that we cannot tell here the amount of RAM used by the guest
	 * as this information is only available from KVM and VFIO is
	 * KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	ret = tce_iommu_mm_set(container);
	if (ret)
		return ret;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = account_locked_vm(container->mm, locked, true);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}
static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	BUG_ON(!container->mm);
	account_locked_vm(container->mm, container->locked_pages, false);
}
static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);
	INIT_LIST_HEAD_RCU(&container->prereg_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl);
static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct tce_iommu_group *tcegrp;
	struct tce_iommu_prereg *tcemem, *tmtmp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_free_table(container, tbl);
	}

	list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
		WARN_ON(tce_iommu_prereg_free(container, tcemem));

	tce_iommu_disable(container);
	if (container->mm)
		mmdrop(container->mm);
	mutex_destroy(&container->lock);

	kfree(container);
}
static void tce_iommu_unuse_page(unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	unpin_user_page(page);
}
static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
		unsigned long tce, unsigned long shift,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
	if (!mem)
		return -EINVAL;

	ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
	if (ret)
		return -EINVAL;

	*pmem = mem;

	return 0;
}
static void tce_iommu_unuse_page_v2(struct tce_container *container,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
			tbl->it_page_shift, &hpa, &mem);
	if (ret)
		pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n",
				__func__, be64_to_cpu(*pua), entry, ret);
	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);
}
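/*
 * Resets a range of TCEs back to DMA_NONE and releases whatever kept the
 * underlying pages alive: the pin taken by tce_iommu_use_page() for v1, or
 * the preregistered-memory mapped counter for v2.
 */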
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;
	unsigned long lastentry = entry + pages, firstentry = entry;

	for ( ; entry < lastentry; ++entry) {
		if (tbl->it_indirect_levels && tbl->it_userspace) {
			/*
			 * For multilevel tables, we can take a shortcut here
			 * and skip some TCEs as we know that the userspace
			 * addresses cache is a mirror of the real TCE table
			 * and if it is missing some indirect levels, then
			 * the hardware table does not have them allocated
			 * either and therefore does not require updating.
			 */
			__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl,
					entry);
			if (!pua) {
				/* align to level_size which is power of two */
				entry |= tbl->it_level_size - 1;
				continue;
			}
		}

		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry, &oldhpa,
				&direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(container, tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(oldhpa);
	}

	iommu_tce_kill(tbl, firstentry, pages);

	return 0;
}
static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (pin_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE ? FOLL_WRITE : 0,
			&page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}
static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		if (!tce_page_is_contained(container->mm, hpa,
				tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
				&hpa, &dirtmp);
		if (ret) {
			tce_iommu_unuse_page(hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);
	else
		iommu_tce_kill(tbl, entry, pages);

	return ret;
}
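/*
 * The v2 variant does not pin pages at map time: the host physical address is
 * resolved from memory preregistered via VFIO_IOMMU_SPAPR_REGISTER_MEMORY and
 * only a per-region use counter (mm_iommu_mapped_inc()) is taken, so the hot
 * map path never has to touch page pins.
 */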
static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(container,
				tce, tbl->it_page_shift, &hpa, &mem);
		if (ret)
			break;

		if (!tce_page_is_contained(container->mm, hpa,
				tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;

		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
				&hpa, &dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(container, tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(container, tbl, entry + i);

		*pua = cpu_to_be64(tce);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);
	else
		iommu_tce_kill(tbl, entry, pages);

	return ret;
}
static long tce_iommu_create_table(struct tce_container *container,
			struct iommu_table_group *table_group,
			int num,
			__u32 page_shift,
			__u64 window_size,
			__u32 levels,
			struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = account_locked_vm(container->mm, table_size >> PAGE_SHIFT, true);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size > table_size));

	return ret;
}
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	iommu_tce_table_put(tbl);
	account_locked_vm(container->mm, pages, false);
}
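/*
 * Illustrative sketch (an assumption about a typical v2 caller, not part of
 * this driver): creating an additional DMA window once GET_INFO has reported
 * VFIO_IOMMU_SPAPR_INFO_DDW; the window geometry below is hypothetical.
 *
 *	struct vfio_iommu_spapr_tce_create create = {
 *		.argsz = sizeof(create),
 *		.page_shift = 16,		// 64K IOMMU pages
 *		.window_size = 1ULL << 40,	// hypothetical 1TB window
 *		.levels = 1,
 *	};
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
 *	// create.start_addr now holds the bus address chosen by the platform
 */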
static long tce_iommu_create_window(struct tce_container *container,
		__u32 page_shift, __u64 window_size, __u32 levels,
		__u64 *start_addr)
{
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl = NULL;
	long ret, num;

	num = tce_iommu_find_free_table(container);
	if (num < 0)
		return num;

	/* Get the first group for ops::create_table */
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -EFAULT;

	if (!(table_group->pgsizes & (1ULL << page_shift)))
		return -EINVAL;

	if (!table_group->ops->set_window || !table_group->ops->unset_window ||
			!table_group->ops->get_table_size ||
			!table_group->ops->create_table)
		return -EPERM;

	/* Create TCE table */
	ret = tce_iommu_create_table(container, table_group, num,
			page_shift, window_size, levels, &tbl);
	if (ret)
		return ret;

	BUG_ON(!tbl->it_ops->free);

	/*
	 * Program the table to every group.
	 * Groups have been tested for compatibility at the attach time.
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		ret = table_group->ops->set_window(table_group, num, tbl);
		if (ret)
			goto unset_exit;
	}

	container->tables[num] = tbl;

	/* Return start address assigned by platform in create_table() */
	*start_addr = tbl->it_offset << tbl->it_page_shift;

	return 0;

unset_exit:
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		table_group->ops->unset_window(table_group, num);
	}
	tce_iommu_free_table(container, tbl);

	return ret;
}
static long tce_iommu_remove_window(struct tce_container *container,
		__u64 start_addr)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl;
	struct tce_iommu_group *tcegrp;
	int num;

	num = tce_iommu_find_table(container, start_addr, &tbl);
	if (num < 0)
		return -EINVAL;

	BUG_ON(!tbl->it_size);

	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);

	/* Detach groups from IOMMUs */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		/*
		 * SPAPR TCE IOMMU exposes the default DMA window to
		 * the guest via dma32_window_start/size of
		 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
		 * the userspace to remove this window, some do not so
		 * here we check for the platform capability.
		 */
		if (!table_group->ops || !table_group->ops->unset_window)
			return -EPERM;

		table_group->ops->unset_window(table_group, num);
	}

	/* Free table */
	tce_iommu_free_table(container, tbl);
	container->tables[num] = NULL;

	return 0;
}
static long tce_iommu_create_default_window(struct tce_container *container)
{
	long ret;
	__u64 start_addr = 0;
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;

	if (!container->def_window_pending)
		return 0;

	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
			table_group->tce32_size, 1, &start_addr);
	WARN_ON_ONCE(!ret && start_addr);

	if (!ret)
		container->def_window_pending = false;

	return ret;
}
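/*
 * EEH (Enhanced Error Handling) pass-through: translates VFIO_EEH_PE_OP
 * sub-commands from userspace into the corresponding eeh_pe_*() calls for the
 * PE backing the IOMMU group.
 */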
static long vfio_spapr_ioctl_eeh_pe_op(struct iommu_group *group,
				       unsigned long arg)
{
	struct eeh_pe *pe;
	struct vfio_eeh_pe_op op;
	unsigned long minsz;

	pe = eeh_iommu_group_to_pe(group);
	if (!pe)
		return -ENODEV;

	minsz = offsetofend(struct vfio_eeh_pe_op, op);
	if (copy_from_user(&op, (void __user *)arg, minsz))
		return -EFAULT;
	if (op.argsz < minsz || op.flags)
		return -EINVAL;

	switch (op.op) {
	case VFIO_EEH_PE_DISABLE:
		return eeh_pe_set_option(pe, EEH_OPT_DISABLE);
	case VFIO_EEH_PE_ENABLE:
		return eeh_pe_set_option(pe, EEH_OPT_ENABLE);
	case VFIO_EEH_PE_UNFREEZE_IO:
		return eeh_pe_set_option(pe, EEH_OPT_THAW_MMIO);
	case VFIO_EEH_PE_UNFREEZE_DMA:
		return eeh_pe_set_option(pe, EEH_OPT_THAW_DMA);
	case VFIO_EEH_PE_GET_STATE:
		return eeh_pe_get_state(pe);
	case VFIO_EEH_PE_RESET_DEACTIVATE:
		return eeh_pe_reset(pe, EEH_RESET_DEACTIVATE, true);
	case VFIO_EEH_PE_RESET_HOT:
		return eeh_pe_reset(pe, EEH_RESET_HOT, true);
	case VFIO_EEH_PE_RESET_FUNDAMENTAL:
		return eeh_pe_reset(pe, EEH_RESET_FUNDAMENTAL, true);
	case VFIO_EEH_PE_CONFIGURE:
		return eeh_pe_configure(pe);
	case VFIO_EEH_PE_INJECT_ERR:
		minsz = offsetofend(struct vfio_eeh_pe_op, err.mask);
		if (op.argsz < minsz)
			return -EINVAL;
		if (copy_from_user(&op, (void __user *)arg, minsz))
			return -EFAULT;

		return eeh_pe_inject_err(pe, op.err.type, op.err.func,
					 op.err.addr, op.err.mask);
	default:
		return -EINVAL;
	}
}
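/*
 * Illustrative sketch (an assumption about a typical caller, not part of this
 * driver): mapping one IOMMU page through the type1-compatible MAP_DMA ioctl
 * handled below.
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(uintptr_t)buf,	// IOMMU-page aligned
 *		.iova  = dma32_window_start,	// from SPAPR_TCE_GET_INFO
 *		.size  = 1UL << page_shift,
 *	};
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 */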
static long tce_iommu_ioctl(void *iommu_data,
				 unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz, ddwsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			return 1;
		case VFIO_EEH:
			return eeh_enabled();
		default:
			return 0;
		}
	}

	/*
	 * Sanity check to prevent one userspace from manipulating
	 * another userspace mm.
	 */
	BUG_ON(!container);
	if (container->mm && container->mm != current->mm)
		return -EPERM;

	switch (cmd) {
	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;
		memset(&info.ddw, 0, sizeof(info.ddw));

		if (table_group->max_dynamic_windows_supported &&
				container->v2) {
			info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
			info.ddw.pgsizes = table_group->pgsizes;
			info.ddw.max_dynamic_windows_supported =
				table_group->max_dynamic_windows_supported;
			info.ddw.levels = table_group->max_levels;
		}

		ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

		if (info.argsz >= ddwsz)
			minsz = ddwsz;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		if (container->v2)
			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);
		else
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		if (!container->mm)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_ioctl_eeh_pe_op(tcegrp->grp, arg);
			if (ret)
				return ret;
		}
		return ret;
	}

	case VFIO_IOMMU_SPAPR_TCE_CREATE: {
		struct vfio_iommu_spapr_tce_create create;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
				start_addr);

		if (copy_from_user(&create, (void __user *)arg, minsz))
			return -EFAULT;

		if (create.argsz < minsz)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_create_default_window(container);
		if (!ret)
			ret = tce_iommu_create_window(container,
					create.page_shift,
					create.window_size, create.levels,
					&create.start_addr);

		mutex_unlock(&container->lock);

		if (!ret && copy_to_user((void __user *)arg, &create, minsz))
			ret = -EFAULT;

		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
		struct vfio_iommu_spapr_tce_remove remove;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
				start_addr);

		if (copy_from_user(&remove, (void __user *)arg, minsz))
			return -EFAULT;

		if (remove.argsz < minsz)
			return -EINVAL;

		if (container->def_window_pending && !remove.start_addr) {
			container->def_window_pending = false;
			return 0;
		}

		mutex_lock(&container->lock);

		ret = tce_iommu_remove_window(container, remove.start_addr);

		mutex_unlock(&container->lock);

		return ret;
	}
	}

	return -ENOTTY;
}
static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;

	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (container->tables[i]) {
			tce_iommu_clear(container, container->tables[i],
					container->tables[i]->it_offset,
					container->tables[i]->it_size);
			table_group->ops->unset_window(table_group, i);
		}
	}
}
static long tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i, ret = 0;

	/* Set all windows to the new group */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		ret = table_group->ops->set_window(table_group, i, tbl);
		if (ret)
			goto release_exit;
	}

	return 0;

release_exit:
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	return ret;
}
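/*
 * Attach/detach callbacks: a group can only join a v2 container if its
 * iommu_table_group_ops match the groups already attached (so one TCE table
 * can be programmed into every group), while a v1 container accepts a single
 * group only.
 */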
static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group, enum vfio_group_type type)
{
	int ret = 0;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL;

	if (type == VFIO_EMULATED_IOMMU)
		return -EINVAL;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);
	if (!table_group) {
		ret = -ENODEV;
		goto unlock_exit;
	}

	/* v2 requires full support of dynamic DMA windows */
	if (container->v2 && table_group->max_dynamic_windows_supported == 0) {
		ret = -EINVAL;
		goto unlock_exit;
	}

	/* v1 reuses TCE tables and does not share them among PEs */
	if (!container->v2 && tce_groups_attached(container)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/*
	 * Check if new group has the same iommu_table_group_ops
	 * (i.e. compatible)
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
		if (table_group_tmp->ops->create_table !=
				table_group->ops->create_table) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	ret = tce_iommu_take_ownership(container, table_group);
	if (!tce_groups_attached(container) && !container->tables[0])
		container->def_window_pending = true;

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
	}

unlock_exit:
	if (ret && tcegrp)
		kfree(tcegrp);

	mutex_unlock(&container->lock);

	return ret;
}
static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}

	list_del(&tcegrp->next);
	kfree(tcegrp);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	tce_iommu_release_ownership(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}
static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};
static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);