// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>

#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION  "0.1"
#define DRIVER_AUTHOR   "aik@ozlabs.ru"
#define DRIVER_DESC     "VFIO IOMMU SPAPR TCE"
static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);
/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */
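/*
 * Illustrative userspace calling sequence (a sketch only, not a complete
 * program; the group/container plumbing ioctls belong to the generic VFIO
 * API rather than to this file):
 *
 *	container = open("/dev/vfio/vfio", O_RDWR);
 *	ioctl(container, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU);
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
 *	ioctl(container, VFIO_IOMMU_ENABLE);		// v1 containers only
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 */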
struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};
/*
 * A container needs to remember which preregistered region it has
 * referenced to do proper cleanup at the userspace process exit.
 */
struct tce_iommu_prereg {
	struct list_head next;
	struct mm_iommu_table_group_mem_t *mem;
};
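/*
 * Note: in the v2 (VFIO_SPAPR_TCE_v2_IOMMU) flow, userspace preregisters its
 * DMA memory up front with VFIO_IOMMU_SPAPR_REGISTER_MEMORY; the pages are
 * pinned and accounted at that point, and subsequent VFIO_IOMMU_MAP_DMA calls
 * only translate the already registered userspace addresses to host physical
 * addresses (see tce_iommu_prereg_ua_to_hpa()).
 */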
/*
 * The container descriptor supports only a single group per container.
 * Required by the API as the container is not supplied with the IOMMU group
 * at the moment of initialization.
 */
struct tce_container {
	struct mutex lock;
	bool enabled;
	bool v2;
	bool def_window_pending;
	unsigned long locked_pages;
	struct mm_struct *mm;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
	struct list_head prereg_list;
};
static long tce_iommu_mm_set(struct tce_container *container)
{
	if (container->mm) {
		if (container->mm == current->mm)
			return 0;
		return -EPERM;
	}
	BUG_ON(!current->mm);
	container->mm = current->mm;
	mmgrab(container->mm);

	return 0;
}
static long tce_iommu_prereg_free(struct tce_container *container,
		struct tce_iommu_prereg *tcemem)
{
	long ret;

	ret = mm_iommu_put(container->mm, tcemem->mem);
	if (ret)
		return ret;

	list_del(&tcemem->next);
	kfree(tcemem);

	return 0;
}
static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;
	struct tce_iommu_prereg *tcemem;
	bool found = false;
	long ret;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_get(container->mm, vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	list_for_each_entry(tcemem, &container->prereg_list, next) {
		if (tcemem->mem == mem) {
			found = true;
			break;
		}
	}

	if (!found)
		ret = -ENOENT;
	else
		ret = tce_iommu_prereg_free(container, tcemem);

	mm_iommu_put(container->mm, mem);

	return ret;
}
static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	struct tce_iommu_prereg *tcemem;
	unsigned long entries = size >> PAGE_SHIFT;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	mem = mm_iommu_get(container->mm, vaddr, entries);
	if (mem) {
		list_for_each_entry(tcemem, &container->prereg_list, next) {
			if (tcemem->mem == mem) {
				ret = -EBUSY;
				goto put_exit;
			}
		}
	} else {
		ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
		if (ret)
			return ret;
	}

	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
	if (!tcemem) {
		ret = -ENOMEM;
		goto put_exit;
	}

	tcemem->mem = mem;
	list_add(&tcemem->next, &container->prereg_list);

	container->enabled = true;

	return 0;

put_exit:
	mm_iommu_put(container->mm, mem);
	return ret;
}
static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
		unsigned int it_page_shift)
{
	struct page *page;
	unsigned long size = 0;

	if (mm_iommu_is_devmem(mm, hpa, it_page_shift, &size))
		return size == (1UL << it_page_shift);

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk that it should.
	 */
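	/*
	 * For example, a 64K TCE page (it_page_shift == 16) backed by a 4K
	 * system page fails this check: programming such a TCE would let the
	 * device reach 60K of memory that was never pinned.
	 */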
	return page_shift(compound_head(page)) >= it_page_shift;
}
static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}
static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}
static int tce_iommu_find_free_table(struct tce_container *container)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (!container->tables[i])
			return i;
	}

	return -ENOSPC;
}
static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap. For powerpc, the map unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult to impossible
	 * to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled. The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also we don't have a nice way to fail on H_PUT_TCE due to ulimits,
	 * that would effectively kill the guest at random points, much better
	 * enforcing the limit based on the max that the guest can map.
	 *
	 * Unfortunately at the moment it counts whole tables, no matter how
	 * much memory the guest has. I.e. for 4GB guest and 4 IOMMU groups
	 * each with 2GB DMA window, 8GB will be counted here. The reason for
	 * this is that we cannot tell here the amount of RAM used by the guest
	 * as this information is only available from KVM and VFIO is
	 * KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
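	/*
	 * For example, with a single group whose default 32-bit window is
	 * 2GB, enabling the container accounts 2GB >> PAGE_SHIFT pages
	 * (524288 with 4K pages, 32768 with 64K pages) against the locked
	 * memory limit, regardless of how much the guest actually maps.
	 */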
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	ret = tce_iommu_mm_set(container);
	if (ret)
		return ret;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = account_locked_vm(container->mm, locked, true);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}
static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	BUG_ON(!container->mm);
	account_locked_vm(container->mm, container->locked_pages, false);
}
static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);
	INIT_LIST_HEAD_RCU(&container->prereg_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl);
static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct tce_iommu_group *tcegrp;
	struct tce_iommu_prereg *tcemem, *tmtmp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(container, tbl);
	}

	list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
		WARN_ON(tce_iommu_prereg_free(container, tcemem));

	tce_iommu_disable(container);
	if (container->mm)
		mmdrop(container->mm);
	mutex_destroy(&container->lock);

	kfree(container);
}
static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	unpin_user_page(page);
}
static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
		unsigned long tce, unsigned long shift,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
	if (!mem)
		return -EINVAL;

	ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
	if (ret)
		return -EINVAL;

	*pmem = mem;

	return 0;
}
static void tce_iommu_unuse_page_v2(struct tce_container *container,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
			tbl->it_page_shift, &hpa, &mem);
	if (ret)
		pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n",
				__func__, be64_to_cpu(*pua), entry, ret);
	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);
}
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;
	unsigned long lastentry = entry + pages, firstentry = entry;

	for ( ; entry < lastentry; ++entry) {
		if (tbl->it_indirect_levels && tbl->it_userspace) {
			/*
			 * For multilevel tables, we can take a shortcut here
			 * and skip some TCEs as we know that the userspace
			 * addresses cache is a mirror of the real TCE table
			 * and if it is missing some indirect levels, then
			 * the hardware table does not have them allocated
			 * either and therefore does not require updating.
			 */
			__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl,
					entry);
			if (!pua) {
				/* align to level_size which is power of two */
				entry |= tbl->it_level_size - 1;
				continue;
			}
		}

		cond_resched();

		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry, &oldhpa,
				&direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(container, tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(container, oldhpa);
	}

	iommu_tce_kill(tbl, firstentry, pages);

	return 0;
}
static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (pin_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE ? FOLL_WRITE : 0,
			&page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}
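/*
 * v1 map path: each userspace page is pinned with tce_iommu_use_page() and
 * one TCE is programmed per IOMMU page via iommu_tce_xchg_no_kill(); on any
 * failure the entries programmed so far are torn down by tce_iommu_clear().
 */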
static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		if (!tce_page_is_contained(container->mm, hpa,
				tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
				&hpa, &dirtmp);
		if (ret) {
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);
	else
		iommu_tce_kill(tbl, entry, pages);

	return ret;
}
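/*
 * v2 map path: no page pinning happens here; the host physical address is
 * looked up in the preregistered memory cache and the original userspace
 * address is stored in the it_userspace array so it can be released on unmap.
 */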
static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(container,
				tce, tbl->it_page_shift, &hpa, &mem);
		if (ret)
			break;

		if (!tce_page_is_contained(container->mm, hpa,
				tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;

		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
				&hpa, &dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(container, tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(container, tbl, entry + i);

		*pua = cpu_to_be64(tce);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);
	else
		iommu_tce_kill(tbl, entry, pages);

	return ret;
}
static long tce_iommu_create_table(struct tce_container *container,
		struct iommu_table_group *table_group,
		int num,
		__u32 page_shift,
		__u64 window_size,
		__u32 levels,
		struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = account_locked_vm(container->mm, table_size >> PAGE_SHIFT, true);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size > table_size));

	return ret;
}
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	iommu_tce_table_put(tbl);
	account_locked_vm(container->mm, pages, false);
}
static long tce_iommu_create_window(struct tce_container *container,
		__u32 page_shift, __u64 window_size, __u32 levels,
		__u64 *start_addr)
{
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl = NULL;
	long ret, num;

	num = tce_iommu_find_free_table(container);
	if (num < 0)
		return num;

	/* Get the first group for ops::create_table */
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -EFAULT;

	if (!(table_group->pgsizes & (1ULL << page_shift)))
		return -EINVAL;

	if (!table_group->ops->set_window || !table_group->ops->unset_window ||
			!table_group->ops->get_table_size ||
			!table_group->ops->create_table)
		return -EPERM;

	/* Create TCE table */
	ret = tce_iommu_create_table(container, table_group, num,
			page_shift, window_size, levels, &tbl);
	if (ret)
		return ret;

	BUG_ON(!tbl->it_ops->free);

	/*
	 * Program the table to every group.
	 * Groups have been tested for compatibility at the attach time.
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		ret = table_group->ops->set_window(table_group, num, tbl);
		if (ret)
			goto unset_exit;
	}

	container->tables[num] = tbl;

	/* Return start address assigned by platform in create_table() */
	*start_addr = tbl->it_offset << tbl->it_page_shift;

	return 0;

unset_exit:
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		table_group->ops->unset_window(table_group, num);
	}
	tce_iommu_free_table(container, tbl);

	return ret;
}
static long tce_iommu_remove_window(struct tce_container *container,
		__u64 start_addr)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl;
	struct tce_iommu_group *tcegrp;
	int num;

	num = tce_iommu_find_table(container, start_addr, &tbl);
	if (num < 0)
		return -EINVAL;

	BUG_ON(!tbl->it_size);

	/* Detach groups from IOMMUs */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		/*
		 * SPAPR TCE IOMMU exposes the default DMA window to
		 * the guest via dma32_window_start/size of
		 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
		 * the userspace to remove this window, some do not so
		 * here we check for the platform capability.
		 */
		if (!table_group->ops || !table_group->ops->unset_window)
			return -EPERM;

		table_group->ops->unset_window(table_group, num);
	}

	/* Free table */
	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
	tce_iommu_free_table(container, tbl);
	container->tables[num] = NULL;

	return 0;
}
static long tce_iommu_create_default_window(struct tce_container *container)
{
	long ret;
	__u64 start_addr = 0;
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;

	if (!container->def_window_pending)
		return 0;

	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
			table_group->tce32_size, 1, &start_addr);
	WARN_ON_ONCE(!ret && start_addr);

	if (!ret)
		container->def_window_pending = false;

	return ret;
}
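/*
 * Dynamic DMA windows (DDW), v2 containers only: VFIO_IOMMU_SPAPR_TCE_GET_INFO
 * reports the capability in info.ddw, VFIO_IOMMU_SPAPR_TCE_REMOVE can drop the
 * default 32-bit window and VFIO_IOMMU_SPAPR_TCE_CREATE opens a new one.
 * An illustrative (not authoritative) userspace fragment:
 *
 *	struct vfio_iommu_spapr_tce_create create = {
 *		.argsz = sizeof(create),
 *		.page_shift = 16,		// 64K IOMMU pages
 *		.window_size = 1ULL << 32,	// 4GB window
 *		.levels = 1,
 *	};
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
 *	// create.start_addr now holds the bus address of the new window
 */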
static long tce_iommu_ioctl(void *iommu_data,
		unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz, ddwsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			ret = 1;
			break;
		default:
			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
			break;
		}

		return (ret < 0) ? 0 : ret;
	}

	/*
	 * Sanity check to prevent one userspace from manipulating
	 * another userspace mm.
	 */
	BUG_ON(!container);
	if (container->mm && container->mm != current->mm)
		return -EPERM;

	switch (cmd) {
	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;
		memset(&info.ddw, 0, sizeof(info.ddw));

		if (table_group->max_dynamic_windows_supported &&
				container->v2) {
			info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
			info.ddw.pgsizes = table_group->pgsizes;
			info.ddw.max_dynamic_windows_supported =
				table_group->max_dynamic_windows_supported;
			info.ddw.levels = table_group->max_levels;
		}

		ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

		if (info.argsz >= ddwsz)
			minsz = ddwsz;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		if (container->v2)
			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);
		else
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		if (!container->mm)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
					cmd, arg);
			if (ret)
				return ret;
		}
		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_CREATE: {
		struct vfio_iommu_spapr_tce_create create;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
				start_addr);

		if (copy_from_user(&create, (void __user *)arg, minsz))
			return -EFAULT;

		if (create.argsz < minsz)
			return -EINVAL;

		if (create.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_create_default_window(container);
		if (!ret)
			ret = tce_iommu_create_window(container,
					create.page_shift,
					create.window_size, create.levels,
					&create.start_addr);

		mutex_unlock(&container->lock);

		if (!ret && copy_to_user((void __user *)arg, &create, minsz))
			ret = -EFAULT;

		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
		struct vfio_iommu_spapr_tce_remove remove;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
				start_addr);

		if (copy_from_user(&remove, (void __user *)arg, minsz))
			return -EFAULT;

		if (remove.argsz < minsz)
			return -EINVAL;

		if (remove.flags)
			return -EINVAL;

		if (container->def_window_pending && !remove.start_addr) {
			container->def_window_pending = false;
			return 0;
		}

		mutex_lock(&container->lock);

		ret = tce_iommu_remove_window(container, remove.start_addr);

		mutex_unlock(&container->lock);

		return ret;
	}
	}

	return -ENOTTY;
}
static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		if (tbl->it_map)
			iommu_release_ownership(tbl);

		container->tables[i] = NULL;
	}
}
static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = iommu_take_ownership(tbl);
		if (rc) {
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

			return rc;
		}
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		container->tables[i] = table_group->tables[i];

	return 0;
}
static void tce_iommu_release_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;

	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		if (container->tables[i])
			table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);
}
static long tce_iommu_take_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i, ret = 0;

	if (!table_group->ops->create_table || !table_group->ops->set_window ||
			!table_group->ops->release_ownership) {
		WARN_ON_ONCE(1);
		return -EFAULT;
	}

	table_group->ops->take_ownership(table_group);

	/* Set all windows to the new group */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		ret = table_group->ops->set_window(table_group, i, tbl);
		if (ret)
			goto release_exit;
	}

	return 0;

release_exit:
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);

	return ret;
}
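/*
 * Group attach picks one of two ownership models: platforms without the DDW
 * table_group ops (v1 containers) take over the pre-existing tables with
 * iommu_take_ownership(), while DDW-aware platforms (v2 containers) hand the
 * whole table group over to VFIO and have the container's windows programmed
 * into it.
 */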
static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret = 0;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);
	if (!table_group) {
		ret = -ENODEV;
		goto unlock_exit;
	}

	if (tce_groups_attached(container) && (!table_group->ops ||
			!table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* Check if new group has the same iommu_ops (i.e. compatible) */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
		if (table_group_tmp->ops->create_table !=
				table_group->ops->create_table) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership) {
		if (container->v2) {
			ret = -EPERM;
			goto free_exit;
		}
		ret = tce_iommu_take_ownership(container, table_group);
	} else {
		if (!container->v2) {
			ret = -EPERM;
			goto free_exit;
		}
		ret = tce_iommu_take_ownership_ddw(container, table_group);
		if (!tce_groups_attached(container) && !container->tables[0])
			container->def_window_pending = true;
	}

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
	}

free_exit:
	if (ret && tcegrp)
		kfree(tcegrp);

unlock_exit:
	mutex_unlock(&container->lock);

	return ret;
}
static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}

	list_del(&tcegrp->next);
	kfree(tcegrp);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);
	else
		tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}
static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);