// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>

#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION	"0.1"
#define DRIVER_AUTHOR	"aik@ozlabs.ru"
#define DRIVER_DESC	"VFIO IOMMU SPAPR TCE"

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);

/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */
struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};

/*
 * A container needs to remember which preregistered region it has
 * referenced to do proper cleanup at the userspace process exit.
 */
struct tce_iommu_prereg {
	struct list_head next;
	struct mm_iommu_table_group_mem_t *mem;
};

/*
 * The container descriptor supports only a single group per container.
 * Required by the API as the container is not supplied with the IOMMU group
 * at the moment of initialization.
 */
struct tce_container {
	struct mutex lock;
	bool enabled;
	bool v2;
	bool def_window_pending;
	unsigned long locked_pages;
	struct mm_struct *mm;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
	struct list_head prereg_list;
};

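/*
 * tce_iommu_mm_set() pins the first caller's mm to the container so that
 * locked-page accounting and preregistration lookups always operate on
 * one stable mm; any other process calling in afterwards gets -EPERM.
 */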
static long tce_iommu_mm_set(struct tce_container *container)
{
	if (container->mm) {
		if (container->mm == current->mm)
			return 0;
		return -EPERM;
	}
	BUG_ON(!current->mm);
	container->mm = current->mm;
	atomic_inc(&container->mm->mm_count);

	return 0;
}

static long tce_iommu_prereg_free(struct tce_container *container,
		struct tce_iommu_prereg *tcemem)
{
	long ret;

	ret = mm_iommu_put(container->mm, tcemem->mem);
	if (ret)
		return ret;

	list_del(&tcemem->next);
	kfree(tcemem);

	return 0;
}

static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;
	struct tce_iommu_prereg *tcemem;
	bool found = false;
	long ret;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_get(container->mm, vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	list_for_each_entry(tcemem, &container->prereg_list, next) {
		if (tcemem->mem == mem) {
			found = true;
			break;
		}
	}

	if (!found)
		ret = -ENOENT;
	else
		ret = tce_iommu_prereg_free(container, tcemem);

	mm_iommu_put(container->mm, mem);

	return ret;
}

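/*
 * Preregistration pins and accounts a userspace memory region once so
 * that the hot per-TCE map/unmap paths only translate addresses and never
 * need to pin pages themselves; registering the same region twice fails
 * with -EBUSY.
 */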
static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	struct tce_iommu_prereg *tcemem;
	unsigned long entries = size >> PAGE_SHIFT;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	mem = mm_iommu_get(container->mm, vaddr, entries);
	if (mem) {
		list_for_each_entry(tcemem, &container->prereg_list, next) {
			if (tcemem->mem == mem) {
				ret = -EBUSY;
				goto put_exit;
			}
		}
	} else {
		ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
		if (ret)
			return ret;
	}

	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
	if (!tcemem) {
		ret = -ENOMEM;
		goto put_exit;
	}

	tcemem->mem = mem;
	list_add(&tcemem->next, &container->prereg_list);

	container->enabled = true;

	return 0;

put_exit:
	mm_iommu_put(container->mm, mem);
	return ret;
}

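/*
 * A single TCE maps 2^page_shift bytes, so the backing page (or devmem
 * chunk) must be at least that big; otherwise the device could DMA beyond
 * the memory actually pinned for this entry.
 */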
static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
		unsigned int page_shift)
{
	struct page *page;
	unsigned long size = 0;

	if (mm_iommu_is_devmem(mm, hpa, page_shift, &size))
		return size == (1UL << page_shift);

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}

static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}

static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}

static int tce_iommu_find_free_table(struct tce_container *container)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (!container->tables[i])
			return i;
	}

	return -ENOSPC;
}

static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap.  For powerpc, the map/unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult, if not
	 * impossible, to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled.  The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also we don't have a nice way to fail on H_PUT_TCE due to ulimits;
	 * that would effectively kill the guest at random points, so it is
	 * much better to enforce the limit based on the max that the guest
	 * can map.
	 *
	 * Unfortunately at the moment it counts whole tables, no matter how
	 * much memory the guest has. I.e. for a 4GB guest and 4 IOMMU groups
	 * each with a 2GB DMA window, 8GB will be counted here. The reason for
	 * this is that we cannot tell here the amount of RAM used by the guest
	 * as this information is only available from KVM and VFIO is
	 * KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	ret = tce_iommu_mm_set(container);
	if (ret)
		return ret;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = account_locked_vm(container->mm, locked, true);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}

static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	BUG_ON(!container->mm);
	account_locked_vm(container->mm, container->locked_pages, false);
}

static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);
	INIT_LIST_HEAD_RCU(&container->prereg_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}

static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl);

static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct tce_iommu_group *tcegrp;
	struct tce_iommu_prereg *tcemem, *tmtmp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(container, tbl);
	}

	list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
		WARN_ON(tce_iommu_prereg_free(container, tcemem));

	tce_iommu_disable(container);
	if (container->mm)
		mmdrop(container->mm);
	mutex_destroy(&container->lock);

	kfree(container);
}

static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	put_page(page);
}

static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
		unsigned long tce, unsigned long shift,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
	if (!mem)
		return -EINVAL;

	ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
	if (ret)
		return -EINVAL;

	*pmem = mem;

	return 0;
}

static void tce_iommu_unuse_page_v2(struct tce_container *container,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
			tbl->it_page_shift, &hpa, &mem);
	if (ret)
		pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n",
				__func__, be64_to_cpu(*pua), entry, ret);
	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);
}

static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;
	unsigned long lastentry = entry + pages;

	for ( ; entry < lastentry; ++entry) {
		if (tbl->it_indirect_levels && tbl->it_userspace) {
			/*
			 * For multilevel tables, we can take a shortcut here
			 * and skip some TCEs as we know that the userspace
			 * addresses cache is a mirror of the real TCE table
			 * and if it is missing some indirect levels, then
			 * the hardware table does not have them allocated
			 * either and therefore does not require updating.
			 */
			__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl,
					entry);
			if (!pua) {
				/* align to level_size which is power of two */
				entry |= tbl->it_level_size - 1;
				continue;
			}
		}

		cond_resched();

		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg(container->mm, tbl, entry, &oldhpa,
				&direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(container, tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(container, oldhpa);
	}

	return 0;
}

static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE ? FOLL_WRITE : 0,
			&page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}

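/*
 * The v1 map path: pin each userspace page with get_user_pages_fast() and
 * program the translation, dropping the reference again when the exchange
 * returns a previously mapped page.  A failure mid-way unwinds the entries
 * built so far via tce_iommu_clear().
 */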
static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		if (!tce_page_is_contained(container->mm, hpa,
				tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
				&dirtmp);
		if (ret) {
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}

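/*
 * The v2 map path: translate through the preregistered (already pinned)
 * region instead of pinning here, take a "mapped" reference so the region
 * cannot be unregistered while in use, and mirror the userspace address
 * into tbl->it_userspace for later unmapping.
 */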
static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(container,
				tce, tbl->it_page_shift, &hpa, &mem);
		if (ret)
			break;

		if (!tce_page_is_contained(container->mm, hpa,
				tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;

		ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
				&dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(container, tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(container, tbl, entry + i);

		*pua = cpu_to_be64(tce);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}

static long tce_iommu_create_table(struct tce_container *container,
		struct iommu_table_group *table_group,
		int num,
		__u32 page_shift,
		__u64 window_size,
		__u32 levels,
		struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = account_locked_vm(container->mm, table_size >> PAGE_SHIFT, true);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size > table_size));

	return ret;
}

static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	iommu_tce_table_put(tbl);
	account_locked_vm(container->mm, pages, false);
}

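/*
 * Dynamic DMA windows (DDW): a window is created through the first group's
 * ops and then programmed into every group attached to the container;
 * attach time has already verified that all groups share compatible ops.
 */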
static long tce_iommu_create_window(struct tce_container *container,
		__u32 page_shift, __u64 window_size, __u32 levels,
		__u64 *start_addr)
{
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl = NULL;
	long ret, num;

	num = tce_iommu_find_free_table(container);
	if (num < 0)
		return num;

	/* Get the first group for ops::create_table */
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -EFAULT;

	if (!(table_group->pgsizes & (1ULL << page_shift)))
		return -EINVAL;

	if (!table_group->ops->set_window || !table_group->ops->unset_window ||
			!table_group->ops->get_table_size ||
			!table_group->ops->create_table)
		return -EPERM;

	/* Create TCE table */
	ret = tce_iommu_create_table(container, table_group, num,
			page_shift, window_size, levels, &tbl);
	if (ret)
		return ret;

	BUG_ON(!tbl->it_ops->free);

	/*
	 * Program the table to every group.
	 * Groups have been tested for compatibility at the attach time.
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		ret = table_group->ops->set_window(table_group, num, tbl);
		if (ret)
			goto unset_exit;
	}

	container->tables[num] = tbl;

	/* Return start address assigned by platform in create_table() */
	*start_addr = tbl->it_offset << tbl->it_page_shift;

	return 0;

unset_exit:
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		table_group->ops->unset_window(table_group, num);
	}
	tce_iommu_free_table(container, tbl);

	return ret;
}

static long tce_iommu_remove_window(struct tce_container *container,
		__u64 start_addr)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl;
	struct tce_iommu_group *tcegrp;
	long num;

	num = tce_iommu_find_table(container, start_addr, &tbl);
	if (num < 0)
		return -EINVAL;

	BUG_ON(!tbl->it_size);

	/* Detach groups from IOMMUs */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		/*
		 * SPAPR TCE IOMMU exposes the default DMA window to
		 * the guest via dma32_window_start/size of
		 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
		 * the userspace to remove this window, some do not, so
		 * here we check for the platform capability.
		 */
		if (!table_group->ops || !table_group->ops->unset_window)
			return -EPERM;

		table_group->ops->unset_window(table_group, num);
	}

	/* Free table */
	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
	tce_iommu_free_table(container, tbl);
	container->tables[num] = NULL;

	return 0;
}

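/*
 * The default 32-bit window is not created at attach time but lazily, on
 * the first map/unmap or explicit window creation, so that userspace may
 * first remove it and create a different window in its place.
 */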
static long tce_iommu_create_default_window(struct tce_container *container)
{
	long ret;
	__u64 start_addr = 0;
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;

	if (!container->def_window_pending)
		return 0;

	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
			table_group->tce32_size, 1, &start_addr);
	WARN_ON_ONCE(!ret && start_addr);

	if (!ret)
		container->def_window_pending = false;

	return ret;
}

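/*
 * A minimal sketch of the v1 userspace flow driving this ioctl handler,
 * assuming "container" is an open /dev/vfio/vfio fd that a group has
 * already been set to (error handling omitted; illustrative only, not a
 * definitive usage reference):
 *
 *	struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *	};
 *
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
 *	ioctl(container, VFIO_IOMMU_ENABLE);	// accounts the 32-bit window
 *	map.vaddr = (__u64)(uintptr_t)buf;	// page-aligned user buffer
 *	map.iova = info.dma32_window_start;
 *	map.size = 0x10000;	// multiple of the IOMMU page size
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 *
 * The v2 flow instead preregisters memory first
 * (VFIO_IOMMU_SPAPR_REGISTER_MEMORY) and never calls VFIO_IOMMU_ENABLE.
 */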
static long tce_iommu_ioctl(void *iommu_data,
		unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz, ddwsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			ret = 1;
			break;
		default:
			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
			break;
		}

		return (ret < 0) ? 0 : ret;
	}

	/*
	 * Sanity check to prevent one userspace from manipulating
	 * another userspace mm.
	 */
	BUG_ON(!container);
	if (container->mm && container->mm != current->mm)
		return -EPERM;

	switch (cmd) {
	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;
		memset(&info.ddw, 0, sizeof(info.ddw));

		if (table_group->max_dynamic_windows_supported &&
				container->v2) {
			info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
			info.ddw.pgsizes = table_group->pgsizes;
			info.ddw.max_dynamic_windows_supported =
				table_group->max_dynamic_windows_supported;
			info.ddw.levels = table_group->max_levels;
		}

		ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

		if (info.argsz >= ddwsz)
			minsz = ddwsz;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		if (container->v2)
			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);
		else
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		if (!container->mm)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
					cmd, arg);
			if (ret)
				return ret;
		}
		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_CREATE: {
		struct vfio_iommu_spapr_tce_create create;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
				start_addr);

		if (copy_from_user(&create, (void __user *)arg, minsz))
			return -EFAULT;

		if (create.argsz < minsz)
			return -EINVAL;

		if (create.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_create_default_window(container);
		if (!ret)
			ret = tce_iommu_create_window(container,
					create.page_shift,
					create.window_size, create.levels,
					&create.start_addr);

		mutex_unlock(&container->lock);

		if (!ret && copy_to_user((void __user *)arg, &create, minsz))
			ret = -EFAULT;

		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
		struct vfio_iommu_spapr_tce_remove remove;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
				start_addr);

		if (copy_from_user(&remove, (void __user *)arg, minsz))
			return -EFAULT;

		if (remove.argsz < minsz)
			return -EINVAL;

		if (remove.flags)
			return -EINVAL;

		if (container->def_window_pending && !remove.start_addr) {
			container->def_window_pending = false;
			return 0;
		}

		mutex_lock(&container->lock);

		ret = tce_iommu_remove_window(container, remove.start_addr);

		mutex_unlock(&container->lock);

		return ret;
	}
	}

	return -ENOTTY;
}

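/*
 * Two ownership models follow.  Old-style platforms hand over the
 * preallocated tables themselves (per-table take/release below), while
 * DDW-capable platforms hand over the whole group, with windows
 * (un)programmed through table_group->ops (the *_ddw variants).
 */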
static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		if (tbl->it_map)
			iommu_release_ownership(tbl);

		container->tables[i] = NULL;
	}
}

static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = iommu_take_ownership(tbl);
		if (rc) {
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

			return rc;
		}
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		container->tables[i] = table_group->tables[i];

	return 0;
}

static void tce_iommu_release_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;

	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		if (container->tables[i])
			table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);
}

static long tce_iommu_take_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i, ret = 0;

	if (!table_group->ops->create_table || !table_group->ops->set_window ||
			!table_group->ops->release_ownership) {
		WARN_ON_ONCE(1);
		return -EFAULT;
	}

	table_group->ops->take_ownership(table_group);

	/* Set all windows to the new group */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		ret = table_group->ops->set_window(table_group, i, tbl);
		if (ret)
			goto release_exit;
	}

	return 0;

release_exit:
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);

	return ret;
}

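/*
 * v1 containers pair with old-style groups (no take_ownership in ops),
 * v2 containers with DDW-capable ones; mixing the two models fails with
 * -EPERM at attach time.
 */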
static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret = 0;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);
	if (!table_group) {
		ret = -ENODEV;
		goto unlock_exit;
	}

	if (tce_groups_attached(container) && (!table_group->ops ||
			!table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* Check if new group has the same iommu_ops (i.e. compatible) */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
		if (table_group_tmp->ops->create_table !=
				table_group->ops->create_table) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership) {
		if (container->v2) {
			ret = -EPERM;
			goto free_exit;
		}
		ret = tce_iommu_take_ownership(container, table_group);
	} else {
		if (!container->v2) {
			ret = -EPERM;
			goto free_exit;
		}
		ret = tce_iommu_take_ownership_ddw(container, table_group);
		if (!tce_groups_attached(container) && !container->tables[0])
			container->def_window_pending = true;
	}

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
	}

free_exit:
	if (ret && tcegrp)
		kfree(tcegrp);

unlock_exit:
	mutex_unlock(&container->lock);

	return ret;
}

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}

	list_del(&tcegrp->next);
	kfree(tcegrp);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);
	else
		tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}

static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);