/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp. All rights reserved.
 * Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 * Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>

#include <asm/iommu.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION	"0.1"
#define DRIVER_AUTHOR	"aik@ozlabs.ru"
#define DRIVER_DESC	"VFIO IOMMU SPAPR TCE"
static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);
static long try_increment_locked_vm(struct mm_struct *mm, long npages)
	long ret = 0, locked, lock_limit;

	if (WARN_ON_ONCE(!mm))

	down_write(&mm->mmap_sem);
	locked = mm->locked_vm + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK))

		mm->locked_vm += npages;

	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&mm->mmap_sem);
static void decrement_locked_vm(struct mm_struct *mm, long npages)
	down_write(&mm->mmap_sem);
	if (WARN_ON_ONCE(npages > mm->locked_vm))
		npages = mm->locked_vm;
	mm->locked_vm -= npages;
	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);
/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */
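
/*
 * Illustrative userspace flow for this driver (a sketch, not part of the
 * original file; the ioctls and structs are from the VFIO uAPI, while
 * "group", "buf", "buf_size" and "dma_window_start" are assumed to be set
 * up elsewhere):
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *	ioctl(container, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU);
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
 *	ioctl(container, VFIO_IOMMU_ENABLE);
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)buf,
 *		.iova = dma_window_start,
 *		.size = buf_size,
 *	};
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 */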
struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};
/*
 * A container needs to remember which preregistered region it has
 * referenced to do proper cleanup at the userspace process exit.
 */
struct tce_iommu_prereg {
	struct list_head next;
	struct mm_iommu_table_group_mem_t *mem;
};
/*
 * The container descriptor supports only a single group per container.
 * Required by the API as the container is not supplied with the IOMMU group
 * at the moment of initialization.
 */
struct tce_container {
	bool def_window_pending;
	unsigned long locked_pages;
	struct mm_struct *mm;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
	struct list_head prereg_list;
static long tce_iommu_mm_set(struct tce_container *container)
	if (container->mm == current->mm)

	BUG_ON(!current->mm);
	container->mm = current->mm;
	atomic_inc(&container->mm->mm_count);
static long tce_iommu_prereg_free(struct tce_container *container,
		struct tce_iommu_prereg *tcemem)
	ret = mm_iommu_put(container->mm, tcemem->mem);

	list_del(&tcemem->next);
static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
	struct mm_iommu_table_group_mem_t *mem;
	struct tce_iommu_prereg *tcemem;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))

	mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);

	list_for_each_entry(tcemem, &container->prereg_list, next) {
		if (tcemem->mem == mem) {

	return tce_iommu_prereg_free(container, tcemem);
static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
	struct mm_iommu_table_group_mem_t *mem = NULL;
	struct tce_iommu_prereg *tcemem;
	unsigned long entries = size >> PAGE_SHIFT;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))

	mem = mm_iommu_find(container->mm, vaddr, entries);
	list_for_each_entry(tcemem, &container->prereg_list, next) {
		if (tcemem->mem == mem)

	ret = mm_iommu_get(container->mm, vaddr, entries, &mem);

	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
		mm_iommu_put(container->mm, mem);

	list_add(&tcemem->next, &container->prereg_list);

	container->enabled = true;
static bool tce_page_is_contained(struct page *page, unsigned page_shift)
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
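
/*
 * Illustrative numbers for the containment check above (not from the
 * original file): with a 64K TCE page (page_shift == 16) backed by an
 * ordinary 4K page (PAGE_SHIFT == 12, compound_order == 0), 12 >= 16 fails
 * and the mapping is rejected; backed by a 16M huge page
 * (compound_order == 12 on a 4K base), 12 + 12 >= 16 passes.
 */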
static inline bool tce_groups_attached(struct tce_container *container)
	return !list_empty(&container->group_list);
static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
static int tce_iommu_find_free_table(struct tce_container *container)
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (!container->tables[i])
static int tce_iommu_enable(struct tce_container *container)
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (container->enabled)
	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap. For powerpc, the map/unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult, if not
	 * impossible, to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled. The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also we don't have a nice way to fail on H_PUT_TCE due to ulimits;
	 * that would effectively kill the guest at random points, so it is
	 * much better to enforce the limit based on the max that the guest
	 * can map.
	 *
	 * Unfortunately at the moment it counts whole tables, no matter how
	 * much memory the guest has. I.e. for a 4GB guest and 4 IOMMU groups,
	 * each with a 2GB DMA window, 8GB will be counted here. The reason for
	 * this is that we cannot tell here the amount of RAM used by the guest
	 * as this information is only available from KVM and VFIO is
	 * KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
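
	/*
	 * Worked example (illustrative numbers, not from the original file):
	 * with a 2GB default 32-bit window (table_group->tce32_size ==
	 * 0x80000000) and 64K system pages (PAGE_SHIFT == 16), the code below
	 * charges 0x80000000 >> 16 == 32768 pages against RLIMIT_MEMLOCK,
	 * regardless of how much of the window the guest actually maps.
	 */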
	if (!tce_groups_attached(container))

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);

	if (!table_group->tce32_size)

	ret = tce_iommu_mm_set(container);

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = try_increment_locked_vm(container->mm, locked);

	container->locked_pages = locked;

	container->enabled = true;
static void tce_iommu_disable(struct tce_container *container)
	if (!container->enabled)

	container->enabled = false;

	BUG_ON(!container->mm);
	decrement_locked_vm(container->mm, container->locked_pages);
static void *tce_iommu_open(unsigned long arg)
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);

	container = kzalloc(sizeof(*container), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);
	INIT_LIST_HEAD_RCU(&container->prereg_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl);
static void tce_iommu_release(void *iommu_data)
	struct tce_container *container = iommu_data;
	struct tce_iommu_group *tcegrp;
	struct tce_iommu_prereg *tcemem, *tmtmp;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(container, tbl);

	list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
		WARN_ON(tce_iommu_prereg_free(container, tcemem));

	tce_iommu_disable(container);
		mmdrop(container->mm);
	mutex_destroy(&container->lock);
static void tce_iommu_unuse_page(struct tce_container *container,

	page = pfn_to_page(hpa >> PAGE_SHIFT);
static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
		unsigned long tce, unsigned long shift,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);

	ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
static void tce_iommu_unuse_page_v2(struct tce_container *container,
		struct iommu_table *tbl, unsigned long entry)
	struct mm_iommu_table_group_mem_t *mem = NULL;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
			tbl->it_page_shift, &hpa, &mem);
		pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n",
				__func__, be64_to_cpu(*pua), entry, ret);

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
	unsigned long oldhpa;
	enum dma_data_direction direction;

	for ( ; pages; --pages, ++entry) {
		direction = DMA_NONE;
		ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);

		if (direction == DMA_NONE)

			tce_iommu_unuse_page_v2(container, tbl, entry);

		tce_iommu_unuse_page(container, oldhpa);
static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE, &page) != 1)

	*hpa = __pa((unsigned long) page_address(page));
static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {

		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);

	tce_iommu_clear(container, tbl, entry, i);
static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(container,
				tce, tbl->it_page_shift, &hpa, &mem);

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))

		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(container, tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(container, tbl, entry + i);

		*pua = cpu_to_be64(tce);

		tce += IOMMU_PAGE_SIZE(tbl);

	tce_iommu_clear(container, tbl, entry, i);
static long tce_iommu_create_table(struct tce_container *container,
		struct iommu_table_group *table_group,
		struct iommu_table **ptbl)
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,

	ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size > table_size));
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl)
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	iommu_tce_table_put(tbl);
	decrement_locked_vm(container->mm, pages);
static long tce_iommu_create_window(struct tce_container *container,
		__u32 page_shift, __u64 window_size, __u32 levels,
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl = NULL;

	num = tce_iommu_find_free_table(container);

	/* Get the first group for ops::create_table */
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);

	if (!(table_group->pgsizes & (1ULL << page_shift)))

	if (!table_group->ops->set_window || !table_group->ops->unset_window ||
			!table_group->ops->get_table_size ||
			!table_group->ops->create_table)

	/* Create TCE table */
	ret = tce_iommu_create_table(container, table_group, num,
			page_shift, window_size, levels, &tbl);

	BUG_ON(!tbl->it_ops->free);

	/*
	 * Program the table to every group.
	 * Groups have been tested for compatibility at the attach time.
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		ret = table_group->ops->set_window(table_group, num, tbl);

	container->tables[num] = tbl;

	/* Return start address assigned by platform in create_table() */
	*start_addr = tbl->it_offset << tbl->it_page_shift;

	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		table_group->ops->unset_window(table_group, num);

	tce_iommu_free_table(container, tbl);
static long tce_iommu_remove_window(struct tce_container *container,
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl;
	struct tce_iommu_group *tcegrp;

	num = tce_iommu_find_table(container, start_addr, &tbl);

	BUG_ON(!tbl->it_size);

	/* Detach groups from IOMMUs */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		/*
		 * SPAPR TCE IOMMU exposes the default DMA window to
		 * the guest via dma32_window_start/size of
		 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
		 * the userspace to remove this window, some do not so
		 * here we check for the platform capability.
		 */
		if (!table_group->ops || !table_group->ops->unset_window)

		table_group->ops->unset_window(table_group, num);

	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
	tce_iommu_free_table(container, tbl);
	container->tables[num] = NULL;
static long tce_iommu_create_default_window(struct tce_container *container)
	__u64 start_addr = 0;
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;

	if (!container->def_window_pending)

	if (!tce_groups_attached(container))

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);

	ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
			table_group->tce32_size, 1, &start_addr);
	WARN_ON_ONCE(!ret && start_addr);

	container->def_window_pending = false;
static long tce_iommu_ioctl(void *iommu_data,
		unsigned int cmd, unsigned long arg)
	struct tce_container *container = iommu_data;
	unsigned long minsz, ddwsz;

	case VFIO_CHECK_EXTENSION:
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:

			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);

		return (ret < 0) ? 0 : ret;

	/*
	 * Sanity check to prevent one userspace from manipulating
	 * another userspace mm.
	 */
	if (container->mm && container->mm != current->mm)

	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,

		if (copy_from_user(&info, (void __user *)arg, minsz))

		if (info.argsz < minsz)

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;

		memset(&info.ddw, 0, sizeof(info.ddw));

		if (table_group->max_dynamic_windows_supported &&
			info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
			info.ddw.pgsizes = table_group->pgsizes;
			info.ddw.max_dynamic_windows_supported =
				table_group->max_dynamic_windows_supported;
			info.ddw.levels = table_group->max_levels;

		ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

		if (info.argsz >= ddwsz)

		if (copy_to_user((void __user *)arg, &info, minsz))
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		enum dma_data_direction direction;

		if (!container->enabled)

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))

		if (param.argsz < minsz)

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))

		ret = tce_iommu_create_default_window(container);

		num = tce_iommu_find_table(container, param.iova, &tbl);

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
				direction = DMA_TO_DEVICE;
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);

			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.size >> tbl->it_page_shift,
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.size >> tbl->it_page_shift,

		iommu_flush_tce(tbl);
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;

		if (!container->enabled)

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,

		if (copy_from_user(&param, (void __user *)arg, minsz))

		if (param.argsz < minsz)

		/* No flag is supported now */

		ret = tce_iommu_create_default_window(container);

		num = tce_iommu_find_table(container, param.iova, &tbl);

		if (param.size & ~IOMMU_PAGE_MASK(tbl))

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,

		ret = tce_iommu_mm_set(container);

		if (copy_from_user(&param, (void __user *)arg, minsz))

		if (param.argsz < minsz)

		/* No flag is supported now */

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,

		mutex_unlock(&container->lock);
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,

		if (copy_from_user(&param, (void __user *)arg, minsz))

		if (param.argsz < minsz)

		/* No flag is supported now */

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,

		mutex_unlock(&container->lock);
	case VFIO_IOMMU_ENABLE:
		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);

	case VFIO_IOMMU_DISABLE:
		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
	case VFIO_IOMMU_SPAPR_TCE_CREATE: {
		struct vfio_iommu_spapr_tce_create create;

		ret = tce_iommu_mm_set(container);

		if (!tce_groups_attached(container))

		minsz = offsetofend(struct vfio_iommu_spapr_tce_create,

		if (copy_from_user(&create, (void __user *)arg, minsz))

		if (create.argsz < minsz)

		mutex_lock(&container->lock);

		ret = tce_iommu_create_default_window(container);
			ret = tce_iommu_create_window(container,
					create.window_size, create.levels,
					&create.start_addr);

		mutex_unlock(&container->lock);

		if (!ret && copy_to_user((void __user *)arg, &create, minsz))
	case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
		struct vfio_iommu_spapr_tce_remove remove;

		ret = tce_iommu_mm_set(container);

		if (!tce_groups_attached(container))

		minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,

		if (copy_from_user(&remove, (void __user *)arg, minsz))

		if (remove.argsz < minsz)

		if (container->def_window_pending && !remove.start_addr) {
			container->def_window_pending = false;

		mutex_lock(&container->lock);

		ret = tce_iommu_remove_window(container, remove.start_addr);

		mutex_unlock(&container->lock);
static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);

		iommu_release_ownership(tbl);

		container->tables[i] = NULL;
static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)

		rc = iommu_take_ownership(tbl);
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		container->tables[i] = table_group->tables[i];
static void tce_iommu_release_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
	if (!table_group->ops->unset_window) {

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);
static long tce_iommu_take_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
	if (!table_group->ops->create_table || !table_group->ops->set_window ||
			!table_group->ops->release_ownership) {

	table_group->ops->take_ownership(table_group);

	/* Set all windows to the new group */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		ret = table_group->ops->set_window(table_group, i, tbl);

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);
static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);

	if (tce_groups_attached(container) && (!table_group->ops ||
			!table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)) {

	/* Check if new group has the same iommu_ops (i.e. compatible) */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));

		table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
		if (table_group_tmp->ops->create_table !=
				table_group->ops->create_table) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp->grp));

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership) {
		if (container->v2) {

		ret = tce_iommu_take_ownership(container, table_group);
		if (!container->v2) {

		ret = tce_iommu_take_ownership_ddw(container, table_group);
		if (!tce_groups_attached(container) && !container->tables[0])
			container->def_window_pending = true;

	tcegrp->grp = iommu_group;
	list_add(&tcegrp->next, &container->group_list);

	mutex_unlock(&container->lock);
static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {

		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));

	list_del(&tcegrp->next);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);

		tce_iommu_release_ownership_ddw(container, table_group);

	mutex_unlock(&container->lock);
const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};
static int __init tce_iommu_init(void)
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);

static void __exit tce_iommu_cleanup(void)
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);