/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <asm/iommu.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION  "0.1"
#define DRIVER_AUTHOR   "aik@ozlabs.ru"
#define DRIVER_DESC     "VFIO IOMMU SPAPR TCE"
static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);
static long try_increment_locked_vm(struct mm_struct *mm, long npages)
{
	long ret = 0, locked, lock_limit;

	if (WARN_ON_ONCE(!mm))
		return -EPERM;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);
	locked = mm->locked_vm + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		ret = -ENOMEM;
	else
		mm->locked_vm += npages;

	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&mm->mmap_sem);

	return ret;
}
static void decrement_locked_vm(struct mm_struct *mm, long npages)
{
	if (!mm || !npages)
		return;

	down_write(&mm->mmap_sem);
	if (WARN_ON_ONCE(npages > mm->locked_vm))
		npages = mm->locked_vm;
	mm->locked_vm -= npages;
	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);
}
/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU.
 */

struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};

/*
 * A container needs to remember which preregistered region it has
 * referenced to do proper cleanup at the userspace process exit.
 */
struct tce_iommu_prereg {
	struct list_head next;
	struct mm_iommu_table_group_mem_t *mem;
};

/*
 * The container descriptor supports only a single group per container.
 * Required by the API as the container is not supplied with the IOMMU group
 * at the moment of initialization.
 */
struct tce_container {
	struct mutex lock;
	bool enabled;
	bool v2;
	bool def_window_pending;
	unsigned long locked_pages;
	struct mm_struct *mm;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
	struct list_head prereg_list;
};
static long tce_iommu_mm_set(struct tce_container *container)
{
	if (container->mm) {
		if (container->mm == current->mm)
			return 0;
		return -EPERM;
	}
	BUG_ON(!current->mm);
	container->mm = current->mm;
	atomic_inc(&container->mm->mm_count);

	return 0;
}
static long tce_iommu_prereg_free(struct tce_container *container,
		struct tce_iommu_prereg *tcemem)
{
	long ret;

	ret = mm_iommu_put(container->mm, tcemem->mem);
	if (ret)
		return ret;

	list_del(&tcemem->next);
	kfree(tcemem);

	return 0;
}
static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;
	struct tce_iommu_prereg *tcemem;
	bool found = false;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	list_for_each_entry(tcemem, &container->prereg_list, next) {
		if (tcemem->mem == mem) {
			found = true;
			break;
		}
	}

	if (!found)
		return -ENOENT;

	return tce_iommu_prereg_free(container, tcemem);
}
static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	struct tce_iommu_prereg *tcemem;
	unsigned long entries = size >> PAGE_SHIFT;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	mem = mm_iommu_find(container->mm, vaddr, entries);
	if (mem) {
		list_for_each_entry(tcemem, &container->prereg_list, next) {
			if (tcemem->mem == mem)
				return -EBUSY;
		}
	}

	ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
	if (ret)
		return ret;

	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
	if (!tcemem) {
		mm_iommu_put(container->mm, mem);
		return -ENOMEM;
	}

	tcemem->mem = mem;
	list_add(&tcemem->next, &container->prereg_list);

	container->enabled = true;

	return 0;
}
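
/*
 * Userspace usage sketch (illustrative only, not part of this driver): the
 * v2 interface expects guest RAM to be pre-registered before TCEs are put,
 * so that the real-mode handlers can translate userspace addresses without
 * faulting pages in. The buffer name and size below are assumptions.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/vfio.h>
 *
 *	struct vfio_iommu_spapr_register_memory reg = {
 *		.argsz = sizeof(reg),
 *		.flags = 0,
 *		.vaddr = (__u64)(unsigned long)guest_ram,	// page aligned
 *		.size  = guest_ram_bytes,			// page aligned
 *	};
 *	ioctl(container_fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
 *	// ... map/unmap DMA, then tear down symmetrically:
 *	ioctl(container_fd, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
 */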
static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
		struct mm_struct *mm)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);
	unsigned long *uas;
	long ret;

	BUG_ON(tbl->it_userspace);

	ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
	if (ret)
		return ret;

	uas = vzalloc(cb);
	if (!uas) {
		decrement_locked_vm(mm, cb >> PAGE_SHIFT);
		return -ENOMEM;
	}
	tbl->it_userspace = uas;

	return 0;
}
static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
		struct mm_struct *mm)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);

	if (!tbl->it_userspace)
		return;

	vfree(tbl->it_userspace);
	tbl->it_userspace = NULL;
	decrement_locked_vm(mm, cb >> PAGE_SHIFT);
}
static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}
static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}
static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}
static int tce_iommu_find_free_table(struct tce_container *container)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (!container->tables[i])
			return i;
	}

	return -ENOSPC;
}
static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap.  For powerpc, the map/unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult or impossible
	 * to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled.  The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also we don't have a nice way to fail on H_PUT_TCE due to ulimits;
	 * that would effectively kill the guest at random points, so it is
	 * much better to enforce the limit based on the maximum that the
	 * guest can map.
	 *
	 * Unfortunately at the moment it counts whole tables, no matter how
	 * much memory the guest has. I.e. for a 4GB guest and 4 IOMMU groups,
	 * each with a 2GB DMA window, 8GB will be counted here. The reason for
	 * this is that we cannot tell here the amount of RAM used by the guest
	 * as this information is only available from KVM and VFIO is
	 * KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	ret = tce_iommu_mm_set(container);
	if (ret)
		return ret;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = try_increment_locked_vm(container->mm, locked);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}
static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	BUG_ON(!container->mm);
	decrement_locked_vm(container->mm, container->locked_pages);
}
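
/*
 * Userspace usage sketch (illustrative only, not part of this driver): a v1
 * container pairs VFIO_IOMMU_ENABLE/VFIO_IOMMU_DISABLE with a memlock limit
 * large enough for the whole 32-bit DMA window, because tce_iommu_enable()
 * accounts the entire window against RLIMIT_MEMLOCK up front. The 2GB limit
 * below is an assumption.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/resource.h>
 *	#include <linux/vfio.h>
 *
 *	// Raise the locked memory limit first (or run with CAP_IPC_LOCK),
 *	// otherwise try_increment_locked_vm() fails with -ENOMEM.
 *	struct rlimit rlim = { .rlim_cur = 2UL << 30, .rlim_max = 2UL << 30 };
 *	setrlimit(RLIMIT_MEMLOCK, &rlim);
 *
 *	ioctl(container_fd, VFIO_IOMMU_ENABLE);
 *	// ... VFIO_IOMMU_MAP_DMA / VFIO_IOMMU_UNMAP_DMA ...
 *	ioctl(container_fd, VFIO_IOMMU_DISABLE);
 */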
static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);
	INIT_LIST_HEAD_RCU(&container->prereg_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}
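
/*
 * Userspace usage sketch (illustrative only, not part of this driver): how a
 * container reaches tce_iommu_open(). The group number "33" and the missing
 * error handling are assumptions for brevity.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vfio.h>
 *
 *	int container_fd = open("/dev/vfio/vfio", O_RDWR);
 *	int group_fd = open("/dev/vfio/33", O_RDWR);
 *
 *	// Binding the group makes the container ready; VFIO_SET_IOMMU then
 *	// calls tce_iommu_open() with the requested IOMMU type as @arg.
 *	ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container_fd);
 *	if (ioctl(container_fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU) == 1)
 *		ioctl(container_fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_v2_IOMMU);
 *	else
 *		ioctl(container_fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
 */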
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl);
static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct tce_iommu_group *tcegrp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(container, tbl);
	}

	while (!list_empty(&container->prereg_list)) {
		struct tce_iommu_prereg *tcemem;

		tcemem = list_first_entry(&container->prereg_list,
				struct tce_iommu_prereg, next);
		WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem));
	}

	tce_iommu_disable(container);
	if (container->mm)
		mmdrop(container->mm);
	mutex_destroy(&container->lock);

	kfree(container);
}
static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	put_page(page);
}
static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
		unsigned long tce, unsigned long size,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(container->mm, tce, size);
	if (!mem)
		return -EINVAL;

	ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
	if (ret)
		return -EINVAL;

	*pmem = mem;

	return 0;
}
static void tce_iommu_unuse_page_v2(struct tce_container *container,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
			&hpa, &mem);
	if (ret)
		pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
				__func__, *pua, entry, ret);
	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = 0;
}
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;

	for ( ; pages; --pages, ++entry) {
		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(container, tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(container, oldhpa);
	}

	return 0;
}
static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE, &page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}
static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}
static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	if (!tbl->it_userspace) {
		ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
		if (ret)
			return ret;
	}

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
				entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(container,
				tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;

		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(container, tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(container, tbl, entry + i);

		*pua = tce;

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}
static long tce_iommu_create_table(struct tce_container *container,
			struct iommu_table_group *table_group,
			int num,
			__u32 page_shift,
			__u64 window_size,
			__u32 levels,
			struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));

	return ret;
}
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	tce_iommu_userspace_view_free(tbl, container->mm);
	tbl->it_ops->free(tbl);
	decrement_locked_vm(container->mm, pages);
}
static long tce_iommu_create_window(struct tce_container *container,
		__u32 page_shift, __u64 window_size, __u32 levels,
		__u64 *start_addr)
{
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl = NULL;
	long ret, num;

	num = tce_iommu_find_free_table(container);
	if (num < 0)
		return num;

	/* Get the first group for ops::create_table */
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -EFAULT;

	if (!(table_group->pgsizes & (1ULL << page_shift)))
		return -EINVAL;

	if (!table_group->ops->set_window || !table_group->ops->unset_window ||
			!table_group->ops->get_table_size ||
			!table_group->ops->create_table)
		return -EPERM;

	/* Create TCE table */
	ret = tce_iommu_create_table(container, table_group, num,
			page_shift, window_size, levels, &tbl);
	if (ret)
		return ret;

	BUG_ON(!tbl->it_ops->free);

	/*
	 * Program the table to every group.
	 * Groups have been tested for compatibility at the attach time.
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		ret = table_group->ops->set_window(table_group, num, tbl);
		if (ret)
			goto unset_exit;
	}

	container->tables[num] = tbl;

	/* Return start address assigned by platform in create_table() */
	*start_addr = tbl->it_offset << tbl->it_page_shift;

	return 0;

unset_exit:
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		table_group->ops->unset_window(table_group, num);
	}
	tce_iommu_free_table(container, tbl);

	return ret;
}
static long tce_iommu_remove_window(struct tce_container *container,
		__u64 start_addr)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl;
	struct tce_iommu_group *tcegrp;
	int num;

	num = tce_iommu_find_table(container, start_addr, &tbl);
	if (num < 0)
		return -EINVAL;

	BUG_ON(!tbl->it_size);

	/* Detach groups from IOMMUs */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		/*
		 * SPAPR TCE IOMMU exposes the default DMA window to
		 * the guest via dma32_window_start/size of
		 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
		 * the userspace to remove this window, some do not, so
		 * here we check for the platform capability.
		 */
		if (!table_group->ops || !table_group->ops->unset_window)
			return -EPERM;

		table_group->ops->unset_window(table_group, num);
	}

	/* Free table */
	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
	tce_iommu_free_table(container, tbl);
	container->tables[num] = NULL;

	return 0;
}
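
/*
 * Userspace usage sketch (illustrative only, not part of this driver):
 * creating and removing a dynamic DMA window on a v2 container. The 64K
 * page shift, 1GB window and single level are assumptions; valid values
 * come from the ddw part of VFIO_IOMMU_SPAPR_TCE_GET_INFO.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/vfio.h>
 *
 *	struct vfio_iommu_spapr_tce_create create = {
 *		.argsz = sizeof(create),
 *		.page_shift = 16,		// 64K IOMMU pages
 *		.window_size = 1ULL << 30,	// 1GB of IOVA space
 *		.levels = 1,
 *	};
 *	if (ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create) == 0) {
 *		// create.start_addr now holds the bus address chosen by the
 *		// platform in ops->create_table().
 *		struct vfio_iommu_spapr_tce_remove remove = {
 *			.argsz = sizeof(remove),
 *			.start_addr = create.start_addr,
 *		};
 *		ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
 *	}
 */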
static long tce_iommu_create_default_window(struct tce_container *container)
{
	long ret;
	__u64 start_addr = 0;
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;

	if (!container->def_window_pending)
		return 0;

	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
			table_group->tce32_size, 1, &start_addr);
	WARN_ON_ONCE(!ret && start_addr);

	if (!ret)
		container->def_window_pending = false;

	return ret;
}
static long tce_iommu_ioctl(void *iommu_data,
				 unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz, ddwsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			ret = 1;
			break;
		default:
			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
			break;
		}

		return (ret < 0) ? 0 : ret;
	}

	/*
	 * Sanity check to prevent one userspace from manipulating
	 * another userspace mm.
	 */
	if (container->mm && container->mm != current->mm)
		return -EPERM;

	switch (cmd) {
	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;
		memset(&info.ddw, 0, sizeof(info.ddw));

		if (table_group->max_dynamic_windows_supported &&
				container->v2) {
			info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
			info.ddw.pgsizes = table_group->pgsizes;
			info.ddw.max_dynamic_windows_supported =
				table_group->max_dynamic_windows_supported;
			info.ddw.levels = table_group->max_levels;
		}

		ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

		if (info.argsz >= ddwsz)
			minsz = ddwsz;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		if (container->v2)
			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);
		else
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		if (!container->mm)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
					cmd, arg);
			if (ret)
				return ret;
		}
		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_CREATE: {
		struct vfio_iommu_spapr_tce_create create;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
				start_addr);

		if (copy_from_user(&create, (void __user *)arg, minsz))
			return -EFAULT;

		if (create.argsz < minsz)
			return -EINVAL;

		if (create.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_create_default_window(container);
		if (!ret)
			ret = tce_iommu_create_window(container,
					create.page_shift,
					create.window_size, create.levels,
					&create.start_addr);

		mutex_unlock(&container->lock);

		if (!ret && copy_to_user((void __user *)arg, &create, minsz))
			ret = -EFAULT;

		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
		struct vfio_iommu_spapr_tce_remove remove;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
				start_addr);

		if (copy_from_user(&remove, (void __user *)arg, minsz))
			return -EFAULT;

		if (remove.argsz < minsz)
			return -EINVAL;

		if (remove.flags)
			return -EINVAL;

		if (container->def_window_pending && !remove.start_addr) {
			container->def_window_pending = false;
			return 0;
		}

		mutex_lock(&container->lock);

		ret = tce_iommu_remove_window(container, remove.start_addr);

		mutex_unlock(&container->lock);

		return ret;
	}
	}

	return -ENOTTY;
}
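
/*
 * Userspace usage sketch (illustrative only, not part of this driver):
 * querying the default 32-bit window and mapping a buffer into it with the
 * type1 map/unmap structures reused by this backend. For a v1 container
 * VFIO_IOMMU_ENABLE must be issued first. Error handling is omitted and the
 * buffer name/size are assumptions.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/vfio.h>
 *
 *	struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
 *	ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(unsigned long)buf,	// IOMMU-page aligned
 *		.iova  = info.dma32_window_start,
 *		.size  = buf_bytes,			// IOMMU-page aligned
 *	};
 *	ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
 *
 *	struct vfio_iommu_type1_dma_unmap unmap = {
 *		.argsz = sizeof(unmap),
 *		.iova  = map.iova,
 *		.size  = map.size,
 *	};
 *	ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
 */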
static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_userspace_view_free(tbl, container->mm);
		if (tbl->it_map)
			iommu_release_ownership(tbl);

		container->tables[i] = NULL;
	}
}
static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = iommu_take_ownership(tbl);
		if (rc) {
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

			return rc;
		}
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		container->tables[i] = table_group->tables[i];

	return 0;
}
static void tce_iommu_release_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;

	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);
}
static long tce_iommu_take_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	if (!table_group->ops->create_table || !table_group->ops->set_window ||
			!table_group->ops->release_ownership) {
		WARN_ON_ONCE(1);
		return -EFAULT;
	}

	table_group->ops->take_ownership(table_group);

	return 0;
}
static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret = 0;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);
	if (!table_group) {
		ret = -ENODEV;
		goto unlock_exit;
	}

	if (tce_groups_attached(container) && (!table_group->ops ||
			!table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* Check if new group has the same iommu_ops (i.e. compatible) */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
		if (table_group_tmp->ops->create_table !=
				table_group->ops->create_table) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership) {
		ret = tce_iommu_take_ownership(container, table_group);
	} else {
		ret = tce_iommu_take_ownership_ddw(container, table_group);
		if (!tce_groups_attached(container) && !container->tables[0])
			container->def_window_pending = true;
	}

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
	}

unlock_exit:
	if (ret && tcegrp)
		kfree(tcegrp);

	mutex_unlock(&container->lock);

	return ret;
}
static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}

	list_del(&tcegrp->next);
	kfree(tcegrp);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);
	else
		tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}
const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};
static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);