Linux 4.19.133 - drivers/vfio/vfio_iommu_spapr_tce.c
/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>

#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION	"0.1"
#define DRIVER_AUTHOR	"aik@ozlabs.ru"
#define DRIVER_DESC	"VFIO IOMMU SPAPR TCE"

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);

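/*
 * Account pinned pages against the mm's RLIMIT_MEMLOCK.  Both helpers take
 * mmap_sem for writing so the locked_vm update stays consistent with other
 * users of the counter.
 */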
static long try_increment_locked_vm(struct mm_struct *mm, long npages)
{
	long ret = 0, locked, lock_limit;

	if (WARN_ON_ONCE(!mm))
		return -EPERM;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);
	locked = mm->locked_vm + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		ret = -ENOMEM;
	else
		mm->locked_vm += npages;

	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&mm->mmap_sem);

	return ret;
}

static void decrement_locked_vm(struct mm_struct *mm, long npages)
{
	if (!mm || !npages)
		return;

	down_write(&mm->mmap_sem);
	if (WARN_ON_ONCE(npages > mm->locked_vm))
		npages = mm->locked_vm;
	mm->locked_vm -= npages;
	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);
}

/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */

struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};

/*
 * A container needs to remember which preregistered region it has
 * referenced to do proper cleanup at the userspace process exit.
 */
struct tce_iommu_prereg {
	struct list_head next;
	struct mm_iommu_table_group_mem_t *mem;
};

/*
 * The container descriptor supports only a single group per container.
 * Required by the API as the container is not supplied with the IOMMU group
 * at the moment of initialization.
 */
struct tce_container {
	struct mutex lock;
	bool enabled;
	bool v2;
	bool def_window_pending;
	unsigned long locked_pages;
	struct mm_struct *mm;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
	struct list_head prereg_list;
};

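/*
 * Bind the container to the mm of the calling process.  The first caller
 * wins; any later ioctl issued from a different mm is rejected with -EPERM
 * so one userspace cannot manipulate another userspace's mappings.
 */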
static long tce_iommu_mm_set(struct tce_container *container)
{
	if (container->mm) {
		if (container->mm == current->mm)
			return 0;
		return -EPERM;
	}
	BUG_ON(!current->mm);
	container->mm = current->mm;
	atomic_inc(&container->mm->mm_count);

	return 0;
}

static long tce_iommu_prereg_free(struct tce_container *container,
		struct tce_iommu_prereg *tcemem)
{
	long ret;

	ret = mm_iommu_put(container->mm, tcemem->mem);
	if (ret)
		return ret;

	list_del(&tcemem->next);
	kfree(tcemem);

	return 0;
}

static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;
	struct tce_iommu_prereg *tcemem;
	bool found = false;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	list_for_each_entry(tcemem, &container->prereg_list, next) {
		if (tcemem->mem == mem) {
			found = true;
			break;
		}
	}

	if (!found)
		return -ENOENT;

	return tce_iommu_prereg_free(container, tcemem);
}

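/*
 * Pre-register a chunk of userspace memory via mm_iommu_get() so that the
 * v2 map path can translate userspace addresses to host physical addresses
 * without pinning pages at map time.  Registering the same region twice in
 * one container fails with -EBUSY.
 */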
static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	struct tce_iommu_prereg *tcemem;
	unsigned long entries = size >> PAGE_SHIFT;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	mem = mm_iommu_find(container->mm, vaddr, entries);
	if (mem) {
		list_for_each_entry(tcemem, &container->prereg_list, next) {
			if (tcemem->mem == mem)
				return -EBUSY;
		}
	}

	ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
	if (ret)
		return ret;

	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
	if (!tcemem) {
		mm_iommu_put(container->mm, mem);
		return -ENOMEM;
	}

	tcemem->mem = mem;
	list_add(&tcemem->next, &container->prereg_list);

	container->enabled = true;

	return 0;
}

static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}

static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}

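/*
 * Look up the DMA window (iommu_table) that covers @ioba and return its
 * index in container->tables[], or -1 if no window contains the address.
 */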
static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}

static int tce_iommu_find_free_table(struct tce_container *container)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (!container->tables[i])
			return i;
	}

	return -ENOSPC;
}

static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap.  For powerpc, the map/unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult or impossible
	 * to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled.  The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also we don't have a nice way to fail on H_PUT_TCE due to ulimits;
	 * that would effectively kill the guest at random points, so it is
	 * much better to enforce the limit based on the maximum that the
	 * guest can map.
	 *
	 * Unfortunately at the moment it counts whole tables, no matter how
	 * much memory the guest has, i.e. for a 4GB guest and 4 IOMMU groups
	 * each with a 2GB DMA window, 8GB will be counted here.  The reason
	 * for this is that we cannot tell here the amount of RAM used by the
	 * guest as this information is only available from KVM and VFIO is
	 * KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	ret = tce_iommu_mm_set(container);
	if (ret)
		return ret;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = try_increment_locked_vm(container->mm, locked);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}

static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	BUG_ON(!container->mm);
	decrement_locked_vm(container->mm, container->locked_pages);
}

static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);
	INIT_LIST_HEAD_RCU(&container->prereg_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}

static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl);

static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct tce_iommu_group *tcegrp;
	struct tce_iommu_prereg *tcemem, *tmtmp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(container, tbl);
	}

	list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
		WARN_ON(tce_iommu_prereg_free(container, tcemem));

	tce_iommu_disable(container);
	if (container->mm)
		mmdrop(container->mm);
	mutex_destroy(&container->lock);

	kfree(container);
}

static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	put_page(page);
}

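/*
 * Translate a userspace address to a host physical address via the
 * preregistered memory list (v2 containers only).  On success *pmem is set
 * to the matching region so the caller can adjust its mapped counter.
 */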
static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
		unsigned long tce, unsigned long shift,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
	if (!mem)
		return -EINVAL;

	ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
	if (ret)
		return -EINVAL;

	*pmem = mem;

	return 0;
}

static void tce_iommu_unuse_page_v2(struct tce_container *container,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
			tbl->it_page_shift, &hpa, &mem);
	if (ret)
		pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n",
				__func__, be64_to_cpu(*pua), entry, ret);
	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);
}

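/*
 * Clear a range of TCE entries: exchange each entry with an empty one and
 * drop the reference on whatever was mapped there (v1 pins are released with
 * put_page(), v2 decrements the preregistered region's mapped counter).
 */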
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;

	for ( ; pages; --pages, ++entry) {
		cond_resched();

		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(container, tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(container, oldhpa);
	}

	return 0;
}

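/*
 * Pin one userspace page with get_user_pages_fast() and return its host
 * physical address; the pin is dropped later by tce_iommu_unuse_page().
 */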
static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE, &page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}

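/*
 * v1 map path: pin each userspace page, check it is large enough to back a
 * TCE of this table's page size, and program the entry with iommu_tce_xchg().
 * If an entry was already mapped, the previous page is released; on error
 * the partially built range is torn down with tce_iommu_clear().
 */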
static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}

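/*
 * v2 map path: like tce_iommu_build() but the pages were already pinned by
 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY, so only the preregistered region's
 * mapped counter is bumped and the userspace address is cached in the
 * table's userspace view for later unmapping.
 */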
static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(container,
				tce, tbl->it_page_shift, &hpa, &mem);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;

		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(container, tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(container, tbl, entry + i);

		*pua = cpu_to_be64(tce);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}

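/*
 * Allocate a hardware TCE table via the platform's create_table() callback.
 * The table memory is charged against RLIMIT_MEMLOCK before the allocation
 * is attempted.
 */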
static long tce_iommu_create_table(struct tce_container *container,
		struct iommu_table_group *table_group,
		int num,
		__u32 page_shift,
		__u64 window_size,
		__u32 levels,
		struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size > table_size));

	return ret;
}

static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	iommu_tce_table_put(tbl);
	decrement_locked_vm(container->mm, pages);
}

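/*
 * Create a DMA window in a free table slot and program it into every group
 * attached to the container.  If programming any group fails, the window is
 * unset everywhere and the table is freed.
 */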
static long tce_iommu_create_window(struct tce_container *container,
		__u32 page_shift, __u64 window_size, __u32 levels,
		__u64 *start_addr)
{
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl = NULL;
	long ret, num;

	num = tce_iommu_find_free_table(container);
	if (num < 0)
		return num;

	/* Get the first group for ops::create_table */
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -EFAULT;

	if (!(table_group->pgsizes & (1ULL << page_shift)))
		return -EINVAL;

	if (!table_group->ops->set_window || !table_group->ops->unset_window ||
			!table_group->ops->get_table_size ||
			!table_group->ops->create_table)
		return -EPERM;

	/* Create TCE table */
	ret = tce_iommu_create_table(container, table_group, num,
			page_shift, window_size, levels, &tbl);
	if (ret)
		return ret;

	BUG_ON(!tbl->it_ops->free);

	/*
	 * Program the table to every group.
	 * Groups have been tested for compatibility at the attach time.
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		ret = table_group->ops->set_window(table_group, num, tbl);
		if (ret)
			goto unset_exit;
	}

	container->tables[num] = tbl;

	/* Return start address assigned by platform in create_table() */
	*start_addr = tbl->it_offset << tbl->it_page_shift;

	return 0;

unset_exit:
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		table_group->ops->unset_window(table_group, num);
	}
	tce_iommu_free_table(container, tbl);

	return ret;
}

static long tce_iommu_remove_window(struct tce_container *container,
		__u64 start_addr)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl;
	struct tce_iommu_group *tcegrp;
	int num;

	num = tce_iommu_find_table(container, start_addr, &tbl);
	if (num < 0)
		return -EINVAL;

	BUG_ON(!tbl->it_size);

	/* Detach groups from IOMMUs */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		/*
		 * SPAPR TCE IOMMU exposes the default DMA window to
		 * the guest via dma32_window_start/size of
		 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
		 * the userspace to remove this window, some do not, so
		 * here we check for the platform capability.
		 */
		if (!table_group->ops || !table_group->ops->unset_window)
			return -EPERM;

		table_group->ops->unset_window(table_group, num);
	}

	/* Free table */
	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
	tce_iommu_free_table(container, tbl);
	container->tables[num] = NULL;

	return 0;
}

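/*
 * Lazily create the default 32-bit DMA window.  The creation is deferred
 * until the first DMA map/unmap or window-create ioctl so that a userspace
 * which removes the default window straight away (see
 * VFIO_IOMMU_SPAPR_TCE_REMOVE below) never has to allocate it at all.
 */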
static long tce_iommu_create_default_window(struct tce_container *container)
{
	long ret;
	__u64 start_addr = 0;
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;

	if (!container->def_window_pending)
		return 0;

	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
			table_group->tce32_size, 1, &start_addr);
	WARN_ON_ONCE(!ret && start_addr);

	if (!ret)
		container->def_window_pending = false;

	return ret;
}

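/*
 * Main ioctl dispatcher for the SPAPR TCE IOMMU backend.  Apart from
 * VFIO_CHECK_EXTENSION, any command issued from an mm other than the one
 * the container is already bound to is rejected with -EPERM.
 */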
static long tce_iommu_ioctl(void *iommu_data,
				 unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz, ddwsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			ret = 1;
			break;
		default:
			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
			break;
		}

		return (ret < 0) ? 0 : ret;
	}

	/*
	 * Sanity check to prevent one userspace from manipulating
	 * another userspace mm.
	 */
	BUG_ON(!container);
	if (container->mm && container->mm != current->mm)
		return -EPERM;

	switch (cmd) {
	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;
		memset(&info.ddw, 0, sizeof(info.ddw));

		if (table_group->max_dynamic_windows_supported &&
				container->v2) {
			info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
			info.ddw.pgsizes = table_group->pgsizes;
			info.ddw.max_dynamic_windows_supported =
				table_group->max_dynamic_windows_supported;
			info.ddw.levels = table_group->max_levels;
		}

		ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

		if (info.argsz >= ddwsz)
			minsz = ddwsz;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		if (container->v2)
			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);
		else
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		if (!container->mm)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
					cmd, arg);
			if (ret)
				return ret;
		}
		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_CREATE: {
		struct vfio_iommu_spapr_tce_create create;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
				start_addr);

		if (copy_from_user(&create, (void __user *)arg, minsz))
			return -EFAULT;

		if (create.argsz < minsz)
			return -EINVAL;

		if (create.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_create_default_window(container);
		if (!ret)
			ret = tce_iommu_create_window(container,
					create.page_shift,
					create.window_size, create.levels,
					&create.start_addr);

		mutex_unlock(&container->lock);

		if (!ret && copy_to_user((void __user *)arg, &create, minsz))
			ret = -EFAULT;

		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
		struct vfio_iommu_spapr_tce_remove remove;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
				start_addr);

		if (copy_from_user(&remove, (void __user *)arg, minsz))
			return -EFAULT;

		if (remove.argsz < minsz)
			return -EINVAL;

		if (remove.flags)
			return -EINVAL;

		if (container->def_window_pending && !remove.start_addr) {
			container->def_window_pending = false;
			return 0;
		}

		mutex_lock(&container->lock);

		ret = tce_iommu_remove_window(container, remove.start_addr);

		mutex_unlock(&container->lock);

		return ret;
	}
	}

	return -ENOTTY;
}

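/*
 * Ownership handover for groups without dynamic DMA window support: the
 * container takes over the platform-provided tables directly and hands them
 * back (after clearing all entries) on detach.
 */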
static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		if (tbl->it_map)
			iommu_release_ownership(tbl);

		container->tables[i] = NULL;
	}
}

static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = iommu_take_ownership(tbl);
		if (rc) {
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

			return rc;
		}
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		container->tables[i] = table_group->tables[i];

	return 0;
}

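/*
 * Ownership handover for DDW-capable groups: ownership is passed to VFIO via
 * the platform take_ownership()/release_ownership() callbacks and the
 * container's windows are programmed or removed through set_window() and
 * unset_window().
 */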
static void tce_iommu_release_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;

	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);
}

static long tce_iommu_take_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i, ret = 0;

	if (!table_group->ops->create_table || !table_group->ops->set_window ||
			!table_group->ops->release_ownership) {
		WARN_ON_ONCE(1);
		return -EFAULT;
	}

	table_group->ops->take_ownership(table_group);

	/* Set all windows to the new group */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		ret = table_group->ops->set_window(table_group, i, tbl);
		if (ret)
			goto release_exit;
	}

	return 0;

release_exit:
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);

	return ret;
}

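/*
 * Attach an IOMMU group to the container.  Additional groups are only
 * accepted for DDW-capable containers and only when they share the same
 * create_table() callback as the groups already attached, which is what
 * makes sharing one set of DMA windows across groups safe.
 */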
static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);
	if (!table_group) {
		ret = -ENODEV;
		goto unlock_exit;
	}

	if (tce_groups_attached(container) && (!table_group->ops ||
			!table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* Check if new group has the same iommu_ops (i.e. compatible) */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
		if (table_group_tmp->ops->create_table !=
				table_group->ops->create_table) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership) {
		if (container->v2) {
			ret = -EPERM;
			goto unlock_exit;
		}
		ret = tce_iommu_take_ownership(container, table_group);
	} else {
		if (!container->v2) {
			ret = -EPERM;
			goto unlock_exit;
		}
		ret = tce_iommu_take_ownership_ddw(container, table_group);
		if (!tce_groups_attached(container) && !container->tables[0])
			container->def_window_pending = true;
	}

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
	}

unlock_exit:
	if (ret && tcegrp)
		kfree(tcegrp);

	mutex_unlock(&container->lock);

	return ret;
}

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}

	list_del(&tcegrp->next);
	kfree(tcegrp);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);
	else
		tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}

const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);