drivers/vfio/vfio_iommu_spapr_tce.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>

#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION	"0.1"
#define DRIVER_AUTHOR	"aik@ozlabs.ru"
#define DRIVER_DESC	"VFIO IOMMU SPAPR TCE"
static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);

/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */

struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};

/*
 * A container needs to remember which preregistered region it has
 * referenced to do proper cleanup at the userspace process exit.
 */
struct tce_iommu_prereg {
	struct list_head next;
	struct mm_iommu_table_group_mem_t *mem;
};

/*
 * The container descriptor supports only a single group per container.
 * Required by the API as the container is not supplied with the IOMMU group
 * at the moment of initialization.
 */
struct tce_container {
	struct mutex lock;
	bool enabled;
	bool v2;
	bool def_window_pending;
	unsigned long locked_pages;
	struct mm_struct *mm;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
	struct list_head prereg_list;
};
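/*
 * Bind the container to the calling process' mm and take a reference on it;
 * every later call must come from the same mm, otherwise -EPERM is returned.
 */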
static long tce_iommu_mm_set(struct tce_container *container)
{
	if (container->mm) {
		if (container->mm == current->mm)
			return 0;
		return -EPERM;
	}
	BUG_ON(!current->mm);
	container->mm = current->mm;
	atomic_inc(&container->mm->mm_count);

	return 0;
}
static long tce_iommu_prereg_free(struct tce_container *container,
		struct tce_iommu_prereg *tcemem)
{
	long ret;

	ret = mm_iommu_put(container->mm, tcemem->mem);
	if (ret)
		return ret;

	list_del(&tcemem->next);
	kfree(tcemem);

	return 0;
}

static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;
	struct tce_iommu_prereg *tcemem;
	bool found = false;
	long ret;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_get(container->mm, vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	list_for_each_entry(tcemem, &container->prereg_list, next) {
		if (tcemem->mem == mem) {
			found = true;
			break;
		}
	}

	if (!found)
		ret = -ENOENT;
	else
		ret = tce_iommu_prereg_free(container, tcemem);

	mm_iommu_put(container->mm, mem);

	return ret;
}

static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	struct tce_iommu_prereg *tcemem;
	unsigned long entries = size >> PAGE_SHIFT;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	mem = mm_iommu_get(container->mm, vaddr, entries);
	if (mem) {
		list_for_each_entry(tcemem, &container->prereg_list, next) {
			if (tcemem->mem == mem) {
				ret = -EBUSY;
				goto put_exit;
			}
		}
	} else {
		ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
		if (ret)
			return ret;
	}

	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
	if (!tcemem) {
		ret = -ENOMEM;
		goto put_exit;
	}

	tcemem->mem = mem;
	list_add(&tcemem->next, &container->prereg_list);

	container->enabled = true;

	return 0;

put_exit:
	mm_iommu_put(container->mm, mem);
	return ret;
}
static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
		unsigned int page_shift)
{
	struct page *page;
	unsigned long size = 0;

	if (mm_iommu_is_devmem(mm, hpa, page_shift, &size))
		return size == (1UL << page_shift);

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}

static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}
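/* Find the IOMMU table whose DMA window covers @ioba and return its index */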
static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}

static int tce_iommu_find_free_table(struct tce_container *container)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (!container->tables[i])
			return i;
	}

	return -ENOSPC;
}
static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap.  For powerpc, the map unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult to impossible
	 * to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled.  The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also we don't have a nice way to fail on H_PUT_TCE due to ulimits,
	 * that would effectively kill the guest at random points, much better
	 * enforcing the limit based on the max that the guest can map.
	 *
	 * Unfortunately at the moment it counts whole tables, no matter how
	 * much memory the guest has. I.e. for 4GB guest and 4 IOMMU groups
	 * each with 2GB DMA window, 8GB will be counted here. The reason for
	 * this is that we cannot tell here the amount of RAM used by the guest
	 * as this information is only available from KVM and VFIO is
	 * KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	ret = tce_iommu_mm_set(container);
	if (ret)
		return ret;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = account_locked_vm(container->mm, locked, true);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}

static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	BUG_ON(!container->mm);
	account_locked_vm(container->mm, container->locked_pages, false);
}
static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);
	INIT_LIST_HEAD_RCU(&container->prereg_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}

static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl);
static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct tce_iommu_group *tcegrp;
	struct tce_iommu_prereg *tcemem, *tmtmp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(container, tbl);
	}

	list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
		WARN_ON(tce_iommu_prereg_free(container, tcemem));

	tce_iommu_disable(container);
	if (container->mm)
		mmdrop(container->mm);
	mutex_destroy(&container->lock);

	kfree(container);
}
static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	put_page(page);
}

static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
		unsigned long tce, unsigned long shift,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
	if (!mem)
		return -EINVAL;

	ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
	if (ret)
		return -EINVAL;

	*pmem = mem;

	return 0;
}

static void tce_iommu_unuse_page_v2(struct tce_container *container,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
			tbl->it_page_shift, &hpa, &mem);
	if (ret)
		pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n",
				__func__, be64_to_cpu(*pua), entry, ret);
	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);
}
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;
	unsigned long lastentry = entry + pages;

	for ( ; entry < lastentry; ++entry) {
		if (tbl->it_indirect_levels && tbl->it_userspace) {
			/*
			 * For multilevel tables, we can take a shortcut here
			 * and skip some TCEs as we know that the userspace
			 * addresses cache is a mirror of the real TCE table
			 * and if it is missing some indirect levels, then
			 * the hardware table does not have them allocated
			 * either and therefore does not require updating.
			 */
			__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl,
					entry);
			if (!pua) {
				/* align to level_size which is power of two */
				entry |= tbl->it_level_size - 1;
				continue;
			}
		}

		cond_resched();

		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg(container->mm, tbl, entry, &oldhpa,
				&direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(container, tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(container, oldhpa);
	}

	return 0;
}
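/* Pin one page of userspace memory and return its host physical address */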
static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE ? FOLL_WRITE : 0,
			&page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}
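/*
 * The v1 mapping path: pin userspace pages one by one and program them
 * into the TCE table, rolling back already programmed entries on failure.
 */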
static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		if (!tce_page_is_contained(container->mm, hpa,
				tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
				&dirtmp);
		if (ret) {
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}
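/*
 * The v2 mapping path: translate userspace addresses via preregistered
 * memory (already pinned at registration time) and cache the userspace
 * address in the table's userspace view.
 */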
static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(container,
				tce, tbl->it_page_shift, &hpa, &mem);
		if (ret)
			break;

		if (!tce_page_is_contained(container->mm, hpa,
				tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;

		ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
				&dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(container, tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(container, tbl, entry + i);

		*pua = cpu_to_be64(tce);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}
static long tce_iommu_create_table(struct tce_container *container,
			struct iommu_table_group *table_group,
			int num,
			__u32 page_shift,
			__u64 window_size,
			__u32 levels,
			struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = account_locked_vm(container->mm, table_size >> PAGE_SHIFT, true);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size > table_size));

	return ret;
}

static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	iommu_tce_table_put(tbl);
	account_locked_vm(container->mm, pages, false);
}
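/*
 * Create a DMA window and program it into every attached group;
 * the bus address chosen by the platform is returned via @start_addr.
 */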
static long tce_iommu_create_window(struct tce_container *container,
		__u32 page_shift, __u64 window_size, __u32 levels,
		__u64 *start_addr)
{
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl = NULL;
	long ret, num;

	num = tce_iommu_find_free_table(container);
	if (num < 0)
		return num;

	/* Get the first group for ops::create_table */
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -EFAULT;

	if (!(table_group->pgsizes & (1ULL << page_shift)))
		return -EINVAL;

	if (!table_group->ops->set_window || !table_group->ops->unset_window ||
			!table_group->ops->get_table_size ||
			!table_group->ops->create_table)
		return -EPERM;

	/* Create TCE table */
	ret = tce_iommu_create_table(container, table_group, num,
			page_shift, window_size, levels, &tbl);
	if (ret)
		return ret;

	BUG_ON(!tbl->it_ops->free);

	/*
	 * Program the table to every group.
	 * Groups have been tested for compatibility at the attach time.
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		ret = table_group->ops->set_window(table_group, num, tbl);
		if (ret)
			goto unset_exit;
	}

	container->tables[num] = tbl;

	/* Return start address assigned by platform in create_table() */
	*start_addr = tbl->it_offset << tbl->it_page_shift;

	return 0;

unset_exit:
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		table_group->ops->unset_window(table_group, num);
	}
	tce_iommu_free_table(container, tbl);

	return ret;
}
static long tce_iommu_remove_window(struct tce_container *container,
		__u64 start_addr)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl;
	struct tce_iommu_group *tcegrp;
	int num;

	num = tce_iommu_find_table(container, start_addr, &tbl);
	if (num < 0)
		return -EINVAL;

	BUG_ON(!tbl->it_size);

	/* Detach groups from IOMMUs */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		/*
		 * SPAPR TCE IOMMU exposes the default DMA window to
		 * the guest via dma32_window_start/size of
		 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
		 * the userspace to remove this window, some do not, so
		 * here we check for the platform capability.
		 */
		if (!table_group->ops || !table_group->ops->unset_window)
			return -EPERM;

		table_group->ops->unset_window(table_group, num);
	}

	/* Free table */
	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
	tce_iommu_free_table(container, tbl);
	container->tables[num] = NULL;

	return 0;
}
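/* Create the default 32-bit DMA window if its creation was deferred at attach time */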
static long tce_iommu_create_default_window(struct tce_container *container)
{
	long ret;
	__u64 start_addr = 0;
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;

	if (!container->def_window_pending)
		return 0;

	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
			table_group->tce32_size, 1, &start_addr);
	WARN_ON_ONCE(!ret && start_addr);

	if (!ret)
		container->def_window_pending = false;

	return ret;
}
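/* The ioctl handler of the SPAPR TCE VFIO IOMMU driver */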
static long tce_iommu_ioctl(void *iommu_data,
				 unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz, ddwsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			ret = 1;
			break;
		default:
			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
			break;
		}

		return (ret < 0) ? 0 : ret;
	}

	/*
	 * Sanity check to prevent one userspace from manipulating
	 * another userspace mm.
	 */
	BUG_ON(!container);
	if (container->mm && container->mm != current->mm)
		return -EPERM;

	switch (cmd) {
	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;
		memset(&info.ddw, 0, sizeof(info.ddw));

		if (table_group->max_dynamic_windows_supported &&
				container->v2) {
			info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
			info.ddw.pgsizes = table_group->pgsizes;
			info.ddw.max_dynamic_windows_supported =
				table_group->max_dynamic_windows_supported;
			info.ddw.levels = table_group->max_levels;
		}

		ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

		if (info.argsz >= ddwsz)
			minsz = ddwsz;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		if (container->v2)
			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);
		else
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		if (!container->mm)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
						cmd, arg);
			if (ret)
				return ret;
		}
		return ret;
	}

	case VFIO_IOMMU_SPAPR_TCE_CREATE: {
		struct vfio_iommu_spapr_tce_create create;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
				start_addr);

		if (copy_from_user(&create, (void __user *)arg, minsz))
			return -EFAULT;

		if (create.argsz < minsz)
			return -EINVAL;

		if (create.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_create_default_window(container);
		if (!ret)
			ret = tce_iommu_create_window(container,
					create.page_shift,
					create.window_size, create.levels,
					&create.start_addr);

		mutex_unlock(&container->lock);

		if (!ret && copy_to_user((void __user *)arg, &create, minsz))
			ret = -EFAULT;

		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
		struct vfio_iommu_spapr_tce_remove remove;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
				start_addr);

		if (copy_from_user(&remove, (void __user *)arg, minsz))
			return -EFAULT;

		if (remove.argsz < minsz)
			return -EINVAL;

		if (remove.flags)
			return -EINVAL;

		if (container->def_window_pending && !remove.start_addr) {
			container->def_window_pending = false;
			return 0;
		}

		mutex_lock(&container->lock);

		ret = tce_iommu_remove_window(container, remove.start_addr);

		mutex_unlock(&container->lock);

		return ret;
	}
	}

	return -ENOTTY;
}
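/* Give the tables of a non-DDW-capable (v1) group back to the platform code */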
static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		if (tbl->it_map)
			iommu_release_ownership(tbl);

		container->tables[i] = NULL;
	}
}

static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = iommu_take_ownership(tbl);
		if (rc) {
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

			return rc;
		}
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		container->tables[i] = table_group->tables[i];

	return 0;
}
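/*
 * DDW-capable groups: unprogram all container windows from the group and
 * hand ownership back to the platform.
 */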
static void tce_iommu_release_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;

	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		if (container->tables[i])
			table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);
}

static long tce_iommu_take_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i, ret = 0;

	if (!table_group->ops->create_table || !table_group->ops->set_window ||
			!table_group->ops->release_ownership) {
		WARN_ON_ONCE(1);
		return -EFAULT;
	}

	table_group->ops->take_ownership(table_group);

	/* Set all windows to the new group */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		ret = table_group->ops->set_window(table_group, i, tbl);
		if (ret)
			goto release_exit;
	}

	return 0;

release_exit:
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);

	return ret;
}
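/*
 * Attach an IOMMU group to the container, verifying that it is compatible
 * with the groups which are already attached.
 */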
static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);
	if (!table_group) {
		ret = -ENODEV;
		goto unlock_exit;
	}

	if (tce_groups_attached(container) && (!table_group->ops ||
			!table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* Check if new group has the same iommu_ops (i.e. compatible) */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
		if (table_group_tmp->ops->create_table !=
				table_group->ops->create_table) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership) {
		if (container->v2) {
			ret = -EPERM;
			goto unlock_exit;
		}
		ret = tce_iommu_take_ownership(container, table_group);
	} else {
		if (!container->v2) {
			ret = -EPERM;
			goto unlock_exit;
		}
		ret = tce_iommu_take_ownership_ddw(container, table_group);
		if (!tce_groups_attached(container) && !container->tables[0])
			container->def_window_pending = true;
	}

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
	}

unlock_exit:
	if (ret && tcegrp)
		kfree(tcegrp);

	mutex_unlock(&container->lock);

	return ret;
}
static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}

	list_del(&tcegrp->next);
	kfree(tcegrp);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);
	else
		tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}
static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);