/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION  "0.1"
#define DRIVER_AUTHOR   "aik@ozlabs.ru"
#define DRIVER_DESC     "VFIO IOMMU SPAPR TCE"
static void tce_iommu_detach_group(void *iommu_data,
                struct iommu_group *iommu_group);
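
/*
 * Account pages that the IOMMU effectively pins against the task's
 * RLIMIT_MEMLOCK. The increment fails with -ENOMEM once the limit would
 * be exceeded, unless the caller has CAP_IPC_LOCK. For example (assuming
 * 4K system pages), a 64MB RLIMIT_MEMLOCK gives a lock_limit of 16384
 * pages, so an unprivileged task could not account a 128MB window.
 */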
static long try_increment_locked_vm(struct mm_struct *mm, long npages)
{
        long ret = 0, locked, lock_limit;

        if (WARN_ON_ONCE(!mm))
                return -EPERM;

        if (!npages)
                return 0;

        down_write(&mm->mmap_sem);
        locked = mm->locked_vm + npages;
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                ret = -ENOMEM;
        else
                mm->locked_vm += npages;

        pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
                        npages << PAGE_SHIFT,
                        mm->locked_vm << PAGE_SHIFT,
                        rlimit(RLIMIT_MEMLOCK),
                        ret ? " - exceeded" : "");

        up_write(&mm->mmap_sem);

        return ret;
}
static void decrement_locked_vm(struct mm_struct *mm, long npages)
{
        if (!mm || !npages)
                return;

        down_write(&mm->mmap_sem);
        if (WARN_ON_ONCE(npages > mm->locked_vm))
                npages = mm->locked_vm;
        mm->locked_vm -= npages;
        pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
                        npages << PAGE_SHIFT,
                        mm->locked_vm << PAGE_SHIFT,
                        rlimit(RLIMIT_MEMLOCK));
        up_write(&mm->mmap_sem);
}
/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */

struct tce_iommu_group {
        struct list_head next;
        struct iommu_group *grp;
};

/*
 * A container needs to remember which preregistered region it has
 * referenced to do proper cleanup at the userspace process exit.
 */
struct tce_iommu_prereg {
        struct list_head next;
        struct mm_iommu_table_group_mem_t *mem;
};

/*
 * The container descriptor supports only a single group per container.
 * Required by the API as the container is not supplied with the IOMMU group
 * at the moment of initialization.
 */
struct tce_container {
        struct mutex lock;
        bool enabled;
        bool v2;
        bool def_window_pending;
        unsigned long locked_pages;
        struct mm_struct *mm;
        struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
        struct list_head group_list;
        struct list_head prereg_list;
};
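
/*
 * Bind the container to the mm of the first task that issues an
 * mm-affecting ioctl; any later caller with a different mm gets -EPERM.
 * The mm_count reference taken here is dropped by mmdrop() in
 * tce_iommu_release().
 */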
static long tce_iommu_mm_set(struct tce_container *container)
{
        if (container->mm) {
                if (container->mm == current->mm)
                        return 0;
                return -EPERM;
        }
        BUG_ON(!current->mm);
        container->mm = current->mm;
        atomic_inc(&container->mm->mm_count);

        return 0;
}
static long tce_iommu_prereg_free(struct tce_container *container,
                struct tce_iommu_prereg *tcemem)
{
        long ret;

        ret = mm_iommu_put(container->mm, tcemem->mem);
        if (ret)
                return ret;

        list_del(&tcemem->next);
        kfree(tcemem);

        return 0;
}

static long tce_iommu_unregister_pages(struct tce_container *container,
                __u64 vaddr, __u64 size)
{
        struct mm_iommu_table_group_mem_t *mem;
        struct tce_iommu_prereg *tcemem;
        bool found = false;

        if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
                return -EINVAL;

        mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
        if (!mem)
                return -ENOENT;

        list_for_each_entry(tcemem, &container->prereg_list, next) {
                if (tcemem->mem == mem) {
                        found = true;
                        break;
                }
        }

        if (!found)
                return -ENOENT;

        return tce_iommu_prereg_free(container, tcemem);
}
static long tce_iommu_register_pages(struct tce_container *container,
                __u64 vaddr, __u64 size)
{
        long ret = 0;
        struct mm_iommu_table_group_mem_t *mem = NULL;
        struct tce_iommu_prereg *tcemem;
        unsigned long entries = size >> PAGE_SHIFT;

        if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
                        ((vaddr + size) < vaddr))
                return -EINVAL;

        mem = mm_iommu_find(container->mm, vaddr, entries);
        if (mem) {
                list_for_each_entry(tcemem, &container->prereg_list, next) {
                        if (tcemem->mem == mem)
                                return -EBUSY;
                }
        }

        ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
        if (ret)
                return ret;

        tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
        if (!tcemem) {
                mm_iommu_put(container->mm, mem);
                return -ENOMEM;
        }

        tcemem->mem = mem;
        list_add(&tcemem->next, &container->prereg_list);

        container->enabled = true;

        return 0;
}
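
/*
 * A minimal userspace sketch of memory preregistration on a v2 container.
 * container_fd, buf and bufsz are hypothetical; buf and bufsz must be
 * page aligned, and error handling is omitted:
 *
 *      struct vfio_iommu_spapr_register_memory reg = {
 *              .argsz = sizeof(reg),
 *              .flags = 0,
 *              .vaddr = (__u64)(unsigned long)buf,
 *              .size = bufsz,
 *      };
 *      ioctl(container_fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
 *      ...
 *      ioctl(container_fd, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
 */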
static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
                struct mm_struct *mm)
{
        unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
                        tbl->it_size, PAGE_SIZE);
        unsigned long *uas;
        long ret;

        BUG_ON(tbl->it_userspace);

        ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
        if (ret)
                return ret;

        uas = vzalloc(cb);
        if (!uas) {
                decrement_locked_vm(mm, cb >> PAGE_SHIFT);
                return -ENOMEM;
        }
        tbl->it_userspace = uas;

        return 0;
}

static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
                struct mm_struct *mm)
{
        unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
                        tbl->it_size, PAGE_SIZE);

        if (!tbl->it_userspace)
                return;

        vfree(tbl->it_userspace);
        tbl->it_userspace = NULL;
        decrement_locked_vm(mm, cb >> PAGE_SHIFT);
}
static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
        /*
         * Check that the TCE table granularity is not bigger than the size of
         * a page we just found. Otherwise the hardware can get access to
         * a bigger memory chunk than it should.
         */
        return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}
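
/*
 * For example (assuming 4K system pages): a 64K IOMMU page (page_shift 16)
 * is contained only if the backing page is a compound page of order >= 4;
 * a plain 4K page (order 0) only satisfies page_shift <= 12.
 */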
static inline bool tce_groups_attached(struct tce_container *container)
{
        return !list_empty(&container->group_list);
}

static long tce_iommu_find_table(struct tce_container *container,
                phys_addr_t ioba, struct iommu_table **ptbl)
{
        long i;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbl = container->tables[i];

                if (tbl) {
                        unsigned long entry = ioba >> tbl->it_page_shift;
                        unsigned long start = tbl->it_offset;
                        unsigned long end = start + tbl->it_size;

                        if ((start <= entry) && (entry < end)) {
                                *ptbl = tbl;
                                return i;
                        }
                }
        }

        return -1;
}

static int tce_iommu_find_free_table(struct tce_container *container)
{
        int i;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                if (!container->tables[i])
                        return i;
        }

        return -ENOSPC;
}
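
/*
 * tce_iommu_enable() and tce_iommu_disable() back the VFIO_IOMMU_ENABLE
 * and VFIO_IOMMU_DISABLE ioctls of the v1 (VFIO_SPAPR_TCE_IOMMU) API.
 * The v2 API does not use them; it accounts locked memory when memory is
 * preregistered and when windows are created.
 */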
static int tce_iommu_enable(struct tce_container *container)
{
        int ret = 0;
        unsigned long locked;
        struct iommu_table_group *table_group;
        struct tce_iommu_group *tcegrp;

        if (container->enabled)
                return -EBUSY;

        /*
         * When userspace pages are mapped into the IOMMU, they are effectively
         * locked memory, so, theoretically, we need to update the accounting
         * of locked pages on each map and unmap. For powerpc, the map/unmap
         * paths can be very hot, though, and the accounting would kill
         * performance, especially since it would be difficult, if not
         * impossible, to handle the accounting in real mode only.
         *
         * To address that, rather than precisely accounting every page, we
         * instead account for a worst case on locked memory when the iommu is
         * enabled and disabled. The worst case upper bound on locked memory
         * is the size of the whole iommu window, which is usually relatively
         * small (compared to total memory sizes) on POWER hardware.
         *
         * Also, we don't have a nice way to fail on H_PUT_TCE due to ulimits;
         * that would effectively kill the guest at random points, so it is
         * much better to enforce the limit based on the maximum that the
         * guest can map.
         *
         * Unfortunately at the moment it counts whole tables, no matter how
         * much memory the guest has. I.e. for a 4GB guest and 4 IOMMU groups
         * each with a 2GB DMA window, 8GB will be counted here. The reason for
         * this is that we cannot tell here the amount of RAM used by the guest
         * as this information is only available from KVM and VFIO is
         * KVM agnostic.
         *
         * So we do not allow enabling a container without a group attached
         * as there is no way to know how much we should increment
         * the locked_vm counter.
         */
        if (!tce_groups_attached(container))
                return -ENODEV;

        tcegrp = list_first_entry(&container->group_list,
                        struct tce_iommu_group, next);
        table_group = iommu_group_get_iommudata(tcegrp->grp);
        if (!table_group)
                return -ENODEV;

        if (!table_group->tce32_size)
                return -EPERM;

        ret = tce_iommu_mm_set(container);
        if (ret)
                return ret;

        locked = table_group->tce32_size >> PAGE_SHIFT;
        ret = try_increment_locked_vm(container->mm, locked);
        if (ret)
                return ret;

        container->locked_pages = locked;

        container->enabled = true;

        return ret;
}
static void tce_iommu_disable(struct tce_container *container)
{
        if (!container->enabled)
                return;

        container->enabled = false;

        BUG_ON(!container->mm);
        decrement_locked_vm(container->mm, container->locked_pages);
}
static void *tce_iommu_open(unsigned long arg)
{
        struct tce_container *container;

        if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
                pr_err("tce_vfio: Wrong IOMMU type\n");
                return ERR_PTR(-EINVAL);
        }

        container = kzalloc(sizeof(*container), GFP_KERNEL);
        if (!container)
                return ERR_PTR(-ENOMEM);

        mutex_init(&container->lock);
        INIT_LIST_HEAD_RCU(&container->group_list);
        INIT_LIST_HEAD_RCU(&container->prereg_list);

        container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

        return container;
}
static int tce_iommu_clear(struct tce_container *container,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct tce_container *container,
                struct iommu_table *tbl);

static void tce_iommu_release(void *iommu_data)
{
        struct tce_container *container = iommu_data;
        struct tce_iommu_group *tcegrp;
        long i;

        while (tce_groups_attached(container)) {
                tcegrp = list_first_entry(&container->group_list,
                                struct tce_iommu_group, next);
                tce_iommu_detach_group(iommu_data, tcegrp->grp);
        }

        /*
         * If VFIO created a table, it was not disposed
         * by tce_iommu_detach_group() so do it now.
         */
        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbl = container->tables[i];

                if (!tbl)
                        continue;

                tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
                tce_iommu_free_table(container, tbl);
        }

        while (!list_empty(&container->prereg_list)) {
                struct tce_iommu_prereg *tcemem;

                tcemem = list_first_entry(&container->prereg_list,
                                struct tce_iommu_prereg, next);
                WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem));
        }

        tce_iommu_disable(container);
        if (container->mm)
                mmdrop(container->mm);
        mutex_destroy(&container->lock);

        kfree(container);
}
static void tce_iommu_unuse_page(struct tce_container *container,
                unsigned long hpa)
{
        struct page *page;

        page = pfn_to_page(hpa >> PAGE_SHIFT);
        put_page(page);
}

static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
                unsigned long tce, unsigned long size,
                unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
        long ret = 0;
        struct mm_iommu_table_group_mem_t *mem;

        mem = mm_iommu_lookup(container->mm, tce, size);
        if (!mem)
                return -EINVAL;

        ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
        if (ret)
                return -EINVAL;

        *pmem = mem;

        return 0;
}

static void tce_iommu_unuse_page_v2(struct tce_container *container,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        int ret;
        unsigned long hpa = 0;
        unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

        if (!pua)
                return;

        ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
                        &hpa, &mem);
        if (ret)
                pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
                                __func__, *pua, entry, ret);
        if (mem)
                mm_iommu_mapped_dec(mem);

        *pua = 0;
}

static int tce_iommu_clear(struct tce_container *container,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long pages)
{
        unsigned long oldhpa;
        long ret;
        enum dma_data_direction direction;

        for ( ; pages; --pages, ++entry) {
                direction = DMA_NONE;
                oldhpa = 0;
                ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
                if (ret)
                        continue;

                if (direction == DMA_NONE)
                        continue;

                if (container->v2) {
                        tce_iommu_unuse_page_v2(container, tbl, entry);
                        continue;
                }

                tce_iommu_unuse_page(container, oldhpa);
        }

        return 0;
}
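
/*
 * tce_iommu_use_page() pins the backing page with get_user_pages_fast();
 * the reference is dropped by tce_iommu_unuse_page() when the TCE entry
 * is cleared or when a map attempt fails.
 */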
static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
        struct page *page = NULL;
        enum dma_data_direction direction = iommu_tce_direction(tce);

        if (get_user_pages_fast(tce & PAGE_MASK, 1,
                        direction != DMA_TO_DEVICE, &page) != 1)
                return -EFAULT;

        *hpa = __pa((unsigned long) page_address(page));

        return 0;
}

static long tce_iommu_build(struct tce_container *container,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long tce, unsigned long pages,
                enum dma_data_direction direction)
{
        long i, ret = 0;
        struct page *page;
        unsigned long hpa;
        enum dma_data_direction dirtmp;

        for (i = 0; i < pages; ++i) {
                unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

                ret = tce_iommu_use_page(tce, &hpa);
                if (ret)
                        break;

                page = pfn_to_page(hpa >> PAGE_SHIFT);
                if (!tce_page_is_contained(page, tbl->it_page_shift)) {
                        ret = -EPERM;
                        break;
                }

                hpa |= offset;
                dirtmp = direction;
                ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
                if (ret) {
                        tce_iommu_unuse_page(container, hpa);
                        pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
                                        __func__, entry << tbl->it_page_shift,
                                        tce, ret);
                        break;
                }

                if (dirtmp != DMA_NONE)
                        tce_iommu_unuse_page(container, hpa);

                tce += IOMMU_PAGE_SIZE(tbl);
        }

        if (ret)
                tce_iommu_clear(container, tbl, entry, i);

        return ret;
}
static long tce_iommu_build_v2(struct tce_container *container,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long tce, unsigned long pages,
                enum dma_data_direction direction)
{
        long i, ret = 0;
        struct page *page;
        unsigned long hpa;
        enum dma_data_direction dirtmp;

        if (!tbl->it_userspace) {
                ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
                if (ret)
                        return ret;
        }

        for (i = 0; i < pages; ++i) {
                struct mm_iommu_table_group_mem_t *mem = NULL;
                unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
                                entry + i);

                ret = tce_iommu_prereg_ua_to_hpa(container,
                                tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
                if (ret)
                        break;

                page = pfn_to_page(hpa >> PAGE_SHIFT);
                if (!tce_page_is_contained(page, tbl->it_page_shift)) {
                        ret = -EPERM;
                        break;
                }

                /* Preserve offset within IOMMU page */
                hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
                dirtmp = direction;

                /* The registered region is being unregistered */
                if (mm_iommu_mapped_inc(mem))
                        break;

                ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
                if (ret) {
                        /* dirtmp cannot be DMA_NONE here */
                        tce_iommu_unuse_page_v2(container, tbl, entry + i);
                        pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
                                        __func__, entry << tbl->it_page_shift,
                                        tce, ret);
                        break;
                }

                if (dirtmp != DMA_NONE)
                        tce_iommu_unuse_page_v2(container, tbl, entry + i);

                *pua = tce;

                tce += IOMMU_PAGE_SIZE(tbl);
        }

        if (ret)
                tce_iommu_clear(container, tbl, entry, i);

        return ret;
}
static long tce_iommu_create_table(struct tce_container *container,
                        struct iommu_table_group *table_group,
                        int num,
                        __u32 page_shift,
                        __u64 window_size,
                        __u32 levels,
                        struct iommu_table **ptbl)
{
        long ret, table_size;

        table_size = table_group->ops->get_table_size(page_shift, window_size,
                        levels);
        if (!table_size)
                return -EINVAL;

        ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
        if (ret)
                return ret;

        ret = table_group->ops->create_table(table_group, num,
                        page_shift, window_size, levels, ptbl);

        WARN_ON(!ret && !(*ptbl)->it_ops->free);
        WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));

        return ret;
}

static void tce_iommu_free_table(struct tce_container *container,
                struct iommu_table *tbl)
{
        unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

        tce_iommu_userspace_view_free(tbl, container->mm);
        tbl->it_ops->free(tbl);
        decrement_locked_vm(container->mm, pages);
}
static long tce_iommu_create_window(struct tce_container *container,
                __u32 page_shift, __u64 window_size, __u32 levels,
                __u64 *start_addr)
{
        struct tce_iommu_group *tcegrp;
        struct iommu_table_group *table_group;
        struct iommu_table *tbl = NULL;
        long ret, num;

        num = tce_iommu_find_free_table(container);
        if (num < 0)
                return num;

        /* Get the first group for ops::create_table */
        tcegrp = list_first_entry(&container->group_list,
                        struct tce_iommu_group, next);
        table_group = iommu_group_get_iommudata(tcegrp->grp);
        if (!table_group)
                return -EFAULT;

        if (!(table_group->pgsizes & (1ULL << page_shift)))
                return -EINVAL;

        if (!table_group->ops->set_window || !table_group->ops->unset_window ||
                        !table_group->ops->get_table_size ||
                        !table_group->ops->create_table)
                return -EPERM;

        /* Create TCE table */
        ret = tce_iommu_create_table(container, table_group, num,
                        page_shift, window_size, levels, &tbl);
        if (ret)
                return ret;

        BUG_ON(!tbl->it_ops->free);

        /*
         * Program the table to every group.
         * Groups have been tested for compatibility at the attach time.
         */
        list_for_each_entry(tcegrp, &container->group_list, next) {
                table_group = iommu_group_get_iommudata(tcegrp->grp);

                ret = table_group->ops->set_window(table_group, num, tbl);
                if (ret)
                        goto unset_exit;
        }

        container->tables[num] = tbl;

        /* Return start address assigned by platform in create_table() */
        *start_addr = tbl->it_offset << tbl->it_page_shift;

        return 0;

unset_exit:
        list_for_each_entry(tcegrp, &container->group_list, next) {
                table_group = iommu_group_get_iommudata(tcegrp->grp);
                table_group->ops->unset_window(table_group, num);
        }
        tce_iommu_free_table(container, tbl);

        return ret;
}
static long tce_iommu_remove_window(struct tce_container *container,
                __u64 start_addr)
{
        struct iommu_table_group *table_group = NULL;
        struct iommu_table *tbl;
        struct tce_iommu_group *tcegrp;
        int num;

        num = tce_iommu_find_table(container, start_addr, &tbl);
        if (num < 0)
                return -EINVAL;

        BUG_ON(!tbl->it_size);

        /* Detach groups from IOMMUs */
        list_for_each_entry(tcegrp, &container->group_list, next) {
                table_group = iommu_group_get_iommudata(tcegrp->grp);

                /*
                 * SPAPR TCE IOMMU exposes the default DMA window to
                 * the guest via dma32_window_start/size of
                 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
                 * the userspace to remove this window, some do not, so
                 * here we check for the platform capability.
                 */
                if (!table_group->ops || !table_group->ops->unset_window)
                        return -EPERM;

                table_group->ops->unset_window(table_group, num);
        }

        /* Free table */
        tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
        tce_iommu_free_table(container, tbl);
        container->tables[num] = NULL;

        return 0;
}
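
/*
 * A minimal userspace sketch of dynamic DMA window (DDW) management on
 * a v2 container. container_fd and the window geometry are hypothetical
 * and error handling is omitted; start_addr is filled in by the kernel:
 *
 *      struct vfio_iommu_spapr_tce_create create = {
 *              .argsz = sizeof(create),
 *              .page_shift = 16,
 *              .window_size = 1ULL << 30,
 *              .levels = 1,
 *      };
 *      ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
 *
 *      struct vfio_iommu_spapr_tce_remove remove = {
 *              .argsz = sizeof(remove),
 *              .start_addr = create.start_addr,
 *      };
 *      ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
 */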
static long tce_iommu_create_default_window(struct tce_container *container)
{
        long ret;
        __u64 start_addr = 0;
        struct tce_iommu_group *tcegrp;
        struct iommu_table_group *table_group;

        if (!container->def_window_pending)
                return 0;

        if (!tce_groups_attached(container))
                return -ENODEV;

        tcegrp = list_first_entry(&container->group_list,
                        struct tce_iommu_group, next);
        table_group = iommu_group_get_iommudata(tcegrp->grp);
        if (!table_group)
                return -ENODEV;

        ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
                        table_group->tce32_size, 1, &start_addr);
        WARN_ON_ONCE(!ret && start_addr);

        if (!ret)
                container->def_window_pending = false;

        return ret;
}
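
/*
 * Roughly how userspace drives this container (a sketch with hypothetical
 * fds and group number, error handling omitted):
 *
 *      container_fd = open("/dev/vfio/vfio", O_RDWR);
 *      group_fd = open("/dev/vfio/26", O_RDWR);
 *      ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container_fd);
 *      ioctl(container_fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_v2_IOMMU);
 *      ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
 *
 * On a v1 container, VFIO_IOMMU_ENABLE must follow; on v2, memory is
 * preregistered with VFIO_IOMMU_SPAPR_REGISTER_MEMORY instead. Either way,
 * VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA then program the TCE table, and
 * VFIO_EEH_PE_OP drives EEH error recovery on the attached groups.
 */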
static long tce_iommu_ioctl(void *iommu_data,
                                 unsigned int cmd, unsigned long arg)
{
        struct tce_container *container = iommu_data;
        unsigned long minsz, ddwsz;
        long ret;

        switch (cmd) {
        case VFIO_CHECK_EXTENSION:
                switch (arg) {
                case VFIO_SPAPR_TCE_IOMMU:
                case VFIO_SPAPR_TCE_v2_IOMMU:
                        ret = 1;
                        break;
                default:
                        ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
                        break;
                }

                return (ret < 0) ? 0 : ret;
        }

        /*
         * Sanity check to prevent one userspace from manipulating
         * another userspace mm.
         */
        BUG_ON(!container);
        if (container->mm && container->mm != current->mm)
                return -EPERM;

        switch (cmd) {
        case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
                struct vfio_iommu_spapr_tce_info info;
                struct tce_iommu_group *tcegrp;
                struct iommu_table_group *table_group;

                if (!tce_groups_attached(container))
                        return -ENXIO;

                tcegrp = list_first_entry(&container->group_list,
                                struct tce_iommu_group, next);
                table_group = iommu_group_get_iommudata(tcegrp->grp);

                if (!table_group)
                        return -ENXIO;

                minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
                                dma32_window_size);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                info.dma32_window_start = table_group->tce32_start;
                info.dma32_window_size = table_group->tce32_size;
                info.flags = 0;
                memset(&info.ddw, 0, sizeof(info.ddw));

                if (table_group->max_dynamic_windows_supported &&
                                container->v2) {
                        info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
                        info.ddw.pgsizes = table_group->pgsizes;
                        info.ddw.max_dynamic_windows_supported =
                                table_group->max_dynamic_windows_supported;
                        info.ddw.levels = table_group->max_levels;
                }

                ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

                if (info.argsz >= ddwsz)
                        minsz = ddwsz;

                if (copy_to_user((void __user *)arg, &info, minsz))
                        return -EFAULT;

                return 0;
        }
        case VFIO_IOMMU_MAP_DMA: {
                struct vfio_iommu_type1_dma_map param;
                struct iommu_table *tbl = NULL;
                long num;
                enum dma_data_direction direction;

                if (!container->enabled)
                        return -EPERM;

                minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

                if (copy_from_user(&param, (void __user *)arg, minsz))
                        return -EFAULT;

                if (param.argsz < minsz)
                        return -EINVAL;

                if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
                                VFIO_DMA_MAP_FLAG_WRITE))
                        return -EINVAL;

                ret = tce_iommu_create_default_window(container);
                if (ret)
                        return ret;

                num = tce_iommu_find_table(container, param.iova, &tbl);
                if (num < 0)
                        return -ENXIO;

                if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
                                (param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
                        return -EINVAL;

                /* iova is checked by the IOMMU API */
                if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
                        if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
                                direction = DMA_BIDIRECTIONAL;
                        else
                                direction = DMA_TO_DEVICE;
                } else {
                        if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
                                direction = DMA_FROM_DEVICE;
                        else
                                return -EINVAL;
                }

                ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
                if (ret)
                        return ret;

                if (container->v2)
                        ret = tce_iommu_build_v2(container, tbl,
                                        param.iova >> tbl->it_page_shift,
                                        param.vaddr,
                                        param.size >> tbl->it_page_shift,
                                        direction);
                else
                        ret = tce_iommu_build(container, tbl,
                                        param.iova >> tbl->it_page_shift,
                                        param.vaddr,
                                        param.size >> tbl->it_page_shift,
                                        direction);

                iommu_flush_tce(tbl);

                return ret;
        }
        case VFIO_IOMMU_UNMAP_DMA: {
                struct vfio_iommu_type1_dma_unmap param;
                struct iommu_table *tbl = NULL;
                long num;

                if (!container->enabled)
                        return -EPERM;

                minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
                                size);

                if (copy_from_user(&param, (void __user *)arg, minsz))
                        return -EFAULT;

                if (param.argsz < minsz)
                        return -EINVAL;

                /* No flag is supported now */
                if (param.flags)
                        return -EINVAL;

                ret = tce_iommu_create_default_window(container);
                if (ret)
                        return ret;

                num = tce_iommu_find_table(container, param.iova, &tbl);
                if (num < 0)
                        return -ENXIO;

                if (param.size & ~IOMMU_PAGE_MASK(tbl))
                        return -EINVAL;

                ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
                                param.size >> tbl->it_page_shift);
                if (ret)
                        return ret;

                ret = tce_iommu_clear(container, tbl,
                                param.iova >> tbl->it_page_shift,
                                param.size >> tbl->it_page_shift);
                iommu_flush_tce(tbl);

                return ret;
        }
        case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
                struct vfio_iommu_spapr_register_memory param;

                if (!container->v2)
                        break;

                minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
                                size);

                ret = tce_iommu_mm_set(container);
                if (ret)
                        return ret;

                if (copy_from_user(&param, (void __user *)arg, minsz))
                        return -EFAULT;

                if (param.argsz < minsz)
                        return -EINVAL;

                /* No flag is supported now */
                if (param.flags)
                        return -EINVAL;

                mutex_lock(&container->lock);
                ret = tce_iommu_register_pages(container, param.vaddr,
                                param.size);
                mutex_unlock(&container->lock);

                return ret;
        }
        case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
                struct vfio_iommu_spapr_register_memory param;

                if (!container->v2)
                        break;

                if (!container->mm)
                        return -EPERM;

                minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
                                size);

                if (copy_from_user(&param, (void __user *)arg, minsz))
                        return -EFAULT;

                if (param.argsz < minsz)
                        return -EINVAL;

                /* No flag is supported now */
                if (param.flags)
                        return -EINVAL;

                mutex_lock(&container->lock);
                ret = tce_iommu_unregister_pages(container, param.vaddr,
                                param.size);
                mutex_unlock(&container->lock);

                return ret;
        }
        case VFIO_IOMMU_ENABLE:
                if (container->v2)
                        break;

                mutex_lock(&container->lock);
                ret = tce_iommu_enable(container);
                mutex_unlock(&container->lock);
                return ret;

        case VFIO_IOMMU_DISABLE:
                if (container->v2)
                        break;

                mutex_lock(&container->lock);
                tce_iommu_disable(container);
                mutex_unlock(&container->lock);
                return 0;

        case VFIO_EEH_PE_OP: {
                struct tce_iommu_group *tcegrp;

                ret = 0;
                list_for_each_entry(tcegrp, &container->group_list, next) {
                        ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
                                        cmd, arg);
                        if (ret)
                                return ret;
                }
                return ret;
        }
        case VFIO_IOMMU_SPAPR_TCE_CREATE: {
                struct vfio_iommu_spapr_tce_create create;

                if (!container->v2)
                        break;

                ret = tce_iommu_mm_set(container);
                if (ret)
                        return ret;

                if (!tce_groups_attached(container))
                        return -ENXIO;

                minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
                                start_addr);

                if (copy_from_user(&create, (void __user *)arg, minsz))
                        return -EFAULT;

                if (create.argsz < minsz)
                        return -EINVAL;

                if (create.flags)
                        return -EINVAL;

                mutex_lock(&container->lock);

                ret = tce_iommu_create_default_window(container);
                if (!ret)
                        ret = tce_iommu_create_window(container,
                                        create.page_shift,
                                        create.window_size, create.levels,
                                        &create.start_addr);

                mutex_unlock(&container->lock);

                if (!ret && copy_to_user((void __user *)arg, &create, minsz))
                        ret = -EFAULT;

                return ret;
        }
        case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
                struct vfio_iommu_spapr_tce_remove remove;

                if (!container->v2)
                        break;

                ret = tce_iommu_mm_set(container);
                if (ret)
                        return ret;

                if (!tce_groups_attached(container))
                        return -ENXIO;

                minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
                                start_addr);

                if (copy_from_user(&remove, (void __user *)arg, minsz))
                        return -EFAULT;

                if (remove.argsz < minsz)
                        return -EINVAL;

                if (remove.flags)
                        return -EINVAL;

                if (container->def_window_pending && !remove.start_addr) {
                        container->def_window_pending = false;
                        return 0;
                }

                mutex_lock(&container->lock);

                ret = tce_iommu_remove_window(container, remove.start_addr);

                mutex_unlock(&container->lock);

                return ret;
        }
        }

        return -ENOTTY;
}
static void tce_iommu_release_ownership(struct tce_container *container,
                struct iommu_table_group *table_group)
{
        int i;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbl = container->tables[i];

                if (!tbl)
                        continue;

                tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
                tce_iommu_userspace_view_free(tbl, container->mm);
                if (tbl->it_map)
                        iommu_release_ownership(tbl);

                container->tables[i] = NULL;
        }
}

static int tce_iommu_take_ownership(struct tce_container *container,
                struct iommu_table_group *table_group)
{
        int i, j, rc = 0;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbl = table_group->tables[i];

                if (!tbl || !tbl->it_map)
                        continue;

                rc = iommu_take_ownership(tbl);
                if (rc) {
                        for (j = 0; j < i; ++j)
                                iommu_release_ownership(
                                                table_group->tables[j]);

                        return rc;
                }
        }

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
                container->tables[i] = table_group->tables[i];

        return 0;
}

static void tce_iommu_release_ownership_ddw(struct tce_container *container,
                struct iommu_table_group *table_group)
{
        long i;

        if (!table_group->ops->unset_window) {
                WARN_ON_ONCE(1);
                return;
        }

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
                table_group->ops->unset_window(table_group, i);

        table_group->ops->release_ownership(table_group);
}

static long tce_iommu_take_ownership_ddw(struct tce_container *container,
                struct iommu_table_group *table_group)
{
        if (!table_group->ops->create_table || !table_group->ops->set_window ||
                        !table_group->ops->release_ownership) {
                WARN_ON_ONCE(1);
                return -EFAULT;
        }

        table_group->ops->take_ownership(table_group);

        return 0;
}
static int tce_iommu_attach_group(void *iommu_data,
                struct iommu_group *iommu_group)
{
        int ret;
        struct tce_container *container = iommu_data;
        struct iommu_table_group *table_group;
        struct tce_iommu_group *tcegrp = NULL;

        mutex_lock(&container->lock);

        /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
                        iommu_group_id(iommu_group), iommu_group); */
        table_group = iommu_group_get_iommudata(iommu_group);
        if (!table_group) {
                ret = -ENODEV;
                goto unlock_exit;
        }

        if (tce_groups_attached(container) && (!table_group->ops ||
                        !table_group->ops->take_ownership ||
                        !table_group->ops->release_ownership)) {
                ret = -EBUSY;
                goto unlock_exit;
        }

        /* Check if new group has the same iommu_ops (i.e. compatible) */
        list_for_each_entry(tcegrp, &container->group_list, next) {
                struct iommu_table_group *table_group_tmp;

                if (tcegrp->grp == iommu_group) {
                        pr_warn("tce_vfio: Group %d is already attached\n",
                                        iommu_group_id(iommu_group));
                        ret = -EBUSY;
                        goto unlock_exit;
                }
                table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
                if (table_group_tmp->ops->create_table !=
                                table_group->ops->create_table) {
                        pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
                                        iommu_group_id(iommu_group),
                                        iommu_group_id(tcegrp->grp));
                        ret = -EPERM;
                        goto unlock_exit;
                }
        }

        tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
        if (!tcegrp) {
                ret = -ENOMEM;
                goto unlock_exit;
        }

        if (!table_group->ops || !table_group->ops->take_ownership ||
                        !table_group->ops->release_ownership) {
                ret = tce_iommu_take_ownership(container, table_group);
        } else {
                ret = tce_iommu_take_ownership_ddw(container, table_group);
                if (!tce_groups_attached(container) && !container->tables[0])
                        container->def_window_pending = true;
        }

        if (!ret) {
                tcegrp->grp = iommu_group;
                list_add(&tcegrp->next, &container->group_list);
        }

unlock_exit:
        if (ret && tcegrp)
                kfree(tcegrp);

        mutex_unlock(&container->lock);

        return ret;
}
static void tce_iommu_detach_group(void *iommu_data,
                struct iommu_group *iommu_group)
{
        struct tce_container *container = iommu_data;
        struct iommu_table_group *table_group;
        bool found = false;
        struct tce_iommu_group *tcegrp;

        mutex_lock(&container->lock);

        list_for_each_entry(tcegrp, &container->group_list, next) {
                if (tcegrp->grp == iommu_group) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                pr_warn("tce_vfio: detaching unattached group #%u\n",
                                iommu_group_id(iommu_group));
                goto unlock_exit;
        }

        list_del(&tcegrp->next);
        kfree(tcegrp);

        table_group = iommu_group_get_iommudata(iommu_group);
        BUG_ON(!table_group);

        if (!table_group->ops || !table_group->ops->release_ownership)
                tce_iommu_release_ownership(container, table_group);
        else
                tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
        mutex_unlock(&container->lock);
}
const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
        .name           = "iommu-vfio-powerpc",
        .owner          = THIS_MODULE,
        .open           = tce_iommu_open,
        .release        = tce_iommu_release,
        .ioctl          = tce_iommu_ioctl,
        .attach_group   = tce_iommu_attach_group,
        .detach_group   = tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
        return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
        vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);