drivers/iommu/iommu.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
4 * Author: Joerg Roedel <jroedel@suse.de>
5 */
7 #define pr_fmt(fmt) "iommu: " fmt
9 #include <linux/device.h>
10 #include <linux/kernel.h>
11 #include <linux/bug.h>
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/export.h>
15 #include <linux/slab.h>
16 #include <linux/errno.h>
17 #include <linux/iommu.h>
18 #include <linux/idr.h>
19 #include <linux/notifier.h>
20 #include <linux/err.h>
21 #include <linux/pci.h>
22 #include <linux/bitops.h>
23 #include <linux/property.h>
24 #include <linux/fsl/mc.h>
25 #include <linux/module.h>
26 #include <trace/events/iommu.h>
28 static struct kset *iommu_group_kset;
29 static DEFINE_IDA(iommu_group_ida);
31 static unsigned int iommu_def_domain_type __read_mostly;
32 static bool iommu_dma_strict __read_mostly = true;
33 static u32 iommu_cmd_line __read_mostly;
35 struct iommu_group {
36 struct kobject kobj;
37 struct kobject *devices_kobj;
38 struct list_head devices;
39 struct mutex mutex;
40 struct blocking_notifier_head notifier;
41 void *iommu_data;
42 void (*iommu_data_release)(void *iommu_data);
43 char *name;
44 int id;
45 struct iommu_domain *default_domain;
46 struct iommu_domain *domain;
49 struct group_device {
50 struct list_head list;
51 struct device *dev;
52 char *name;
55 struct iommu_group_attribute {
56 struct attribute attr;
57 ssize_t (*show)(struct iommu_group *group, char *buf);
58 ssize_t (*store)(struct iommu_group *group,
59 const char *buf, size_t count);
62 static const char * const iommu_group_resv_type_string[] = {
63 [IOMMU_RESV_DIRECT] = "direct",
64 [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable",
65 [IOMMU_RESV_RESERVED] = "reserved",
66 [IOMMU_RESV_MSI] = "msi",
67 [IOMMU_RESV_SW_MSI] = "msi",
70 #define IOMMU_CMD_LINE_DMA_API BIT(0)
72 static void iommu_set_cmd_line_dma_api(void)
74 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
77 static bool iommu_cmd_line_dma_api(void)
79 return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
82 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
83 struct iommu_group_attribute iommu_group_attr_##_name = \
84 __ATTR(_name, _mode, _show, _store)
86 #define to_iommu_group_attr(_attr) \
87 container_of(_attr, struct iommu_group_attribute, attr)
88 #define to_iommu_group(_kobj) \
89 container_of(_kobj, struct iommu_group, kobj)
91 static LIST_HEAD(iommu_device_list);
92 static DEFINE_SPINLOCK(iommu_device_lock);
95 * Use a function instead of an array here because the domain-type is a
96 * bit-field, so an array would waste memory.
98 static const char *iommu_domain_type_str(unsigned int t)
100 switch (t) {
101 case IOMMU_DOMAIN_BLOCKED:
102 return "Blocked";
103 case IOMMU_DOMAIN_IDENTITY:
104 return "Passthrough";
105 case IOMMU_DOMAIN_UNMANAGED:
106 return "Unmanaged";
107 case IOMMU_DOMAIN_DMA:
108 return "Translated";
109 default:
110 return "Unknown";
114 static int __init iommu_subsys_init(void)
116 bool cmd_line = iommu_cmd_line_dma_api();
118 if (!cmd_line) {
119 if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
120 iommu_set_default_passthrough(false);
121 else
122 iommu_set_default_translated(false);
124 if (iommu_default_passthrough() && mem_encrypt_active()) {
125 pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
126 iommu_set_default_translated(false);
130 pr_info("Default domain type: %s %s\n",
131 iommu_domain_type_str(iommu_def_domain_type),
132 cmd_line ? "(set via kernel command line)" : "");
134 return 0;
136 subsys_initcall(iommu_subsys_init);
138 int iommu_device_register(struct iommu_device *iommu)
140 spin_lock(&iommu_device_lock);
141 list_add_tail(&iommu->list, &iommu_device_list);
142 spin_unlock(&iommu_device_lock);
143 return 0;
145 EXPORT_SYMBOL_GPL(iommu_device_register);
147 void iommu_device_unregister(struct iommu_device *iommu)
149 spin_lock(&iommu_device_lock);
150 list_del(&iommu->list);
151 spin_unlock(&iommu_device_lock);
153 EXPORT_SYMBOL_GPL(iommu_device_unregister);
155 static struct iommu_param *iommu_get_dev_param(struct device *dev)
157 struct iommu_param *param = dev->iommu_param;
159 if (param)
160 return param;
162 param = kzalloc(sizeof(*param), GFP_KERNEL);
163 if (!param)
164 return NULL;
166 mutex_init(&param->lock);
167 dev->iommu_param = param;
168 return param;
171 static void iommu_free_dev_param(struct device *dev)
173 kfree(dev->iommu_param);
174 dev->iommu_param = NULL;
177 int iommu_probe_device(struct device *dev)
179 const struct iommu_ops *ops = dev->bus->iommu_ops;
180 int ret;
182 WARN_ON(dev->iommu_group);
183 if (!ops)
184 return -EINVAL;
186 if (!iommu_get_dev_param(dev))
187 return -ENOMEM;
189 if (!try_module_get(ops->owner)) {
190 ret = -EINVAL;
191 goto err_free_dev_param;
194 ret = ops->add_device(dev);
195 if (ret)
196 goto err_module_put;
198 return 0;
200 err_module_put:
201 module_put(ops->owner);
202 err_free_dev_param:
203 iommu_free_dev_param(dev);
204 return ret;
207 void iommu_release_device(struct device *dev)
209 const struct iommu_ops *ops = dev->bus->iommu_ops;
211 if (dev->iommu_group)
212 ops->remove_device(dev);
214 if (dev->iommu_param) {
215 module_put(ops->owner);
216 iommu_free_dev_param(dev);
220 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
221 unsigned type);
222 static int __iommu_attach_device(struct iommu_domain *domain,
223 struct device *dev);
224 static int __iommu_attach_group(struct iommu_domain *domain,
225 struct iommu_group *group);
226 static void __iommu_detach_group(struct iommu_domain *domain,
227 struct iommu_group *group);
229 static int __init iommu_set_def_domain_type(char *str)
231 bool pt;
232 int ret;
234 ret = kstrtobool(str, &pt);
235 if (ret)
236 return ret;
238 if (pt)
239 iommu_set_default_passthrough(true);
240 else
241 iommu_set_default_translated(true);
243 return 0;
245 early_param("iommu.passthrough", iommu_set_def_domain_type);
247 static int __init iommu_dma_setup(char *str)
249 return kstrtobool(str, &iommu_dma_strict);
251 early_param("iommu.strict", iommu_dma_setup);
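/*
 * Illustrative note (not part of this file): the two early_param() hooks
 * above are driven from the kernel command line at boot, e.g.
 *
 *	iommu.passthrough=1	(default domains use IOMMU_DOMAIN_IDENTITY)
 *	iommu.strict=0		(allow lazy, flush-queue based TLB invalidation)
 *
 * Both values are parsed with kstrtobool(), so "1"/"0" and "y"/"n" work.
 */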
253 static ssize_t iommu_group_attr_show(struct kobject *kobj,
254 struct attribute *__attr, char *buf)
256 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
257 struct iommu_group *group = to_iommu_group(kobj);
258 ssize_t ret = -EIO;
260 if (attr->show)
261 ret = attr->show(group, buf);
262 return ret;
265 static ssize_t iommu_group_attr_store(struct kobject *kobj,
266 struct attribute *__attr,
267 const char *buf, size_t count)
269 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
270 struct iommu_group *group = to_iommu_group(kobj);
271 ssize_t ret = -EIO;
273 if (attr->store)
274 ret = attr->store(group, buf, count);
275 return ret;
278 static const struct sysfs_ops iommu_group_sysfs_ops = {
279 .show = iommu_group_attr_show,
280 .store = iommu_group_attr_store,
283 static int iommu_group_create_file(struct iommu_group *group,
284 struct iommu_group_attribute *attr)
286 return sysfs_create_file(&group->kobj, &attr->attr);
289 static void iommu_group_remove_file(struct iommu_group *group,
290 struct iommu_group_attribute *attr)
292 sysfs_remove_file(&group->kobj, &attr->attr);
295 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
297 return sprintf(buf, "%s\n", group->name);
301 * iommu_insert_resv_region - Insert a new region in the
302 * list of reserved regions.
303 * @new: new region to insert
304 * @regions: list of regions
306 * Elements are sorted by start address and overlapping segments
307 * of the same type are merged.
309 int iommu_insert_resv_region(struct iommu_resv_region *new,
310 struct list_head *regions)
312 struct iommu_resv_region *iter, *tmp, *nr, *top;
313 LIST_HEAD(stack);
315 nr = iommu_alloc_resv_region(new->start, new->length,
316 new->prot, new->type);
317 if (!nr)
318 return -ENOMEM;
320 /* First add the new element based on start address sorting */
321 list_for_each_entry(iter, regions, list) {
322 if (nr->start < iter->start ||
323 (nr->start == iter->start && nr->type <= iter->type))
324 break;
326 list_add_tail(&nr->list, &iter->list);
328 /* Merge overlapping segments of type nr->type in @regions, if any */
329 list_for_each_entry_safe(iter, tmp, regions, list) {
330 phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
332 /* no merge needed on elements of different types than @new */
333 if (iter->type != new->type) {
334 list_move_tail(&iter->list, &stack);
335 continue;
338 /* look for the last stack element of same type as @iter */
339 list_for_each_entry_reverse(top, &stack, list)
340 if (top->type == iter->type)
341 goto check_overlap;
343 list_move_tail(&iter->list, &stack);
344 continue;
346 check_overlap:
347 top_end = top->start + top->length - 1;
349 if (iter->start > top_end + 1) {
350 list_move_tail(&iter->list, &stack);
351 } else {
352 top->length = max(top_end, iter_end) - top->start + 1;
353 list_del(&iter->list);
354 kfree(iter);
357 list_splice(&stack, regions);
358 return 0;
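/*
 * Illustrative sketch (not part of this file): building a merged reserved-
 * region list with the helper above.  The address, size and prot value are
 * made up for the example.
 */
static int example_build_resv_list(struct list_head *regions)
{
	struct iommu_resv_region *msi;
	int ret;

	msi = iommu_alloc_resv_region(0x08000000, 0x100000, 0, IOMMU_RESV_MSI);
	if (!msi)
		return -ENOMEM;

	/* Inserted in start-address order; overlaps of the same type merge */
	ret = iommu_insert_resv_region(msi, regions);

	/* iommu_insert_resv_region() works on a private copy, so free ours */
	kfree(msi);
	return ret;
}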
361 static int
362 iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
363 struct list_head *group_resv_regions)
365 struct iommu_resv_region *entry;
366 int ret = 0;
368 list_for_each_entry(entry, dev_resv_regions, list) {
369 ret = iommu_insert_resv_region(entry, group_resv_regions);
370 if (ret)
371 break;
373 return ret;
376 int iommu_get_group_resv_regions(struct iommu_group *group,
377 struct list_head *head)
379 struct group_device *device;
380 int ret = 0;
382 mutex_lock(&group->mutex);
383 list_for_each_entry(device, &group->devices, list) {
384 struct list_head dev_resv_regions;
386 INIT_LIST_HEAD(&dev_resv_regions);
387 iommu_get_resv_regions(device->dev, &dev_resv_regions);
388 ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
389 iommu_put_resv_regions(device->dev, &dev_resv_regions);
390 if (ret)
391 break;
393 mutex_unlock(&group->mutex);
394 return ret;
396 EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
398 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
399 char *buf)
401 struct iommu_resv_region *region, *next;
402 struct list_head group_resv_regions;
403 char *str = buf;
405 INIT_LIST_HEAD(&group_resv_regions);
406 iommu_get_group_resv_regions(group, &group_resv_regions);
408 list_for_each_entry_safe(region, next, &group_resv_regions, list) {
409 str += sprintf(str, "0x%016llx 0x%016llx %s\n",
410 (long long int)region->start,
411 (long long int)(region->start +
412 region->length - 1),
413 iommu_group_resv_type_string[region->type]);
414 kfree(region);
417 return (str - buf);
420 static ssize_t iommu_group_show_type(struct iommu_group *group,
421 char *buf)
423 char *type = "unknown\n";
425 if (group->default_domain) {
426 switch (group->default_domain->type) {
427 case IOMMU_DOMAIN_BLOCKED:
428 type = "blocked\n";
429 break;
430 case IOMMU_DOMAIN_IDENTITY:
431 type = "identity\n";
432 break;
433 case IOMMU_DOMAIN_UNMANAGED:
434 type = "unmanaged\n";
435 break;
436 case IOMMU_DOMAIN_DMA:
437 type = "DMA\n";
438 break;
441 strcpy(buf, type);
443 return strlen(type);
446 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
448 static IOMMU_GROUP_ATTR(reserved_regions, 0444,
449 iommu_group_show_resv_regions, NULL);
451 static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);
453 static void iommu_group_release(struct kobject *kobj)
455 struct iommu_group *group = to_iommu_group(kobj);
457 pr_debug("Releasing group %d\n", group->id);
459 if (group->iommu_data_release)
460 group->iommu_data_release(group->iommu_data);
462 ida_simple_remove(&iommu_group_ida, group->id);
464 if (group->default_domain)
465 iommu_domain_free(group->default_domain);
467 kfree(group->name);
468 kfree(group);
471 static struct kobj_type iommu_group_ktype = {
472 .sysfs_ops = &iommu_group_sysfs_ops,
473 .release = iommu_group_release,
477 * iommu_group_alloc - Allocate a new group
479 * This function is called by an iommu driver to allocate a new iommu
480 * group. The iommu group represents the minimum granularity of the iommu.
481 * Upon successful return, the caller holds a reference to the supplied
482 * group in order to hold the group until devices are added. Use
483 * iommu_group_put() to release this extra reference count, allowing the
484 * group to be automatically reclaimed once it has no devices or external
485 * references.
487 struct iommu_group *iommu_group_alloc(void)
489 struct iommu_group *group;
490 int ret;
492 group = kzalloc(sizeof(*group), GFP_KERNEL);
493 if (!group)
494 return ERR_PTR(-ENOMEM);
496 group->kobj.kset = iommu_group_kset;
497 mutex_init(&group->mutex);
498 INIT_LIST_HEAD(&group->devices);
499 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
501 ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
502 if (ret < 0) {
503 kfree(group);
504 return ERR_PTR(ret);
506 group->id = ret;
508 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
509 NULL, "%d", group->id);
510 if (ret) {
511 ida_simple_remove(&iommu_group_ida, group->id);
512 kfree(group);
513 return ERR_PTR(ret);
516 group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
517 if (!group->devices_kobj) {
518 kobject_put(&group->kobj); /* triggers .release & free */
519 return ERR_PTR(-ENOMEM);
523 * The devices_kobj holds a reference on the group kobject, so
524 * as long as that exists so will the group. We can therefore
525 * use the devices_kobj for reference counting.
527 kobject_put(&group->kobj);
529 ret = iommu_group_create_file(group,
530 &iommu_group_attr_reserved_regions);
531 if (ret)
532 return ERR_PTR(ret);
534 ret = iommu_group_create_file(group, &iommu_group_attr_type);
535 if (ret)
536 return ERR_PTR(ret);
538 pr_debug("Allocated group %d\n", group->id);
540 return group;
542 EXPORT_SYMBOL_GPL(iommu_group_alloc);
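/*
 * Illustrative sketch (not part of this file): a driver-side ->device_group()
 * callback that gives every device its own group, similar to
 * generic_device_group() further down.  example_device_group() is an
 * assumption, not an existing callback.
 */
static struct iommu_group *example_device_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return group;

	/* Optional: exposed as the "name" attribute of the group in sysfs */
	iommu_group_set_name(group, dev_name(dev));

	return group;
}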
544 struct iommu_group *iommu_group_get_by_id(int id)
546 struct kobject *group_kobj;
547 struct iommu_group *group;
548 const char *name;
550 if (!iommu_group_kset)
551 return NULL;
553 name = kasprintf(GFP_KERNEL, "%d", id);
554 if (!name)
555 return NULL;
557 group_kobj = kset_find_obj(iommu_group_kset, name);
558 kfree(name);
560 if (!group_kobj)
561 return NULL;
563 group = container_of(group_kobj, struct iommu_group, kobj);
564 BUG_ON(group->id != id);
566 kobject_get(group->devices_kobj);
567 kobject_put(&group->kobj);
569 return group;
571 EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
574 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
575 * @group: the group
577 * iommu drivers can store data in the group for use when doing iommu
578 * operations. This function provides a way to retrieve it. Caller
579 * should hold a group reference.
581 void *iommu_group_get_iommudata(struct iommu_group *group)
583 return group->iommu_data;
585 EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
588 * iommu_group_set_iommudata - set iommu_data for a group
589 * @group: the group
590 * @iommu_data: new data
591 * @release: release function for iommu_data
593 * iommu drivers can store data in the group for use when doing iommu
594 * operations. This function provides a way to set the data after
595 * the group has been allocated. Caller should hold a group reference.
597 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
598 void (*release)(void *iommu_data))
600 group->iommu_data = iommu_data;
601 group->iommu_data_release = release;
603 EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
606 * iommu_group_set_name - set name for a group
607 * @group: the group
608 * @name: name
610 * Allow iommu driver to set a name for a group. When set it will
611 * appear in a name attribute file under the group in sysfs.
613 int iommu_group_set_name(struct iommu_group *group, const char *name)
615 int ret;
617 if (group->name) {
618 iommu_group_remove_file(group, &iommu_group_attr_name);
619 kfree(group->name);
620 group->name = NULL;
621 if (!name)
622 return 0;
625 group->name = kstrdup(name, GFP_KERNEL);
626 if (!group->name)
627 return -ENOMEM;
629 ret = iommu_group_create_file(group, &iommu_group_attr_name);
630 if (ret) {
631 kfree(group->name);
632 group->name = NULL;
633 return ret;
636 return 0;
638 EXPORT_SYMBOL_GPL(iommu_group_set_name);
640 static int iommu_group_create_direct_mappings(struct iommu_group *group,
641 struct device *dev)
643 struct iommu_domain *domain = group->default_domain;
644 struct iommu_resv_region *entry;
645 struct list_head mappings;
646 unsigned long pg_size;
647 int ret = 0;
649 if (!domain || domain->type != IOMMU_DOMAIN_DMA)
650 return 0;
652 BUG_ON(!domain->pgsize_bitmap);
654 pg_size = 1UL << __ffs(domain->pgsize_bitmap);
655 INIT_LIST_HEAD(&mappings);
657 iommu_get_resv_regions(dev, &mappings);
659 /* We need to consider overlapping regions for different devices */
660 list_for_each_entry(entry, &mappings, list) {
661 dma_addr_t start, end, addr;
663 if (domain->ops->apply_resv_region)
664 domain->ops->apply_resv_region(dev, domain, entry);
666 start = ALIGN(entry->start, pg_size);
667 end = ALIGN(entry->start + entry->length, pg_size);
669 if (entry->type != IOMMU_RESV_DIRECT &&
670 entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
671 continue;
673 for (addr = start; addr < end; addr += pg_size) {
674 phys_addr_t phys_addr;
676 phys_addr = iommu_iova_to_phys(domain, addr);
677 if (phys_addr)
678 continue;
680 ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
681 if (ret)
682 goto out;
687 iommu_flush_tlb_all(domain);
689 out:
690 iommu_put_resv_regions(dev, &mappings);
692 return ret;
696 * iommu_group_add_device - add a device to an iommu group
697 * @group: the group into which to add the device (reference should be held)
698 * @dev: the device
700 * This function is called by an iommu driver to add a device into a
701 * group. Adding a device increments the group reference count.
703 int iommu_group_add_device(struct iommu_group *group, struct device *dev)
705 int ret, i = 0;
706 struct group_device *device;
708 device = kzalloc(sizeof(*device), GFP_KERNEL);
709 if (!device)
710 return -ENOMEM;
712 device->dev = dev;
714 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
715 if (ret)
716 goto err_free_device;
718 device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
719 rename:
720 if (!device->name) {
721 ret = -ENOMEM;
722 goto err_remove_link;
725 ret = sysfs_create_link_nowarn(group->devices_kobj,
726 &dev->kobj, device->name);
727 if (ret) {
728 if (ret == -EEXIST && i >= 0) {
730 * Account for the slim chance of collision
731 * and append an instance to the name.
733 kfree(device->name);
734 device->name = kasprintf(GFP_KERNEL, "%s.%d",
735 kobject_name(&dev->kobj), i++);
736 goto rename;
738 goto err_free_name;
741 kobject_get(group->devices_kobj);
743 dev->iommu_group = group;
745 iommu_group_create_direct_mappings(group, dev);
747 mutex_lock(&group->mutex);
748 list_add_tail(&device->list, &group->devices);
749 if (group->domain)
750 ret = __iommu_attach_device(group->domain, dev);
751 mutex_unlock(&group->mutex);
752 if (ret)
753 goto err_put_group;
755 /* Notify any listeners about change to group. */
756 blocking_notifier_call_chain(&group->notifier,
757 IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
759 trace_add_device_to_group(group->id, dev);
761 dev_info(dev, "Adding to iommu group %d\n", group->id);
763 return 0;
765 err_put_group:
766 mutex_lock(&group->mutex);
767 list_del(&device->list);
768 mutex_unlock(&group->mutex);
769 dev->iommu_group = NULL;
770 kobject_put(group->devices_kobj);
771 sysfs_remove_link(group->devices_kobj, device->name);
772 err_free_name:
773 kfree(device->name);
774 err_remove_link:
775 sysfs_remove_link(&dev->kobj, "iommu_group");
776 err_free_device:
777 kfree(device);
778 dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
779 return ret;
781 EXPORT_SYMBOL_GPL(iommu_group_add_device);
784 * iommu_group_remove_device - remove a device from its current group
785 * @dev: device to be removed
787 * This function is called by an iommu driver to remove the device from
788 * its current group. This decrements the iommu group reference count.
790 void iommu_group_remove_device(struct device *dev)
792 struct iommu_group *group = dev->iommu_group;
793 struct group_device *tmp_device, *device = NULL;
795 dev_info(dev, "Removing from iommu group %d\n", group->id);
797 /* Pre-notify listeners that a device is being removed. */
798 blocking_notifier_call_chain(&group->notifier,
799 IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
801 mutex_lock(&group->mutex);
802 list_for_each_entry(tmp_device, &group->devices, list) {
803 if (tmp_device->dev == dev) {
804 device = tmp_device;
805 list_del(&device->list);
806 break;
809 mutex_unlock(&group->mutex);
811 if (!device)
812 return;
814 sysfs_remove_link(group->devices_kobj, device->name);
815 sysfs_remove_link(&dev->kobj, "iommu_group");
817 trace_remove_device_from_group(group->id, dev);
819 kfree(device->name);
820 kfree(device);
821 dev->iommu_group = NULL;
822 kobject_put(group->devices_kobj);
824 EXPORT_SYMBOL_GPL(iommu_group_remove_device);
826 static int iommu_group_device_count(struct iommu_group *group)
828 struct group_device *entry;
829 int ret = 0;
831 list_for_each_entry(entry, &group->devices, list)
832 ret++;
834 return ret;
838 * iommu_group_for_each_dev - iterate over each device in the group
839 * @group: the group
840 * @data: caller opaque data to be passed to callback function
841 * @fn: caller supplied callback function
843 * This function is called by group users to iterate over group devices.
844 * Callers should hold a reference count to the group during callback.
845 * The group->mutex is held across callbacks, which will block calls to
846 * iommu_group_add/remove_device.
848 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
849 int (*fn)(struct device *, void *))
851 struct group_device *device;
852 int ret = 0;
854 list_for_each_entry(device, &group->devices, list) {
855 ret = fn(device->dev, data);
856 if (ret)
857 break;
859 return ret;
863 int iommu_group_for_each_dev(struct iommu_group *group, void *data,
864 int (*fn)(struct device *, void *))
866 int ret;
868 mutex_lock(&group->mutex);
869 ret = __iommu_group_for_each_dev(group, data, fn);
870 mutex_unlock(&group->mutex);
872 return ret;
874 EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
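/*
 * Illustrative sketch (not part of this file): counting the devices in a
 * group with the iterator above.  Both helpers are made-up names.
 */
static int example_count_one(struct device *dev, void *data)
{
	int *count = data;

	(*count)++;
	return 0;	/* a non-zero return would stop the iteration */
}

static int example_count_group_devices(struct iommu_group *group)
{
	int count = 0;

	iommu_group_for_each_dev(group, &count, example_count_one);
	return count;
}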
877 * iommu_group_get - Return the group for a device and increment reference
878 * @dev: get the group that this device belongs to
880 * This function is called by iommu drivers and users to get the group
881 * for the specified device. If found, the group is returned and the group
882 * reference is incremented, else NULL is returned.
884 struct iommu_group *iommu_group_get(struct device *dev)
886 struct iommu_group *group = dev->iommu_group;
888 if (group)
889 kobject_get(group->devices_kobj);
891 return group;
893 EXPORT_SYMBOL_GPL(iommu_group_get);
896 * iommu_group_ref_get - Increment reference on a group
897 * @group: the group to use, must not be NULL
899 * This function is called by iommu drivers to take additional references on an
900 * existing group. Returns the given group for convenience.
902 struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
904 kobject_get(group->devices_kobj);
905 return group;
907 EXPORT_SYMBOL_GPL(iommu_group_ref_get);
910 * iommu_group_put - Decrement group reference
911 * @group: the group to use
913 * This function is called by iommu drivers and users to release the
914 * iommu group. Once the reference count is zero, the group is released.
916 void iommu_group_put(struct iommu_group *group)
918 if (group)
919 kobject_put(group->devices_kobj);
921 EXPORT_SYMBOL_GPL(iommu_group_put);
924 * iommu_group_register_notifier - Register a notifier for group changes
925 * @group: the group to watch
926 * @nb: notifier block to signal
928 * This function allows iommu group users to track changes in a group.
929 * See include/linux/iommu.h for actions sent via this notifier. Caller
930 * should hold a reference to the group throughout notifier registration.
932 int iommu_group_register_notifier(struct iommu_group *group,
933 struct notifier_block *nb)
935 return blocking_notifier_chain_register(&group->notifier, nb);
937 EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
940 * iommu_group_unregister_notifier - Unregister a notifier
941 * @group: the group to watch
942 * @nb: notifier block to signal
944 * Unregister a previously registered group notifier block.
946 int iommu_group_unregister_notifier(struct iommu_group *group,
947 struct notifier_block *nb)
949 return blocking_notifier_chain_unregister(&group->notifier, nb);
951 EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
954 * iommu_register_device_fault_handler() - Register a device fault handler
955 * @dev: the device
956 * @handler: the fault handler
957 * @data: private data passed as argument to the handler
959 * When an IOMMU fault event is received, this handler gets called with the
960 * fault event and data as argument. The handler should return 0 on success. If
961 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
962 * complete the fault by calling iommu_page_response() with one of the following
963 * response codes:
964 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
965 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
966 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
967 * page faults if possible.
969 * Return 0 if the fault handler was installed successfully, or an error.
971 int iommu_register_device_fault_handler(struct device *dev,
972 iommu_dev_fault_handler_t handler,
973 void *data)
975 struct iommu_param *param = dev->iommu_param;
976 int ret = 0;
978 if (!param)
979 return -EINVAL;
981 mutex_lock(&param->lock);
982 /* Only allow one fault handler registered for each device */
983 if (param->fault_param) {
984 ret = -EBUSY;
985 goto done_unlock;
988 get_device(dev);
989 param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
990 if (!param->fault_param) {
991 put_device(dev);
992 ret = -ENOMEM;
993 goto done_unlock;
995 param->fault_param->handler = handler;
996 param->fault_param->data = data;
997 mutex_init(&param->fault_param->lock);
998 INIT_LIST_HEAD(&param->fault_param->faults);
1000 done_unlock:
1001 mutex_unlock(&param->lock);
1003 return ret;
1005 EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
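/*
 * Illustrative sketch (not part of this file): a consumer registering a
 * device fault handler and completing recoverable page-request faults.
 * example_handle_fault()/example_register() are assumptions, and a real
 * consumer would do actual work before replying.
 */
static int example_handle_fault(struct iommu_fault *fault, void *data)
{
	struct device *dev = data;
	struct iommu_page_response resp = {
		.version = IOMMU_PAGE_RESP_VERSION_1,
		.grpid	 = fault->prm.grpid,
		.pasid	 = fault->prm.pasid,
		.code	 = IOMMU_PAGE_RESP_INVALID,
	};

	if (fault->type != IOMMU_FAULT_PAGE_REQ)
		return -EOPNOTSUPP;

	/* Terminate the page request; a real handler would try to service it */
	return iommu_page_response(dev, &resp);
}

static int example_register(struct device *dev)
{
	return iommu_register_device_fault_handler(dev, example_handle_fault,
						   dev);
}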
1008 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
1009 * @dev: the device
1011 * Remove the device fault handler installed with
1012 * iommu_register_device_fault_handler().
1014 * Return 0 on success, or an error.
1016 int iommu_unregister_device_fault_handler(struct device *dev)
1018 struct iommu_param *param = dev->iommu_param;
1019 int ret = 0;
1021 if (!param)
1022 return -EINVAL;
1024 mutex_lock(&param->lock);
1026 if (!param->fault_param)
1027 goto unlock;
1029 /* we cannot unregister handler if there are pending faults */
1030 if (!list_empty(&param->fault_param->faults)) {
1031 ret = -EBUSY;
1032 goto unlock;
1035 kfree(param->fault_param);
1036 param->fault_param = NULL;
1037 put_device(dev);
1038 unlock:
1039 mutex_unlock(&param->lock);
1041 return ret;
1043 EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
1046 * iommu_report_device_fault() - Report fault event to device driver
1047 * @dev: the device
1048 * @evt: fault event data
1050 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
1051 * handler. When this function fails and the fault is recoverable, it is the
1052 * caller's responsibility to complete the fault.
1054 * Return 0 on success, or an error.
1056 int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
1058 struct iommu_param *param = dev->iommu_param;
1059 struct iommu_fault_event *evt_pending = NULL;
1060 struct iommu_fault_param *fparam;
1061 int ret = 0;
1063 if (!param || !evt)
1064 return -EINVAL;
1066 /* we only report device fault if there is a handler registered */
1067 mutex_lock(&param->lock);
1068 fparam = param->fault_param;
1069 if (!fparam || !fparam->handler) {
1070 ret = -EINVAL;
1071 goto done_unlock;
1074 if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
1075 (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
1076 evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
1077 GFP_KERNEL);
1078 if (!evt_pending) {
1079 ret = -ENOMEM;
1080 goto done_unlock;
1082 mutex_lock(&fparam->lock);
1083 list_add_tail(&evt_pending->list, &fparam->faults);
1084 mutex_unlock(&fparam->lock);
1087 ret = fparam->handler(&evt->fault, fparam->data);
1088 if (ret && evt_pending) {
1089 mutex_lock(&fparam->lock);
1090 list_del(&evt_pending->list);
1091 mutex_unlock(&fparam->lock);
1092 kfree(evt_pending);
1094 done_unlock:
1095 mutex_unlock(&param->lock);
1096 return ret;
1098 EXPORT_SYMBOL_GPL(iommu_report_device_fault);
1100 int iommu_page_response(struct device *dev,
1101 struct iommu_page_response *msg)
1103 bool pasid_valid;
1104 int ret = -EINVAL;
1105 struct iommu_fault_event *evt;
1106 struct iommu_fault_page_request *prm;
1107 struct iommu_param *param = dev->iommu_param;
1108 struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1110 if (!domain || !domain->ops->page_response)
1111 return -ENODEV;
1113 if (!param || !param->fault_param)
1114 return -EINVAL;
1116 if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
1117 msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
1118 return -EINVAL;
1120 /* Only send response if there is a fault report pending */
1121 mutex_lock(&param->fault_param->lock);
1122 if (list_empty(&param->fault_param->faults)) {
1123 dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
1124 goto done_unlock;
1127 * Check if we have a matching page request pending to respond,
1128 * otherwise return -EINVAL
1130 list_for_each_entry(evt, &param->fault_param->faults, list) {
1131 prm = &evt->fault.prm;
1132 pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
1134 if ((pasid_valid && prm->pasid != msg->pasid) ||
1135 prm->grpid != msg->grpid)
1136 continue;
1138 /* Sanitize the reply */
1139 msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;
1141 ret = domain->ops->page_response(dev, evt, msg);
1142 list_del(&evt->list);
1143 kfree(evt);
1144 break;
1147 done_unlock:
1148 mutex_unlock(&param->fault_param->lock);
1149 return ret;
1151 EXPORT_SYMBOL_GPL(iommu_page_response);
1154 * iommu_group_id - Return ID for a group
1155 * @group: the group to ID
1157 * Return the unique ID for the group matching the sysfs group number.
1159 int iommu_group_id(struct iommu_group *group)
1161 return group->id;
1163 EXPORT_SYMBOL_GPL(iommu_group_id);
1165 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1166 unsigned long *devfns);
1169 * To consider a PCI device isolated, we require ACS to support Source
1170 * Validation, Request Redirection, Completer Redirection, and Upstream
1171 * Forwarding. This effectively means that devices cannot spoof their
1172 * requester ID, requests and completions cannot be redirected, and all
1173 * transactions are forwarded upstream, even as they pass through a
1174 * bridge where the target device is downstream.
1176 #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
1179 * For multifunction devices which are not isolated from each other, find
1180 * all the other non-isolated functions and look for existing groups. For
1181 * each function, we also need to look for aliases to or from other devices
1182 * that may already have a group.
1184 static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
1185 unsigned long *devfns)
1187 struct pci_dev *tmp = NULL;
1188 struct iommu_group *group;
1190 if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
1191 return NULL;
1193 for_each_pci_dev(tmp) {
1194 if (tmp == pdev || tmp->bus != pdev->bus ||
1195 PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
1196 pci_acs_enabled(tmp, REQ_ACS_FLAGS))
1197 continue;
1199 group = get_pci_alias_group(tmp, devfns);
1200 if (group) {
1201 pci_dev_put(tmp);
1202 return group;
1206 return NULL;
1210 * Look for aliases to or from the given device for existing groups. DMA
1211 * aliases are only supported on the same bus, therefore the search
1212 * space is quite small (especially since we're really only looking at pcie
1213 * device, and therefore only expect multiple slots on the root complex or
1214 * downstream switch ports). It's conceivable though that a pair of
1215 * multifunction devices could have aliases between them that would cause a
1216 * loop. To prevent this, we use a bitmap to track where we've been.
1218 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1219 unsigned long *devfns)
1221 struct pci_dev *tmp = NULL;
1222 struct iommu_group *group;
1224 if (test_and_set_bit(pdev->devfn & 0xff, devfns))
1225 return NULL;
1227 group = iommu_group_get(&pdev->dev);
1228 if (group)
1229 return group;
1231 for_each_pci_dev(tmp) {
1232 if (tmp == pdev || tmp->bus != pdev->bus)
1233 continue;
1235 /* We alias them or they alias us */
1236 if (pci_devs_are_dma_aliases(pdev, tmp)) {
1237 group = get_pci_alias_group(tmp, devfns);
1238 if (group) {
1239 pci_dev_put(tmp);
1240 return group;
1243 group = get_pci_function_alias_group(tmp, devfns);
1244 if (group) {
1245 pci_dev_put(tmp);
1246 return group;
1251 return NULL;
1254 struct group_for_pci_data {
1255 struct pci_dev *pdev;
1256 struct iommu_group *group;
1260 * DMA alias iterator callback, return the last seen device. Stop and return
1261 * the IOMMU group if we find one along the way.
1263 static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
1265 struct group_for_pci_data *data = opaque;
1267 data->pdev = pdev;
1268 data->group = iommu_group_get(&pdev->dev);
1270 return data->group != NULL;
1274 * Generic device_group call-back function. It just allocates one
1275 * iommu-group per device.
1277 struct iommu_group *generic_device_group(struct device *dev)
1279 return iommu_group_alloc();
1281 EXPORT_SYMBOL_GPL(generic_device_group);
1284 * Use standard PCI bus topology, isolation features, and DMA alias quirks
1285 * to find or create an IOMMU group for a device.
1287 struct iommu_group *pci_device_group(struct device *dev)
1289 struct pci_dev *pdev = to_pci_dev(dev);
1290 struct group_for_pci_data data;
1291 struct pci_bus *bus;
1292 struct iommu_group *group = NULL;
1293 u64 devfns[4] = { 0 };
1295 if (WARN_ON(!dev_is_pci(dev)))
1296 return ERR_PTR(-EINVAL);
1299 * Find the upstream DMA alias for the device. A device must not
1300 * be aliased due to topology in order to have its own IOMMU group.
1301 * If we find an alias along the way that already belongs to a
1302 * group, use it.
1304 if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
1305 return data.group;
1307 pdev = data.pdev;
1310 * Continue upstream from the point of minimum IOMMU granularity
1311 * due to aliases to the point where devices are protected from
1312 * peer-to-peer DMA by PCI ACS. Again, if we find an existing
1313 * group, use it.
1315 for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
1316 if (!bus->self)
1317 continue;
1319 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
1320 break;
1322 pdev = bus->self;
1324 group = iommu_group_get(&pdev->dev);
1325 if (group)
1326 return group;
1330 * Look for existing groups on device aliases. If we alias another
1331 * device or another device aliases us, use the same group.
1333 group = get_pci_alias_group(pdev, (unsigned long *)devfns);
1334 if (group)
1335 return group;
1338 * Look for existing groups on non-isolated functions on the same
1339 * slot and aliases of those functions, if any. No need to clear
1340 * the search bitmap, the tested devfns are still valid.
1342 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
1343 if (group)
1344 return group;
1346 /* No shared group found, allocate new */
1347 return iommu_group_alloc();
1349 EXPORT_SYMBOL_GPL(pci_device_group);
1351 /* Get the IOMMU group for device on fsl-mc bus */
1352 struct iommu_group *fsl_mc_device_group(struct device *dev)
1354 struct device *cont_dev = fsl_mc_cont_dev(dev);
1355 struct iommu_group *group;
1357 group = iommu_group_get(cont_dev);
1358 if (!group)
1359 group = iommu_group_alloc();
1360 return group;
1362 EXPORT_SYMBOL_GPL(fsl_mc_device_group);
1365 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
1366 * @dev: target device
1368 * This function is intended to be called by IOMMU drivers and extended to
1369 * support common, bus-defined algorithms when determining or creating the
1370 * IOMMU group for a device. On success, the caller will hold a reference
1371 * to the returned IOMMU group, which will already include the provided
1372 * device. The reference should be released with iommu_group_put().
1374 struct iommu_group *iommu_group_get_for_dev(struct device *dev)
1376 const struct iommu_ops *ops = dev->bus->iommu_ops;
1377 struct iommu_group *group;
1378 int ret;
1380 group = iommu_group_get(dev);
1381 if (group)
1382 return group;
1384 if (!ops)
1385 return ERR_PTR(-EINVAL);
1387 group = ops->device_group(dev);
1388 if (WARN_ON_ONCE(group == NULL))
1389 return ERR_PTR(-EINVAL);
1391 if (IS_ERR(group))
1392 return group;
1395 * Try to allocate a default domain - needs support from the
1396 * IOMMU driver.
1398 if (!group->default_domain) {
1399 struct iommu_domain *dom;
1401 dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
1402 if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
1403 dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
1404 if (dom) {
1405 dev_warn(dev,
1406 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
1407 iommu_def_domain_type);
1411 group->default_domain = dom;
1412 if (!group->domain)
1413 group->domain = dom;
1415 if (dom && !iommu_dma_strict) {
1416 int attr = 1;
1417 iommu_domain_set_attr(dom,
1418 DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
1419 &attr);
1423 ret = iommu_group_add_device(group, dev);
1424 if (ret) {
1425 iommu_group_put(group);
1426 return ERR_PTR(ret);
1429 return group;
1431 EXPORT_SYMBOL(iommu_group_get_for_dev);
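/*
 * Illustrative sketch (not part of this file): the usual shape of an IOMMU
 * driver's ->add_device() callback, which relies on iommu_group_get_for_dev()
 * to find or create the group and add the device to it.  example_add_device()
 * is an assumption.
 */
static int example_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	/* The device is now a group member; drop the reference we were given */
	iommu_group_put(group);

	return 0;
}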
1433 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
1435 return group->default_domain;
1438 static int add_iommu_group(struct device *dev, void *data)
1440 int ret = iommu_probe_device(dev);
1443 * We ignore -ENODEV errors for now, as they just mean that the
1444 * device is not translated by an IOMMU. We still care about
1445 * other errors and fail to initialize when they happen.
1447 if (ret == -ENODEV)
1448 ret = 0;
1450 return ret;
1453 static int remove_iommu_group(struct device *dev, void *data)
1455 iommu_release_device(dev);
1457 return 0;
1460 static int iommu_bus_notifier(struct notifier_block *nb,
1461 unsigned long action, void *data)
1463 unsigned long group_action = 0;
1464 struct device *dev = data;
1465 struct iommu_group *group;
1468 * ADD/DEL call into iommu driver ops if provided, which may
1469 * result in ADD/DEL notifiers to group->notifier
1471 if (action == BUS_NOTIFY_ADD_DEVICE) {
1472 int ret;
1474 ret = iommu_probe_device(dev);
1475 return (ret) ? NOTIFY_DONE : NOTIFY_OK;
1476 } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
1477 iommu_release_device(dev);
1478 return NOTIFY_OK;
1482 * Remaining BUS_NOTIFYs get filtered and republished to the
1483 * group, if anyone is listening
1485 group = iommu_group_get(dev);
1486 if (!group)
1487 return 0;
1489 switch (action) {
1490 case BUS_NOTIFY_BIND_DRIVER:
1491 group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
1492 break;
1493 case BUS_NOTIFY_BOUND_DRIVER:
1494 group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
1495 break;
1496 case BUS_NOTIFY_UNBIND_DRIVER:
1497 group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
1498 break;
1499 case BUS_NOTIFY_UNBOUND_DRIVER:
1500 group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
1501 break;
1504 if (group_action)
1505 blocking_notifier_call_chain(&group->notifier,
1506 group_action, dev);
1508 iommu_group_put(group);
1509 return 0;
1512 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
1514 int err;
1515 struct notifier_block *nb;
1517 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
1518 if (!nb)
1519 return -ENOMEM;
1521 nb->notifier_call = iommu_bus_notifier;
1523 err = bus_register_notifier(bus, nb);
1524 if (err)
1525 goto out_free;
1527 err = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
1528 if (err)
1529 goto out_err;
1532 return 0;
1534 out_err:
1535 /* Clean up */
1536 bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
1537 bus_unregister_notifier(bus, nb);
1539 out_free:
1540 kfree(nb);
1542 return err;
1546 * bus_set_iommu - set iommu-callbacks for the bus
1547 * @bus: bus.
1548 * @ops: the callbacks provided by the iommu-driver
1550 * This function is called by an iommu driver to set the iommu methods
1551 * used for a particular bus. Drivers for devices on that bus can use
1552 * the iommu-api after these ops are registered.
1553 * This special function is needed because IOMMUs are usually devices on
1554 * the bus itself, so the iommu drivers are not initialized when the bus
1555 * is set up. With this function the iommu-driver can set the iommu-ops
1556 * afterwards.
1558 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
1560 int err;
1562 if (ops == NULL) {
1563 bus->iommu_ops = NULL;
1564 return 0;
1567 if (bus->iommu_ops != NULL)
1568 return -EBUSY;
1570 bus->iommu_ops = ops;
1572 /* Do IOMMU specific setup for this bus-type */
1573 err = iommu_bus_init(bus, ops);
1574 if (err)
1575 bus->iommu_ops = NULL;
1577 return err;
1579 EXPORT_SYMBOL_GPL(bus_set_iommu);
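/*
 * Illustrative sketch (not part of this file): how an IOMMU driver typically
 * publishes its ops once its own probe has succeeded.  example_iommu_ops and
 * example_iommu_init() are assumptions.
 */
static const struct iommu_ops example_iommu_ops = {
	/* ->capable, ->domain_alloc, ->add_device, ... go here */
};

static int __init example_iommu_init(void)
{
	/* After this, devices on the PCI bus get probed via iommu_probe_device() */
	return bus_set_iommu(&pci_bus_type, &example_iommu_ops);
}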
1581 bool iommu_present(struct bus_type *bus)
1583 return bus->iommu_ops != NULL;
1585 EXPORT_SYMBOL_GPL(iommu_present);
1587 bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
1589 if (!bus->iommu_ops || !bus->iommu_ops->capable)
1590 return false;
1592 return bus->iommu_ops->capable(cap);
1594 EXPORT_SYMBOL_GPL(iommu_capable);
1597 * iommu_set_fault_handler() - set a fault handler for an iommu domain
1598 * @domain: iommu domain
1599 * @handler: fault handler
1600 * @token: user data, will be passed back to the fault handler
1602 * This function should be used by IOMMU users which want to be notified
1603 * whenever an IOMMU fault happens.
1605 * The fault handler itself should return 0 on success, and an appropriate
1606 * error code otherwise.
1608 void iommu_set_fault_handler(struct iommu_domain *domain,
1609 iommu_fault_handler_t handler,
1610 void *token)
1612 BUG_ON(!domain);
1614 domain->handler = handler;
1615 domain->handler_token = token;
1617 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
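/*
 * Illustrative sketch (not part of this file): an owner of an unmanaged
 * domain installing a per-domain fault handler.  example_fault() is an
 * assumption; returning -ENOSYS keeps the IOMMU driver's default reporting.
 */
static int example_fault(struct iommu_domain *domain, struct device *dev,
			 unsigned long iova, int flags, void *token)
{
	dev_err(dev, "unexpected %s fault at IOVA %#lx\n",
		(flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);

	return -ENOSYS;
}
/* After allocating the domain: iommu_set_fault_handler(domain, example_fault, NULL); */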
1619 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
1620 unsigned type)
1622 struct iommu_domain *domain;
1624 if (bus == NULL || bus->iommu_ops == NULL)
1625 return NULL;
1627 domain = bus->iommu_ops->domain_alloc(type);
1628 if (!domain)
1629 return NULL;
1631 domain->ops = bus->iommu_ops;
1632 domain->type = type;
1633 /* Assume all sizes by default; the driver may override this later */
1634 domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
1636 return domain;
1639 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
1641 return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
1643 EXPORT_SYMBOL_GPL(iommu_domain_alloc);
1645 void iommu_domain_free(struct iommu_domain *domain)
1647 domain->ops->domain_free(domain);
1649 EXPORT_SYMBOL_GPL(iommu_domain_free);
1651 static int __iommu_attach_device(struct iommu_domain *domain,
1652 struct device *dev)
1654 int ret;
1655 if ((domain->ops->is_attach_deferred != NULL) &&
1656 domain->ops->is_attach_deferred(domain, dev))
1657 return 0;
1659 if (unlikely(domain->ops->attach_dev == NULL))
1660 return -ENODEV;
1662 ret = domain->ops->attach_dev(domain, dev);
1663 if (!ret)
1664 trace_attach_device_to_domain(dev);
1665 return ret;
1668 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
1670 struct iommu_group *group;
1671 int ret;
1673 group = iommu_group_get(dev);
1674 if (!group)
1675 return -ENODEV;
1678 * Lock the group to make sure the device-count doesn't
1679 * change while we are attaching
1681 mutex_lock(&group->mutex);
1682 ret = -EINVAL;
1683 if (iommu_group_device_count(group) != 1)
1684 goto out_unlock;
1686 ret = __iommu_attach_group(domain, group);
1688 out_unlock:
1689 mutex_unlock(&group->mutex);
1690 iommu_group_put(group);
1692 return ret;
1694 EXPORT_SYMBOL_GPL(iommu_attach_device);
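/*
 * Illustrative sketch (not part of this file): the classic unmanaged-domain
 * flow used by callers such as VFIO or GPU drivers.  Error handling is kept
 * minimal and example_take_ownership() is an assumption.
 */
static struct iommu_domain *example_take_ownership(struct device *dev)
{
	struct iommu_domain *domain;

	domain = iommu_domain_alloc(dev->bus);	/* IOMMU_DOMAIN_UNMANAGED */
	if (!domain)
		return NULL;

	/* Only succeeds for groups with exactly one device, see above */
	if (iommu_attach_device(domain, dev)) {
		iommu_domain_free(domain);
		return NULL;
	}

	return domain;
}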
1696 int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
1697 struct iommu_cache_invalidate_info *inv_info)
1699 if (unlikely(!domain->ops->cache_invalidate))
1700 return -ENODEV;
1702 return domain->ops->cache_invalidate(domain, dev, inv_info);
1704 EXPORT_SYMBOL_GPL(iommu_cache_invalidate);
1706 int iommu_sva_bind_gpasid(struct iommu_domain *domain,
1707 struct device *dev, struct iommu_gpasid_bind_data *data)
1709 if (unlikely(!domain->ops->sva_bind_gpasid))
1710 return -ENODEV;
1712 return domain->ops->sva_bind_gpasid(domain, dev, data);
1714 EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid);
1716 int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
1717 ioasid_t pasid)
1719 if (unlikely(!domain->ops->sva_unbind_gpasid))
1720 return -ENODEV;
1722 return domain->ops->sva_unbind_gpasid(dev, pasid);
1724 EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
1726 static void __iommu_detach_device(struct iommu_domain *domain,
1727 struct device *dev)
1729 if ((domain->ops->is_attach_deferred != NULL) &&
1730 domain->ops->is_attach_deferred(domain, dev))
1731 return;
1733 if (unlikely(domain->ops->detach_dev == NULL))
1734 return;
1736 domain->ops->detach_dev(domain, dev);
1737 trace_detach_device_from_domain(dev);
1740 void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
1742 struct iommu_group *group;
1744 group = iommu_group_get(dev);
1745 if (!group)
1746 return;
1748 mutex_lock(&group->mutex);
1749 if (iommu_group_device_count(group) != 1) {
1750 WARN_ON(1);
1751 goto out_unlock;
1754 __iommu_detach_group(domain, group);
1756 out_unlock:
1757 mutex_unlock(&group->mutex);
1758 iommu_group_put(group);
1760 EXPORT_SYMBOL_GPL(iommu_detach_device);
1762 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
1764 struct iommu_domain *domain;
1765 struct iommu_group *group;
1767 group = iommu_group_get(dev);
1768 if (!group)
1769 return NULL;
1771 domain = group->domain;
1773 iommu_group_put(group);
1775 return domain;
1777 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
1780 * For IOMMU_DOMAIN_DMA implementations which already provide their own
1781 * guarantees that the group and its default domain are valid and correct.
1783 struct iommu_domain *iommu_get_dma_domain(struct device *dev)
1785 return dev->iommu_group->default_domain;
1789 * IOMMU groups are really the natural working unit of the IOMMU, but
1790 * the IOMMU API works on domains and devices. Bridge that gap by
1791 * iterating over the devices in a group. Ideally we'd have a single
1792 * device which represents the requestor ID of the group, but we also
1793 * allow IOMMU drivers to create policy defined minimum sets, where
1794 * the physical hardware may be able to distinguish members, but we
1795 * wish to group them at a higher level (ex. untrusted multi-function
1796 * PCI devices). Thus we attach each device.
1798 static int iommu_group_do_attach_device(struct device *dev, void *data)
1800 struct iommu_domain *domain = data;
1802 return __iommu_attach_device(domain, dev);
1805 static int __iommu_attach_group(struct iommu_domain *domain,
1806 struct iommu_group *group)
1808 int ret;
1810 if (group->default_domain && group->domain != group->default_domain)
1811 return -EBUSY;
1813 ret = __iommu_group_for_each_dev(group, domain,
1814 iommu_group_do_attach_device);
1815 if (ret == 0)
1816 group->domain = domain;
1818 return ret;
1821 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
1823 int ret;
1825 mutex_lock(&group->mutex);
1826 ret = __iommu_attach_group(domain, group);
1827 mutex_unlock(&group->mutex);
1829 return ret;
1831 EXPORT_SYMBOL_GPL(iommu_attach_group);
1833 static int iommu_group_do_detach_device(struct device *dev, void *data)
1835 struct iommu_domain *domain = data;
1837 __iommu_detach_device(domain, dev);
1839 return 0;
1842 static void __iommu_detach_group(struct iommu_domain *domain,
1843 struct iommu_group *group)
1845 int ret;
1847 if (!group->default_domain) {
1848 __iommu_group_for_each_dev(group, domain,
1849 iommu_group_do_detach_device);
1850 group->domain = NULL;
1851 return;
1854 if (group->domain == group->default_domain)
1855 return;
1857 /* Detach by re-attaching to the default domain */
1858 ret = __iommu_group_for_each_dev(group, group->default_domain,
1859 iommu_group_do_attach_device);
1860 if (ret != 0)
1861 WARN_ON(1);
1862 else
1863 group->domain = group->default_domain;
1866 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
1868 mutex_lock(&group->mutex);
1869 __iommu_detach_group(domain, group);
1870 mutex_unlock(&group->mutex);
1872 EXPORT_SYMBOL_GPL(iommu_detach_group);
1874 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
1876 if (unlikely(domain->ops->iova_to_phys == NULL))
1877 return 0;
1879 return domain->ops->iova_to_phys(domain, iova);
1881 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
1883 static size_t iommu_pgsize(struct iommu_domain *domain,
1884 unsigned long addr_merge, size_t size)
1886 unsigned int pgsize_idx;
1887 size_t pgsize;
1889 /* Max page size that still fits into 'size' */
1890 pgsize_idx = __fls(size);
1892 /* need to consider alignment requirements ? */
1893 if (likely(addr_merge)) {
1894 /* Max page size allowed by address */
1895 unsigned int align_pgsize_idx = __ffs(addr_merge);
1896 pgsize_idx = min(pgsize_idx, align_pgsize_idx);
1899 /* build a mask of acceptable page sizes */
1900 pgsize = (1UL << (pgsize_idx + 1)) - 1;
1902 /* throw away page sizes not supported by the hardware */
1903 pgsize &= domain->pgsize_bitmap;
1905 /* make sure we're still sane */
1906 BUG_ON(!pgsize);
1908 /* pick the biggest page */
1909 pgsize_idx = __fls(pgsize);
1910 pgsize = 1UL << pgsize_idx;
1912 return pgsize;
1915 int __iommu_map(struct iommu_domain *domain, unsigned long iova,
1916 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
1918 const struct iommu_ops *ops = domain->ops;
1919 unsigned long orig_iova = iova;
1920 unsigned int min_pagesz;
1921 size_t orig_size = size;
1922 phys_addr_t orig_paddr = paddr;
1923 int ret = 0;
1925 if (unlikely(ops->map == NULL ||
1926 domain->pgsize_bitmap == 0UL))
1927 return -ENODEV;
1929 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
1930 return -EINVAL;
1932 /* find out the minimum page size supported */
1933 min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
1936 * both the virtual address and the physical one, as well as
1937 * the size of the mapping, must be aligned (at least) to the
1938 * size of the smallest page supported by the hardware
1940 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
1941 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
1942 iova, &paddr, size, min_pagesz);
1943 return -EINVAL;
1946 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
1948 while (size) {
1949 size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
1951 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
1952 iova, &paddr, pgsize);
1953 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
1955 if (ret)
1956 break;
1958 iova += pgsize;
1959 paddr += pgsize;
1960 size -= pgsize;
1963 if (ops->iotlb_sync_map)
1964 ops->iotlb_sync_map(domain);
1966 /* unroll mapping in case something went wrong */
1967 if (ret)
1968 iommu_unmap(domain, orig_iova, orig_size - size);
1969 else
1970 trace_map(orig_iova, orig_paddr, orig_size);
1972 return ret;
1975 int iommu_map(struct iommu_domain *domain, unsigned long iova,
1976 phys_addr_t paddr, size_t size, int prot)
1978 might_sleep();
1979 return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
1981 EXPORT_SYMBOL_GPL(iommu_map);
1983 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
1984 phys_addr_t paddr, size_t size, int prot)
1986 return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
1988 EXPORT_SYMBOL_GPL(iommu_map_atomic);
1990 static size_t __iommu_unmap(struct iommu_domain *domain,
1991 unsigned long iova, size_t size,
1992 struct iommu_iotlb_gather *iotlb_gather)
1994 const struct iommu_ops *ops = domain->ops;
1995 size_t unmapped_page, unmapped = 0;
1996 unsigned long orig_iova = iova;
1997 unsigned int min_pagesz;
1999 if (unlikely(ops->unmap == NULL ||
2000 domain->pgsize_bitmap == 0UL))
2001 return 0;
2003 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2004 return 0;
2006 /* find out the minimum page size supported */
2007 min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2010 * The virtual address, as well as the size of the mapping, must be
2011 * aligned (at least) to the size of the smallest page supported
2012 * by the hardware
2014 if (!IS_ALIGNED(iova | size, min_pagesz)) {
2015 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
2016 iova, size, min_pagesz);
2017 return 0;
2020 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
2023 * Keep iterating until we either unmap 'size' bytes (or more)
2024 * or we hit an area that isn't mapped.
2026 while (unmapped < size) {
2027 size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
2029 unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
2030 if (!unmapped_page)
2031 break;
2033 pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
2034 iova, unmapped_page);
2036 iova += unmapped_page;
2037 unmapped += unmapped_page;
2040 trace_unmap(orig_iova, size, unmapped);
2041 return unmapped;
2044 size_t iommu_unmap(struct iommu_domain *domain,
2045 unsigned long iova, size_t size)
2047 struct iommu_iotlb_gather iotlb_gather;
2048 size_t ret;
2050 iommu_iotlb_gather_init(&iotlb_gather);
2051 ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
2052 iommu_tlb_sync(domain, &iotlb_gather);
2054 return ret;
2056 EXPORT_SYMBOL_GPL(iommu_unmap);
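/*
 * Illustrative sketch (not part of this file): mapping a single physically
 * contiguous buffer into a domain and tearing it down again.  The IOVA and
 * size are made up and must obey the alignment rules checked in __iommu_map()
 * (multiples of the smallest page size in domain->pgsize_bitmap).
 */
static int example_map_buffer(struct iommu_domain *domain, phys_addr_t paddr)
{
	const unsigned long iova = 0x100000;	/* assumption: an unused IOVA */
	const size_t size = 0x200000;		/* 2 MiB */
	int ret;

	ret = iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* ... DMA through the domain at @iova happens here ... */

	if (iommu_unmap(domain, iova, size) != size)
		return -EIO;

	return 0;
}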
2058 size_t iommu_unmap_fast(struct iommu_domain *domain,
2059 unsigned long iova, size_t size,
2060 struct iommu_iotlb_gather *iotlb_gather)
2062 return __iommu_unmap(domain, iova, size, iotlb_gather);
2064 EXPORT_SYMBOL_GPL(iommu_unmap_fast);
2066 size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2067 struct scatterlist *sg, unsigned int nents, int prot,
2068 gfp_t gfp)
2070 size_t len = 0, mapped = 0;
2071 phys_addr_t start;
2072 unsigned int i = 0;
2073 int ret;
2075 while (i <= nents) {
2076 phys_addr_t s_phys = sg_phys(sg);
2078 if (len && s_phys != start + len) {
2079 ret = __iommu_map(domain, iova + mapped, start,
2080 len, prot, gfp);
2082 if (ret)
2083 goto out_err;
2085 mapped += len;
2086 len = 0;
2089 if (len) {
2090 len += sg->length;
2091 } else {
2092 len = sg->length;
2093 start = s_phys;
2096 if (++i < nents)
2097 sg = sg_next(sg);
2100 return mapped;
2102 out_err:
2103 /* undo mappings already done */
2104 iommu_unmap(domain, iova, mapped);
2106 return 0;
2110 size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2111 struct scatterlist *sg, unsigned int nents, int prot)
2113 might_sleep();
2114 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
2116 EXPORT_SYMBOL_GPL(iommu_map_sg);
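/*
 * Illustrative sketch (not part of this file): mapping a scatterlist that
 * describes @total_len bytes.  On failure iommu_map_sg() returns 0 and any
 * partial mappings have already been undone by __iommu_map_sg().
 */
static int example_map_sgl(struct iommu_domain *domain, unsigned long iova,
			   struct scatterlist *sgl, unsigned int nents,
			   size_t total_len)
{
	size_t mapped;

	mapped = iommu_map_sg(domain, iova, sgl, nents,
			      IOMMU_READ | IOMMU_WRITE);

	return mapped == total_len ? 0 : -ENOMEM;
}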
2118 size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
2119 struct scatterlist *sg, unsigned int nents, int prot)
2121 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
2123 EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
2125 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
2126 phys_addr_t paddr, u64 size, int prot)
2128 if (unlikely(domain->ops->domain_window_enable == NULL))
2129 return -ENODEV;
2131 return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
2132 prot);
2134 EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
2136 void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
2138 if (unlikely(domain->ops->domain_window_disable == NULL))
2139 return;
2141 return domain->ops->domain_window_disable(domain, wnd_nr);
2143 EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users that are
 * interested in such events to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
		       unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	/*
	 * if upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
				      domain->handler_token);

	trace_io_page_fault(dev, iova, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);
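
/*
 * A minimal usage sketch (illustrative only, not taken from an in-tree
 * driver): the owner of a domain can register a handler with
 * iommu_set_fault_handler(), and the IOMMU driver then routes faults
 * through report_iommu_fault(). The handler and token names below are
 * hypothetical.
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		struct my_ctx *ctx = token;
 *
 *		dev_err(dev, "DMA fault at IOVA %#lx (%s)\n", iova,
 *			(flags & IOMMU_FAULT_WRITE) ? "write" : "read");
 *		return -ENOSYS;	// keep the IOMMU driver's default behaviour
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, ctx);
 */
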
static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	iommu_debugfs_setup();

	return 0;
}
core_initcall(iommu_init);
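
/*
 * Domain attribute accessors. DOMAIN_ATTR_GEOMETRY and DOMAIN_ATTR_PAGING
 * are answered generically from struct iommu_domain; every other attribute
 * is forwarded to the driver's domain_get_attr/domain_set_attr callback, or
 * -EINVAL is returned if the driver does not provide one.
 */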
int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging = data;
		*paging = (domain->pgsize_bitmap != 0UL);
		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);

int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;

	switch (attr) {
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
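
/*
 * Reserved-region handling: iommu_get_resv_regions() asks the IOMMU driver
 * to add the regions that must not be used for ordinary DMA mappings
 * (direct mappings, MSI windows, etc.) to @list, and
 * iommu_put_resv_regions() hands the list back so the driver can free it.
 */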
void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}

void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_resv_regions)
		ops->put_resv_regions(dev, list);
}

/**
 * generic_iommu_put_resv_regions - Reserved region driver helper
 * @dev: device for which to free reserved regions
 * @list: reserved region list for device
 *
 * IOMMU drivers can use this to implement their .put_resv_regions() callback
 * for simple reservations. Memory allocated for each reserved region will be
 * freed. If an IOMMU driver allocates additional resources per region, it is
 * going to have to implement a custom callback.
 */
void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, list, list)
		kfree(entry);
}
EXPORT_SYMBOL(generic_iommu_put_resv_regions);
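
/*
 * Allocate and initialise a struct iommu_resv_region covering
 * [start, start + length) with the given protection flags and reservation
 * type. Returns NULL on allocation failure; the caller owns the region and
 * is expected to add it to a reserved-region list.
 */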
struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type)
{
	struct iommu_resv_region *region;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}
EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
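
/*
 * Replace the default domain of a device's group with a freshly allocated
 * domain of @type (DMA or identity). This only succeeds while the device is
 * the sole member of its group, since changing the default domain would
 * otherwise alter the mappings of other, already-configured devices.
 */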
static int
request_default_domain_for_dev(struct device *dev, unsigned long type)
{
	struct iommu_domain *domain;
	struct iommu_group *group;
	int ret;

	/* Device must already be in a group before calling this function */
	group = iommu_group_get(dev);
	if (!group)
		return -EINVAL;

	mutex_lock(&group->mutex);

	ret = 0;
	if (group->default_domain && group->default_domain->type == type)
		goto out;

	/* Don't change mappings of existing devices */
	ret = -EBUSY;
	if (iommu_group_device_count(group) != 1)
		goto out;

	ret = -ENOMEM;
	domain = __iommu_domain_alloc(dev->bus, type);
	if (!domain)
		goto out;

	/* Attach the device to the domain */
	ret = __iommu_attach_group(domain, group);
	if (ret) {
		iommu_domain_free(domain);
		goto out;
	}

	/* Make the domain the default for this group */
	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	group->default_domain = domain;

	iommu_group_create_direct_mappings(group, dev);

	dev_info(dev, "Using iommu %s mapping\n",
		 type == IOMMU_DOMAIN_DMA ? "dma" : "direct");

	ret = 0;
out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}

/* Request that a device is direct mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
	return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
}

/* Request that a device can't be direct mapped by the IOMMU */
int iommu_request_dma_domain_for_dev(struct device *dev)
{
	return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
}
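
/*
 * Select the global default domain type (identity/passthrough vs. DMA
 * translated). When @cmd_line is true the choice came from the kernel
 * command line and is recorded via iommu_set_cmd_line_dma_api() so that
 * iommu_subsys_init() will not override it with the Kconfig default.
 */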
void iommu_set_default_passthrough(bool cmd_line)
{
	if (cmd_line)
		iommu_set_cmd_line_dma_api();

	iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
}

void iommu_set_default_translated(bool cmd_line)
{
	if (cmd_line)
		iommu_set_cmd_line_dma_api();

	iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

bool iommu_default_passthrough(void)
{
	return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);
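
/*
 * Look up the iommu_ops of a registered IOMMU instance by its firmware node
 * (used by the OF and ACPI glue code to resolve a device's IOMMU reference
 * to a driver). Returns NULL if no IOMMU has been registered for @fwnode.
 */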
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_device *iommu;

	spin_lock(&iommu_device_lock);
	list_for_each_entry(iommu, &iommu_device_list, list)
		if (iommu->fwnode == fwnode) {
			ops = iommu->ops;
			break;
		}
	spin_unlock(&iommu_device_lock);
	return ops;
}
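
/*
 * Per-device firmware IOMMU description (struct iommu_fwspec): records
 * which IOMMU instance a device sits behind and the IDs (stream IDs,
 * requester IDs, ...) it uses on that IOMMU. iommu_fwspec_init() allocates
 * the structure, iommu_fwspec_add_ids() appends IDs (reallocating as
 * needed) and iommu_fwspec_free() releases it again.
 */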
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec)
		return ops == fwspec->ops ? 0 : -EINVAL;

	fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	of_node_get(to_of_node(iommu_fwnode));
	fwspec->iommu_fwnode = iommu_fwnode;
	fwspec->ops = ops;
	dev_iommu_fwspec_set(dev, fwspec);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev_iommu_fwspec_set(dev, NULL);
	}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	size_t size;
	int i;

	if (!fwspec)
		return -EINVAL;

	size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
	if (size > sizeof(*fwspec)) {
		fwspec = krealloc(fwspec, size, GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;

		dev_iommu_fwspec_set(dev, fwspec);
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids += num_ids;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);

/*
 * Per device IOMMU features.
 */
bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_has_feat)
		return ops->dev_has_feat(dev, feat);

	return false;
}
EXPORT_SYMBOL_GPL(iommu_dev_has_feature);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_enable_feat)
		return ops->dev_enable_feat(dev, feat);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);

/*
 * The device drivers should do the necessary cleanups before calling this.
 * For example, before disabling the aux-domain feature, the device driver
 * should detach all aux-domains. Otherwise, this will return -EBUSY.
 */
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_disable_feat)
		return ops->dev_disable_feat(dev, feat);

	return -EBUSY;
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);

bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_feat_enabled)
		return ops->dev_feat_enabled(dev, feat);

	return false;
}
EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);

/*
 * Aux-domain specific attach/detach.
 *
 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
 * true. Also, as long as domains are attached to a device through this
 * interface, any attempt to call iommu_attach_device() should fail
 * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
 * This should make us safe against a device being attached to a guest as a
 * whole while there are still pasid users on it (aux and sva).
 */
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
	int ret = -ENODEV;

	if (domain->ops->aux_attach_dev)
		ret = domain->ops->aux_attach_dev(domain, dev);

	if (!ret)
		trace_attach_device_to_domain(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_attach_device);

void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
	if (domain->ops->aux_detach_dev) {
		domain->ops->aux_detach_dev(domain, dev);
		trace_detach_device_from_domain(dev);
	}
}
EXPORT_SYMBOL_GPL(iommu_aux_detach_device);

int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	int ret = -ENODEV;

	if (domain->ops->aux_get_pasid)
		ret = domain->ops->aux_get_pasid(domain, dev);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
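
/*
 * Illustrative sketch (not taken from an in-tree driver; names such as
 * "vdev" are hypothetical): a driver that wants a PASID-tagged context
 * would enable the AUX feature, attach an unmanaged domain as an aux
 * domain, and then query the PASID to program into its hardware:
 *
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX))
 *		return -ENODEV;
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	ret = iommu_aux_attach_device(domain, dev);
 *	if (!ret)
 *		vdev->pasid = iommu_aux_get_pasid(domain, dev);
 */
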
/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to it
 * @drvdata: opaque data pointer to pass to the bind callback
 *
 * Create a bond between device and address space, allowing the device to access
 * the mm using the returned PASID. If a bond already exists between @dev and
 * @mm, it is returned and an additional reference is taken. Caller must call
 * iommu_sva_unbind_device() to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct iommu_group *group;
	struct iommu_sva *handle = ERR_PTR(-EINVAL);
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_bind)
		return ERR_PTR(-ENODEV);

	group = iommu_group_get(dev);
	if (!group)
		return ERR_PTR(-ENODEV);

	/* Ensure device count and domain don't change while we're binding */
	mutex_lock(&group->mutex);

	/*
	 * To keep things simple, SVA currently doesn't support IOMMU groups
	 * with more than one device. Existing SVA-capable systems are not
	 * affected by the problems that required IOMMU groups (lack of ACS
	 * isolation, device ID aliasing and other hardware issues).
	 */
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	handle = ops->sva_bind(dev, mm, drvdata);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return handle;
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transactions for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_group *group;
	struct device *dev = handle->dev;
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_unbind)
		return;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	ops->sva_unbind(handle);
	mutex_unlock(&group->mutex);

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
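
/*
 * iommu_sva_set_ops() attaches a set of driver callbacks (struct
 * iommu_sva_ops) to a bond; once set, the ops cannot be replaced with a
 * different set (-EEXIST). iommu_sva_get_pasid() returns the PASID backing
 * the bond, or IOMMU_PASID_INVALID if the IOMMU driver cannot report it.
 */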
int iommu_sva_set_ops(struct iommu_sva *handle,
		      const struct iommu_sva_ops *sva_ops)
{
	if (handle->ops && handle->ops != sva_ops)
		return -EEXIST;

	handle->ops = sva_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_sva_set_ops);

int iommu_sva_get_pasid(struct iommu_sva *handle)
{
	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;

	if (!ops || !ops->sva_get_pasid)
		return IOMMU_PASID_INVALID;

	return ops->sva_get_pasid(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
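
/*
 * Illustrative SVA lifecycle sketch (hypothetical driver code; "my_sva" and
 * the error handling are placeholders): enable the SVA feature once per
 * device, bind the current process, program the returned PASID into the
 * hardware, and unbind when the context is torn down.
 *
 *	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	if (ret)
 *		return ret;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 *	my_sva->pasid = iommu_sva_get_pasid(handle);
 *	...
 *	iommu_sva_unbind_device(handle);
 */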