// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = true;
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
        struct kobject kobj;
        struct kobject *devices_kobj;
        struct list_head devices;
        struct mutex mutex;
        struct blocking_notifier_head notifier;
        void *iommu_data;
        void (*iommu_data_release)(void *iommu_data);
        char *name;
        int id;
        struct iommu_domain *default_domain;
        struct iommu_domain *domain;
};

struct group_device {
        struct list_head list;
        struct device *dev;
        char *name;
};

struct iommu_group_attribute {
        struct attribute attr;
        ssize_t (*show)(struct iommu_group *group, char *buf);
        ssize_t (*store)(struct iommu_group *group,
                         const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
        [IOMMU_RESV_DIRECT]             = "direct",
        [IOMMU_RESV_DIRECT_RELAXABLE]   = "direct-relaxable",
        [IOMMU_RESV_RESERVED]           = "reserved",
        [IOMMU_RESV_MSI]                = "msi",
        [IOMMU_RESV_SW_MSI]             = "msi",
};

#define IOMMU_CMD_LINE_DMA_API          BIT(0)

static void iommu_set_cmd_line_dma_api(void)
{
        iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
}

static bool iommu_cmd_line_dma_api(void)
{
        return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
}

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)          \
struct iommu_group_attribute iommu_group_attr_##_name =        \
        __ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)      \
        container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)           \
        container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
        switch (t) {
        case IOMMU_DOMAIN_BLOCKED:
                return "Blocked";
        case IOMMU_DOMAIN_IDENTITY:
                return "Passthrough";
        case IOMMU_DOMAIN_UNMANAGED:
                return "Unmanaged";
        case IOMMU_DOMAIN_DMA:
                return "Translated";
        default:
                return "Unknown";
        }
}

static int __init iommu_subsys_init(void)
{
        bool cmd_line = iommu_cmd_line_dma_api();

        if (!cmd_line) {
                if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
                        iommu_set_default_passthrough(false);
                else
                        iommu_set_default_translated(false);

                if (iommu_default_passthrough() && mem_encrypt_active()) {
                        pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
                        iommu_set_default_translated(false);
                }
        }

        pr_info("Default domain type: %s %s\n",
                iommu_domain_type_str(iommu_def_domain_type),
                cmd_line ? "(set via kernel command line)" : "");

        return 0;
}
subsys_initcall(iommu_subsys_init);

int iommu_device_register(struct iommu_device *iommu)
{
        spin_lock(&iommu_device_lock);
        list_add_tail(&iommu->list, &iommu_device_list);
        spin_unlock(&iommu_device_lock);
        return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register);

void iommu_device_unregister(struct iommu_device *iommu)
{
        spin_lock(&iommu_device_lock);
        list_del(&iommu->list);
        spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
        struct dev_iommu *param = dev->iommu;

        if (param)
                return param;

        param = kzalloc(sizeof(*param), GFP_KERNEL);
        if (!param)
                return NULL;

        mutex_init(&param->lock);
        dev->iommu = param;
        return param;
}

static void dev_iommu_free(struct device *dev)
{
        iommu_fwspec_free(dev);
        kfree(dev->iommu);
        dev->iommu = NULL;
}

int iommu_probe_device(struct device *dev)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;
        int ret;

        WARN_ON(dev->iommu_group);
        if (!ops)
                return -EINVAL;

        if (!dev_iommu_get(dev))
                return -ENOMEM;

        if (!try_module_get(ops->owner)) {
                ret = -EINVAL;
                goto err_free_dev_param;
        }

        ret = ops->add_device(dev);
        if (ret)
                goto err_module_put;

        return 0;

err_module_put:
        module_put(ops->owner);
err_free_dev_param:
        dev_iommu_free(dev);
        return ret;
}

void iommu_release_device(struct device *dev)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (dev->iommu_group)
                ops->remove_device(dev);

        module_put(ops->owner);
        dev_iommu_free(dev);
}

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
                                                 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
                                struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
                                 struct iommu_group *group);

static int __init iommu_set_def_domain_type(char *str)
{
        bool pt;
        int ret;

        ret = kstrtobool(str, &pt);
        if (ret)
                return ret;

        if (pt)
                iommu_set_default_passthrough(true);
        else
                iommu_set_default_translated(true);

        return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
        return kstrtobool(str, &iommu_dma_strict);
}
early_param("iommu.strict", iommu_dma_setup);

static ssize_t iommu_group_attr_show(struct kobject *kobj,
                                     struct attribute *__attr, char *buf)
{
        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
        struct iommu_group *group = to_iommu_group(kobj);
        ssize_t ret = -EIO;

        if (attr->show)
                ret = attr->show(group, buf);
        return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
                                      struct attribute *__attr,
                                      const char *buf, size_t count)
{
        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
        struct iommu_group *group = to_iommu_group(kobj);
        ssize_t ret = -EIO;

        if (attr->store)
                ret = attr->store(group, buf, count);
        return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
        .show = iommu_group_attr_show,
        .store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
                                   struct iommu_group_attribute *attr)
{
        return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
                                    struct iommu_group_attribute *attr)
{
        sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
        return sprintf(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
int iommu_insert_resv_region(struct iommu_resv_region *new,
                             struct list_head *regions)
{
        struct iommu_resv_region *iter, *tmp, *nr, *top;
        LIST_HEAD(stack);

        nr = iommu_alloc_resv_region(new->start, new->length,
                                     new->prot, new->type);
        if (!nr)
                return -ENOMEM;

        /* First add the new element based on start address sorting */
        list_for_each_entry(iter, regions, list) {
                if (nr->start < iter->start ||
                    (nr->start == iter->start && nr->type <= iter->type))
                        break;
        }
        list_add_tail(&nr->list, &iter->list);

        /* Merge overlapping segments of type nr->type in @regions, if any */
        list_for_each_entry_safe(iter, tmp, regions, list) {
                phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

                /* no merge needed on elements of different types than @new */
                if (iter->type != new->type) {
                        list_move_tail(&iter->list, &stack);
                        continue;
                }

                /* look for the last stack element of same type as @iter */
                list_for_each_entry_reverse(top, &stack, list)
                        if (top->type == iter->type)
                                goto check_overlap;

                list_move_tail(&iter->list, &stack);
                continue;

check_overlap:
                top_end = top->start + top->length - 1;

                if (iter->start > top_end + 1) {
                        list_move_tail(&iter->list, &stack);
                } else {
                        top->length = max(top_end, iter_end) - top->start + 1;
                        list_del(&iter->list);
                        kfree(iter);
                }
        }
        list_splice(&stack, regions);
        return 0;
}

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
                                 struct list_head *group_resv_regions)
{
        struct iommu_resv_region *entry;
        int ret = 0;

        list_for_each_entry(entry, dev_resv_regions, list) {
                ret = iommu_insert_resv_region(entry, group_resv_regions);
                if (ret)
                        break;
        }
        return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
                                 struct list_head *head)
{
        struct group_device *device;
        int ret = 0;

        mutex_lock(&group->mutex);
        list_for_each_entry(device, &group->devices, list) {
                struct list_head dev_resv_regions;

                INIT_LIST_HEAD(&dev_resv_regions);
                iommu_get_resv_regions(device->dev, &dev_resv_regions);
                ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
                iommu_put_resv_regions(device->dev, &dev_resv_regions);
                if (ret)
                        break;
        }
        mutex_unlock(&group->mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
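
/*
 * Example (editorial sketch, not part of the original file): how a caller
 * might walk the reserved regions of a group gathered by
 * iommu_get_group_resv_regions(). The helper name example_dump_resv_regions()
 * is hypothetical; only APIs declared in this file are used. Compiled out on
 * purpose.
 */
#if 0
static void example_dump_resv_regions(struct iommu_group *group)
{
        struct iommu_resv_region *region, *next;
        LIST_HEAD(resv_regions);

        iommu_get_group_resv_regions(group, &resv_regions);

        /* The list entries are owned by the caller and must be freed. */
        list_for_each_entry_safe(region, next, &resv_regions, list) {
                pr_info("resv region start %pa length 0x%zx type %d\n",
                        &region->start, region->length, region->type);
                kfree(region);
        }
}
#endif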

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
                                             char *buf)
{
        struct iommu_resv_region *region, *next;
        struct list_head group_resv_regions;
        char *str = buf;

        INIT_LIST_HEAD(&group_resv_regions);
        iommu_get_group_resv_regions(group, &group_resv_regions);

        list_for_each_entry_safe(region, next, &group_resv_regions, list) {
                str += sprintf(str, "0x%016llx 0x%016llx %s\n",
                               (long long int)region->start,
                               (long long int)(region->start +
                                                region->length - 1),
                               iommu_group_resv_type_string[region->type]);
                kfree(region);
        }

        return (str - buf);
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
                                     char *buf)
{
        char *type = "unknown\n";

        if (group->default_domain) {
                switch (group->default_domain->type) {
                case IOMMU_DOMAIN_BLOCKED:
                        type = "blocked\n";
                        break;
                case IOMMU_DOMAIN_IDENTITY:
                        type = "identity\n";
                        break;
                case IOMMU_DOMAIN_UNMANAGED:
                        type = "unmanaged\n";
                        break;
                case IOMMU_DOMAIN_DMA:
                        type = "DMA\n";
                        break;
                }
        }
        strcpy(buf, type);

        return strlen(type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
                        iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);

static void iommu_group_release(struct kobject *kobj)
{
        struct iommu_group *group = to_iommu_group(kobj);

        pr_debug("Releasing group %d\n", group->id);

        if (group->iommu_data_release)
                group->iommu_data_release(group->iommu_data);

        ida_simple_remove(&iommu_group_ida, group->id);

        if (group->default_domain)
                iommu_domain_free(group->default_domain);

        kfree(group->name);
        kfree(group);
}

static struct kobj_type iommu_group_ktype = {
        .sysfs_ops = &iommu_group_sysfs_ops,
        .release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
        struct iommu_group *group;
        int ret;

        group = kzalloc(sizeof(*group), GFP_KERNEL);
        if (!group)
                return ERR_PTR(-ENOMEM);

        group->kobj.kset = iommu_group_kset;
        mutex_init(&group->mutex);
        INIT_LIST_HEAD(&group->devices);
        BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

        ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
        if (ret < 0) {
                kfree(group);
                return ERR_PTR(ret);
        }
        group->id = ret;

        ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
                                   NULL, "%d", group->id);
        if (ret) {
                ida_simple_remove(&iommu_group_ida, group->id);
                kobject_put(&group->kobj);
                return ERR_PTR(ret);
        }

        group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
        if (!group->devices_kobj) {
                kobject_put(&group->kobj); /* triggers .release & free */
                return ERR_PTR(-ENOMEM);
        }

        /*
         * The devices_kobj holds a reference on the group kobject, so
         * as long as that exists so will the group.  We can therefore
         * use the devices_kobj for reference counting.
         */
        kobject_put(&group->kobj);

        ret = iommu_group_create_file(group,
                                      &iommu_group_attr_reserved_regions);
        if (ret)
                return ERR_PTR(ret);

        ret = iommu_group_create_file(group, &iommu_group_attr_type);
        if (ret)
                return ERR_PTR(ret);

        pr_debug("Allocated group %d\n", group->id);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);

struct iommu_group *iommu_group_get_by_id(int id)
{
        struct kobject *group_kobj;
        struct iommu_group *group;
        const char *name;

        if (!iommu_group_kset)
                return NULL;

        name = kasprintf(GFP_KERNEL, "%d", id);
        if (!name)
                return NULL;

        group_kobj = kset_find_obj(iommu_group_kset, name);
        kfree(name);

        if (!group_kobj)
                return NULL;

        group = container_of(group_kobj, struct iommu_group, kobj);
        BUG_ON(group->id != id);

        kobject_get(group->devices_kobj);
        kobject_put(&group->kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
        return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
                               void (*release)(void *iommu_data))
{
        group->iommu_data = iommu_data;
        group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
        int ret;

        if (group->name) {
                iommu_group_remove_file(group, &iommu_group_attr_name);
                kfree(group->name);
                group->name = NULL;
                if (!name)
                        return 0;
        }

        group->name = kstrdup(name, GFP_KERNEL);
        if (!group->name)
                return -ENOMEM;

        ret = iommu_group_create_file(group, &iommu_group_attr_name);
        if (ret) {
                kfree(group->name);
                group->name = NULL;
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
                                              struct device *dev)
{
        struct iommu_domain *domain = group->default_domain;
        struct iommu_resv_region *entry;
        struct list_head mappings;
        unsigned long pg_size;
        int ret = 0;

        if (!domain || domain->type != IOMMU_DOMAIN_DMA)
                return 0;

        BUG_ON(!domain->pgsize_bitmap);

        pg_size = 1UL << __ffs(domain->pgsize_bitmap);
        INIT_LIST_HEAD(&mappings);

        iommu_get_resv_regions(dev, &mappings);

        /* We need to consider overlapping regions for different devices */
        list_for_each_entry(entry, &mappings, list) {
                dma_addr_t start, end, addr;

                if (domain->ops->apply_resv_region)
                        domain->ops->apply_resv_region(dev, domain, entry);

                start = ALIGN(entry->start, pg_size);
                end   = ALIGN(entry->start + entry->length, pg_size);

                if (entry->type != IOMMU_RESV_DIRECT &&
                    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
                        continue;

                for (addr = start; addr < end; addr += pg_size) {
                        phys_addr_t phys_addr;

                        phys_addr = iommu_iova_to_phys(domain, addr);
                        if (phys_addr)
                                continue;

                        ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
                        if (ret)
                                goto out;
                }

        }

        iommu_flush_tlb_all(domain);

out:
        iommu_put_resv_regions(dev, &mappings);

        return ret;
}

static bool iommu_is_attach_deferred(struct iommu_domain *domain,
                                     struct device *dev)
{
        if (domain->ops->is_attach_deferred)
                return domain->ops->is_attach_deferred(domain, dev);

        return false;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
        int ret, i = 0;
        struct group_device *device;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device)
                return -ENOMEM;

        device->dev = dev;

        ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
        if (ret)
                goto err_free_device;

        device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
        if (!device->name) {
                ret = -ENOMEM;
                goto err_remove_link;
        }

        ret = sysfs_create_link_nowarn(group->devices_kobj,
                                       &dev->kobj, device->name);
        if (ret) {
                if (ret == -EEXIST && i >= 0) {
                        /*
                         * Account for the slim chance of collision
                         * and append an instance to the name.
                         */
                        kfree(device->name);
                        device->name = kasprintf(GFP_KERNEL, "%s.%d",
                                                 kobject_name(&dev->kobj), i++);
                        goto rename;
                }
                goto err_free_name;
        }

        kobject_get(group->devices_kobj);

        dev->iommu_group = group;

        iommu_group_create_direct_mappings(group, dev);

        mutex_lock(&group->mutex);
        list_add_tail(&device->list, &group->devices);
        if (group->domain  && !iommu_is_attach_deferred(group->domain, dev))
                ret = __iommu_attach_device(group->domain, dev);
        mutex_unlock(&group->mutex);
        if (ret)
                goto err_put_group;

        /* Notify any listeners about change to group. */
        blocking_notifier_call_chain(&group->notifier,
                                     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

        trace_add_device_to_group(group->id, dev);

        dev_info(dev, "Adding to iommu group %d\n", group->id);

        return 0;

err_put_group:
        mutex_lock(&group->mutex);
        list_del(&device->list);
        mutex_unlock(&group->mutex);
        dev->iommu_group = NULL;
        kobject_put(group->devices_kobj);
        sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
        kfree(device->name);
err_remove_link:
        sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
        kfree(device);
        dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
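
/*
 * Example (editorial sketch, not part of the original file): the typical
 * driver-side sequence for placing a device into a freshly allocated group.
 * example_add_dev_to_new_group() is a hypothetical helper with minimal error
 * handling. Compiled out on purpose.
 */
#if 0
static int example_add_dev_to_new_group(struct device *dev)
{
        struct iommu_group *group;
        int ret;

        group = iommu_group_alloc();
        if (IS_ERR(group))
                return PTR_ERR(group);

        ret = iommu_group_add_device(group, dev);

        /* Drop the allocation reference; the device now pins the group. */
        iommu_group_put(group);

        return ret;
}
#endif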

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
        struct iommu_group *group = dev->iommu_group;
        struct group_device *tmp_device, *device = NULL;

        dev_info(dev, "Removing from iommu group %d\n", group->id);

        /* Pre-notify listeners that a device is being removed. */
        blocking_notifier_call_chain(&group->notifier,
                                     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

        mutex_lock(&group->mutex);
        list_for_each_entry(tmp_device, &group->devices, list) {
                if (tmp_device->dev == dev) {
                        device = tmp_device;
                        list_del(&device->list);
                        break;
                }
        }
        mutex_unlock(&group->mutex);

        if (!device)
                return;

        sysfs_remove_link(group->devices_kobj, device->name);
        sysfs_remove_link(&dev->kobj, "iommu_group");

        trace_remove_device_from_group(group->id, dev);

        kfree(device->name);
        kfree(device);
        dev->iommu_group = NULL;
        kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
        struct group_device *entry;
        int ret = 0;

        list_for_each_entry(entry, &group->devices, list)
                ret++;

        return ret;
}

/**
 * __iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
                                      int (*fn)(struct device *, void *))
{
        struct group_device *device;
        int ret = 0;

        list_for_each_entry(device, &group->devices, list) {
                ret = fn(device->dev, data);
                if (ret)
                        break;
        }
        return ret;
}

int iommu_group_for_each_dev(struct iommu_group *group, void *data,
                             int (*fn)(struct device *, void *))
{
        int ret;

        mutex_lock(&group->mutex);
        ret = __iommu_group_for_each_dev(group, data, fn);
        mutex_unlock(&group->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
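
/*
 * Example (editorial sketch, not part of the original file): counting the
 * devices in a group with iommu_group_for_each_dev(). The callback runs with
 * group->mutex held, so it must not add or remove group devices itself.
 * Names prefixed "example_" are hypothetical. Compiled out on purpose.
 */
#if 0
static int example_count_one(struct device *dev, void *data)
{
        int *count = data;

        (*count)++;
        return 0;       /* returning non-zero would stop the iteration */
}

static int example_count_group_devices(struct iommu_group *group)
{
        int count = 0;

        iommu_group_for_each_dev(group, &count, example_count_one);
        return count;
}
#endif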

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
        struct iommu_group *group = dev->iommu_group;

        if (group)
                kobject_get(group->devices_kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
        kobject_get(group->devices_kobj);
        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
        if (group)
                kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
                                  struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
                                    struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response code:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
                                        iommu_dev_fault_handler_t handler,
                                        void *data)
{
        struct dev_iommu *param = dev->iommu;
        int ret = 0;

        if (!param)
                return -EINVAL;

        mutex_lock(&param->lock);
        /* Only allow one fault handler registered for each device */
        if (param->fault_param) {
                ret = -EBUSY;
                goto done_unlock;
        }

        get_device(dev);
        param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
        if (!param->fault_param) {
                put_device(dev);
                ret = -ENOMEM;
                goto done_unlock;
        }
        param->fault_param->handler = handler;
        param->fault_param->data = data;
        mutex_init(&param->fault_param->lock);
        INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
        mutex_unlock(&param->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
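
/*
 * Example (editorial sketch, not part of the original file): registering a
 * device fault handler from a consumer driver. example_iopf_handler() is a
 * hypothetical callback matching iommu_dev_fault_handler_t. Compiled out on
 * purpose.
 */
#if 0
static int example_iopf_handler(struct iommu_fault *fault, void *data)
{
        struct device *dev = data;

        dev_dbg(dev, "fault type %d\n", fault->type);
        /*
         * Recoverable page requests must later be completed with
         * iommu_page_response().
         */
        return 0;
}

static int example_enable_faults(struct device *dev)
{
        return iommu_register_device_fault_handler(dev, example_iopf_handler,
                                                   dev);
}
#endif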

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
        struct dev_iommu *param = dev->iommu;
        int ret = 0;

        if (!param)
                return -EINVAL;

        mutex_lock(&param->lock);

        if (!param->fault_param)
                goto unlock;

        /* we cannot unregister handler if there are pending faults */
        if (!list_empty(&param->fault_param->faults)) {
                ret = -EBUSY;
                goto unlock;
        }

        kfree(param->fault_param);
        param->fault_param = NULL;
        put_device(dev);
unlock:
        mutex_unlock(&param->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
        struct dev_iommu *param = dev->iommu;
        struct iommu_fault_event *evt_pending = NULL;
        struct iommu_fault_param *fparam;
        int ret = 0;

        if (!param || !evt)
                return -EINVAL;

        /* we only report device fault if there is a handler registered */
        mutex_lock(&param->lock);
        fparam = param->fault_param;
        if (!fparam || !fparam->handler) {
                ret = -EINVAL;
                goto done_unlock;
        }

        if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
            (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
                evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
                                      GFP_KERNEL);
                if (!evt_pending) {
                        ret = -ENOMEM;
                        goto done_unlock;
                }
                mutex_lock(&fparam->lock);
                list_add_tail(&evt_pending->list, &fparam->faults);
                mutex_unlock(&fparam->lock);
        }

        ret = fparam->handler(&evt->fault, fparam->data);
        if (ret && evt_pending) {
                mutex_lock(&fparam->lock);
                list_del(&evt_pending->list);
                mutex_unlock(&fparam->lock);
                kfree(evt_pending);
        }
done_unlock:
        mutex_unlock(&param->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
                        struct iommu_page_response *msg)
{
        bool pasid_valid;
        int ret = -EINVAL;
        struct iommu_fault_event *evt;
        struct iommu_fault_page_request *prm;
        struct dev_iommu *param = dev->iommu;
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

        if (!domain || !domain->ops->page_response)
                return -ENODEV;

        if (!param || !param->fault_param)
                return -EINVAL;

        if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
            msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
                return -EINVAL;

        /* Only send response if there is a fault report pending */
        mutex_lock(&param->fault_param->lock);
        if (list_empty(&param->fault_param->faults)) {
                dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
                goto done_unlock;
        }
        /*
         * Check if we have a matching page request pending to respond,
         * otherwise return -EINVAL
         */
        list_for_each_entry(evt, &param->fault_param->faults, list) {
                prm = &evt->fault.prm;
                pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;

                if ((pasid_valid && prm->pasid != msg->pasid) ||
                    prm->grpid != msg->grpid)
                        continue;

                /* Sanitize the reply */
                msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;

                ret = domain->ops->page_response(dev, evt, msg);
                list_del(&evt->list);
                kfree(evt);
                break;
        }

done_unlock:
        mutex_unlock(&param->fault_param->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);
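
/*
 * Example (editorial sketch, not part of the original file): completing a
 * recoverable page request from the consumer's work context. The pasid and
 * grpid values would come from the reported struct iommu_fault; the helper
 * name is hypothetical. Compiled out on purpose.
 */
#if 0
static void example_complete_prq(struct device *dev, u32 pasid, u32 grpid,
                                 bool success)
{
        struct iommu_page_response msg = {
                .version        = IOMMU_PAGE_RESP_VERSION_1,
                .flags          = IOMMU_PAGE_RESP_PASID_VALID,
                .pasid          = pasid,
                .grpid          = grpid,
                .code           = success ? IOMMU_PAGE_RESP_SUCCESS :
                                            IOMMU_PAGE_RESP_INVALID,
        };

        iommu_page_response(dev, &msg);
}
#endif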

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
        return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
                                               unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as it passes through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
                                                        unsigned long *devfns)
{
        struct pci_dev *tmp = NULL;
        struct iommu_group *group;

        if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
                return NULL;

        for_each_pci_dev(tmp) {
                if (tmp == pdev || tmp->bus != pdev->bus ||
                    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
                    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
                        continue;

                group = get_pci_alias_group(tmp, devfns);
                if (group) {
                        pci_dev_put(tmp);
                        return group;
                }
        }

        return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
                                               unsigned long *devfns)
{
        struct pci_dev *tmp = NULL;
        struct iommu_group *group;

        if (test_and_set_bit(pdev->devfn & 0xff, devfns))
                return NULL;

        group = iommu_group_get(&pdev->dev);
        if (group)
                return group;

        for_each_pci_dev(tmp) {
                if (tmp == pdev || tmp->bus != pdev->bus)
                        continue;

                /* We alias them or they alias us */
                if (pci_devs_are_dma_aliases(pdev, tmp)) {
                        group = get_pci_alias_group(tmp, devfns);
                        if (group) {
                                pci_dev_put(tmp);
                                return group;
                        }

                        group = get_pci_function_alias_group(tmp, devfns);
                        if (group) {
                                pci_dev_put(tmp);
                                return group;
                        }
                }
        }

        return NULL;
}

struct group_for_pci_data {
        struct pci_dev *pdev;
        struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
        struct group_for_pci_data *data = opaque;

        data->pdev = pdev;
        data->group = iommu_group_get(&pdev->dev);

        return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
        return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct group_for_pci_data data;
        struct pci_bus *bus;
        struct iommu_group *group = NULL;
        u64 devfns[4] = { 0 };

        if (WARN_ON(!dev_is_pci(dev)))
                return ERR_PTR(-EINVAL);

        /*
         * Find the upstream DMA alias for the device.  A device must not
         * be aliased due to topology in order to have its own IOMMU group.
         * If we find an alias along the way that already belongs to a
         * group, use it.
         */
        if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
                return data.group;

        pdev = data.pdev;

        /*
         * Continue upstream from the point of minimum IOMMU granularity
         * due to aliases to the point where devices are protected from
         * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
         * group, use it.
         */
        for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
                if (!bus->self)
                        continue;

                if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
                        break;

                pdev = bus->self;

                group = iommu_group_get(&pdev->dev);
                if (group)
                        return group;
        }

        /*
         * Look for existing groups on device aliases.  If we alias another
         * device or another device aliases us, use the same group.
         */
        group = get_pci_alias_group(pdev, (unsigned long *)devfns);
        if (group)
                return group;

        /*
         * Look for existing groups on non-isolated functions on the same
         * slot and aliases of those functions, if any.  No need to clear
         * the search bitmap, the tested devfns are still valid.
         */
        group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
        if (group)
                return group;

        /* No shared group found, allocate new */
        return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
        struct device *cont_dev = fsl_mc_cont_dev(dev);
        struct iommu_group *group;

        group = iommu_group_get(cont_dev);
        if (!group)
                group = iommu_group_alloc();
        return group;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;
        struct iommu_group *group;
        int ret;

        group = iommu_group_get(dev);
        if (group)
                return group;

        if (!ops)
                return ERR_PTR(-EINVAL);

        group = ops->device_group(dev);
        if (WARN_ON_ONCE(group == NULL))
                return ERR_PTR(-EINVAL);

        if (IS_ERR(group))
                return group;

        /*
         * Try to allocate a default domain - needs support from the
         * IOMMU driver.
         */
        if (!group->default_domain) {
                struct iommu_domain *dom;

                dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
                if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
                        dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
                        if (dom) {
                                dev_warn(dev,
                                         "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
                                         iommu_def_domain_type);
                        }
                }

                group->default_domain = dom;
                if (!group->domain)
                        group->domain = dom;

                if (dom && !iommu_dma_strict) {
                        int attr = 1;
                        iommu_domain_set_attr(dom,
                                              DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
                                              &attr);
                }
        }

        ret = iommu_group_add_device(group, dev);
        if (ret) {
                iommu_group_put(group);
                return ERR_PTR(ret);
        }

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_for_dev);

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
        return group->default_domain;
}

static int add_iommu_group(struct device *dev, void *data)
{
        int ret = iommu_probe_device(dev);

        /*
         * We ignore -ENODEV errors for now, as they just mean that the
         * device is not translated by an IOMMU. We still care about
         * other errors and fail to initialize when they happen.
         */
        if (ret == -ENODEV)
                ret = 0;

        return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
        iommu_release_device(dev);

        return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
                              unsigned long action, void *data)
{
        unsigned long group_action = 0;
        struct device *dev = data;
        struct iommu_group *group;

        /*
         * ADD/DEL call into iommu driver ops if provided, which may
         * result in ADD/DEL notifiers to group->notifier
         */
        if (action == BUS_NOTIFY_ADD_DEVICE) {
                int ret;

                ret = iommu_probe_device(dev);
                return (ret) ? NOTIFY_DONE : NOTIFY_OK;
        } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
                iommu_release_device(dev);
                return NOTIFY_OK;
        }

        /*
         * Remaining BUS_NOTIFYs get filtered and republished to the
         * group, if anyone is listening
         */
        group = iommu_group_get(dev);
        if (!group)
                return 0;

        switch (action) {
        case BUS_NOTIFY_BIND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
                break;
        case BUS_NOTIFY_BOUND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
                break;
        case BUS_NOTIFY_UNBIND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
                break;
        case BUS_NOTIFY_UNBOUND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
                break;
        }

        if (group_action)
                blocking_notifier_call_chain(&group->notifier,
                                             group_action, dev);

        iommu_group_put(group);
        return 0;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
        int err;
        struct notifier_block *nb;

        nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        nb->notifier_call = iommu_bus_notifier;

        err = bus_register_notifier(bus, nb);
        if (err)
                goto out_free;

        err = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
        if (err)
                goto out_err;

        return 0;

out_err:
        /* Clean up */
        bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
        bus_unregister_notifier(bus, nb);

out_free:
        kfree(nb);

        return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
        int err;

        if (ops == NULL) {
                bus->iommu_ops = NULL;
                return 0;
        }

        if (bus->iommu_ops != NULL)
                return -EBUSY;

        bus->iommu_ops = ops;

        /* Do IOMMU specific setup for this bus-type */
        err = iommu_bus_init(bus, ops);
        if (err)
                bus->iommu_ops = NULL;

        return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);

bool iommu_present(struct bus_type *bus)
{
        return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);
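
/*
 * Example (editorial sketch, not part of the original file): how an IOMMU
 * driver typically publishes its ops for a bus once the hardware has probed.
 * example_iommu_ops, its callbacks, and the use of platform_bus_type (which
 * needs <linux/platform_device.h>) are all hypothetical. Compiled out on
 * purpose.
 */
#if 0
static const struct iommu_ops example_iommu_ops = {
        .domain_alloc   = example_domain_alloc, /* hypothetical callbacks */
        .domain_free    = example_domain_free,
        .attach_dev     = example_attach_dev,
        .map            = example_map,
        .unmap          = example_unmap,
        .device_group   = generic_device_group,
        .pgsize_bitmap  = SZ_4K | SZ_2M,
};

static int example_iommu_probe_done(void)
{
        if (!iommu_present(&platform_bus_type))
                return bus_set_iommu(&platform_bus_type, &example_iommu_ops);
        return 0;
}
#endif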

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
        if (!bus->iommu_ops || !bus->iommu_ops->capable)
                return false;

        return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
                             iommu_fault_handler_t handler,
                             void *token)
{
        BUG_ON(!domain);

        domain->handler = handler;
        domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
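
/*
 * Example (editorial sketch, not part of the original file): installing a
 * per-domain fault handler so that report_iommu_fault() reaches the caller.
 * example_domain_fault() is a hypothetical iommu_fault_handler_t callback.
 * Compiled out on purpose.
 */
#if 0
static int example_domain_fault(struct iommu_domain *domain,
                                struct device *dev, unsigned long iova,
                                int flags, void *token)
{
        dev_err(dev, "unhandled fault at iova 0x%lx (flags 0x%x)\n",
                iova, flags);
        return -ENOSYS; /* fall back to the IOMMU driver's default action */
}

static void example_install_handler(struct iommu_domain *domain, void *token)
{
        iommu_set_fault_handler(domain, example_domain_fault, token);
}
#endif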

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
                                                 unsigned type)
{
        struct iommu_domain *domain;

        if (bus == NULL || bus->iommu_ops == NULL)
                return NULL;

        domain = bus->iommu_ops->domain_alloc(type);
        if (!domain)
                return NULL;

        domain->ops  = bus->iommu_ops;
        domain->type = type;
        /* Assume all sizes by default; the driver may override this later */
        domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;

        return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
        return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
        domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
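
/*
 * Example (editorial sketch, not part of the original file): the classic
 * unmanaged-domain lifecycle used by VFIO-style callers: allocate a domain on
 * the device's bus, attach, map, then tear everything down. The helper name
 * and the IOVA/size values are hypothetical. Compiled out on purpose.
 */
#if 0
static int example_use_domain(struct device *dev, phys_addr_t paddr)
{
        struct iommu_domain *domain;
        const unsigned long iova = 0x100000;
        const size_t size = SZ_4K;
        int ret;

        domain = iommu_domain_alloc(dev->bus);
        if (!domain)
                return -ENODEV;

        ret = iommu_attach_device(domain, dev);
        if (ret)
                goto out_free;

        ret = iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
        if (ret)
                goto out_detach;

        /* ... DMA through the mapping ... */

        iommu_unmap(domain, iova, size);
out_detach:
        iommu_detach_device(domain, dev);
out_free:
        iommu_domain_free(domain);
        return ret;
}
#endif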

static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev)
{
        int ret;

        if (unlikely(domain->ops->attach_dev == NULL))
                return -ENODEV;

        ret = domain->ops->attach_dev(domain, dev);
        if (!ret)
                trace_attach_device_to_domain(dev);
        return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
        struct iommu_group *group;
        int ret;

        group = iommu_group_get(dev);
        if (!group)
                return -ENODEV;

        /*
         * Lock the group to make sure the device-count doesn't
         * change while we are attaching
         */
        mutex_lock(&group->mutex);
        ret = -EINVAL;
        if (iommu_group_device_count(group) != 1)
                goto out_unlock;

        ret = __iommu_attach_group(domain, group);

out_unlock:
        mutex_unlock(&group->mutex);
        iommu_group_put(group);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
                           struct iommu_cache_invalidate_info *inv_info)
{
        if (unlikely(!domain->ops->cache_invalidate))
                return -ENODEV;

        return domain->ops->cache_invalidate(domain, dev, inv_info);
}
EXPORT_SYMBOL_GPL(iommu_cache_invalidate);

int iommu_sva_bind_gpasid(struct iommu_domain *domain,
                          struct device *dev, struct iommu_gpasid_bind_data *data)
{
        if (unlikely(!domain->ops->sva_bind_gpasid))
                return -ENODEV;

        return domain->ops->sva_bind_gpasid(domain, dev, data);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid);

int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
                            ioasid_t pasid)
{
        if (unlikely(!domain->ops->sva_unbind_gpasid))
                return -ENODEV;

        return domain->ops->sva_unbind_gpasid(dev, pasid);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);

static void __iommu_detach_device(struct iommu_domain *domain,
                                  struct device *dev)
{
        if (iommu_is_attach_deferred(domain, dev))
                return;

        if (unlikely(domain->ops->detach_dev == NULL))
                return;

        domain->ops->detach_dev(domain, dev);
        trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
        struct iommu_group *group;

        group = iommu_group_get(dev);
        if (!group)
                return;

        mutex_lock(&group->mutex);
        if (iommu_group_device_count(group) != 1) {
                WARN_ON(1);
                goto out_unlock;
        }

        __iommu_detach_group(domain, group);

out_unlock:
        mutex_unlock(&group->mutex);
        iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
        struct iommu_domain *domain;
        struct iommu_group *group;

        group = iommu_group_get(dev);
        if (!group)
                return NULL;

        domain = group->domain;

        iommu_group_put(group);

        return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * For IOMMU_DOMAIN_DMA implementations which already provide their own
 * guarantees that the group and its default domain are valid and correct.
 */
struct iommu_domain *iommu_get_dma_domain(struct device *dev)
{
        return dev->iommu_group->default_domain;
}

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;

        return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
                                struct iommu_group *group)
{
        int ret;

        if (group->default_domain && group->domain != group->default_domain)
                return -EBUSY;

        ret = __iommu_group_for_each_dev(group, domain,
                                         iommu_group_do_attach_device);
        if (ret == 0)
                group->domain = domain;

        return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
        int ret;

        mutex_lock(&group->mutex);
        ret = __iommu_attach_group(domain, group);
        mutex_unlock(&group->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;

        __iommu_detach_device(domain, dev);

        return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
                                 struct iommu_group *group)
{
        int ret;

        if (!group->default_domain) {
                __iommu_group_for_each_dev(group, domain,
                                           iommu_group_do_detach_device);
                group->domain = NULL;
                return;
        }

        if (group->domain == group->default_domain)
                return;

        /* Detach by re-attaching to the default domain */
        ret = __iommu_group_for_each_dev(group, group->default_domain,
                                         iommu_group_do_attach_device);
        if (ret != 0)
                WARN_ON(1);
        else
                group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
        mutex_lock(&group->mutex);
        __iommu_detach_group(domain, group);
        mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
        if (unlikely(domain->ops->iova_to_phys == NULL))
                return 0;

        return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
                           unsigned long addr_merge, size_t size)
{
        unsigned int pgsize_idx;
        size_t pgsize;

        /* Max page size that still fits into 'size' */
        pgsize_idx = __fls(size);

        /* need to consider alignment requirements ? */
        if (likely(addr_merge)) {
                /* Max page size allowed by address */
                unsigned int align_pgsize_idx = __ffs(addr_merge);
                pgsize_idx = min(pgsize_idx, align_pgsize_idx);
        }

        /* build a mask of acceptable page sizes */
        pgsize = (1UL << (pgsize_idx + 1)) - 1;

        /* throw away page sizes not supported by the hardware */
        pgsize &= domain->pgsize_bitmap;

        /* make sure we're still sane */
        BUG_ON(!pgsize);

        /* pick the biggest page */
        pgsize_idx = __fls(pgsize);
        pgsize = 1UL << pgsize_idx;

        return pgsize;
}

int __iommu_map(struct iommu_domain *domain, unsigned long iova,
                phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
        const struct iommu_ops *ops = domain->ops;
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;
        size_t orig_size = size;
        phys_addr_t orig_paddr = paddr;
        int ret = 0;

        if (unlikely(ops->map == NULL ||
                     domain->pgsize_bitmap == 0UL))
                return -ENODEV;

        if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
                return -EINVAL;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

        /*
         * both the virtual address and the physical one, as well as
         * the size of the mapping, must be aligned (at least) to the
         * size of the smallest page supported by the hardware
         */
        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
                       iova, &paddr, size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

        while (size) {
                size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

                pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
                         iova, &paddr, pgsize);
                ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);

                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        if (ops->iotlb_sync_map)
                ops->iotlb_sync_map(domain);

        /* unroll mapping in case something went wrong */
        if (ret)
                iommu_unmap(domain, orig_iova, orig_size - size);
        else
                trace_map(orig_iova, orig_paddr, orig_size);

        return ret;
}

int iommu_map(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot)
{
        might_sleep();
        return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map);

int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
                     phys_addr_t paddr, size_t size, int prot)
{
        return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iommu_map_atomic);

static size_t __iommu_unmap(struct iommu_domain *domain,
                            unsigned long iova, size_t size,
                            struct iommu_iotlb_gather *iotlb_gather)
{
        const struct iommu_ops *ops = domain->ops;
        size_t unmapped_page, unmapped = 0;
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;

        if (unlikely(ops->unmap == NULL ||
                     domain->pgsize_bitmap == 0UL))
                return 0;

        if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
                return 0;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

        /*
         * The virtual address, as well as the size of the mapping, must be
         * aligned (at least) to the size of the smallest page supported
         * by the hardware
         */
        if (!IS_ALIGNED(iova | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
                       iova, size, min_pagesz);
                return 0;
        }

        pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

        /*
         * Keep iterating until we either unmap 'size' bytes (or more)
         * or we hit an area that isn't mapped.
         */
        while (unmapped < size) {
                size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

                unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
                if (!unmapped_page)
                        break;

                pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
                         iova, unmapped_page);

                iova += unmapped_page;
                unmapped += unmapped_page;
        }

        trace_unmap(orig_iova, size, unmapped);
        return unmapped;
}

size_t iommu_unmap(struct iommu_domain *domain,
                   unsigned long iova, size_t size)
{
        struct iommu_iotlb_gather iotlb_gather;
        size_t ret;

        iommu_iotlb_gather_init(&iotlb_gather);
        ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
        iommu_tlb_sync(domain, &iotlb_gather);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t iommu_unmap_fast(struct iommu_domain *domain,
                        unsigned long iova, size_t size,
                        struct iommu_iotlb_gather *iotlb_gather)
{
        return __iommu_unmap(domain, iova, size, iotlb_gather);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);

size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
                      struct scatterlist *sg, unsigned int nents, int prot,
                      gfp_t gfp)
{
        size_t len = 0, mapped = 0;
        phys_addr_t start;
        unsigned int i = 0;
        int ret;

        while (i <= nents) {
                phys_addr_t s_phys = sg_phys(sg);

                if (len && s_phys != start + len) {
                        ret = __iommu_map(domain, iova + mapped, start,
                                          len, prot, gfp);

                        if (ret)
                                goto out_err;

                        mapped += len;
                        len = 0;
                }

                if (len) {
                        len += sg->length;
                } else {
                        len = sg->length;
                        start = s_phys;
                }

                if (++i < nents)
                        sg = sg_next(sg);
        }

        return mapped;

out_err:
        /* undo mappings already done */
        iommu_unmap(domain, iova, mapped);

        return 0;

}

size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
                    struct scatterlist *sg, unsigned int nents, int prot)
{
        might_sleep();
        return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map_sg);
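
/*
 * Example (editorial sketch, not part of the original file): mapping a
 * scatterlist contiguously in IOVA space and undoing it again. The iova base
 * and the helper name are hypothetical. Compiled out on purpose.
 */
#if 0
static int example_map_table(struct iommu_domain *domain,
                             struct scatterlist *sgl, unsigned int nents)
{
        const unsigned long iova = 0x200000;
        size_t mapped;

        mapped = iommu_map_sg(domain, iova, sgl, nents,
                              IOMMU_READ | IOMMU_WRITE);
        if (!mapped)
                return -ENOMEM;

        /* ... use the mapping, then tear it down ... */
        iommu_unmap(domain, iova, mapped);
        return 0;
}
#endif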

size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
                           struct scatterlist *sg, unsigned int nents, int prot)
{
        return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
                               phys_addr_t paddr, u64 size, int prot)
{
        if (unlikely(domain->ops->domain_window_enable == NULL))
                return -ENODEV;

        return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
                                                 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
        if (unlikely(domain->ops->domain_window_disable == NULL))
                return;

        return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
                       unsigned long iova, int flags)
{
        int ret = -ENOSYS;

        /*
         * if upper layers showed interest and installed a fault handler,
         * invoke it.
         */
        if (domain->handler)
                ret = domain->handler(domain, dev, iova, flags,
                                      domain->handler_token);

        trace_io_page_fault(dev, iova, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);

static int __init iommu_init(void)
{
        iommu_group_kset = kset_create_and_add("iommu_groups",
                                               NULL, kernel_kobj);
        BUG_ON(!iommu_group_kset);

        iommu_debugfs_setup();

        return 0;
}
core_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
                          enum iommu_attr attr, void *data)
{
        struct iommu_domain_geometry *geometry;
        bool *paging;
        int ret = 0;

        switch (attr) {
        case DOMAIN_ATTR_GEOMETRY:
                geometry  = data;
                *geometry = domain->geometry;

                break;
        case DOMAIN_ATTR_PAGING:
                paging  = data;
                *paging = (domain->pgsize_bitmap != 0UL);
                break;
        default:
                if (!domain->ops->domain_get_attr)
                        return -EINVAL;

                ret = domain->ops->domain_get_attr(domain, attr, data);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);

int iommu_domain_set_attr(struct iommu_domain *domain,
                          enum iommu_attr attr, void *data)
{
        int ret = 0;

        switch (attr) {
        default:
                if (domain->ops->domain_set_attr == NULL)
                        return -EINVAL;

                ret = domain->ops->domain_set_attr(domain, attr, data);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (ops && ops->get_resv_regions)
                ops->get_resv_regions(dev, list);
}

void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (ops && ops->put_resv_regions)
                ops->put_resv_regions(dev, list);
}

/**
 * generic_iommu_put_resv_regions - Reserved region driver helper
 * @dev: device for which to free reserved regions
 * @list: reserved region list for device
 *
 * IOMMU drivers can use this to implement their .put_resv_regions() callback
 * for simple reservations. Memory allocated for each reserved region will be
 * freed. If an IOMMU driver allocates additional resources per region, it is
 * going to have to implement a custom callback.
 */
void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
        struct iommu_resv_region *entry, *next;

        list_for_each_entry_safe(entry, next, list, list)
                kfree(entry);
}
EXPORT_SYMBOL(generic_iommu_put_resv_regions);

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
                                                  size_t length, int prot,
                                                  enum iommu_resv_type type)
{
        struct iommu_resv_region *region;

        region = kzalloc(sizeof(*region), GFP_KERNEL);
        if (!region)
                return NULL;

        INIT_LIST_HEAD(&region->list);
        region->start = start;
        region->length = length;
        region->prot = prot;
        region->type = type;
        return region;
}
EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
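
/*
 * Example (editorial sketch, not part of the original file): how an IOMMU
 * driver's .get_resv_regions() callback could report a software MSI window
 * via iommu_alloc_resv_region(). The base/size constants and the function
 * name are hypothetical. Compiled out on purpose.
 */
#if 0
#define EXAMPLE_MSI_BASE        0x08000000UL
#define EXAMPLE_MSI_SIZE        SZ_1M

static void example_get_resv_regions(struct device *dev,
                                     struct list_head *head)
{
        struct iommu_resv_region *region;

        region = iommu_alloc_resv_region(EXAMPLE_MSI_BASE, EXAMPLE_MSI_SIZE,
                                         IOMMU_WRITE, IOMMU_RESV_SW_MSI);
        if (region)
                list_add_tail(&region->list, head);
}
#endif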

static int
request_default_domain_for_dev(struct device *dev, unsigned long type)
{
        struct iommu_domain *domain;
        struct iommu_group *group;
        int ret;

        /* Device must already be in a group before calling this function */
        group = iommu_group_get(dev);
        if (!group)
                return -EINVAL;

        mutex_lock(&group->mutex);

        ret = 0;
        if (group->default_domain && group->default_domain->type == type)
                goto out;

        /* Don't change mappings of existing devices */
        ret = -EBUSY;
        if (iommu_group_device_count(group) != 1)
                goto out;

        ret = -ENOMEM;
        domain = __iommu_domain_alloc(dev->bus, type);
        if (!domain)
                goto out;

        /* Attach the device to the domain */
        ret = __iommu_attach_group(domain, group);
        if (ret) {
                iommu_domain_free(domain);
                goto out;
        }

        /* Make the domain the default for this group */
        if (group->default_domain)
                iommu_domain_free(group->default_domain);
        group->default_domain = domain;

        iommu_group_create_direct_mappings(group, dev);

        dev_info(dev, "Using iommu %s mapping\n",
                 type == IOMMU_DOMAIN_DMA ? "dma" : "direct");

        ret = 0;
out:
        mutex_unlock(&group->mutex);
        iommu_group_put(group);

        return ret;
}

/* Request that a device is direct mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
        return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
}

/* Request that a device can't be direct mapped by the IOMMU */
int iommu_request_dma_domain_for_dev(struct device *dev)
{
        return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
}

void iommu_set_default_passthrough(bool cmd_line)
{
        if (cmd_line)
                iommu_set_cmd_line_dma_api();

        iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
}

void iommu_set_default_translated(bool cmd_line)
{
        if (cmd_line)
                iommu_set_cmd_line_dma_api();

        iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

bool iommu_default_passthrough(void)
{
        return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);

const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
        const struct iommu_ops *ops = NULL;
        struct iommu_device *iommu;

        spin_lock(&iommu_device_lock);
        list_for_each_entry(iommu, &iommu_device_list, list)
                if (iommu->fwnode == fwnode) {
                        ops = iommu->ops;
                        break;
                }
        spin_unlock(&iommu_device_lock);
        return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
                      const struct iommu_ops *ops)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

        if (fwspec)
                return ops == fwspec->ops ? 0 : -EINVAL;

        if (!dev_iommu_get(dev))
                return -ENOMEM;

        /* Preallocate for the overwhelmingly common case of 1 ID */
        fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
        if (!fwspec)
                return -ENOMEM;

        of_node_get(to_of_node(iommu_fwnode));
        fwspec->iommu_fwnode = iommu_fwnode;
        fwspec->ops = ops;
        dev_iommu_fwspec_set(dev, fwspec);
        return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

        if (fwspec) {
                fwnode_handle_put(fwspec->iommu_fwnode);
                kfree(fwspec);
                dev_iommu_fwspec_set(dev, NULL);
        }
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        int i, new_num;

        if (!fwspec)
                return -EINVAL;

        new_num = fwspec->num_ids + num_ids;
        if (new_num > 1) {
                fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
                                  GFP_KERNEL);
                if (!fwspec)
                        return -ENOMEM;

                dev_iommu_fwspec_set(dev, fwspec);
        }

        for (i = 0; i < num_ids; i++)
                fwspec->ids[fwspec->num_ids + i] = ids[i];

        fwspec->num_ids = new_num;
        return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
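
/*
 * Example (editorial sketch, not part of the original file): the usual
 * of_xlate-style firmware wiring: initialise the fwspec for a master device
 * and record its stream/device ID. example_iommu_ops and example_of_xlate()
 * are hypothetical, and <linux/of.h> types are assumed. Compiled out on
 * purpose.
 */
#if 0
static int example_of_xlate(struct device *dev, struct of_phandle_args *args)
{
        u32 id = args->args[0];
        int ret;

        ret = iommu_fwspec_init(dev, &args->np->fwnode, &example_iommu_ops);
        if (ret)
                return ret;

        return iommu_fwspec_add_ids(dev, &id, 1);
}
#endif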

/*
 * Per device IOMMU features.
 */
bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (ops && ops->dev_has_feat)
                return ops->dev_has_feat(dev, feat);

        return false;
}
EXPORT_SYMBOL_GPL(iommu_dev_has_feature);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (ops && ops->dev_enable_feat)
                return ops->dev_enable_feat(dev, feat);

        return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);

/*
 * The device drivers should do the necessary cleanups before calling this.
 * For example, before disabling the aux-domain feature, the device driver
 * should detach all aux-domains. Otherwise, this will return -EBUSY.
 */
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (ops && ops->dev_disable_feat)
                return ops->dev_disable_feat(dev, feat);

        return -EBUSY;
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);

bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (ops && ops->dev_feat_enabled)
                return ops->dev_feat_enabled(dev, feat);

        return false;
}
EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);

/*
 * Aux-domain specific attach/detach.
 *
 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
 * true. Also, as long as domains are attached to a device through this
 * interface, any tries to call iommu_attach_device() should fail
 * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
 * This should make us safe against a device being attached to a guest as a
 * whole while there are still pasid users on it (aux and sva).
 */
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
        int ret = -ENODEV;

        if (domain->ops->aux_attach_dev)
                ret = domain->ops->aux_attach_dev(domain, dev);

        if (!ret)
                trace_attach_device_to_domain(dev);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_attach_device);

void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
        if (domain->ops->aux_detach_dev) {
                domain->ops->aux_detach_dev(domain, dev);
                trace_detach_device_from_domain(dev);
        }
}
EXPORT_SYMBOL_GPL(iommu_aux_detach_device);

int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
        int ret = -ENODEV;

        if (domain->ops->aux_get_pasid)
                ret = domain->ops->aux_get_pasid(domain, dev);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to it
 *
 * Create a bond between device and address space, allowing the device to access
 * the mm using the returned PASID. If a bond already exists between @device and
 * @mm, it is returned and an additional reference is taken. Caller must call
 * iommu_sva_unbind_device() to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
        struct iommu_group *group;
        struct iommu_sva *handle = ERR_PTR(-EINVAL);
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (!ops || !ops->sva_bind)
                return ERR_PTR(-ENODEV);

        group = iommu_group_get(dev);
        if (!group)
                return ERR_PTR(-ENODEV);

        /* Ensure device count and domain don't change while we're binding */
        mutex_lock(&group->mutex);

        /*
         * To keep things simple, SVA currently doesn't support IOMMU groups
         * with more than one device. Existing SVA-capable systems are not
         * affected by the problems that required IOMMU groups (lack of ACS
         * isolation, device ID aliasing and other hardware issues).
         */
        if (iommu_group_device_count(group) != 1)
                goto out_unlock;

        handle = ops->sva_bind(dev, mm, drvdata);

out_unlock:
        mutex_unlock(&group->mutex);
        iommu_group_put(group);

        return handle;
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transaction for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
        struct iommu_group *group;
        struct device *dev = handle->dev;
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (!ops || !ops->sva_unbind)
                return;

        group = iommu_group_get(dev);
        if (!group)
                return;

        mutex_lock(&group->mutex);
        ops->sva_unbind(handle);
        mutex_unlock(&group->mutex);

        iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
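
/*
 * Example (editorial sketch, not part of the original file): binding the
 * current process address space to a device for SVA, querying the PASID and
 * releasing the bond again. The feature-enable step, the helper name and the
 * use of current->mm (needs <linux/sched.h>) are assumptions. Compiled out on
 * purpose.
 */
#if 0
static int example_sva_use(struct device *dev)
{
        struct iommu_sva *handle;
        int pasid;

        if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
                return -ENODEV;

        handle = iommu_sva_bind_device(dev, current->mm, NULL);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        pasid = iommu_sva_get_pasid(handle);
        dev_dbg(dev, "bound current mm, pasid %d\n", pasid);

        /* ... program the device with the PASID, do the work ... */

        iommu_sva_unbind_device(handle);
        return 0;
}
#endif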

int iommu_sva_set_ops(struct iommu_sva *handle,
                      const struct iommu_sva_ops *sva_ops)
{
        if (handle->ops && handle->ops != sva_ops)
                return -EEXIST;

        handle->ops = sva_ops;
        return 0;
}
EXPORT_SYMBOL_GPL(iommu_sva_set_ops);

int iommu_sva_get_pasid(struct iommu_sva *handle)
{
        const struct iommu_ops *ops = handle->dev->bus->iommu_ops;

        if (!ops || !ops->sva_get_pasid)
                return IOMMU_PASID_INVALID;

        return ops->sva_get_pasid(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);