/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <trace/events/iommu.h>
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH
static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
#else
static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
#endif
struct iommu_callback_data {
	const struct iommu_ops *ops;
};

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};
struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};
static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]	= "direct",
	[IOMMU_RESV_RESERVED]	= "reserved",
	[IOMMU_RESV_MSI]	= "msi",
	[IOMMU_RESV_SW_MSI]	= "msi",
};
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)
static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

int iommu_device_register(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);

	return 0;
}

void iommu_device_unregister(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);
static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);
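
/*
 * Note (illustrative, not part of the original file): booting with
 * "iommu.passthrough=1" on the kernel command line makes new default
 * domains identity-mapped (DMA bypasses translation), while "=0" keeps
 * the IOMMU-backed DMA domain type selected above.
 */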
static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}
static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}
/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * The new element is sorted by address with respect to the other
 * regions of the same type. In case it overlaps with another
 * region of the same type, regions are merged. In case it
 * overlaps with another region of different type, regions are
 * not merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *region;
	phys_addr_t start = new->start;
	phys_addr_t end = new->start + new->length - 1;
	struct list_head *pos = regions->next;

	while (pos != regions) {
		struct iommu_resv_region *entry =
			list_entry(pos, struct iommu_resv_region, list);
		phys_addr_t a = entry->start;
		phys_addr_t b = entry->start + entry->length - 1;
		int type = entry->type;

		if (end < a) {
			goto insert;
		} else if (start > b) {
			pos = pos->next;
		} else if ((start >= a) && (end <= b)) {
			if (new->type == type)
				goto done;
			else
				pos = pos->next;
		} else {
			if (new->type == type) {
				phys_addr_t new_start = min(a, start);
				phys_addr_t new_end = max(b, end);

				list_del(&entry->list);
				entry->start = new_start;
				entry->length = new_end - new_start + 1;
				iommu_insert_resv_region(entry, regions);
			} else {
				pos = pos->next;
			}
		}
	}
insert:
	region = iommu_alloc_resv_region(new->start, new->length,
					 new->prot, new->type);
	if (!region)
		return -ENOMEM;

	list_add_tail(&region->list, pos);
done:
	return 0;
}
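
/*
 * Illustrative sketch (not part of the original file): how two overlapping
 * reserved regions of the same type are merged by iommu_insert_resv_region().
 * The helper below is hypothetical and exists only to demonstrate the merge
 * semantics documented above; real callers also free the resulting list.
 */
static __maybe_unused void iommu_resv_merge_example(void)
{
	struct iommu_resv_region *a, *b;
	LIST_HEAD(regions);

	/* [0x1000, 0x2fff] and [0x2000, 0x3fff], same type and prot */
	a = iommu_alloc_resv_region(0x1000, 0x2000, 0, IOMMU_RESV_DIRECT);
	b = iommu_alloc_resv_region(0x2000, 0x2000, 0, IOMMU_RESV_DIRECT);
	if (!a || !b)
		return;

	/* After both inserts the list holds one region: [0x1000, 0x3fff] */
	iommu_insert_resv_region(a, &regions);
	iommu_insert_resv_region(b, &regions);

	/* iommu_insert_resv_region() copies its argument, so free the inputs */
	kfree(a);
	kfree(b);
}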
static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}
int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
						region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}
static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown\n";

	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked\n";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity\n";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged\n";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA\n";
			break;
		}
	}
	strcpy(buf, type);

	return strlen(type);
}
static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);
static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_simple_remove(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}
static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};
/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		ida_simple_remove(&iommu_group_ida, group->id);
		kfree(group);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret)
		return ERR_PTR(ret);

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret)
		return ERR_PTR(ret);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
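
/*
 * Illustrative sketch (not part of the original file): the typical way an
 * IOMMU driver's device-add path uses the group API above when it wants one
 * group per device. The helper name is hypothetical.
 */
static __maybe_unused int example_driver_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = iommu_group_add_device(group, dev);

	/* Drop the allocation reference; the device now pins the group */
	iommu_group_put(group);

	return ret;
}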
struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
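
/*
 * Illustrative sketch (not part of the original file): storing driver-private
 * state in a group with a release callback. "example_group_state" and the
 * helper are hypothetical; the release runs from iommu_group_release() when
 * the last group reference is dropped.
 */
struct example_group_state {
	int stream_id;
};

static void example_group_state_release(void *iommu_data)
{
	kfree(iommu_data);
}

static __maybe_unused int example_attach_group_state(struct iommu_group *group)
{
	struct example_group_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	iommu_group_set_iommudata(group, state, example_group_state_release);

	return 0;
}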
/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);
static int iommu_group_create_direct_mappings(struct iommu_group *group,
					      struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;

		if (domain->ops->apply_resv_region)
			domain->ops->apply_resv_region(dev, domain, entry);

		start = ALIGN(entry->start, pg_size);
		end   = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT)
			continue;

		for (addr = start; addr < end; addr += pg_size) {
			phys_addr_t phys_addr;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (phys_addr)
				continue;

			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
			if (ret)
				goto out;
		}

	}

	iommu_flush_tlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}
/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	iommu_group_create_direct_mappings(group, dev);

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain)
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *tmp_device, *device = NULL;

	pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);
static int iommu_group_device_count(struct iommu_group *group)
{
	struct group_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}
/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
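
/*
 * Illustrative sketch (not part of the original file): a caller-supplied
 * callback for iommu_group_for_each_dev(). Both helper names below are
 * hypothetical.
 */
static int example_count_one(struct device *dev, void *data)
{
	int *count = data;

	(*count)++;
	return 0; /* a non-zero return would stop the iteration */
}

static __maybe_unused int example_count_group_devices(struct iommu_group *group)
{
	int count = 0;

	iommu_group_for_each_dev(group, &count, example_count_one);
	return count;
}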
/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);
/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);
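
/*
 * Illustrative sketch (not part of the original file): listening for device
 * add/del events on a group. The callback and its "just log it" behaviour
 * are hypothetical; the action values come from include/linux/iommu.h as
 * noted in the kernel-doc above.
 */
static int example_group_notify(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
		dev_info(dev, "joined iommu group\n");
	else if (action == IOMMU_GROUP_NOTIFY_DEL_DEVICE)
		dev_info(dev, "left iommu group\n");

	return NOTIFY_OK;
}

static __maybe_unused struct notifier_block example_group_nb = {
	.notifier_call = example_group_notify,
};

/* Registered with: iommu_group_register_notifier(group, &example_group_nb); */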
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
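
/*
 * Illustrative sketch (not part of the original file): REQ_ACS_FLAGS is fed
 * to helpers such as pci_acs_enabled()/pci_acs_path_enabled() to decide
 * whether a function is isolated. This hypothetical helper mirrors the test
 * used by get_pci_function_alias_group() below for a single device.
 */
static __maybe_unused bool example_pci_func_is_isolated(struct pci_dev *pdev)
{
	/* The multifunction check below only matters for on-package peers */
	if (!pdev->multifunction)
		return true;

	return pci_acs_enabled(pdev, REQ_ACS_FLAGS);
}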
/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups.  DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at pcie
 * device, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases.  If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any.  No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}
/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	if (!ops)
		return ERR_PTR(-EINVAL);

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);

	if (IS_ERR(group))
		return group;

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver.
	 */
	if (!group->default_domain) {
		struct iommu_domain *dom;

		dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
		if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
			dev_warn(dev,
				 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
				 iommu_def_domain_type);
			dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
		}

		group->default_domain = dom;
		if (!group->domain)
			group->domain = dom;
	}

	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}
struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int add_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;
	int ret;

	if (!ops->add_device)
		return 0;

	WARN_ON(dev->iommu_group);

	ret = ops->add_device(dev);

	/*
	 * We ignore -ENODEV errors for now, as they just mean that the
	 * device is not translated by an IOMMU. We still care about
	 * other errors and fail to initialize when they happen.
	 */
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;

	if (ops->remove_device && dev->iommu_group)
		ops->remove_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	unsigned long group_action = 0;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (ops->add_device) {
			int ret;

			ret = ops->add_device(dev);
			return (ret) ? NOTIFY_DONE : NOTIFY_OK;
		}
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		if (ops->remove_device && dev->iommu_group) {
			ops->remove_device(dev);
			return 0;
		}
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}
static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;
	struct notifier_block *nb;
	struct iommu_callback_data cb = {
		.ops = ops,
	};

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = iommu_bus_notifier;

	err = bus_register_notifier(bus, nb);
	if (err)
		goto out_free;

	err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
	if (err)
		goto out_err;

	return 0;

out_err:
	/* Clean up */
	bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
	bus_unregister_notifier(bus, nb);

out_free:
	kfree(nb);

	return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;

	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	err = iommu_bus_init(bus, ops);
	if (err)
		bus->iommu_ops = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
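
/*
 * Illustrative sketch (not part of the original file): an IOMMU driver
 * registering its callbacks for the PCI bus once its hardware has probed.
 * "example_iommu_ops" is hypothetical; a real driver fills in domain_alloc,
 * attach_dev, map, unmap and friends before making this call.
 */
static const struct iommu_ops example_iommu_ops;

static __maybe_unused int example_iommu_driver_init(void)
{
	/* Fails with -EBUSY if another driver already claimed the bus */
	return bus_set_iommu(&pci_bus_type, &example_iommu_ops);
}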
bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	if (!bus->iommu_ops || !bus->iommu_ops->capable)
		return false;

	return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);
/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
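
/*
 * Illustrative sketch (not part of the original file): a minimal fault
 * handler wired up with iommu_set_fault_handler(). The handler name and the
 * "just log it" policy are hypothetical.
 */
static __maybe_unused int example_fault_handler(struct iommu_domain *domain,
						struct device *dev,
						unsigned long iova,
						int flags, void *token)
{
	dev_err(dev, "iommu fault at iova 0x%lx (flags 0x%x)\n", iova, flags);

	/* Non-zero keeps report_iommu_fault() treating this as unhandled */
	return -ENOSYS;
}

/* Installed with: iommu_set_fault_handler(domain, example_fault_handler, NULL); */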
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->ops  = bus->iommu_ops;
	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;

	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if ((domain->ops->is_attach_deferred != NULL) &&
	    domain->ops->is_attach_deferred(domain, dev))
		return 0;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	/*
	 * Lock the group to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if ((domain->ops->is_attach_deferred != NULL) &&
	    domain->ops->is_attach_deferred(domain, dev))
		return;

	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		WARN_ON(1);
		goto out_unlock;
	}

	__iommu_detach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->default_domain && group->domain != group->default_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group)
{
	int ret;

	if (!group->default_domain) {
		__iommu_group_for_each_dev(group, domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return;
	}

	if (group->domain == group->default_domain)
		return;

	/* Detach by re-attaching to the default domain */
	ret = __iommu_group_for_each_dev(group, group->default_domain,
					 iommu_group_do_attach_device);
	if (ret != 0)
		WARN_ON(1);
	else
		group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_detach_group(domain, group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);
phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* need to consider alignment requirements ? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);
		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= domain->pgsize_bitmap;

	/* make sure we're still sane */
	BUG_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}
int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			 iova, &paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
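
/*
 * Illustrative sketch (not part of the original file): the unmanaged-domain
 * lifecycle around iommu_map()/iommu_unmap(). The helper, the IOVA and the
 * page size are hypothetical; error handling is trimmed to the essentials.
 */
static __maybe_unused int example_map_one_page(struct device *dev,
					       phys_addr_t paddr)
{
	struct iommu_domain *domain;
	const unsigned long iova = 0x100000;
	int ret;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENODEV;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* iova, paddr and size must all be aligned to a supported page size */
	ret = iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
	if (!ret)
		iommu_unmap(domain, iova, SZ_4K);

	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}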
static size_t __iommu_unmap(struct iommu_domain *domain,
			    unsigned long iova, size_t size,
			    bool sync)
{
	const struct iommu_ops *ops = domain->ops;
	size_t unmapped_page, unmapped = 0;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;

	if (unlikely(ops->unmap == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return 0;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return 0;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return 0;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

		unmapped_page = ops->unmap(domain, iova, pgsize);
		if (!unmapped_page)
			break;

		if (sync && ops->iotlb_range_add)
			ops->iotlb_range_add(domain, iova, pgsize);

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	if (sync && ops->iotlb_sync)
		ops->iotlb_sync(domain);

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}

size_t iommu_unmap(struct iommu_domain *domain,
		   unsigned long iova, size_t size)
{
	return __iommu_unmap(domain, iova, size, true);
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t iommu_unmap_fast(struct iommu_domain *domain,
			unsigned long iova, size_t size)
{
	return __iommu_unmap(domain, iova, size, false);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
		    struct scatterlist *sg, unsigned int nents, int prot)
{
	struct scatterlist *s;
	size_t mapped = 0;
	unsigned int i, min_pagesz;
	int ret;

	if (unlikely(domain->pgsize_bitmap == 0UL))
		return 0;

	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

		/*
		 * We are mapping on IOMMU page boundaries, so offset within
		 * the page must be 0. However, the IOMMU may support pages
		 * smaller than PAGE_SIZE, so s->offset may still represent
		 * an offset of that boundary within the CPU page.
		 */
		if (!IS_ALIGNED(s->offset, min_pagesz))
			goto out_err;

		ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
		if (ret)
			goto out_err;

		mapped += s->length;
	}

	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return 0;

}
EXPORT_SYMBOL_GPL(iommu_map_sg);
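
/*
 * Illustrative sketch (not part of the original file): mapping a scatterlist
 * and checking the failure convention above, where iommu_map_sg() undoes any
 * partial work and returns less than the list's total length. The helper and
 * its parameters are hypothetical.
 */
static __maybe_unused int example_map_sg(struct iommu_domain *domain,
					 unsigned long iova,
					 struct scatterlist *sg,
					 unsigned int nents, size_t total)
{
	size_t mapped;

	mapped = iommu_map_sg(domain, iova, sg, nents, IOMMU_READ);
	if (mapped < total)
		return -ENOMEM; /* iommu_map_sg() already unmapped its work */

	return 0;
}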
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
		       unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	/*
	 * if upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
				      domain->handler_token);

	trace_io_page_fault(dev, iova, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);
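
/*
 * Illustrative sketch (not part of the original file): how a low-level
 * driver might forward a translation fault to the framework. The helper is
 * hypothetical and the fault details would come from hardware registers.
 */
static __maybe_unused void example_forward_fault(struct iommu_domain *domain,
						 struct device *dev,
						 unsigned long iova)
{
	/* IOMMU_FAULT_READ/WRITE would come from the hardware fault record */
	if (report_iommu_fault(domain, dev, iova, IOMMU_FAULT_READ) == -ENOSYS)
		pr_err_ratelimited("unhandled iommu fault at 0x%lx\n", iova);
}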
static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	iommu_debugfs_setup();

	return 0;
}
core_initcall(iommu_init);
int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry  = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging  = data;
		*paging = (domain->pgsize_bitmap != 0UL);
		break;
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_get_windows != NULL)
			*count = domain->ops->domain_get_windows(domain);
		else
			ret = -ENODEV;

		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);

int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_set_windows != NULL)
			ret = domain->ops->domain_set_windows(domain, *count);
		else
			ret = -ENODEV;

		break;
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}

void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_resv_regions)
		ops->put_resv_regions(dev, list);
}

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type)
{
	struct iommu_resv_region *region;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}
/* Request that a device is direct mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
	struct iommu_domain *dm_domain;
	struct iommu_group *group;
	int ret;

	/* Device must already be in a group before calling this function */
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	mutex_lock(&group->mutex);

	/* Check if the default domain is already direct mapped */
	ret = 0;
	if (group->default_domain &&
	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
		goto out;

	/* Don't change mappings of existing devices */
	ret = -EBUSY;
	if (iommu_group_device_count(group) != 1)
		goto out;

	/* Allocate a direct mapped domain */
	ret = -ENOMEM;
	dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
	if (!dm_domain)
		goto out;

	/* Attach the device to the domain */
	ret = __iommu_attach_group(dm_domain, group);
	if (ret) {
		iommu_domain_free(dm_domain);
		goto out;
	}

	/* Make the direct mapped domain the default for this group */
	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	group->default_domain = dm_domain;

	pr_info("Using direct mapping for device %s\n", dev_name(dev));

	ret = 0;
out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_device *iommu;

	spin_lock(&iommu_device_lock);
	list_for_each_entry(iommu, &iommu_device_list, list)
		if (iommu->fwnode == fwnode) {
			ops = iommu->ops;
			break;
		}
	spin_unlock(&iommu_device_lock);
	return ops;
}
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (fwspec)
		return ops == fwspec->ops ? 0 : -EINVAL;

	fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	of_node_get(to_of_node(iommu_fwnode));
	fwspec->iommu_fwnode = iommu_fwnode;
	fwspec->ops = ops;
	dev->iommu_fwspec = fwspec;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev->iommu_fwspec = NULL;
	}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	size_t size;
	int i;

	if (!fwspec)
		return -EINVAL;

	size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
	if (size > sizeof(*fwspec)) {
		fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;

		dev->iommu_fwspec = fwspec;
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids += num_ids;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
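
/*
 * Illustrative sketch (not part of the original file): how a bus/firmware
 * layer typically builds a device's fwspec before probe. The helper, fwnode,
 * ops and stream ID below are placeholders, not values any real binding
 * defines.
 */
static __maybe_unused int example_build_fwspec(struct device *dev,
					       struct fwnode_handle *fwnode,
					       const struct iommu_ops *ops)
{
	u32 sid = 42; /* hypothetical stream ID from firmware tables */
	int ret;

	ret = iommu_fwspec_init(dev, fwnode, ops);
	if (ret)
		return ret;

	ret = iommu_fwspec_add_ids(dev, &sid, 1);
	if (ret)
		iommu_fwspec_free(dev);

	return ret;
}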