/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH
static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
#else
static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
#endif

struct iommu_callback_data {
	const struct iommu_ops *ops;
};

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]	= "direct",
	[IOMMU_RESV_RESERVED]	= "reserved",
	[IOMMU_RESV_MSI]	= "msi",
	[IOMMU_RESV_SW_MSI]	= "msi",
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =	\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

int iommu_device_register(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);

	return 0;
}

void iommu_device_unregister(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * The new element is sorted by address with respect to the other
 * regions of the same type. In case it overlaps with another
 * region of the same type, regions are merged. In case it
 * overlaps with another region of different type, regions are
 * not merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *region;
	phys_addr_t start = new->start;
	phys_addr_t end = new->start + new->length - 1;
	struct list_head *pos = regions->next;

	while (pos != regions) {
		struct iommu_resv_region *entry =
			list_entry(pos, struct iommu_resv_region, list);
		phys_addr_t a = entry->start;
		phys_addr_t b = entry->start + entry->length - 1;
		int type = entry->type;

		if (end < a) {
			goto insert;
		} else if (start > b) {
			/* pass over */
		} else if ((start >= a) && (end <= b)) {
			if (new->type == type)
				goto done;
		} else {
			if (new->type == type) {
				phys_addr_t new_start = min(a, start);
				phys_addr_t new_end = max(b, end);
				int ret;

				list_del(&entry->list);
				entry->start = new_start;
				entry->length = new_end - new_start + 1;
				ret = iommu_insert_resv_region(entry, regions);
				kfree(entry);
				return ret;
			}
		}
		pos = pos->next;
	}
insert:
	region = iommu_alloc_resv_region(new->start, new->length,
					 new->prot, new->type);
	if (!region)
		return -ENOMEM;

	list_add_tail(&region->list, pos);
done:
	return 0;
}

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
						region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown\n";

	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked\n";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity\n";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged\n";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA\n";
			break;
		}
	}
	strcpy(buf, type);

	return strlen(type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_simple_remove(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		ida_simple_remove(&iommu_group_ida, group->id);
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret)
		return ERR_PTR(ret);

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret)
		return ERR_PTR(ret);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
					      struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;

		if (domain->ops->apply_resv_region)
			domain->ops->apply_resv_region(dev, domain, entry);

		start = ALIGN(entry->start, pg_size);
		end   = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT)
			continue;

		for (addr = start; addr < end; addr += pg_size) {
			phys_addr_t phys_addr;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (phys_addr)
				continue;

			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
			if (ret)
				goto out;
		}

	}

	iommu_flush_tlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	iommu_group_create_direct_mappings(group, dev);

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain)
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
	sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *tmp_device, *device = NULL;

	pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct group_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}


int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
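
/*
 * Editor's note: this mask is consumed further down, e.g. to test whether
 * every bridge upstream of "pdev" isolates its downstream traffic:
 *
 *	if (pci_acs_path_enabled(pdev->bus->self, NULL, REQ_ACS_FLAGS))
 *		... pdev is protected from peer-to-peer DMA ...
 */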

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups.  DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases.  If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any.  No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	if (!ops)
		return ERR_PTR(-EINVAL);

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);

	if (IS_ERR(group))
		return group;

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver.
	 */
	if (!group->default_domain) {
		struct iommu_domain *dom;

		dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
		if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
			dev_warn(dev,
				 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
				 iommu_def_domain_type);
			dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
		}

		group->default_domain = dom;
		if (!group->domain)
			group->domain = dom;
	}

	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int add_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;
	int ret;

	if (!ops->add_device)
		return 0;

	WARN_ON(dev->iommu_group);

	ret = ops->add_device(dev);

	/*
	 * We ignore -ENODEV errors for now, as they just mean that the
	 * device is not translated by an IOMMU.  We still care about
	 * other errors and fail to initialize when they happen.
	 */
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;

	if (ops->remove_device && dev->iommu_group)
		ops->remove_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	unsigned long group_action = 0;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (ops->add_device) {
			int ret;

			ret = ops->add_device(dev);
			return (ret) ? NOTIFY_DONE : NOTIFY_OK;
		}
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		if (ops->remove_device && dev->iommu_group) {
			ops->remove_device(dev);
			return 0;
		}
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;
	struct notifier_block *nb;
	struct iommu_callback_data cb = {
		.ops = ops,
	};

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = iommu_bus_notifier;

	err = bus_register_notifier(bus, nb);
	if (err)
		goto out_free;

	err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
	if (err)
		goto out_err;


	return 0;

out_err:
	/* Clean up */
	bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
	bus_unregister_notifier(bus, nb);

out_free:
	kfree(nb);

	return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus.  Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up.  With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;

	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	err = iommu_bus_init(bus, ops);
	if (err)
		bus->iommu_ops = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	if (!bus->iommu_ops || !bus->iommu_ops->capable)
		return false;

	return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->ops  = bus->iommu_ops;
	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;

	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if ((domain->ops->is_attach_deferred != NULL) &&
	    domain->ops->is_attach_deferred(domain, dev))
		return 0;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	/*
	 * Lock the group to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if ((domain->ops->is_attach_deferred != NULL) &&
	    domain->ops->is_attach_deferred(domain, dev))
		return;

	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		WARN_ON(1);
		goto out_unlock;
	}

	__iommu_detach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->default_domain && group->domain != group->default_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group)
{
	int ret;

	if (!group->default_domain) {
		__iommu_group_for_each_dev(group, domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return;
	}

	if (group->domain == group->default_domain)
		return;

	/* Detach by re-attaching to the default domain */
	ret = __iommu_group_for_each_dev(group, group->default_domain,
					 iommu_group_do_attach_device);
	if (ret != 0)
		WARN_ON(1);
	else
		group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_detach_group(domain, group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* need to consider alignment requirements ? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);
		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= domain->pgsize_bitmap;

	/* make sure we're still sane */
	BUG_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			 iova, &paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);

static size_t __iommu_unmap(struct iommu_domain *domain,
			    unsigned long iova, size_t size,
			    bool sync)
{
	const struct iommu_ops *ops = domain->ops;
	size_t unmapped_page, unmapped = 0;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;

	if (unlikely(ops->unmap == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return 0;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return 0;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return 0;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

		unmapped_page = ops->unmap(domain, iova, pgsize);
		if (!unmapped_page)
			break;

		if (sync && ops->iotlb_range_add)
			ops->iotlb_range_add(domain, iova, pgsize);

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	if (sync && ops->iotlb_sync)
		ops->iotlb_sync(domain);

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}

size_t iommu_unmap(struct iommu_domain *domain,
		   unsigned long iova, size_t size)
{
	return __iommu_unmap(domain, iova, size, true);
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t iommu_unmap_fast(struct iommu_domain *domain,
			unsigned long iova, size_t size)
{
	return __iommu_unmap(domain, iova, size, false);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);

size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
		    struct scatterlist *sg, unsigned int nents, int prot)
{
	struct scatterlist *s;
	size_t mapped = 0;
	unsigned int i, min_pagesz;
	int ret;

	if (unlikely(domain->pgsize_bitmap == 0UL))
		return 0;

	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

		/*
		 * We are mapping on IOMMU page boundaries, so offset within
		 * the page must be 0. However, the IOMMU may support pages
		 * smaller than PAGE_SIZE, so s->offset may still represent
		 * an offset of that boundary within the CPU page.
		 */
		if (!IS_ALIGNED(s->offset, min_pagesz))
			goto out_err;

		ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
		if (ret)
			goto out_err;

		mapped += s->length;
	}

	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_map_sg);

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
		       unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	/*
	 * if upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
				      domain->handler_token);

	trace_io_page_fault(dev, iova, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	iommu_debugfs_setup();

	return 0;
}
core_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry  = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging  = data;
		*paging = (domain->pgsize_bitmap != 0UL);
		break;
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_get_windows != NULL)
			*count = domain->ops->domain_get_windows(domain);
		else
			ret = -ENODEV;

		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);

int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_set_windows != NULL)
			ret = domain->ops->domain_set_windows(domain, *count);
		else
			ret = -ENODEV;

		break;
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}

void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_resv_regions)
		ops->put_resv_regions(dev, list);
}

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type)
{
	struct iommu_resv_region *region;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}

/* Request that a device is direct mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
	struct iommu_domain *dm_domain;
	struct iommu_group *group;
	int ret;

	/* Device must already be in a group before calling this function */
	group = iommu_group_get(dev);
	if (!group)
		return -EINVAL;

	mutex_lock(&group->mutex);

	/* Check if the default domain is already direct mapped */
	ret = 0;
	if (group->default_domain &&
	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
		goto out;

	/* Don't change mappings of existing devices */
	ret = -EBUSY;
	if (iommu_group_device_count(group) != 1)
		goto out;

	/* Allocate a direct mapped domain */
	ret = -ENOMEM;
	dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
	if (!dm_domain)
		goto out;

	/* Attach the device to the domain */
	ret = __iommu_attach_group(dm_domain, group);
	if (ret) {
		iommu_domain_free(dm_domain);
		goto out;
	}

	/* Make the direct mapped domain the default for this group */
	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	group->default_domain = dm_domain;

	pr_info("Using direct mapping for device %s\n", dev_name(dev));

	ret = 0;
out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}

const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_device *iommu;

	spin_lock(&iommu_device_lock);
	list_for_each_entry(iommu, &iommu_device_list, list)
		if (iommu->fwnode == fwnode) {
			ops = iommu->ops;
			break;
		}
	spin_unlock(&iommu_device_lock);
	return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (fwspec)
		return ops == fwspec->ops ? 0 : -EINVAL;

	fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	of_node_get(to_of_node(iommu_fwnode));
	fwspec->iommu_fwnode = iommu_fwnode;
	fwspec->ops = ops;
	dev->iommu_fwspec = fwspec;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev->iommu_fwspec = NULL;
	}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	size_t size;
	int i;

	if (!fwspec)
		return -EINVAL;

	size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
	if (size > sizeof(*fwspec)) {
		fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;

		dev->iommu_fwspec = fwspec;
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids += num_ids;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);