/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#define pr_fmt(fmt)	"iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

struct iommu_callback_data {
	const struct iommu_ops *ops;
};

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
};

struct iommu_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =	\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_simple_remove(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		ida_simple_remove(&iommu_group_ida, group->id);
		kfree(group);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);

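/*
 * Example: a minimal sketch of how a hypothetical iommu driver might use
 * iommu_group_alloc() when building a group by hand.  The device pointer
 * and error handling are illustrative, not taken from any real driver.
 *
 *	struct iommu_group *grp;
 *	int ret;
 *
 *	grp = iommu_group_alloc();
 *	if (IS_ERR(grp))
 *		return PTR_ERR(grp);
 *
 *	ret = iommu_group_add_device(grp, dev);
 *	iommu_group_put(grp);	// drop the initial allocation reference
 *	if (ret)
 *		return ret;
 */
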
struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

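/*
 * Example: a sketch of stashing driver-private state in a group, with a
 * release callback invoked when the group is freed.  "struct foo_group_data"
 * and foo_group_data_release() are hypothetical names.
 *
 *	static void foo_group_data_release(void *iommu_data)
 *	{
 *		kfree(iommu_data);
 *	}
 *
 *	data = kzalloc(sizeof(struct foo_group_data), GFP_KERNEL);
 *	if (!data)
 *		return -ENOMEM;
 *	iommu_group_set_iommudata(group, data, foo_group_data_release);
 *	...
 *	data = iommu_group_get_iommudata(group);
 */
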
/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
					      struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_dm_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_dm_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;

		if (domain->ops->apply_dm_region)
			domain->ops->apply_dm_region(dev, domain, entry);

		start = ALIGN(entry->start, pg_size);
		end   = ALIGN(entry->start + entry->length, pg_size);

		for (addr = start; addr < end; addr += pg_size) {
			phys_addr_t phys_addr;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (phys_addr)
				continue;

			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
			if (ret)
				goto out;
		}

	}

out:
	iommu_put_dm_regions(dev, &mappings);

	return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct iommu_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret) {
		kfree(device);
		return ret;
	}

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		sysfs_remove_link(&dev->kobj, "iommu_group");
		kfree(device);
		return -ENOMEM;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		kfree(device->name);
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}

		sysfs_remove_link(&dev->kobj, "iommu_group");
		kfree(device);
		return ret;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	iommu_group_create_direct_mappings(group, dev);

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain)
		__iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct iommu_device *tmp_device, *device = NULL;

	pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct iommu_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct iommu_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}


int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

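/*
 * Example: counting the devices in a group with iommu_group_for_each_dev().
 * The callback below is illustrative only.
 *
 *	static int foo_count_dev(struct device *dev, void *data)
 *	{
 *		int *count = data;
 *
 *		(*count)++;
 *		return 0;	// a non-zero return stops the iteration
 *	}
 *
 *	int count = 0;
 *	iommu_group_for_each_dev(group, &count, foo_count_dev);
 */
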
/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

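/*
 * Example: a sketch of watching device additions to a group, as a group
 * user such as VFIO might.  foo_iommu_notify() and foo_nb are hypothetical.
 *
 *	static int foo_iommu_notify(struct notifier_block *nb,
 *				    unsigned long action, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
 *			dev_info(dev, "added to iommu group\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_iommu_notify,
 *	};
 *
 *	iommu_group_register_notifier(group, &foo_nb);
 *	...
 *	iommu_group_unregister_notifier(group, &foo_nb);
 */
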
/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

609 static struct iommu_group
*get_pci_alias_group(struct pci_dev
*pdev
,
610 unsigned long *devfns
);
613 * To consider a PCI device isolated, we require ACS to support Source
614 * Validation, Request Redirection, Completer Redirection, and Upstream
615 * Forwarding. This effectively means that devices cannot spoof their
616 * requester ID, requests and completions cannot be redirected, and all
617 * transactions are forwarded upstream, even as it passes through a
618 * bridge where the target device is downstream.
620 #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups.  DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	return group;
}

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases.  If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any.  No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	return group;
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	group = ERR_PTR(-EINVAL);

	if (ops && ops->device_group)
		group = ops->device_group(dev);

	if (IS_ERR(group))
		return group;

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver.
	 */
	if (!group->default_domain) {
		group->default_domain = __iommu_domain_alloc(dev->bus,
							     IOMMU_DOMAIN_DMA);
		if (!group->domain)
			group->domain = group->default_domain;
	}

	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}

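/*
 * Example: the typical call site in a hypothetical iommu driver's
 * ->add_device() implementation.  The surrounding function is illustrative;
 * only the get/put pairing is the real pattern.
 *
 *	static int foo_add_device(struct device *dev)
 *	{
 *		struct iommu_group *group;
 *
 *		group = iommu_group_get_for_dev(dev);
 *		if (IS_ERR(group))
 *			return PTR_ERR(group);
 *
 *		// dev is now a group member; drop our reference
 *		iommu_group_put(group);
 *		return 0;
 *	}
 */
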
struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int add_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;
	int ret;

	if (!ops->add_device)
		return 0;

	WARN_ON(dev->iommu_group);

	ret = ops->add_device(dev);

	/*
	 * We ignore -ENODEV errors for now, as they just mean that the
	 * device is not translated by an IOMMU.  We still care about
	 * other errors and fail to initialize when they happen.
	 */
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;

	if (ops->remove_device && dev->iommu_group)
		ops->remove_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	unsigned long group_action = 0;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (ops->add_device)
			return ops->add_device(dev);
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		if (ops->remove_device && dev->iommu_group) {
			ops->remove_device(dev);
			return 0;
		}
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;
	struct notifier_block *nb;
	struct iommu_callback_data cb = {
		.ops = ops,
	};

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = iommu_bus_notifier;

	err = bus_register_notifier(bus, nb);
	if (err)
		goto out_free;

	err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
	if (err)
		goto out_err;

	return 0;

out_err:
	/* Clean up */
	bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
	bus_unregister_notifier(bus, nb);

out_free:
	kfree(nb);

	return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus.  Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up.  With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;

	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	err = iommu_bus_init(bus, ops);
	if (err)
		bus->iommu_ops = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);

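/*
 * Example: a sketch of how an iommu driver registers its ops for a bus at
 * init time.  "foo_iommu_ops" and foo_iommu_init() are hypothetical names;
 * real drivers call e.g. bus_set_iommu(&pci_bus_type, &their_ops) or the
 * platform bus equivalent.
 *
 *	static const struct iommu_ops foo_iommu_ops = {
 *		...
 *	};
 *
 *	static int __init foo_iommu_init(void)
 *	{
 *		return bus_set_iommu(&pci_bus_type, &foo_iommu_ops);
 *	}
 */
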
bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	if (!bus->iommu_ops || !bus->iommu_ops->capable)
		return false;

	return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

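/*
 * Example: installing a fault handler on a domain.  foo_fault() is a
 * hypothetical handler; returning -ENOSYS asks the driver to fall back
 * to its default fault reporting.
 *
 *	static int foo_fault(struct iommu_domain *domain, struct device *dev,
 *			     unsigned long iova, int flags, void *token)
 *	{
 *		pr_err("iommu fault at iova 0x%lx\n", iova);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, foo_fault, NULL);
 */
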
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->ops  = bus->iommu_ops;
	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;

	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

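/*
 * Example: allocating an unmanaged domain on the PCI bus and releasing it.
 * Error handling is minimal for brevity.
 *
 *	struct iommu_domain *domain;
 *
 *	domain = iommu_domain_alloc(&pci_bus_type);
 *	if (!domain)
 *		return -ENOMEM;
 *	...
 *	iommu_domain_free(domain);
 */
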
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return __iommu_attach_device(domain, dev);

	/*
	 * We have a group - lock it to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return __iommu_detach_device(domain, dev);

	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		WARN_ON(1);
		goto out_unlock;
	}

	__iommu_detach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->default_domain && group->domain != group->default_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

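/*
 * Example: the attach/detach pairing a group user such as VFIO follows.
 * The domain here is one previously obtained from iommu_domain_alloc().
 *
 *	ret = iommu_attach_group(domain, group);
 *	if (ret)
 *		return ret;
 *	...
 *	iommu_detach_group(domain, group);
 */
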
static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group)
{
	int ret;

	if (!group->default_domain) {
		__iommu_group_for_each_dev(group, domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return;
	}

	if (group->domain == group->default_domain)
		return;

	/* Detach by re-attaching to the default domain */
	ret = __iommu_group_for_each_dev(group, group->default_domain,
					 iommu_group_do_attach_device);
	if (ret != 0)
		WARN_ON(1);
	else
		group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_detach_group(domain, group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* need to consider alignment requirements ? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);

		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= domain->pgsize_bitmap;

	/* make sure we're still sane */
	BUG_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}

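/*
 * Worked example: with pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G, a request of
 * iova | paddr = 0x40200000 and size = 0x400000 gives __ffs(addr_merge) = 21
 * and __fls(size) = 22, so pgsize_idx = 21.  The mask of acceptable sizes is
 * then 0x3fffff, which intersected with the bitmap leaves SZ_4K | SZ_2M,
 * and the biggest page selected is 2MB.
 */
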
int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			 iova, &paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);

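/*
 * Example: mapping one page of physically contiguous memory at a fixed
 * IOVA and tearing it down again.  The iova value is arbitrary for the
 * sketch; real users manage IOVA space themselves.
 *
 *	unsigned long iova = 0x100000;
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	int ret;
 *
 *	ret = iommu_map(domain, iova, page_to_phys(page), PAGE_SIZE,
 *			IOMMU_READ | IOMMU_WRITE);
 *	if (ret)
 *		return ret;
 *
 *	// look up the translation we just installed
 *	WARN_ON(iommu_iova_to_phys(domain, iova) != page_to_phys(page));
 *
 *	iommu_unmap(domain, iova, PAGE_SIZE);
 */
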
size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;
	unsigned long orig_iova = iova;

	if (unlikely(domain->ops->unmap == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents, int prot)
{
	struct scatterlist *s;
	size_t mapped = 0;
	unsigned int i, min_pagesz;
	int ret;

	if (unlikely(domain->pgsize_bitmap == 0UL))
		return 0;

	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

		/*
		 * We are mapping on IOMMU page boundaries, so offset within
		 * the page must be 0. However, the IOMMU may support pages
		 * smaller than PAGE_SIZE, so s->offset may still represent
		 * an offset of that boundary within the CPU page.
		 */
		if (!IS_ALIGNED(s->offset, min_pagesz))
			goto out_err;

		ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
		if (ret)
			goto out_err;

		mapped += s->length;
	}

	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return 0;

}
EXPORT_SYMBOL_GPL(default_iommu_map_sg);

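/*
 * Example: mapping a one-entry scatterlist through the iommu_map_sg()
 * wrapper, which drivers typically back with this default implementation.
 * The table setup is illustrative.
 *
 *	struct scatterlist sg[1];
 *	size_t mapped;
 *
 *	sg_init_table(sg, 1);
 *	sg_set_page(&sg[0], page, PAGE_SIZE, 0);
 *
 *	mapped = iommu_map_sg(domain, iova, sg, 1, IOMMU_READ | IOMMU_WRITE);
 *	if (!mapped)		// on failure the partial mapping is unwound
 *		return -ENOMEM;
 */
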
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	return 0;
}
core_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry  = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging  = data;
		*paging = (domain->pgsize_bitmap != 0UL);
		break;
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_get_windows != NULL)
			*count = domain->ops->domain_get_windows(domain);
		else
			ret = -ENODEV;

		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);

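/*
 * Example: querying the IOVA aperture of a domain via DOMAIN_ATTR_GEOMETRY.
 *
 *	struct iommu_domain_geometry geo;
 *
 *	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
 *		pr_info("aperture: 0x%llx - 0x%llx\n",
 *			(u64)geo.aperture_start, (u64)geo.aperture_end);
 */
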
int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_set_windows != NULL)
			ret = domain->ops->domain_set_windows(domain, *count);
		else
			ret = -ENODEV;

		break;
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

void iommu_get_dm_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_dm_regions)
		ops->get_dm_regions(dev, list);
}

void iommu_put_dm_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_dm_regions)
		ops->put_dm_regions(dev, list);
}

/* Request that a device is direct mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
	struct iommu_domain *dm_domain;
	struct iommu_group *group;
	int ret;

	/* Device must already be in a group before calling this function */
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	mutex_lock(&group->mutex);

	/* Check if the default domain is already direct mapped */
	ret = 0;
	if (group->default_domain &&
	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
		goto out;

	/* Don't change mappings of existing devices */
	ret = -EBUSY;
	if (iommu_group_device_count(group) != 1)
		goto out;

	/* Allocate a direct mapped domain */
	ret = -ENOMEM;
	dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
	if (!dm_domain)
		goto out;

	/* Attach the device to the domain */
	ret = __iommu_attach_group(dm_domain, group);
	if (ret) {
		iommu_domain_free(dm_domain);
		goto out;
	}

	/* Make the direct mapped domain the default for this group */
	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	group->default_domain = dm_domain;

	pr_info("Using direct mapping for device %s\n", dev_name(dev));

	ret = 0;
out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}