/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#define pr_fmt(fmt)	"%s: " fmt, __func__

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
static ssize_t show_iommu_group(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned int groupid;

	if (iommu_device_group(dev, &groupid))
		return 0;

	return sprintf(buf, "%u", groupid);
}
static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);
static int add_iommu_group(struct device *dev, void *data)
{
	unsigned int groupid;

	if (iommu_device_group(dev, &groupid) == 0)
		return device_create_file(dev, &dev_attr_iommu_group);

	return 0;
}
static int remove_iommu_group(struct device *dev)
{
	unsigned int groupid;

	if (iommu_device_group(dev, &groupid) == 0)
		device_remove_file(dev, &dev_attr_iommu_group);

	return 0;
}
static int iommu_device_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE)
		return add_iommu_group(dev, NULL);
	else if (action == BUS_NOTIFY_DEL_DEVICE)
		return remove_iommu_group(dev);

	return 0;
}
static struct notifier_block iommu_device_nb = {
	.notifier_call = iommu_device_notifier,
};
static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
	bus_register_notifier(bus, &iommu_device_nb);
	bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
}
/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	iommu_bus_init(bus, ops);

	return 0;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
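/*
 * Example: how an IOMMU driver would typically use bus_set_iommu() once
 * its own hardware probing has finished. A minimal sketch for
 * illustration only; my_iommu_ops, my_domain_init and the other my_*
 * names are hypothetical, not part of this file.
 *
 *	static struct iommu_ops my_iommu_ops = {
 *		.domain_init	= my_domain_init,
 *		.domain_destroy	= my_domain_destroy,
 *		.attach_dev	= my_attach_dev,
 *		.detach_dev	= my_detach_dev,
 *		.map		= my_map,
 *		.unmap		= my_unmap,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M,
 *	};
 *
 *	static int __init my_iommu_init(void)
 *	{
 *		return bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 *	}
 */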
bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);
/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
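/*
 * Example: a hypothetical fault handler installed with
 * iommu_set_fault_handler(). Sketch for illustration only; the names
 * my_fault_handler and my_ctx are made up. Per the contract above,
 * returning a nonzero error code reports the fault as unhandled.
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		struct my_ctx *ctx = token;
 *
 *		dev_err(dev, "iommu fault at iova 0x%lx\n", iova);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, ctx);
 */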
struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	struct iommu_domain *domain;
	int ret;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->ops = bus->iommu_ops;

	ret = domain->ops->domain_init(domain);
	if (ret)
		goto out_free;

	return domain;

out_free:
	kfree(domain);

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);
void iommu_domain_free(struct iommu_domain *domain)
{
	if (likely(domain->ops->domain_destroy != NULL))
		domain->ops->domain_destroy(domain);

	kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	return domain->ops->attach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
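/*
 * Example: the typical lifecycle of a domain from an IOMMU-API user's
 * point of view. A minimal sketch for illustration only; "dev" is
 * assumed to sit on a bus whose IOMMU driver has called bus_set_iommu().
 *
 *	struct iommu_domain *domain;
 *	int ret;
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	if (!domain)
 *		return -ENODEV;
 *
 *	ret = iommu_attach_device(domain, dev);
 *	if (ret) {
 *		iommu_domain_free(domain);
 *		return ret;
 *	}
 *
 *	(map and unmap ranges with iommu_map()/iommu_unmap() here)
 *
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */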
void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
			       unsigned long iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
int iommu_domain_has_cap(struct iommu_domain *domain,
			 unsigned long cap)
{
	if (unlikely(domain->ops->domain_has_cap == NULL))
		return 0;

	return domain->ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
			"0x%x\n", iova, (unsigned long)paddr,
			(unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
				(unsigned long)paddr, (unsigned long)size);

	while (size) {
		unsigned long pgsize, addr_merge = iova | paddr;
		unsigned int pgsize_idx;

		/* Max page size that still fits into 'size' */
		pgsize_idx = __fls(size);

		/* need to consider alignment requirements ? */
		if (likely(addr_merge)) {
			/* Max page size allowed by both iova and paddr */
			unsigned int align_pgsize_idx = __ffs(addr_merge);

			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
		}

		/* build a mask of acceptable page sizes */
		pgsize = (1UL << (pgsize_idx + 1)) - 1;

		/* throw away page sizes not supported by the hardware */
		pgsize &= domain->ops->pgsize_bitmap;

		/* make sure we're still sane */
		BUG_ON(!pgsize);

		/* pick the biggest page */
		pgsize_idx = __fls(pgsize);
		pgsize = 1UL << pgsize_idx;

		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
					(unsigned long)paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
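/*
 * Worked example of the page-size selection loop above, with made-up
 * numbers: assume pgsize_bitmap = 0x201000 (4K and 2M pages), iova =
 * 0x200000, paddr = 0x600000 and size = 0x201000. Then addr_merge =
 * 0x600000 gives align_pgsize_idx = __ffs = 21, __fls(size) is also 21,
 * and the mask (1UL << 22) - 1 = 0x3fffff intersected with the bitmap
 * leaves 0x201000, whose highest set bit picks a 2M page. The second
 * iteration is left with size = 0x1000 and maps the remaining 4K tail.
 */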
size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;

	if (unlikely(domain->ops->unmap == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
					iova, (unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
						(unsigned long)size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t left = size - unmapped;

		unmapped_page = domain->ops->unmap(domain, iova, left);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
					(unsigned long)unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
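/*
 * Example: undoing the mapping from the iommu_map() example above
 * (illustrative values only). iommu_unmap() returns the number of bytes
 * it actually unmapped, which may be less than requested if it hits an
 * unmapped area, so callers may want to check:
 *
 *	size_t unmapped = iommu_unmap(domain, 0x200000, 0x201000);
 *
 *	if (unmapped != 0x201000)
 *		pr_warn("partial unmap: 0x%zx bytes\n", unmapped);
 */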
int iommu_device_group(struct device *dev, unsigned int *groupid)
{
	if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
		return dev->bus->iommu_ops->device_group(dev, groupid);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_device_group);