/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/mm.h>

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	struct iova_domain *iovad;

	if (domain->iova_cookie)
		return -EEXIST;

	iovad = kzalloc(sizeof(*iovad), GFP_KERNEL);
	domain->iova_cookie = iovad;

	return iovad ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
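
/*
 * Example (illustrative, not part of this file): an IOMMU driver would
 * typically acquire the cookie from its domain_alloc callback. The foo_*
 * names below are hypothetical stand-ins for a real driver's own helpers.
 *
 *	static struct iommu_domain *foo_domain_alloc(unsigned type)
 *	{
 *		struct iommu_domain *domain = foo_alloc_hw_domain(type);
 *
 *		if (domain && type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(domain)) {
 *			foo_free_hw_domain(domain);
 *			return NULL;
 *		}
 *		return domain;
 *	}
 */
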
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iova_domain *iovad = domain->iova_cookie;

	if (!iovad)
		return;

	put_iova_domain(iovad);
	kfree(iovad);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
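
/*
 * Example (illustrative, not part of this file): the matching domain_free
 * callback releases the cookie before tearing down the hardware domain;
 * foo_free_hw_domain() is again hypothetical.
 *
 *	static void foo_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		foo_free_hw_domain(domain);
 *	}
 */
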
/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size)
{
	struct iova_domain *iovad = domain->iova_cookie;
	unsigned long order, base_pfn, end_pfn;

	if (!iovad)
		return -ENODEV;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->ops->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
		end_pfn = min_t(unsigned long, end_pfn,
				domain->geometry.aperture_end >> order);
	}

	/* All we can safely do with an existing domain is enlarge it */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn ||
		    end_pfn < iovad->dma_32bit_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}
		iovad->dma_32bit_pfn = end_pfn;
	} else {
		init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
	}
	return 0;
}
EXPORT_SYMBOL(iommu_dma_init_domain);
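
/*
 * Example (illustrative, not part of this file): arch code attaching a
 * device might size the IOVA space from that device's DMA mask, e.g. for
 * a 32-bit-capable master. The error handling shown is only a sketch.
 *
 *	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 *
 *	if (iommu_dma_init_domain(domain, 0, 1ULL << 32))
 *		dev_warn(dev, "failed to initialise DMA domain\n");
 */
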
/**
 * dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size,
		dma_addr_t dma_limit)
{
	unsigned long shift = iova_shift(iovad);
	unsigned long length = iova_align(iovad, size) >> shift;

	/*
	 * Enforce size-alignment to be safe - there could perhaps be an
	 * attribute to control this per-device, or at least per-domain...
	 */
	return alloc_iova(iovad, length, dma_limit >> shift, true);
}

/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
{
	struct iova_domain *iovad = domain->iova_cookie;
	unsigned long shift = iova_shift(iovad);
	unsigned long pfn = dma_addr >> shift;
	struct iova *iova = find_iova(iovad, pfn);
	size_t size;

	if (WARN_ON(!iova))
		return;

	size = iova_size(iova) << shift;
	size -= iommu_unmap(domain, pfn << shift, size);
	/* ...and if we can't, then something is horribly, horribly wrong */
	WARN_ON(size > 0);
	__free_iova(iovad, iova);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		int j, order = __fls(count);

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to single-page allocations.
		 */
		for (order = min(order, MAX_ORDER); order > 0; order--) {
			page = alloc_pages(gfp | __GFP_NORETRY, order);
			if (!page)
				continue;
			if (PageCompound(page)) {
				if (!split_huge_page(page))
					break;
				__free_pages(page, order);
			} else {
				split_page(page, order);
				break;
			}
		}
		if (!page)
			page = alloc_page(gfp);
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		j = 1 << order;
		count -= j;
		while (j--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_ERROR_CODE;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size,
		gfp_t gfp, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	struct iova *iova;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t dma_addr;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	*handle = DMA_ERROR_CODE;

	pages = __iommu_dma_alloc_pages(count, gfp);
	if (!pages)
		return NULL;

	iova = __alloc_iova(iovad, size, dev->coherent_dma_mask);
	if (!iova)
		goto out_free_pages;

	size = iova_align(iovad, size);
	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = dma_addr;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	__free_iova(iovad, iova);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
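
/*
 * Example (illustrative, not part of this file): a non-coherent arch might
 * wrap this allocator roughly as below; flush_by_arch() is a hypothetical
 * stand-in for the arch's per-page cache maintenance routine.
 *
 *	static void flush_by_arch(struct device *dev, const void *virt,
 *				  phys_addr_t phys);
 *
 *	pages = iommu_dma_alloc(dev, size, gfp,
 *				dma_direction_to_prot(DMA_BIDIRECTIONAL, false),
 *				handle, flush_by_arch);
 *	if (!pages)
 *		return NULL;
 *
 * The buffer is later torn down with iommu_dma_free(dev, pages, size, handle).
 */
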
/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}
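
/*
 * Example (illustrative, not part of this file): an arch .mmap hook that
 * still has the page array from iommu_dma_alloc() might, after checking
 * that the VMA fits within the buffer, simply do:
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return iommu_dma_mmap(pages, size, vma);
 *
 * The pgprot choice is arch policy and is shown here only as an assumption.
 */
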
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	dma_addr_t dma_addr;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	phys_addr_t phys = page_to_phys(page) + offset;
	size_t iova_off = iova_offset(iovad, phys);
	size_t len = iova_align(iovad, size + iova_off);
	struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev));

	if (!iova)
		return DMA_ERROR_CODE;

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) {
		__free_iova(iovad, iova);
		return DMA_ERROR_CODE;
	}
	return dma_addr + iova_off;
}
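
/*
 * Example (illustrative, not part of this file): an arch .map_page
 * implementation would usually derive the protection flags from the DMA
 * direction and the device's coherency, e.g.:
 *
 *	int prot = dma_direction_to_prot(dir, coherent);
 *	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
 *
 * with any cache maintenance for a non-coherent device handled by the arch
 * around this call.
 */
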
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 * Handling IOVA concatenation can come later, if needed
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		/* Un-swizzling the fields here, hence the naming mismatch */
		unsigned int s_offset = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_dma_len = s->length;

		s->offset = s_offset;
		s->length = s_length;
		sg_dma_address(s) = dma_addr + s_offset;
		dma_addr += s_dma_len;
	}
	return i;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_ERROR_CODE)
			s->offset = sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	struct iova *iova;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t dma_addr;
	size_t iova_len = 0;
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * hiding the original data in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_offset = iova_offset(iovad, s->offset);
		size_t s_length = s->length;

		sg_dma_address(s) = s->offset;
		sg_dma_len(s) = s_length;
		s->offset -= s_offset;
		s_length = iova_align(iovad, s_length + s_offset);
		s->length = s_length;

		/*
		 * The simple way to avoid the rare case of a segment
		 * crossing the boundary mask is to pad the previous one
		 * to end at a naturally-aligned IOVA for this one's size,
		 * at the cost of potentially over-allocating a little.
		 */
		if (prev) {
			size_t pad_len = roundup_pow_of_two(s_length);

			pad_len = (pad_len - iova_len) & (pad_len - 1);
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev));
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, dma_addr);

out_free_iova:
	__free_iova(iovad, iova);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
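
/*
 * Example (illustrative, not part of this file): an arch .map_sg hook can
 * defer entirely to this helper, again deriving the prot flags from the
 * direction and coherency:
 *
 *	return iommu_dma_map_sg(dev, sgl, nelems,
 *				dma_direction_to_prot(dir, coherent));
 */
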
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
}

int iommu_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * 'Special' IOMMUs which don't have the same addressing capability
	 * as the CPU will have to wait until we have some way to query that
	 * before they'll be able to use this framework.
	 */
	return 1;
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}