drivers/iommu/dma-iommu.c

/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	struct iova_domain *iovad;

	if (domain->iova_cookie)
		return -EEXIST;

	iovad = kzalloc(sizeof(*iovad), GFP_KERNEL);
	domain->iova_cookie = iovad;

	return iovad ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iova_domain *iovad = domain->iova_cookie;

	if (!iovad)
		return;

	if (iovad->granule)
		put_iova_domain(iovad);
	kfree(iovad);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
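
/*
 * Editor's note: an illustrative sketch (not part of this file) of how an
 * IOMMU driver might wire the cookie helpers into its domain_alloc and
 * domain_free callbacks. "struct my_iommu_domain", "to_my_iommu_domain()"
 * and the callback names are hypothetical; only iommu_get_dma_cookie() and
 * iommu_put_dma_cookie() above are the real API. Not built, hence #if 0.
 */
#if 0
static struct iommu_domain *my_iommu_domain_alloc(unsigned type)
{
	struct my_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	/* DMA-API domains want their IOVA cookie allocated up front */
	if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(&dom->domain)) {
		kfree(dom);
		return NULL;
	}
	return &dom->domain;
}

static void my_iommu_domain_free(struct iommu_domain *domain)
{
	struct my_iommu_domain *dom = to_my_iommu_domain(domain);

	/* Harmless without a cookie: iommu_put_dma_cookie() checks for one */
	iommu_put_dma_cookie(domain);
	kfree(dom);
}
#endif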

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size)
{
	struct iova_domain *iovad = domain->iova_cookie;
	unsigned long order, base_pfn, end_pfn;

	if (!iovad)
		return -ENODEV;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->ops->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				 domain->geometry.aperture_start >> order);
		end_pfn = min_t(unsigned long, end_pfn,
				domain->geometry.aperture_end >> order);
	}

	/* All we can safely do with an existing domain is enlarge it */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn ||
		    end_pfn < iovad->dma_32bit_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}
		iovad->dma_32bit_pfn = end_pfn;
	} else {
		init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
	}
	return 0;
}
EXPORT_SYMBOL(iommu_dma_init_domain);
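
/*
 * Editor's note: an illustrative sketch (not part of this file) of sizing
 * the IOVA space when a domain is first set up for a device. The wrapper
 * name is hypothetical; the assumption of a 32-bit-capable master starting
 * at IOVA 0 is only an example, not a requirement of the API above.
 */
#if 0
static int my_arch_setup_dma_domain(struct device *dev,
				    struct iommu_domain *domain)
{
	dma_addr_t dma_base = 0;
	u64 size = 1ULL << 32;	/* enough for a 32-bit coherent DMA mask */

	/* Reinitialising later with a compatible (larger) range is allowed */
	return iommu_dma_init_domain(domain, dma_base, size);
}
#endif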

/**
 * dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
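
/*
 * Editor's note: an illustrative sketch (not part of this file) of how an
 * arch dma_map_ops ->map_page callback might combine dma_direction_to_prot()
 * with iommu_dma_map_page() below. The wrapper name is hypothetical and the
 * coherency test is a stand-in for whatever the architecture provides.
 */
#if 0
static dma_addr_t my_arch_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	bool coherent = my_arch_dev_is_coherent(dev);	/* arch-specific test */
	int prot = dma_direction_to_prot(dir, coherent);

	return iommu_dma_map_page(dev, page, offset, size, prot);
}
#endif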

static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size,
		dma_addr_t dma_limit)
{
	unsigned long shift = iova_shift(iovad);
	unsigned long length = iova_align(iovad, size) >> shift;

	/*
	 * Enforce size-alignment to be safe - there could perhaps be an
	 * attribute to control this per-device, or at least per-domain...
	 */
	return alloc_iova(iovad, length, dma_limit >> shift, true);
}

/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
{
	struct iova_domain *iovad = domain->iova_cookie;
	unsigned long shift = iova_shift(iovad);
	unsigned long pfn = dma_addr >> shift;
	struct iova *iova = find_iova(iovad, pfn);
	size_t size;

	if (WARN_ON(!iova))
		return;

	size = iova_size(iova) << shift;
	size -= iommu_unmap(domain, pfn << shift, size);
	/* ...and if we can't, then something is horribly, horribly wrong */
	WARN_ON(size > 0);
	__free_iova(iovad, iova);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);
	unsigned int order = MAX_ORDER;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		int j;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to single-page allocations.
		 */
		for (order = min_t(unsigned int, order, __fls(count));
		     order > 0; order--) {
			page = alloc_pages(gfp | __GFP_NORETRY, order);
			if (!page)
				continue;
			if (PageCompound(page)) {
				if (!split_huge_page(page))
					break;
				__free_pages(page, order);
			} else {
				split_page(page, order);
				break;
			}
		}
		if (!page)
			page = alloc_page(gfp);
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		j = 1 << order;
		count -= j;
		while (j--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_ERROR_CODE;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size,
		gfp_t gfp, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	struct iova *iova;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t dma_addr;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	*handle = DMA_ERROR_CODE;

	pages = __iommu_dma_alloc_pages(count, gfp);
	if (!pages)
		return NULL;

	iova = __alloc_iova(iovad, size, dev->coherent_dma_mask);
	if (!iova)
		goto out_free_pages;

	size = iova_align(iovad, size);
	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = dma_addr;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	__free_iova(iovad, iova);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
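
/*
 * Editor's note: an illustrative sketch (not part of this file) of a
 * non-coherent ->alloc implementation built on iommu_dma_alloc(). The
 * wrapper names, the stand-in cache-flush helper and the choice of a
 * write-combining kernel remapping for the CPU view are all assumptions
 * of this sketch, not requirements of the API above.
 */
#if 0
static void my_flush_page(struct device *dev, const void *virt,
			  phys_addr_t phys)
{
	my_arch_flush_dcache(virt, PAGE_SIZE);	/* stand-in for the arch cache op */
}

static void *my_arch_alloc(struct device *dev, size_t size,
			   dma_addr_t *handle, gfp_t gfp,
			   struct dma_attrs *attrs)
{
	int prot = dma_direction_to_prot(DMA_BIDIRECTIONAL, false);
	struct page **pages;

	size = PAGE_ALIGN(size);
	pages = iommu_dma_alloc(dev, size, gfp, prot, handle, my_flush_page);
	if (!pages)
		return NULL;

	/*
	 * Remap for a CPU view; dma_common_pages_remap() also stashes the
	 * page array on the vm_struct, so ->mmap/->free can find it later.
	 */
	return dma_common_pages_remap(pages, size, VM_USERMAP,
				      pgprot_writecombine(PAGE_KERNEL),
				      __builtin_return_address(0));
}
#endif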

/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}
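
/*
 * Editor's note: an illustrative sketch (not part of this file) of a
 * dma_map_ops ->mmap callback using iommu_dma_mmap(). Looking the page
 * array back up via find_vm_area() assumes the buffer was remapped with
 * dma_common_pages_remap() at alloc time, as in the earlier sketch; the
 * size/offset check stands in for the VMA validation the comment above
 * asks the caller to perform.
 */
#if 0
static int my_arch_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			struct dma_attrs *attrs)
{
	struct vm_struct *area = find_vm_area(cpu_addr);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (!area || !area->pages)
		return -ENXIO;

	if (vma->vm_pgoff >= nr_pages ||
	    vma_pages(vma) > nr_pages - vma->vm_pgoff)
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}
#endif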

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	dma_addr_t dma_addr;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	phys_addr_t phys = page_to_phys(page) + offset;
	size_t iova_off = iova_offset(iovad, phys);
	size_t len = iova_align(iovad, size + iova_off);
	struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev));

	if (!iova)
		return DMA_ERROR_CODE;

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) {
		__free_iova(iovad, iova);
		return DMA_ERROR_CODE;
	}
	return dma_addr + iova_off;
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 * Handling IOVA concatenation can come later, if needed
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		/* Un-swizzling the fields here, hence the naming mismatch */
		unsigned int s_offset = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_dma_len = s->length;

		s->offset += s_offset;
		s->length = s_length;
		sg_dma_address(s) = dma_addr + s_offset;
		dma_addr += s_dma_len;
	}
	return i;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_ERROR_CODE)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	struct iova *iova;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t dma_addr;
	size_t iova_len = 0;
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * hiding the original data in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_offset = iova_offset(iovad, s->offset);
		size_t s_length = s->length;

		sg_dma_address(s) = s_offset;
		sg_dma_len(s) = s_length;
		s->offset -= s_offset;
		s_length = iova_align(iovad, s_length + s_offset);
		s->length = s_length;

		/*
		 * The simple way to avoid the rare case of a segment
		 * crossing the boundary mask is to pad the previous one
		 * to end at a naturally-aligned IOVA for this one's size,
		 * at the cost of potentially over-allocating a little.
		 */
		if (prev) {
			size_t pad_len = roundup_pow_of_two(s_length);

			pad_len = (pad_len - iova_len) & (pad_len - 1);
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev));
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, dma_addr);

out_free_iova:
	__free_iova(iovad, iova);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
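
/*
 * Editor's note: an illustrative sketch (not part of this file) of a
 * dma_map_ops ->map_sg callback built on iommu_dma_map_sg(). The wrapper
 * name and coherency test are hypothetical; cache maintenance for
 * non-coherent masters is left to the architecture and omitted here.
 */
#if 0
static int my_arch_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nents, enum dma_data_direction dir,
			  struct dma_attrs *attrs)
{
	bool coherent = my_arch_dev_is_coherent(dev);	/* arch-specific test */

	return iommu_dma_map_sg(dev, sgl, nents,
				dma_direction_to_prot(dir, coherent));
}
#endif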

void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
}

int iommu_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * 'Special' IOMMUs which don't have the same addressing capability
	 * as the CPU will have to wait until we have some way to query that
	 * before they'll be able to use this framework.
	 */
	return 1;
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}