/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
                                 bool coherent)
{
        if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
                return pgprot_writecombine(prot);
        return prot;
}
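/*
 * Small pre-allocated, pre-mapped non-cacheable pool used to satisfy
 * coherent allocations that cannot sleep (no __GFP_WAIT).  Its size can
 * be overridden on the command line with "coherent_pool=<size>".
 */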
static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
static int __init early_coherent_pool(char *p)
{
        atomic_pool_size = memparse(p, &p);
        return 0;
}
early_param("coherent_pool", early_coherent_pool);
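/*
 * Allocate from the atomic pool and report the backing page via @ret_page
 * so the caller can derive the DMA address.  Returns NULL (with a warning)
 * if the pool has not been initialised yet.
 */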
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
        unsigned long val;
        void *ptr = NULL;

        if (!atomic_pool) {
                WARN(1, "coherent pool not initialised!\n");
                return NULL;
        }

        val = gen_pool_alloc(atomic_pool, size);
        if (val) {
                phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

                *ret_page = phys_to_page(phys);
                ptr = (void *)val;
                memset(ptr, 0, size);
        }

        return ptr;
}
static bool __in_atomic_pool(void *start, size_t size)
{
        return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}
static int __free_from_pool(void *start, size_t size)
{
        if (!__in_atomic_pool(start, size))
                return 0;

        gen_pool_free(atomic_pool, (unsigned long)start, size);

        return 1;
}
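/*
 * Allocate the backing memory for a coherent buffer: prefer CMA when it is
 * enabled and the caller may sleep, otherwise fall back to
 * swiotlb_alloc_coherent().  GFP_DMA is used for devices limited to 32-bit
 * DMA addressing.
 */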
static void *__dma_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
                                  struct dma_attrs *attrs)
{
        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return NULL;
        }

        if (IS_ENABLED(CONFIG_ZONE_DMA) &&
            dev->coherent_dma_mask <= DMA_BIT_MASK(32))
                flags |= GFP_DMA;
        if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
                struct page *page;
                void *addr;

                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                                 get_order(size));
                if (!page)
                        return NULL;

                *dma_handle = phys_to_dma(dev, page_to_phys(page));
                addr = page_address(page);
                memset(addr, 0, size);
                return addr;
        } else {
                return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
        }
}
static void __dma_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
                                struct dma_attrs *attrs)
{
        bool freed;
        phys_addr_t paddr = dma_to_phys(dev, dma_handle);

        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return;
        }

        freed = dma_release_from_contiguous(dev,
                                            phys_to_page(paddr),
                                            size >> PAGE_SHIFT);
        if (!freed)
                swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
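/*
 * Top-level .alloc hook.  Non-coherent devices calling from atomic context
 * are served from the pre-mapped atomic pool; otherwise the buffer is
 * allocated normally and, for non-coherent devices, remapped with
 * non-cacheable (Normal-NC) attributes after cleaning the kernel alias.
 */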
static void *__dma_alloc(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flags,
                         struct dma_attrs *attrs)
{
        struct page *page;
        void *ptr, *coherent_ptr;
        bool coherent = is_device_dma_coherent(dev);

        size = PAGE_ALIGN(size);

        if (!coherent && !(flags & __GFP_WAIT)) {
                struct page *page = NULL;
                void *addr = __alloc_from_pool(size, &page, flags);

                if (addr)
                        *dma_handle = phys_to_dma(dev, page_to_phys(page));

                return addr;
        }

        ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
        if (!ptr)
                goto no_mem;

        /* no need for non-cacheable mapping if coherent */
        if (coherent)
                return ptr;

        /* remove any dirty cache lines on the kernel alias */
        __dma_flush_range(ptr, ptr + size);

        /* create a coherent mapping */
        page = virt_to_page(ptr);
        coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                __get_dma_pgprot(attrs,
                                        __pgprot(PROT_NORMAL_NC), false),
                                NULL);
        if (!coherent_ptr)
                goto no_map;

        return coherent_ptr;

no_map:
        __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
        *dma_handle = DMA_ERROR_CODE;
        return NULL;
}
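/*
 * Top-level .free hook: undo __dma_alloc().  For non-coherent devices the
 * allocation may have come from the atomic pool (freed in place) or be a
 * vmap alias that must be torn down before the backing memory is released.
 */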
static void __dma_free(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle,
                       struct dma_attrs *attrs)
{
        void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

        size = PAGE_ALIGN(size);

        if (!is_device_dma_coherent(dev)) {
                if (__free_from_pool(vaddr, size))
                        return;
                vunmap(vaddr);
        }
        __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}
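/*
 * Streaming DMA: wrap the swiotlb implementation and add the cache
 * maintenance that non-coherent devices require around each mapping,
 * unmapping and sync operation.
 */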
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
                                     struct dma_attrs *attrs)
{
        dma_addr_t dev_addr;

        dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
        if (!is_device_dma_coherent(dev))
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

        return dev_addr;
}
static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
                                 size_t size, enum dma_data_direction dir,
                                 struct dma_attrs *attrs)
{
        if (!is_device_dma_coherent(dev))
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}
static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                  int nelems, enum dma_data_direction dir,
                                  struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i, ret;

        ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, ret, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);

        return ret;
}
static void __swiotlb_unmap_sg_attrs(struct device *dev,
                                     struct scatterlist *sgl, int nelems,
                                     enum dma_data_direction dir,
                                     struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}
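/*
 * The sync_*_for_cpu hooks perform the unmap-side cache maintenance
 * (__dma_unmap_area) before the CPU touches the buffer; the
 * sync_*_for_device hooks perform the map-side maintenance
 * (__dma_map_area) before ownership returns to the device.  Both are
 * skipped for coherent devices.
 */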
static void __swiotlb_sync_single_for_cpu(struct device *dev,
                                          dma_addr_t dev_addr, size_t size,
                                          enum dma_data_direction dir)
{
        if (!is_device_dma_coherent(dev))
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}
static void __swiotlb_sync_single_for_device(struct device *dev,
                                             dma_addr_t dev_addr, size_t size,
                                             enum dma_data_direction dir)
{
        swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
        if (!is_device_dma_coherent(dev))
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}
static void __swiotlb_sync_sg_for_cpu(struct device *dev,
                                      struct scatterlist *sgl, int nelems,
                                      enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}
static void __swiotlb_sync_sg_for_device(struct device *dev,
                                         struct scatterlist *sgl, int nelems,
                                         enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);
}
/* vma->vm_page_prot must be set appropriately before calling this function */
static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                             void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        int ret = -ENXIO;
        unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
                                        PAGE_SHIFT;
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;

        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
        }

        return ret;
}
static int __swiotlb_mmap(struct device *dev,
                          struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size,
                          struct dma_attrs *attrs)
{
        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
                                             is_device_dma_coherent(dev));
        return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
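/*
 * Default DMA operations, installed as the global dma_ops by
 * arm64_dma_init().  A driver call such as
 * dma_map_single(dev, buf, len, DMA_TO_DEVICE) is dispatched through
 * get_dma_ops(dev) and typically ends up in __swiotlb_map_page() above.
 */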
static struct dma_map_ops swiotlb_dma_ops = {
        .alloc = __dma_alloc,
        .free = __dma_free,
        .mmap = __swiotlb_mmap,
        .map_page = __swiotlb_map_page,
        .unmap_page = __swiotlb_unmap_page,
        .map_sg = __swiotlb_map_sg_attrs,
        .unmap_sg = __swiotlb_unmap_sg_attrs,
        .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
        .sync_single_for_device = __swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = __swiotlb_sync_sg_for_device,
        .dma_supported = swiotlb_dma_supported,
        .mapping_error = swiotlb_dma_mapping_error,
};
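/*
 * Set up the atomic pool at boot: grab pages from CMA (or GFP_DMA as a
 * fallback), clean the linear-map alias, remap them non-cacheable and hand
 * the region to a genalloc pool using order-aligned first-fit allocation.
 */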
static int __init atomic_pool_init(void)
{
        pgprot_t prot = __pgprot(PROT_NORMAL_NC);
        unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
        struct page *page;
        void *addr;
        unsigned int pool_size_order = get_order(atomic_pool_size);

        if (dev_get_cma_area(NULL))
                page = dma_alloc_from_contiguous(NULL, nr_pages,
                                                 pool_size_order);
        else
                page = alloc_pages(GFP_DMA, pool_size_order);

        if (page) {
                int ret;
                void *page_addr = page_address(page);

                memset(page_addr, 0, atomic_pool_size);
                __dma_flush_range(page_addr, page_addr + atomic_pool_size);

                atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
                if (!atomic_pool)
                        goto free_page;

                addr = dma_common_contiguous_remap(page, atomic_pool_size,
                                        VM_USERMAP, prot, atomic_pool_init);
                if (!addr)
                        goto destroy_genpool;

                ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
                                        page_to_phys(page),
                                        atomic_pool_size, -1);
                if (ret)
                        goto remove_mapping;

                gen_pool_set_algo(atomic_pool,
                                  gen_pool_first_fit_order_align,
                                  (void *)PAGE_SHIFT);

                pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
                        atomic_pool_size / 1024);
                return 0;
        }
        goto out;

remove_mapping:
        dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
        gen_pool_destroy(atomic_pool);
        atomic_pool = NULL;
free_page:
        if (!dma_release_from_contiguous(NULL, page, nr_pages))
                __free_pages(page, pool_size_order);
out:
        pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
               atomic_pool_size / 1024);
        return -ENOMEM;
}
/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/
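/*
 * Every operation below either fails or does nothing, so a device bound to
 * dummy_dma_ops can never perform DMA.
 */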
static void *__dummy_alloc(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           struct dma_attrs *attrs)
{
        return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle,
                         struct dma_attrs *attrs)
{
}

static int __dummy_mmap(struct device *dev,
                        struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
                        struct dma_attrs *attrs)
{
        return -ENXIO;
}
static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   struct dma_attrs *attrs)
{
        return DMA_ERROR_CODE;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
                               struct dma_attrs *attrs)
{
}
static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
                          int nelems, enum dma_data_direction dir,
                          struct dma_attrs *attrs)
{
        return 0;
}

static void __dummy_unmap_sg(struct device *dev,
                             struct scatterlist *sgl, int nelems,
                             enum dma_data_direction dir,
                             struct dma_attrs *attrs)
{
}

static void __dummy_sync_single(struct device *dev,
                                dma_addr_t dev_addr, size_t size,
                                enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
                            struct scatterlist *sgl, int nelems,
                            enum dma_data_direction dir)
{
}
static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
        return 0;
}
struct dma_map_ops dummy_dma_ops = {
        .alloc = __dummy_alloc,
        .free = __dummy_free,
        .mmap = __dummy_mmap,
        .map_page = __dummy_map_page,
        .unmap_page = __dummy_unmap_page,
        .map_sg = __dummy_map_sg,
        .unmap_sg = __dummy_unmap_sg,
        .sync_single_for_cpu = __dummy_sync_single,
        .sync_single_for_device = __dummy_sync_single,
        .sync_sg_for_cpu = __dummy_sync_sg,
        .sync_sg_for_device = __dummy_sync_sg,
        .mapping_error = __dummy_mapping_error,
        .dma_supported = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);
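/*
 * Install the swiotlb-backed ops as the default at arch_initcall time and
 * set up the atomic pool; DMA debug entries are preallocated later, at
 * fs_initcall time.
 */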
static int __init arm64_dma_init(void)
{
        int ret;

        dma_ops = &swiotlb_dma_ops;

        ret = atomic_pool_init();

        return ret;
}
arch_initcall(arm64_dma_init);
#define PREALLOC_DMA_DEBUG_ENTRIES      4096

static int __init dma_debug_do_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}
fs_initcall(dma_debug_do_init);