// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>
#include <xen/swiotlb-xen.h>

#include "dma.h"
#include "mm.h"
struct arm_dma_alloc_args {
	struct device *dev;
	size_t size;
	gfp_t gfp;
	pgprot_t prot;
	const void *caller;
	bool want_vaddr;
	int coherent_flag;
};

struct arm_dma_free_args {
	struct device *dev;
	size_t size;
	void *cpu_addr;
	struct page *page;
	bool want_vaddr;
};

#define NORMAL	    0
#define COHERENT    1

struct arm_dma_allocator {
	void *(*alloc)(struct arm_dma_alloc_args *args,
		       struct page **ret_page);
	void (*free)(struct arm_dma_free_args *args);
};

struct arm_dma_buffer {
	struct list_head list;
	void *virt;
	struct arm_dma_allocator *allocator;
};

static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);
static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
	struct arm_dma_buffer *buf, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
	list_for_each_entry(buf, &arm_dma_bufs, list) {
		if (buf->virt == virt) {
			list_del(&buf->list);
			found = buf;
			break;
		}
	}
	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	return found;
}
/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);
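
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * the ownership protocol described above as a driver typically sees it,
 * assuming a hypothetical buffer 'buf' of 'len' bytes:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... the device owns the buffer while the transfer runs ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 *	... only now may the CPU touch 'buf' again ...
 */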
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}
static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}
const struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.map_resource		= dma_direct_map_resource,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.dma_supported		= arm_dma_supported,
};
EXPORT_SYMBOL(arm_dma_ops);
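
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * drivers never call arm_dma_ops directly; they go through the generic
 * DMA API, which dispatches to the ops installed for the device, e.g.:
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE,
 *				      DMA_FROM_DEVICE);
 *	...
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
 *
 * ends up in .map_page/.unmap_page above when arm_dma_ops is in use.
 */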
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, unsigned long attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs);

const struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_coherent_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.map_resource		= dma_direct_map_resource,
	.dma_supported		= arm_dma_supported,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
	unsigned long max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
		if (warn)
			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
				 mask,
				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
				 max_dma_pfn + 1);
		return 0;
	}

	return 1;
}
static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)DMA_BIT_MASK(32);

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if (!__dma_supported(dev, mask, true))
			return 0;
	}

	return mask;
}
static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			if (coherent_flag != COHERENT)
				dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		if (coherent_flag != COHERENT)
			outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		if (coherent_flag != COHERENT) {
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	}
}
/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
				       gfp_t gfp, int coherent_flag)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size, coherent_flag);

	return page;
}
/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller, bool want_vaddr);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	/*
	 * DMA allocation can be mapped to user space, so lets
	 * set VM_USERMAP flags too.
	 */
	return dma_common_contiguous_remap(page, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP,
			prot, caller);
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	dma_common_free_remap(cpu_addr, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
}
#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool __ro_after_init;

static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;
	/*
	 * The atomic pool is only used for non-coherent allocations
	 * so we must pass NORMAL for coherent_flag.
	 */
	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
				      &page, atomic_pool_init, true, NORMAL,
				      GFP_KERNEL);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				gen_pool_first_fit_order_align,
				NULL);
		pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
		       atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);
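
/*
 * Example (illustrative, not part of the original file): the atomic pool
 * size used above can be overridden on the kernel command line, e.g.
 *
 *	coherent_pool=2M
 *
 * which early_coherent_pool() parses with memparse(); size suffixes such
 * as K and M are therefore accepted.
 */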
struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}
static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;
	/*
	 * __alloc_remap_buffer is only called when the device is
	 * non-coherent
	 */
	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

 out:
	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size, coherent_flag);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (want_vaddr) {
		if (PageHighMem(page))
			__dma_free_remap(cpu_addr, size);
		else
			__dma_remap(page, size, PAGE_KERNEL);
	}
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
			pgprot_writecombine(prot) :
			pgprot_dmacoherent(prot);
	return prot;
}

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	/* __alloc_simple_buffer is only called when the device is coherent */
	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}
static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
				    struct page **ret_page)
{
	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
				     ret_page);
}

static void simple_allocator_free(struct arm_dma_free_args *args)
{
	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator simple_allocator = {
	.alloc = simple_allocator_alloc,
	.free = simple_allocator_free,
};

static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
				 struct page **ret_page)
{
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
				       ret_page, args->caller,
				       args->want_vaddr, args->coherent_flag,
				       args->gfp);
}

static void cma_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
			       args->size, args->want_vaddr);
}

static struct arm_dma_allocator cma_allocator = {
	.alloc = cma_allocator_alloc,
	.free = cma_allocator_free,
};

static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
				  struct page **ret_page)
{
	return __alloc_from_pool(args->size, ret_page);
}

static void pool_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_pool(args->cpu_addr, args->size);
}

static struct arm_dma_allocator pool_allocator = {
	.alloc = pool_allocator_alloc,
	.free = pool_allocator_free,
};

static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
				   struct page **ret_page)
{
	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
				    args->prot, ret_page, args->caller,
				    args->want_vaddr);
}

static void remap_allocator_free(struct arm_dma_free_args *args)
{
	if (args->want_vaddr)
		__dma_free_remap(args->cpu_addr, args->size);

	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator remap_allocator = {
	.alloc = remap_allocator_alloc,
	.free = remap_allocator_free,
};
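
/*
 * Note (summary added for clarity, not part of the original file):
 * __dma_alloc() below picks one of the allocators above: the CMA allocator
 * when blocking is allowed and a CMA area exists, the simple allocator for
 * coherent devices, the remap allocator when blocking is allowed, and the
 * atomic pool allocator otherwise.
 */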
702 static void *__dma_alloc(struct device
*dev
, size_t size
, dma_addr_t
*handle
,
703 gfp_t gfp
, pgprot_t prot
, bool is_coherent
,
704 unsigned long attrs
, const void *caller
)
706 u64 mask
= get_coherent_dma_mask(dev
);
707 struct page
*page
= NULL
;
709 bool allowblock
, cma
;
710 struct arm_dma_buffer
*buf
;
711 struct arm_dma_alloc_args args
= {
713 .size
= PAGE_ALIGN(size
),
717 .want_vaddr
= ((attrs
& DMA_ATTR_NO_KERNEL_MAPPING
) == 0),
718 .coherent_flag
= is_coherent
? COHERENT
: NORMAL
,
721 #ifdef CONFIG_DMA_API_DEBUG
722 u64 limit
= (mask
+ 1) & ~mask
;
723 if (limit
&& size
>= limit
) {
724 dev_warn(dev
, "coherent allocation too big (requested %#x mask %#llx)\n",
733 buf
= kzalloc(sizeof(*buf
),
734 gfp
& ~(__GFP_DMA
| __GFP_DMA32
| __GFP_HIGHMEM
));
738 if (mask
< 0xffffffffULL
)
742 * Following is a work-around (a.k.a. hack) to prevent pages
743 * with __GFP_COMP being passed to split_page() which cannot
744 * handle them. The real problem is that this flag probably
745 * should be 0 on ARM as it is not supported on this
746 * platform; see CONFIG_HUGETLBFS.
748 gfp
&= ~(__GFP_COMP
);
751 *handle
= DMA_MAPPING_ERROR
;
752 allowblock
= gfpflags_allow_blocking(gfp
);
753 cma
= allowblock
? dev_get_cma_area(dev
) : false;
756 buf
->allocator
= &cma_allocator
;
757 else if (is_coherent
)
758 buf
->allocator
= &simple_allocator
;
760 buf
->allocator
= &remap_allocator
;
762 buf
->allocator
= &pool_allocator
;
764 addr
= buf
->allocator
->alloc(&args
, &page
);
769 *handle
= pfn_to_dma(dev
, page_to_pfn(page
));
770 buf
->virt
= args
.want_vaddr
? addr
: page
;
772 spin_lock_irqsave(&arm_dma_bufs_lock
, flags
);
773 list_add(&buf
->list
, &arm_dma_bufs
);
774 spin_unlock_irqrestore(&arm_dma_bufs_lock
, flags
);
779 return args
.want_vaddr
? addr
: page
;
783 * Allocate DMA-coherent memory space and return both the kernel remapped
784 * virtual and bus address for that space.
786 void *arm_dma_alloc(struct device
*dev
, size_t size
, dma_addr_t
*handle
,
787 gfp_t gfp
, unsigned long attrs
)
789 pgprot_t prot
= __get_dma_pgprot(attrs
, PAGE_KERNEL
);
791 return __dma_alloc(dev
, size
, handle
, gfp
, prot
, false,
792 attrs
, __builtin_return_address(0));
795 static void *arm_coherent_dma_alloc(struct device
*dev
, size_t size
,
796 dma_addr_t
*handle
, gfp_t gfp
, unsigned long attrs
)
798 return __dma_alloc(dev
, size
, handle
, gfp
, PAGE_KERNEL
, true,
799 attrs
, __builtin_return_address(0));
802 static int __arm_dma_mmap(struct device
*dev
, struct vm_area_struct
*vma
,
803 void *cpu_addr
, dma_addr_t dma_addr
, size_t size
,
807 unsigned long nr_vma_pages
= vma_pages(vma
);
808 unsigned long nr_pages
= PAGE_ALIGN(size
) >> PAGE_SHIFT
;
809 unsigned long pfn
= dma_to_pfn(dev
, dma_addr
);
810 unsigned long off
= vma
->vm_pgoff
;
812 if (dma_mmap_from_dev_coherent(dev
, vma
, cpu_addr
, size
, &ret
))
815 if (off
< nr_pages
&& nr_vma_pages
<= (nr_pages
- off
)) {
816 ret
= remap_pfn_range(vma
, vma
->vm_start
,
818 vma
->vm_end
- vma
->vm_start
,
826 * Create userspace mapping for the DMA-coherent memory.
828 static int arm_coherent_dma_mmap(struct device
*dev
, struct vm_area_struct
*vma
,
829 void *cpu_addr
, dma_addr_t dma_addr
, size_t size
,
832 return __arm_dma_mmap(dev
, vma
, cpu_addr
, dma_addr
, size
, attrs
);
835 int arm_dma_mmap(struct device
*dev
, struct vm_area_struct
*vma
,
836 void *cpu_addr
, dma_addr_t dma_addr
, size_t size
,
839 vma
->vm_page_prot
= __get_dma_pgprot(attrs
, vma
->vm_page_prot
);
840 return __arm_dma_mmap(dev
, vma
, cpu_addr
, dma_addr
, size
, attrs
);
844 * Free a buffer as defined by the above mapping.
846 static void __arm_dma_free(struct device
*dev
, size_t size
, void *cpu_addr
,
847 dma_addr_t handle
, unsigned long attrs
,
850 struct page
*page
= pfn_to_page(dma_to_pfn(dev
, handle
));
851 struct arm_dma_buffer
*buf
;
852 struct arm_dma_free_args args
= {
854 .size
= PAGE_ALIGN(size
),
855 .cpu_addr
= cpu_addr
,
857 .want_vaddr
= ((attrs
& DMA_ATTR_NO_KERNEL_MAPPING
) == 0),
860 buf
= arm_dma_buffer_find(cpu_addr
);
861 if (WARN(!buf
, "Freeing invalid buffer %p\n", cpu_addr
))
864 buf
->allocator
->free(&args
);
868 void arm_dma_free(struct device
*dev
, size_t size
, void *cpu_addr
,
869 dma_addr_t handle
, unsigned long attrs
)
871 __arm_dma_free(dev
, size
, cpu_addr
, handle
, attrs
, false);
874 static void arm_coherent_dma_free(struct device
*dev
, size_t size
, void *cpu_addr
,
875 dma_addr_t handle
, unsigned long attrs
)
877 __arm_dma_free(dev
, size
, cpu_addr
, handle
, attrs
, true);
/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scattertable.  This presents a couple of problems:
 * 1. Not all memory allocated via the coherent DMA APIs is backed by
 *    a struct page
 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
 *    as we will try to flush the memory through a different alias to that
 *    actually being used (and the flushes are redundant.)
 */
891 int arm_dma_get_sgtable(struct device
*dev
, struct sg_table
*sgt
,
892 void *cpu_addr
, dma_addr_t handle
, size_t size
,
895 unsigned long pfn
= dma_to_pfn(dev
, handle
);
899 /* If the PFN is not valid, we do not have a struct page */
903 page
= pfn_to_page(pfn
);
905 ret
= sg_alloc_table(sgt
, 1, GFP_KERNEL
);
909 sg_set_page(sgt
->sgl
, page
, PAGE_ALIGN(size
), 0);
913 static void dma_cache_maint_page(struct page
*page
, unsigned long offset
,
914 size_t size
, enum dma_data_direction dir
,
915 void (*op
)(const void *, size_t, int))
920 pfn
= page_to_pfn(page
) + offset
/ PAGE_SIZE
;
924 * A single sg entry may refer to multiple physically contiguous
925 * pages. But we still need to process highmem pages individually.
926 * If highmem is not configured then the bulk of this loop gets
933 page
= pfn_to_page(pfn
);
935 if (PageHighMem(page
)) {
936 if (len
+ offset
> PAGE_SIZE
)
937 len
= PAGE_SIZE
- offset
;
939 if (cache_is_vipt_nonaliasing()) {
940 vaddr
= kmap_atomic(page
);
941 op(vaddr
+ offset
, len
, dir
);
942 kunmap_atomic(vaddr
);
944 vaddr
= kmap_high_get(page
);
946 op(vaddr
+ offset
, len
, dir
);
951 vaddr
= page_address(page
) + offset
;
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
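
/*
 * Illustrative sketch (added for clarity, not part of the original file)
 * of the dma_sync_* usage referred to above, for a streaming mapping the
 * CPU needs to inspect between device transfers:
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... CPU reads the buffer ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */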
966 static void __dma_page_cpu_to_dev(struct page
*page
, unsigned long off
,
967 size_t size
, enum dma_data_direction dir
)
971 dma_cache_maint_page(page
, off
, size
, dir
, dmac_map_area
);
973 paddr
= page_to_phys(page
) + off
;
974 if (dir
== DMA_FROM_DEVICE
) {
975 outer_inv_range(paddr
, paddr
+ size
);
977 outer_clean_range(paddr
, paddr
+ size
);
979 /* FIXME: non-speculating: flush on bidirectional mappings? */
982 static void __dma_page_dev_to_cpu(struct page
*page
, unsigned long off
,
983 size_t size
, enum dma_data_direction dir
)
985 phys_addr_t paddr
= page_to_phys(page
) + off
;
987 /* FIXME: non-speculating: not required */
988 /* in any case, don't bother invalidating if DMA to device */
989 if (dir
!= DMA_TO_DEVICE
) {
990 outer_inv_range(paddr
, paddr
+ size
);
992 dma_cache_maint_page(page
, off
, size
, dir
, dmac_unmap_area
);
996 * Mark the D-cache clean for these pages to avoid extra flushing.
998 if (dir
!= DMA_TO_DEVICE
&& size
>= PAGE_SIZE
) {
1002 pfn
= page_to_pfn(page
) + off
/ PAGE_SIZE
;
1006 left
-= PAGE_SIZE
- off
;
1008 while (left
>= PAGE_SIZE
) {
1009 page
= pfn_to_page(pfn
++);
1010 set_bit(PG_dcache_clean
, &page
->flags
);
/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
1032 int arm_dma_map_sg(struct device
*dev
, struct scatterlist
*sg
, int nents
,
1033 enum dma_data_direction dir
, unsigned long attrs
)
1035 const struct dma_map_ops
*ops
= get_dma_ops(dev
);
1036 struct scatterlist
*s
;
1039 for_each_sg(sg
, s
, nents
, i
) {
1040 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1041 s
->dma_length
= s
->length
;
1043 s
->dma_address
= ops
->map_page(dev
, sg_page(s
), s
->offset
,
1044 s
->length
, dir
, attrs
);
1045 if (dma_mapping_error(dev
, s
->dma_address
))
1051 for_each_sg(sg
, s
, i
, j
)
1052 ops
->unmap_page(dev
, sg_dma_address(s
), sg_dma_len(s
), dir
, attrs
);
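
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * a typical driver-side use of the scatter-gather mapping above, assuming
 * an already initialised 'struct scatterlist sgl[nents]':
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	struct scatterlist *s;
 *	int i;
 *
 *	for_each_sg(sgl, s, count, i)
 *		program_hw_descriptor(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 * program_hw_descriptor() is a hypothetical placeholder for
 * device-specific code.
 */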
1057 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1058 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1059 * @sg: list of buffers
1060 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1061 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1063 * Unmap a set of streaming mode DMA translations. Again, CPU access
1064 * rules concerning calls here are the same as for dma_unmap_single().
1066 void arm_dma_unmap_sg(struct device
*dev
, struct scatterlist
*sg
, int nents
,
1067 enum dma_data_direction dir
, unsigned long attrs
)
1069 const struct dma_map_ops
*ops
= get_dma_ops(dev
);
1070 struct scatterlist
*s
;
1074 for_each_sg(sg
, s
, nents
, i
)
1075 ops
->unmap_page(dev
, sg_dma_address(s
), sg_dma_len(s
), dir
, attrs
);
1079 * arm_dma_sync_sg_for_cpu
1080 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1081 * @sg: list of buffers
1082 * @nents: number of buffers to map (returned from dma_map_sg)
1083 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1085 void arm_dma_sync_sg_for_cpu(struct device
*dev
, struct scatterlist
*sg
,
1086 int nents
, enum dma_data_direction dir
)
1088 const struct dma_map_ops
*ops
= get_dma_ops(dev
);
1089 struct scatterlist
*s
;
1092 for_each_sg(sg
, s
, nents
, i
)
1093 ops
->sync_single_for_cpu(dev
, sg_dma_address(s
), s
->length
,
1098 * arm_dma_sync_sg_for_device
1099 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1100 * @sg: list of buffers
1101 * @nents: number of buffers to map (returned from dma_map_sg)
1102 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1104 void arm_dma_sync_sg_for_device(struct device
*dev
, struct scatterlist
*sg
,
1105 int nents
, enum dma_data_direction dir
)
1107 const struct dma_map_ops
*ops
= get_dma_ops(dev
);
1108 struct scatterlist
*s
;
1111 for_each_sg(sg
, s
, nents
, i
)
1112 ops
->sync_single_for_device(dev
, sg_dma_address(s
), s
->length
,
1117 * Return whether the given device DMA address mask can be supported
1118 * properly. For example, if your device can only drive the low 24-bits
1119 * during bus mastering, then you would pass 0x00ffffff as the mask
1122 int arm_dma_supported(struct device
*dev
, u64 mask
)
1124 return __dma_supported(dev
, mask
, false);
1127 static const struct dma_map_ops
*arm_get_dma_map_ops(bool coherent
)
1130 * When CONFIG_ARM_LPAE is set, physical address can extend above
1131 * 32-bits, which then can't be addressed by devices that only support
1133 * Use the generic dma-direct / swiotlb ops code in that case, as that
1134 * handles bounce buffering for us.
1136 * Note: this checks CONFIG_ARM_LPAE instead of CONFIG_SWIOTLB as the
1137 * latter is also selected by the Xen code, but that code for now relies
1138 * on non-NULL dev_dma_ops. To be cleaned up later.
1140 if (IS_ENABLED(CONFIG_ARM_LPAE
))
1142 return coherent
? &arm_coherent_dma_ops
: &arm_dma_ops
;
1145 #ifdef CONFIG_ARM_DMA_USE_IOMMU
1147 static int __dma_info_to_prot(enum dma_data_direction dir
, unsigned long attrs
)
1151 if (attrs
& DMA_ATTR_PRIVILEGED
)
1155 case DMA_BIDIRECTIONAL
:
1156 return prot
| IOMMU_READ
| IOMMU_WRITE
;
1158 return prot
| IOMMU_READ
;
1159 case DMA_FROM_DEVICE
:
1160 return prot
| IOMMU_WRITE
;
1168 static int extend_iommu_mapping(struct dma_iommu_mapping
*mapping
);
1170 static inline dma_addr_t
__alloc_iova(struct dma_iommu_mapping
*mapping
,
1173 unsigned int order
= get_order(size
);
1174 unsigned int align
= 0;
1175 unsigned int count
, start
;
1176 size_t mapping_size
= mapping
->bits
<< PAGE_SHIFT
;
1177 unsigned long flags
;
1181 if (order
> CONFIG_ARM_DMA_IOMMU_ALIGNMENT
)
1182 order
= CONFIG_ARM_DMA_IOMMU_ALIGNMENT
;
1184 count
= PAGE_ALIGN(size
) >> PAGE_SHIFT
;
1185 align
= (1 << order
) - 1;
1187 spin_lock_irqsave(&mapping
->lock
, flags
);
1188 for (i
= 0; i
< mapping
->nr_bitmaps
; i
++) {
1189 start
= bitmap_find_next_zero_area(mapping
->bitmaps
[i
],
1190 mapping
->bits
, 0, count
, align
);
1192 if (start
> mapping
->bits
)
1195 bitmap_set(mapping
->bitmaps
[i
], start
, count
);
1200 * No unused range found. Try to extend the existing mapping
1201 * and perform a second attempt to reserve an IO virtual
1202 * address range of size bytes.
1204 if (i
== mapping
->nr_bitmaps
) {
1205 if (extend_iommu_mapping(mapping
)) {
1206 spin_unlock_irqrestore(&mapping
->lock
, flags
);
1207 return DMA_MAPPING_ERROR
;
1210 start
= bitmap_find_next_zero_area(mapping
->bitmaps
[i
],
1211 mapping
->bits
, 0, count
, align
);
1213 if (start
> mapping
->bits
) {
1214 spin_unlock_irqrestore(&mapping
->lock
, flags
);
1215 return DMA_MAPPING_ERROR
;
1218 bitmap_set(mapping
->bitmaps
[i
], start
, count
);
1220 spin_unlock_irqrestore(&mapping
->lock
, flags
);
1222 iova
= mapping
->base
+ (mapping_size
* i
);
1223 iova
+= start
<< PAGE_SHIFT
;
1228 static inline void __free_iova(struct dma_iommu_mapping
*mapping
,
1229 dma_addr_t addr
, size_t size
)
1231 unsigned int start
, count
;
1232 size_t mapping_size
= mapping
->bits
<< PAGE_SHIFT
;
1233 unsigned long flags
;
1234 dma_addr_t bitmap_base
;
1240 bitmap_index
= (u32
) (addr
- mapping
->base
) / (u32
) mapping_size
;
1241 BUG_ON(addr
< mapping
->base
|| bitmap_index
> mapping
->extensions
);
1243 bitmap_base
= mapping
->base
+ mapping_size
* bitmap_index
;
1245 start
= (addr
- bitmap_base
) >> PAGE_SHIFT
;
1247 if (addr
+ size
> bitmap_base
+ mapping_size
) {
1249 * The address range to be freed reaches into the iova
1250 * range of the next bitmap. This should not happen as
1251 * we don't allow this in __alloc_iova (at the
1256 count
= size
>> PAGE_SHIFT
;
1258 spin_lock_irqsave(&mapping
->lock
, flags
);
1259 bitmap_clear(mapping
->bitmaps
[bitmap_index
], start
, count
);
1260 spin_unlock_irqrestore(&mapping
->lock
, flags
);
1263 /* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
1264 static const int iommu_order_array
[] = { 9, 8, 4, 0 };
1266 static struct page
**__iommu_alloc_buffer(struct device
*dev
, size_t size
,
1267 gfp_t gfp
, unsigned long attrs
,
1270 struct page
**pages
;
1271 int count
= size
>> PAGE_SHIFT
;
1272 int array_size
= count
* sizeof(struct page
*);
1276 if (array_size
<= PAGE_SIZE
)
1277 pages
= kzalloc(array_size
, GFP_KERNEL
);
1279 pages
= vzalloc(array_size
);
1283 if (attrs
& DMA_ATTR_FORCE_CONTIGUOUS
)
1285 unsigned long order
= get_order(size
);
1288 page
= dma_alloc_from_contiguous(dev
, count
, order
,
1289 gfp
& __GFP_NOWARN
);
1293 __dma_clear_buffer(page
, size
, coherent_flag
);
1295 for (i
= 0; i
< count
; i
++)
1296 pages
[i
] = page
+ i
;
1301 /* Go straight to 4K chunks if caller says it's OK. */
1302 if (attrs
& DMA_ATTR_ALLOC_SINGLE_PAGES
)
1303 order_idx
= ARRAY_SIZE(iommu_order_array
) - 1;
1306 * IOMMU can map any pages, so himem can also be used here
1308 gfp
|= __GFP_NOWARN
| __GFP_HIGHMEM
;
1313 order
= iommu_order_array
[order_idx
];
1315 /* Drop down when we get small */
1316 if (__fls(count
) < order
) {
1322 /* See if it's easy to allocate a high-order chunk */
1323 pages
[i
] = alloc_pages(gfp
| __GFP_NORETRY
, order
);
1325 /* Go down a notch at first sign of pressure */
1331 pages
[i
] = alloc_pages(gfp
, 0);
1337 split_page(pages
[i
], order
);
1340 pages
[i
+ j
] = pages
[i
] + j
;
1343 __dma_clear_buffer(pages
[i
], PAGE_SIZE
<< order
, coherent_flag
);
1345 count
-= 1 << order
;
1352 __free_pages(pages
[i
], 0);
1357 static int __iommu_free_buffer(struct device
*dev
, struct page
**pages
,
1358 size_t size
, unsigned long attrs
)
1360 int count
= size
>> PAGE_SHIFT
;
1363 if (attrs
& DMA_ATTR_FORCE_CONTIGUOUS
) {
1364 dma_release_from_contiguous(dev
, pages
[0], count
);
1366 for (i
= 0; i
< count
; i
++)
1368 __free_pages(pages
[i
], 0);
1376 * Create a CPU mapping for a specified pages
1379 __iommu_alloc_remap(struct page
**pages
, size_t size
, gfp_t gfp
, pgprot_t prot
,
1382 return dma_common_pages_remap(pages
, size
,
1383 VM_ARM_DMA_CONSISTENT
| VM_USERMAP
, prot
, caller
);
1387 * Create a mapping in device IO address space for specified pages
1390 __iommu_create_mapping(struct device
*dev
, struct page
**pages
, size_t size
,
1391 unsigned long attrs
)
1393 struct dma_iommu_mapping
*mapping
= to_dma_iommu_mapping(dev
);
1394 unsigned int count
= PAGE_ALIGN(size
) >> PAGE_SHIFT
;
1395 dma_addr_t dma_addr
, iova
;
1398 dma_addr
= __alloc_iova(mapping
, size
);
1399 if (dma_addr
== DMA_MAPPING_ERROR
)
1403 for (i
= 0; i
< count
; ) {
1406 unsigned int next_pfn
= page_to_pfn(pages
[i
]) + 1;
1407 phys_addr_t phys
= page_to_phys(pages
[i
]);
1408 unsigned int len
, j
;
1410 for (j
= i
+ 1; j
< count
; j
++, next_pfn
++)
1411 if (page_to_pfn(pages
[j
]) != next_pfn
)
1414 len
= (j
- i
) << PAGE_SHIFT
;
1415 ret
= iommu_map(mapping
->domain
, iova
, phys
, len
,
1416 __dma_info_to_prot(DMA_BIDIRECTIONAL
, attrs
));
1424 iommu_unmap(mapping
->domain
, dma_addr
, iova
-dma_addr
);
1425 __free_iova(mapping
, dma_addr
, size
);
1426 return DMA_MAPPING_ERROR
;
1429 static int __iommu_remove_mapping(struct device
*dev
, dma_addr_t iova
, size_t size
)
1431 struct dma_iommu_mapping
*mapping
= to_dma_iommu_mapping(dev
);
1434 * add optional in-page offset from iova to size and align
1435 * result to page size
1437 size
= PAGE_ALIGN((iova
& ~PAGE_MASK
) + size
);
1440 iommu_unmap(mapping
->domain
, iova
, size
);
1441 __free_iova(mapping
, iova
, size
);
1445 static struct page
**__atomic_get_pages(void *addr
)
1450 phys
= gen_pool_virt_to_phys(atomic_pool
, (unsigned long)addr
);
1451 page
= phys_to_page(phys
);
1453 return (struct page
**)page
;
1456 static struct page
**__iommu_get_pages(void *cpu_addr
, unsigned long attrs
)
1458 struct vm_struct
*area
;
1460 if (__in_atomic_pool(cpu_addr
, PAGE_SIZE
))
1461 return __atomic_get_pages(cpu_addr
);
1463 if (attrs
& DMA_ATTR_NO_KERNEL_MAPPING
)
1466 area
= find_vm_area(cpu_addr
);
1467 if (area
&& (area
->flags
& VM_ARM_DMA_CONSISTENT
))
1472 static void *__iommu_alloc_simple(struct device
*dev
, size_t size
, gfp_t gfp
,
1473 dma_addr_t
*handle
, int coherent_flag
,
1474 unsigned long attrs
)
1479 if (coherent_flag
== COHERENT
)
1480 addr
= __alloc_simple_buffer(dev
, size
, gfp
, &page
);
1482 addr
= __alloc_from_pool(size
, &page
);
1486 *handle
= __iommu_create_mapping(dev
, &page
, size
, attrs
);
1487 if (*handle
== DMA_MAPPING_ERROR
)
1493 __free_from_pool(addr
, size
);
1497 static void __iommu_free_atomic(struct device
*dev
, void *cpu_addr
,
1498 dma_addr_t handle
, size_t size
, int coherent_flag
)
1500 __iommu_remove_mapping(dev
, handle
, size
);
1501 if (coherent_flag
== COHERENT
)
1502 __dma_free_buffer(virt_to_page(cpu_addr
), size
);
1504 __free_from_pool(cpu_addr
, size
);
1507 static void *__arm_iommu_alloc_attrs(struct device
*dev
, size_t size
,
1508 dma_addr_t
*handle
, gfp_t gfp
, unsigned long attrs
,
1511 pgprot_t prot
= __get_dma_pgprot(attrs
, PAGE_KERNEL
);
1512 struct page
**pages
;
1515 *handle
= DMA_MAPPING_ERROR
;
1516 size
= PAGE_ALIGN(size
);
1518 if (coherent_flag
== COHERENT
|| !gfpflags_allow_blocking(gfp
))
1519 return __iommu_alloc_simple(dev
, size
, gfp
, handle
,
1520 coherent_flag
, attrs
);
1523 * Following is a work-around (a.k.a. hack) to prevent pages
1524 * with __GFP_COMP being passed to split_page() which cannot
1525 * handle them. The real problem is that this flag probably
1526 * should be 0 on ARM as it is not supported on this
1527 * platform; see CONFIG_HUGETLBFS.
1529 gfp
&= ~(__GFP_COMP
);
1531 pages
= __iommu_alloc_buffer(dev
, size
, gfp
, attrs
, coherent_flag
);
1535 *handle
= __iommu_create_mapping(dev
, pages
, size
, attrs
);
1536 if (*handle
== DMA_MAPPING_ERROR
)
1539 if (attrs
& DMA_ATTR_NO_KERNEL_MAPPING
)
1542 addr
= __iommu_alloc_remap(pages
, size
, gfp
, prot
,
1543 __builtin_return_address(0));
1550 __iommu_remove_mapping(dev
, *handle
, size
);
1552 __iommu_free_buffer(dev
, pages
, size
, attrs
);
1556 static void *arm_iommu_alloc_attrs(struct device
*dev
, size_t size
,
1557 dma_addr_t
*handle
, gfp_t gfp
, unsigned long attrs
)
1559 return __arm_iommu_alloc_attrs(dev
, size
, handle
, gfp
, attrs
, NORMAL
);
1562 static void *arm_coherent_iommu_alloc_attrs(struct device
*dev
, size_t size
,
1563 dma_addr_t
*handle
, gfp_t gfp
, unsigned long attrs
)
1565 return __arm_iommu_alloc_attrs(dev
, size
, handle
, gfp
, attrs
, COHERENT
);
1568 static int __arm_iommu_mmap_attrs(struct device
*dev
, struct vm_area_struct
*vma
,
1569 void *cpu_addr
, dma_addr_t dma_addr
, size_t size
,
1570 unsigned long attrs
)
1572 struct page
**pages
= __iommu_get_pages(cpu_addr
, attrs
);
1573 unsigned long nr_pages
= PAGE_ALIGN(size
) >> PAGE_SHIFT
;
1579 if (vma
->vm_pgoff
>= nr_pages
)
1582 err
= vm_map_pages(vma
, pages
, nr_pages
);
1584 pr_err("Remapping memory failed: %d\n", err
);
1588 static int arm_iommu_mmap_attrs(struct device
*dev
,
1589 struct vm_area_struct
*vma
, void *cpu_addr
,
1590 dma_addr_t dma_addr
, size_t size
, unsigned long attrs
)
1592 vma
->vm_page_prot
= __get_dma_pgprot(attrs
, vma
->vm_page_prot
);
1594 return __arm_iommu_mmap_attrs(dev
, vma
, cpu_addr
, dma_addr
, size
, attrs
);
1597 static int arm_coherent_iommu_mmap_attrs(struct device
*dev
,
1598 struct vm_area_struct
*vma
, void *cpu_addr
,
1599 dma_addr_t dma_addr
, size_t size
, unsigned long attrs
)
1601 return __arm_iommu_mmap_attrs(dev
, vma
, cpu_addr
, dma_addr
, size
, attrs
);
1605 * free a page as defined by the above mapping.
1606 * Must not be called with IRQs disabled.
1608 void __arm_iommu_free_attrs(struct device
*dev
, size_t size
, void *cpu_addr
,
1609 dma_addr_t handle
, unsigned long attrs
, int coherent_flag
)
1611 struct page
**pages
;
1612 size
= PAGE_ALIGN(size
);
1614 if (coherent_flag
== COHERENT
|| __in_atomic_pool(cpu_addr
, size
)) {
1615 __iommu_free_atomic(dev
, cpu_addr
, handle
, size
, coherent_flag
);
1619 pages
= __iommu_get_pages(cpu_addr
, attrs
);
1621 WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr
);
1625 if ((attrs
& DMA_ATTR_NO_KERNEL_MAPPING
) == 0) {
1626 dma_common_free_remap(cpu_addr
, size
,
1627 VM_ARM_DMA_CONSISTENT
| VM_USERMAP
);
1630 __iommu_remove_mapping(dev
, handle
, size
);
1631 __iommu_free_buffer(dev
, pages
, size
, attrs
);
1634 void arm_iommu_free_attrs(struct device
*dev
, size_t size
,
1635 void *cpu_addr
, dma_addr_t handle
, unsigned long attrs
)
1637 __arm_iommu_free_attrs(dev
, size
, cpu_addr
, handle
, attrs
, NORMAL
);
1640 void arm_coherent_iommu_free_attrs(struct device
*dev
, size_t size
,
1641 void *cpu_addr
, dma_addr_t handle
, unsigned long attrs
)
1643 __arm_iommu_free_attrs(dev
, size
, cpu_addr
, handle
, attrs
, COHERENT
);
1646 static int arm_iommu_get_sgtable(struct device
*dev
, struct sg_table
*sgt
,
1647 void *cpu_addr
, dma_addr_t dma_addr
,
1648 size_t size
, unsigned long attrs
)
1650 unsigned int count
= PAGE_ALIGN(size
) >> PAGE_SHIFT
;
1651 struct page
**pages
= __iommu_get_pages(cpu_addr
, attrs
);
1656 return sg_alloc_table_from_pages(sgt
, pages
, count
, 0, size
,
1661 * Map a part of the scatter-gather list into contiguous io address space
1663 static int __map_sg_chunk(struct device
*dev
, struct scatterlist
*sg
,
1664 size_t size
, dma_addr_t
*handle
,
1665 enum dma_data_direction dir
, unsigned long attrs
,
1668 struct dma_iommu_mapping
*mapping
= to_dma_iommu_mapping(dev
);
1669 dma_addr_t iova
, iova_base
;
1672 struct scatterlist
*s
;
1675 size
= PAGE_ALIGN(size
);
1676 *handle
= DMA_MAPPING_ERROR
;
1678 iova_base
= iova
= __alloc_iova(mapping
, size
);
1679 if (iova
== DMA_MAPPING_ERROR
)
1682 for (count
= 0, s
= sg
; count
< (size
>> PAGE_SHIFT
); s
= sg_next(s
)) {
1683 phys_addr_t phys
= page_to_phys(sg_page(s
));
1684 unsigned int len
= PAGE_ALIGN(s
->offset
+ s
->length
);
1686 if (!is_coherent
&& (attrs
& DMA_ATTR_SKIP_CPU_SYNC
) == 0)
1687 __dma_page_cpu_to_dev(sg_page(s
), s
->offset
, s
->length
, dir
);
1689 prot
= __dma_info_to_prot(dir
, attrs
);
1691 ret
= iommu_map(mapping
->domain
, iova
, phys
, len
, prot
);
1694 count
+= len
>> PAGE_SHIFT
;
1697 *handle
= iova_base
;
1701 iommu_unmap(mapping
->domain
, iova_base
, count
* PAGE_SIZE
);
1702 __free_iova(mapping
, iova_base
, size
);
1706 static int __iommu_map_sg(struct device
*dev
, struct scatterlist
*sg
, int nents
,
1707 enum dma_data_direction dir
, unsigned long attrs
,
1710 struct scatterlist
*s
= sg
, *dma
= sg
, *start
= sg
;
1712 unsigned int offset
= s
->offset
;
1713 unsigned int size
= s
->offset
+ s
->length
;
1714 unsigned int max
= dma_get_max_seg_size(dev
);
1716 for (i
= 1; i
< nents
; i
++) {
1719 s
->dma_address
= DMA_MAPPING_ERROR
;
1722 if (s
->offset
|| (size
& ~PAGE_MASK
) || size
+ s
->length
> max
) {
1723 if (__map_sg_chunk(dev
, start
, size
, &dma
->dma_address
,
1724 dir
, attrs
, is_coherent
) < 0)
1727 dma
->dma_address
+= offset
;
1728 dma
->dma_length
= size
- offset
;
1730 size
= offset
= s
->offset
;
1737 if (__map_sg_chunk(dev
, start
, size
, &dma
->dma_address
, dir
, attrs
,
1741 dma
->dma_address
+= offset
;
1742 dma
->dma_length
= size
- offset
;
1747 for_each_sg(sg
, s
, count
, i
)
1748 __iommu_remove_mapping(dev
, sg_dma_address(s
), sg_dma_len(s
));
1753 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1754 * @dev: valid struct device pointer
1755 * @sg: list of buffers
1756 * @nents: number of buffers to map
1757 * @dir: DMA transfer direction
1759 * Map a set of i/o coherent buffers described by scatterlist in streaming
1760 * mode for DMA. The scatter gather list elements are merged together (if
1761 * possible) and tagged with the appropriate dma address and length. They are
1762 * obtained via sg_dma_{address,length}.
1764 int arm_coherent_iommu_map_sg(struct device
*dev
, struct scatterlist
*sg
,
1765 int nents
, enum dma_data_direction dir
, unsigned long attrs
)
1767 return __iommu_map_sg(dev
, sg
, nents
, dir
, attrs
, true);
1771 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1772 * @dev: valid struct device pointer
1773 * @sg: list of buffers
1774 * @nents: number of buffers to map
1775 * @dir: DMA transfer direction
1777 * Map a set of buffers described by scatterlist in streaming mode for DMA.
1778 * The scatter gather list elements are merged together (if possible) and
1779 * tagged with the appropriate dma address and length. They are obtained via
1780 * sg_dma_{address,length}.
1782 int arm_iommu_map_sg(struct device
*dev
, struct scatterlist
*sg
,
1783 int nents
, enum dma_data_direction dir
, unsigned long attrs
)
1785 return __iommu_map_sg(dev
, sg
, nents
, dir
, attrs
, false);
1788 static void __iommu_unmap_sg(struct device
*dev
, struct scatterlist
*sg
,
1789 int nents
, enum dma_data_direction dir
,
1790 unsigned long attrs
, bool is_coherent
)
1792 struct scatterlist
*s
;
1795 for_each_sg(sg
, s
, nents
, i
) {
1797 __iommu_remove_mapping(dev
, sg_dma_address(s
),
1799 if (!is_coherent
&& (attrs
& DMA_ATTR_SKIP_CPU_SYNC
) == 0)
1800 __dma_page_dev_to_cpu(sg_page(s
), s
->offset
,
1806 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1807 * @dev: valid struct device pointer
1808 * @sg: list of buffers
1809 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1810 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1812 * Unmap a set of streaming mode DMA translations. Again, CPU access
1813 * rules concerning calls here are the same as for dma_unmap_single().
1815 void arm_coherent_iommu_unmap_sg(struct device
*dev
, struct scatterlist
*sg
,
1816 int nents
, enum dma_data_direction dir
,
1817 unsigned long attrs
)
1819 __iommu_unmap_sg(dev
, sg
, nents
, dir
, attrs
, true);
1823 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1824 * @dev: valid struct device pointer
1825 * @sg: list of buffers
1826 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1827 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1829 * Unmap a set of streaming mode DMA translations. Again, CPU access
1830 * rules concerning calls here are the same as for dma_unmap_single().
1832 void arm_iommu_unmap_sg(struct device
*dev
, struct scatterlist
*sg
, int nents
,
1833 enum dma_data_direction dir
,
1834 unsigned long attrs
)
1836 __iommu_unmap_sg(dev
, sg
, nents
, dir
, attrs
, false);
1840 * arm_iommu_sync_sg_for_cpu
1841 * @dev: valid struct device pointer
1842 * @sg: list of buffers
1843 * @nents: number of buffers to map (returned from dma_map_sg)
1844 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1846 void arm_iommu_sync_sg_for_cpu(struct device
*dev
, struct scatterlist
*sg
,
1847 int nents
, enum dma_data_direction dir
)
1849 struct scatterlist
*s
;
1852 for_each_sg(sg
, s
, nents
, i
)
1853 __dma_page_dev_to_cpu(sg_page(s
), s
->offset
, s
->length
, dir
);
1858 * arm_iommu_sync_sg_for_device
1859 * @dev: valid struct device pointer
1860 * @sg: list of buffers
1861 * @nents: number of buffers to map (returned from dma_map_sg)
1862 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1864 void arm_iommu_sync_sg_for_device(struct device
*dev
, struct scatterlist
*sg
,
1865 int nents
, enum dma_data_direction dir
)
1867 struct scatterlist
*s
;
1870 for_each_sg(sg
, s
, nents
, i
)
1871 __dma_page_cpu_to_dev(sg_page(s
), s
->offset
, s
->length
, dir
);
1876 * arm_coherent_iommu_map_page
1877 * @dev: valid struct device pointer
1878 * @page: page that buffer resides in
1879 * @offset: offset into page for start of buffer
1880 * @size: size of buffer to map
1881 * @dir: DMA transfer direction
1883 * Coherent IOMMU aware version of arm_dma_map_page()
1885 static dma_addr_t
arm_coherent_iommu_map_page(struct device
*dev
, struct page
*page
,
1886 unsigned long offset
, size_t size
, enum dma_data_direction dir
,
1887 unsigned long attrs
)
1889 struct dma_iommu_mapping
*mapping
= to_dma_iommu_mapping(dev
);
1890 dma_addr_t dma_addr
;
1891 int ret
, prot
, len
= PAGE_ALIGN(size
+ offset
);
1893 dma_addr
= __alloc_iova(mapping
, len
);
1894 if (dma_addr
== DMA_MAPPING_ERROR
)
1897 prot
= __dma_info_to_prot(dir
, attrs
);
1899 ret
= iommu_map(mapping
->domain
, dma_addr
, page_to_phys(page
), len
, prot
);
1903 return dma_addr
+ offset
;
1905 __free_iova(mapping
, dma_addr
, len
);
1906 return DMA_MAPPING_ERROR
;
1910 * arm_iommu_map_page
1911 * @dev: valid struct device pointer
1912 * @page: page that buffer resides in
1913 * @offset: offset into page for start of buffer
1914 * @size: size of buffer to map
1915 * @dir: DMA transfer direction
1917 * IOMMU aware version of arm_dma_map_page()
1919 static dma_addr_t
arm_iommu_map_page(struct device
*dev
, struct page
*page
,
1920 unsigned long offset
, size_t size
, enum dma_data_direction dir
,
1921 unsigned long attrs
)
1923 if ((attrs
& DMA_ATTR_SKIP_CPU_SYNC
) == 0)
1924 __dma_page_cpu_to_dev(page
, offset
, size
, dir
);
1926 return arm_coherent_iommu_map_page(dev
, page
, offset
, size
, dir
, attrs
);
1930 * arm_coherent_iommu_unmap_page
1931 * @dev: valid struct device pointer
1932 * @handle: DMA address of buffer
1933 * @size: size of buffer (same as passed to dma_map_page)
1934 * @dir: DMA transfer direction (same as passed to dma_map_page)
1936 * Coherent IOMMU aware version of arm_dma_unmap_page()
1938 static void arm_coherent_iommu_unmap_page(struct device
*dev
, dma_addr_t handle
,
1939 size_t size
, enum dma_data_direction dir
, unsigned long attrs
)
1941 struct dma_iommu_mapping
*mapping
= to_dma_iommu_mapping(dev
);
1942 dma_addr_t iova
= handle
& PAGE_MASK
;
1943 int offset
= handle
& ~PAGE_MASK
;
1944 int len
= PAGE_ALIGN(size
+ offset
);
1949 iommu_unmap(mapping
->domain
, iova
, len
);
1950 __free_iova(mapping
, iova
, len
);
1954 * arm_iommu_unmap_page
1955 * @dev: valid struct device pointer
1956 * @handle: DMA address of buffer
1957 * @size: size of buffer (same as passed to dma_map_page)
1958 * @dir: DMA transfer direction (same as passed to dma_map_page)
1960 * IOMMU aware version of arm_dma_unmap_page()
1962 static void arm_iommu_unmap_page(struct device
*dev
, dma_addr_t handle
,
1963 size_t size
, enum dma_data_direction dir
, unsigned long attrs
)
1965 struct dma_iommu_mapping
*mapping
= to_dma_iommu_mapping(dev
);
1966 dma_addr_t iova
= handle
& PAGE_MASK
;
1967 struct page
*page
= phys_to_page(iommu_iova_to_phys(mapping
->domain
, iova
));
1968 int offset
= handle
& ~PAGE_MASK
;
1969 int len
= PAGE_ALIGN(size
+ offset
);
1974 if ((attrs
& DMA_ATTR_SKIP_CPU_SYNC
) == 0)
1975 __dma_page_dev_to_cpu(page
, offset
, size
, dir
);
1977 iommu_unmap(mapping
->domain
, iova
, len
);
1978 __free_iova(mapping
, iova
, len
);
1982 * arm_iommu_map_resource - map a device resource for DMA
1983 * @dev: valid struct device pointer
1984 * @phys_addr: physical address of resource
1985 * @size: size of resource to map
1986 * @dir: DMA transfer direction
1988 static dma_addr_t
arm_iommu_map_resource(struct device
*dev
,
1989 phys_addr_t phys_addr
, size_t size
,
1990 enum dma_data_direction dir
, unsigned long attrs
)
1992 struct dma_iommu_mapping
*mapping
= to_dma_iommu_mapping(dev
);
1993 dma_addr_t dma_addr
;
1995 phys_addr_t addr
= phys_addr
& PAGE_MASK
;
1996 unsigned int offset
= phys_addr
& ~PAGE_MASK
;
1997 size_t len
= PAGE_ALIGN(size
+ offset
);
1999 dma_addr
= __alloc_iova(mapping
, len
);
2000 if (dma_addr
== DMA_MAPPING_ERROR
)
2003 prot
= __dma_info_to_prot(dir
, attrs
) | IOMMU_MMIO
;
2005 ret
= iommu_map(mapping
->domain
, dma_addr
, addr
, len
, prot
);
2009 return dma_addr
+ offset
;
2011 __free_iova(mapping
, dma_addr
, len
);
2012 return DMA_MAPPING_ERROR
;
2016 * arm_iommu_unmap_resource - unmap a device DMA resource
2017 * @dev: valid struct device pointer
2018 * @dma_handle: DMA address to resource
2019 * @size: size of resource to map
2020 * @dir: DMA transfer direction
2022 static void arm_iommu_unmap_resource(struct device
*dev
, dma_addr_t dma_handle
,
2023 size_t size
, enum dma_data_direction dir
,
2024 unsigned long attrs
)
2026 struct dma_iommu_mapping
*mapping
= to_dma_iommu_mapping(dev
);
2027 dma_addr_t iova
= dma_handle
& PAGE_MASK
;
2028 unsigned int offset
= dma_handle
& ~PAGE_MASK
;
2029 size_t len
= PAGE_ALIGN(size
+ offset
);
2034 iommu_unmap(mapping
->domain
, iova
, len
);
2035 __free_iova(mapping
, iova
, len
);
2038 static void arm_iommu_sync_single_for_cpu(struct device
*dev
,
2039 dma_addr_t handle
, size_t size
, enum dma_data_direction dir
)
2041 struct dma_iommu_mapping
*mapping
= to_dma_iommu_mapping(dev
);
2042 dma_addr_t iova
= handle
& PAGE_MASK
;
2043 struct page
*page
= phys_to_page(iommu_iova_to_phys(mapping
->domain
, iova
));
2044 unsigned int offset
= handle
& ~PAGE_MASK
;
2049 __dma_page_dev_to_cpu(page
, offset
, size
, dir
);
2052 static void arm_iommu_sync_single_for_device(struct device
*dev
,
2053 dma_addr_t handle
, size_t size
, enum dma_data_direction dir
)
2055 struct dma_iommu_mapping
*mapping
= to_dma_iommu_mapping(dev
);
2056 dma_addr_t iova
= handle
& PAGE_MASK
;
2057 struct page
*page
= phys_to_page(iommu_iova_to_phys(mapping
->domain
, iova
));
2058 unsigned int offset
= handle
& ~PAGE_MASK
;
2063 __dma_page_cpu_to_dev(page
, offset
, size
, dir
);
const struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,

	.map_resource		= arm_iommu_map_resource,
	.unmap_resource		= arm_iommu_unmap_resource,

	.dma_supported		= arm_dma_supported,
};
const struct dma_map_ops iommu_coherent_ops = {
	.alloc		= arm_coherent_iommu_alloc_attrs,
	.free		= arm_coherent_iommu_free_attrs,
	.mmap		= arm_coherent_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page	= arm_coherent_iommu_map_page,
	.unmap_page	= arm_coherent_iommu_unmap_page,

	.map_sg		= arm_coherent_iommu_map_sg,
	.unmap_sg	= arm_coherent_iommu_unmap_sg,

	.map_resource	= arm_iommu_map_resource,
	.unmap_resource	= arm_iommu_unmap_resource,

	.dma_supported	= arm_dma_supported,
};
/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: maximum size of the valid IO address space
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device needs to be attached to the mapping with the
 * arm_iommu_attach_device function.
 */
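
/*
 * Illustrative sketch (added for clarity, not part of the original file)
 * of creating and attaching such a mapping, assuming a platform bus device
 * and a hypothetical 128 MiB IO window starting at 0x80000000:
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_128M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	if (arm_iommu_attach_device(dev, mapping)) {
 *		arm_iommu_release_mapping(mapping);
 *		return -ENODEV;
 *	}
 */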
2119 struct dma_iommu_mapping
*
2120 arm_iommu_create_mapping(struct bus_type
*bus
, dma_addr_t base
, u64 size
)
2122 unsigned int bits
= size
>> PAGE_SHIFT
;
2123 unsigned int bitmap_size
= BITS_TO_LONGS(bits
) * sizeof(long);
2124 struct dma_iommu_mapping
*mapping
;
2128 /* currently only 32-bit DMA address space is supported */
2129 if (size
> DMA_BIT_MASK(32) + 1)
2130 return ERR_PTR(-ERANGE
);
2133 return ERR_PTR(-EINVAL
);
2135 if (bitmap_size
> PAGE_SIZE
) {
2136 extensions
= bitmap_size
/ PAGE_SIZE
;
2137 bitmap_size
= PAGE_SIZE
;
2140 mapping
= kzalloc(sizeof(struct dma_iommu_mapping
), GFP_KERNEL
);
2144 mapping
->bitmap_size
= bitmap_size
;
2145 mapping
->bitmaps
= kcalloc(extensions
, sizeof(unsigned long *),
2147 if (!mapping
->bitmaps
)
2150 mapping
->bitmaps
[0] = kzalloc(bitmap_size
, GFP_KERNEL
);
2151 if (!mapping
->bitmaps
[0])
2154 mapping
->nr_bitmaps
= 1;
2155 mapping
->extensions
= extensions
;
2156 mapping
->base
= base
;
2157 mapping
->bits
= BITS_PER_BYTE
* bitmap_size
;
2159 spin_lock_init(&mapping
->lock
);
2161 mapping
->domain
= iommu_domain_alloc(bus
);
2162 if (!mapping
->domain
)
2165 kref_init(&mapping
->kref
);
2168 kfree(mapping
->bitmaps
[0]);
2170 kfree(mapping
->bitmaps
);
2174 return ERR_PTR(err
);
2176 EXPORT_SYMBOL_GPL(arm_iommu_create_mapping
);
2178 static void release_iommu_mapping(struct kref
*kref
)
2181 struct dma_iommu_mapping
*mapping
=
2182 container_of(kref
, struct dma_iommu_mapping
, kref
);
2184 iommu_domain_free(mapping
->domain
);
2185 for (i
= 0; i
< mapping
->nr_bitmaps
; i
++)
2186 kfree(mapping
->bitmaps
[i
]);
2187 kfree(mapping
->bitmaps
);
2191 static int extend_iommu_mapping(struct dma_iommu_mapping
*mapping
)
2195 if (mapping
->nr_bitmaps
>= mapping
->extensions
)
2198 next_bitmap
= mapping
->nr_bitmaps
;
2199 mapping
->bitmaps
[next_bitmap
] = kzalloc(mapping
->bitmap_size
,
2201 if (!mapping
->bitmaps
[next_bitmap
])
2204 mapping
->nr_bitmaps
++;
2209 void arm_iommu_release_mapping(struct dma_iommu_mapping
*mapping
)
2212 kref_put(&mapping
->kref
, release_iommu_mapping
);
2214 EXPORT_SYMBOL_GPL(arm_iommu_release_mapping
);
2216 static int __arm_iommu_attach_device(struct device
*dev
,
2217 struct dma_iommu_mapping
*mapping
)
2221 err
= iommu_attach_device(mapping
->domain
, dev
);
2225 kref_get(&mapping
->kref
);
2226 to_dma_iommu_mapping(dev
) = mapping
;
2228 pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev
));
2233 * arm_iommu_attach_device
2234 * @dev: valid struct device pointer
2235 * @mapping: io address space mapping structure (returned from
2236 * arm_iommu_create_mapping)
2238 * Attaches specified io address space mapping to the provided device.
2239 * This replaces the dma operations (dma_map_ops pointer) with the
2240 * IOMMU aware version.
2242 * More than one client might be attached to the same io address space
2245 int arm_iommu_attach_device(struct device
*dev
,
2246 struct dma_iommu_mapping
*mapping
)
2250 err
= __arm_iommu_attach_device(dev
, mapping
);
2254 set_dma_ops(dev
, &iommu_ops
);
2257 EXPORT_SYMBOL_GPL(arm_iommu_attach_device
);
2260 * arm_iommu_detach_device
2261 * @dev: valid struct device pointer
2263 * Detaches the provided device from a previously attached map.
2264 * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
2266 void arm_iommu_detach_device(struct device
*dev
)
2268 struct dma_iommu_mapping
*mapping
;
2270 mapping
= to_dma_iommu_mapping(dev
);
2272 dev_warn(dev
, "Not attached\n");
2276 iommu_detach_device(mapping
->domain
, dev
);
2277 kref_put(&mapping
->kref
, release_iommu_mapping
);
2278 to_dma_iommu_mapping(dev
) = NULL
;
2279 set_dma_ops(dev
, arm_get_dma_map_ops(dev
->archdata
.dma_coherent
));
2281 pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev
));
2283 EXPORT_SYMBOL_GPL(arm_iommu_detach_device
);
2285 static const struct dma_map_ops
*arm_get_iommu_dma_map_ops(bool coherent
)
2287 return coherent
? &iommu_coherent_ops
: &iommu_ops
;
2290 static bool arm_setup_iommu_dma_ops(struct device
*dev
, u64 dma_base
, u64 size
,
2291 const struct iommu_ops
*iommu
)
2293 struct dma_iommu_mapping
*mapping
;
2298 mapping
= arm_iommu_create_mapping(dev
->bus
, dma_base
, size
);
2299 if (IS_ERR(mapping
)) {
2300 pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
2301 size
, dev_name(dev
));
2305 if (__arm_iommu_attach_device(dev
, mapping
)) {
2306 pr_warn("Failed to attached device %s to IOMMU_mapping\n",
2308 arm_iommu_release_mapping(mapping
);
2315 static void arm_teardown_iommu_dma_ops(struct device
*dev
)
2317 struct dma_iommu_mapping
*mapping
= to_dma_iommu_mapping(dev
);
2322 arm_iommu_detach_device(dev
);
2323 arm_iommu_release_mapping(mapping
);
2328 static bool arm_setup_iommu_dma_ops(struct device
*dev
, u64 dma_base
, u64 size
,
2329 const struct iommu_ops
*iommu
)
2334 static void arm_teardown_iommu_dma_ops(struct device
*dev
) { }
2336 #define arm_get_iommu_dma_map_ops arm_get_dma_map_ops
2338 #endif /* CONFIG_ARM_DMA_USE_IOMMU */
2340 void arch_setup_dma_ops(struct device
*dev
, u64 dma_base
, u64 size
,
2341 const struct iommu_ops
*iommu
, bool coherent
)
2343 const struct dma_map_ops
*dma_ops
;
2345 dev
->archdata
.dma_coherent
= coherent
;
2346 #ifdef CONFIG_SWIOTLB
2347 dev
->dma_coherent
= coherent
;
2351 * Don't override the dma_ops if they have already been set. Ideally
2352 * this should be the only location where dma_ops are set, remove this
2353 * check when all other callers of set_dma_ops will have disappeared.
2358 if (arm_setup_iommu_dma_ops(dev
, dma_base
, size
, iommu
))
2359 dma_ops
= arm_get_iommu_dma_map_ops(coherent
);
2361 dma_ops
= arm_get_dma_map_ops(coherent
);
2363 set_dma_ops(dev
, dma_ops
);
2366 if (xen_initial_domain()) {
2367 dev
->archdata
.dev_dma_ops
= dev
->dma_ops
;
2368 dev
->dma_ops
= xen_dma_ops
;
2371 dev
->archdata
.dma_ops_setup
= true;
2374 void arch_teardown_dma_ops(struct device
*dev
)
2376 if (!dev
->archdata
.dma_ops_setup
)
2379 arm_teardown_iommu_dma_ops(dev
);
2380 /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
2381 set_dma_ops(dev
, NULL
);
2384 #ifdef CONFIG_SWIOTLB
2385 void arch_sync_dma_for_device(struct device
*dev
, phys_addr_t paddr
,
2386 size_t size
, enum dma_data_direction dir
)
2388 __dma_page_cpu_to_dev(phys_to_page(paddr
), paddr
& (PAGE_SIZE
- 1),
2392 void arch_sync_dma_for_cpu(struct device
*dev
, phys_addr_t paddr
,
2393 size_t size
, enum dma_data_direction dir
)
2395 __dma_page_dev_to_cpu(phys_to_page(paddr
), paddr
& (PAGE_SIZE
- 1),
2399 long arch_dma_coherent_to_pfn(struct device
*dev
, void *cpu_addr
,
2400 dma_addr_t dma_addr
)
2402 return dma_to_pfn(dev
, dma_addr
);
2405 pgprot_t
arch_dma_mmap_pgprot(struct device
*dev
, pgprot_t prot
,
2406 unsigned long attrs
)
2408 return __get_dma_pgprot(attrs
, prot
);
2411 void *arch_dma_alloc(struct device
*dev
, size_t size
, dma_addr_t
*dma_handle
,
2412 gfp_t gfp
, unsigned long attrs
)
2414 return __dma_alloc(dev
, size
, dma_handle
, gfp
,
2415 __get_dma_pgprot(attrs
, PAGE_KERNEL
), false,
2416 attrs
, __builtin_return_address(0));
2419 void arch_dma_free(struct device
*dev
, size_t size
, void *cpu_addr
,
2420 dma_addr_t dma_handle
, unsigned long attrs
)
2422 __arm_dma_free(dev
, size
, cpu_addr
, dma_handle
, attrs
, false);
2424 #endif /* CONFIG_SWIOTLB */