// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
/*
 * Managed DMA API
 */
struct dma_devres {
        size_t          size;
        void            *vaddr;
        dma_addr_t      dma_handle;
        unsigned long   attrs;
};

static void dmam_release(struct device *dev, void *res)
{
        struct dma_devres *this = res;

        dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
                       this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
        struct dma_devres *this = res, *match = match_data;

        if (this->vaddr == match->vaddr) {
                WARN_ON(this->size != match->size ||
                        this->dma_handle != match->dma_handle);
                return 1;
        }
        return 0;
}
/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
                        dma_addr_t dma_handle)
{
        struct dma_devres match_data = { size, vaddr, dma_handle };

        dma_free_coherent(dev, size, vaddr, dma_handle);
        WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);
/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non_coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                       gfp_t gfp, unsigned long attrs)
{
        struct dma_devres *dr;
        void *vaddr;

        dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
        if (!dr)
                return NULL;

        vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
        if (!vaddr) {
                devres_free(dr);
                return NULL;
        }

        dr->vaddr = vaddr;
        dr->dma_handle = *dma_handle;
        dr->size = size;
        dr->attrs = attrs;

        devres_add(dev, dr);

        return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        struct page *page = virt_to_page(cpu_addr);
        int ret;

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return ret;
}
/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scattertable.  This presents a couple of problems:
 *  1. Not all memory allocated via the coherent DMA APIs is backed by
 *     a struct page
 *  2. Passing coherent DMA memory into the streaming APIs is not allowed
 *     as we will try to flush the memory through a different alias to that
 *     actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_is_direct(ops))
                return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
                                size, attrs);
        if (!ops->get_sgtable)
                return -ENXIO;
        return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);
#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
        if (force_dma_unencrypted(dev))
                prot = pgprot_decrypted(prot);
        if (dev_is_dma_coherent(dev) ||
            (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
             (attrs & DMA_ATTR_NON_CONSISTENT)))
                return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
        if (attrs & DMA_ATTR_WRITE_COMBINE)
                return pgprot_writecombine(prot);
#endif
        return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */
/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
#ifdef CONFIG_MMU
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;
        int ret = -ENXIO;

        vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off >= count || user_count > count - off)
                return -ENXIO;

        return remap_pfn_range(vma, vma->vm_start,
                        page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
                        user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
        return -ENXIO;
#endif /* CONFIG_MMU */
}
/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_is_direct(ops))
                return dma_direct_can_mmap(dev);
        return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);
/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_is_direct(ops))
                return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
                                attrs);
        if (!ops->mmap)
                return -ENXIO;
        return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
u64 dma_get_required_mask(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_is_direct(ops))
                return dma_direct_get_required_mask(dev);
        if (ops->get_required_mask)
                return ops->get_required_mask(dev);

        /*
         * We require every DMA ops implementation to at least support a 32-bit
         * DMA mask (and use bounce buffering if that isn't supported in
         * hardware).  As the direct mapping code has its own routine to
         * actually report an optimal mask we default to 32-bit here as that
         * is the right thing for most IOMMUs, and at least not actively
         * harmful in general.
         */
        return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t flag, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;

        WARN_ON_ONCE(!dev->coherent_dma_mask);

        if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
                return cpu_addr;

        /* let the implementation decide on the zone to allocate from: */
        flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

        if (dma_is_direct(ops))
                cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
        else if (ops->alloc)
                cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
        else
                return NULL;

        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
        return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_handle, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
                return;
        /*
         * On non-coherent platforms which implement DMA-coherent buffers via
         * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
         * this far in IRQ context is a) at risk of a BUG_ON() or trying to
         * sleep on some machines, and b) an indication that the driver is
         * probably misusing the coherent API anyway.
         */
        WARN_ON(irqs_disabled());

        if (!cpu_addr)
                return;

        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
        if (dma_is_direct(ops))
                dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
        else if (ops->free)
                ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
int dma_supported(struct device *dev, u64 mask)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_is_direct(ops))
                return dma_direct_supported(dev, mask);
        if (!ops->dma_supported)
                return 1;
        return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);
#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)    do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
        /*
         * Truncate the mask to the actually supported dma_addr_t width to
         * avoid generating unsupportable addresses.
         */
        mask = (dma_addr_t)mask;

        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        arch_dma_set_mask(dev, mask);
        *dev->dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        /*
         * Truncate the mask to the actually supported dma_addr_t width to
         * avoid generating unsupportable addresses.
         */
        mask = (dma_addr_t)mask;

        if (!dma_supported(dev, mask))
                return -EIO;

        dev->coherent_dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));

        if (dma_is_direct(ops))
                arch_dma_cache_sync(dev, vaddr, size, dir);
        else if (ops->cache_sync)
                ops->cache_sync(dev, vaddr, size, dir);
}
EXPORT_SYMBOL(dma_cache_sync);
size_t dma_max_mapping_size(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        size_t size = SIZE_MAX;

        if (dma_is_direct(ops))
                size = dma_direct_max_mapping_size(dev);
        else if (ops && ops->max_mapping_size)
                size = ops->max_mapping_size(dev);

        return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);
unsigned long dma_get_merge_boundary(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (!ops || !ops->get_merge_boundary)
                return 0;       /* can't merge */

        return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);