// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/iommu-dma.h>
#include <linux/kmsan.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"

#define CREATE_TRACE_POINTS
#include <trace/events/dma.h>

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
bool dma_default_coherent = IS_ENABLED(CONFIG_ARCH_DMA_DEFAULT_COHERENT);
#endif

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
	dma_free_coherent(dev, size, vaddr, dma_handle);
}
EXPORT_SYMBOL(dmam_free_coherent);
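
/*
 * Example (illustrative sketch, not part of this file): explicitly freeing a
 * devres-managed coherent buffer before driver detach.  "foo", pdev and the
 * buf_* fields are hypothetical driver state.
 *
 *	dmam_free_coherent(&pdev->dev, foo->buf_size, foo->buf, foo->buf_dma);
 *
 * Most drivers never call this and simply let devres release the buffer when
 * the driver unbinds.
 */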

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non_coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
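
/*
 * Example (illustrative sketch, not part of this file): allocating a managed
 * coherent buffer in a probe routine.  "foo", FOO_RING_SIZE and pdev are
 * made-up names used only for illustration.
 *
 *	foo->ring = dmam_alloc_attrs(&pdev->dev, FOO_RING_SIZE, &foo->ring_dma,
 *				     GFP_KERNEL, 0);
 *	if (!foo->ring)
 *		return -ENOMEM;
 *
 * The buffer is released automatically when the driver detaches, so no
 * explicit free is needed on the error or remove paths.
 */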

static bool dma_go_direct(struct device *dev, dma_addr_t mask,
		const struct dma_map_ops *ops)
{
	if (use_dma_iommu(dev))
		return false;

	if (likely(!ops))
		return true;

#ifdef CONFIG_DMA_OPS_BYPASS
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			dma_direct_get_required_mask(dev);
#endif
	return false;
}

/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else if (use_dma_iommu(dev))
		addr = iommu_dma_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	kmsan_handle_dma(page, offset, size, dir);
	trace_dma_map_page(dev, page_to_phys(page) + offset, addr, size, dir,
			   attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);

	return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_page_direct(dev, addr + size))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (use_dma_iommu(dev))
		iommu_dma_unmap_page(dev, addr, size, dir, attrs);
	else
		ops->unmap_page(dev, addr, size, dir, attrs);
	trace_dma_unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);

static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
	 int nents, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return 0;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_sg_direct(dev, sg, nents))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else if (use_dma_iommu(dev))
		ents = iommu_dma_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);

	if (ents > 0) {
		kmsan_handle_dma_sg(sg, nents, dir);
		trace_dma_map_sg(dev, sg, nents, ents, dir, attrs);
		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
	} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
				ents != -EIO && ents != -EREMOTEIO)) {
		trace_dma_map_sg_err(dev, sg, nents, ents, dir, attrs);
		return -EIO;
	}

	return ents;
}

/**
 * dma_map_sg_attrs - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sg:		The sg_table object describing the buffer
 * @nents:	Number of entries to map
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist passed in the sg argument with
 * nents segments for the @dir DMA operation by the @dev device.
 *
 * Returns the number of mapped entries (which can be less than nents)
 * on success. Zero is returned for any error.
 *
 * dma_unmap_sg_attrs() should be used to unmap the buffer with the
 * original sg and original nents (not the value returned by this function).
 */
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		    int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int ret;

	ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
	if (ret < 0)
		return 0;
	return ret;
}
EXPORT_SYMBOL(dma_map_sg_attrs);
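
/*
 * Example (illustrative sketch, not part of this file): mapping a scatterlist
 * for a device-to-memory transfer and walking the mapped segments.  "foo" and
 * foo_queue_segment() are made-up driver names; sgl/nents come from the
 * hypothetical caller.
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (count == 0)
 *		return -EIO;
 *	for_each_sg(sgl, sg, count, i)
 *		foo_queue_segment(foo, sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *
 * Note that the unmap uses the original nents, not the value returned by the
 * map call.
 */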

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership for the buffer is transferred to the DMA domain.  One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before touching the
 * buffer by the CPU.
 *
 * Returns 0 on success or a negative error code on error. The following
 * error codes are supported with the given meaning:
 *
 *   -EINVAL		An invalid argument, unaligned access or other error
 *			in usage. Will not succeed if retried.
 *   -ENOMEM		Insufficient resources (like memory or IOVA space) to
 *			complete the mapping. Should succeed if retried later.
 *   -EIO		Legacy error code with an unknown meaning, e.g. this is
 *			returned if a lower level call returned
 *			DMA_MAPPING_ERROR.
 *   -EREMOTEIO		The DMA device cannot access P2PDMA memory specified
 *			in the sg_table. This will not succeed if retried.
 */
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		    enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents < 0)
		return nents;
	sgt->nents = nents;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_map_sgtable);
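
/*
 * Example (illustrative sketch, not part of this file): mapping an sg_table
 * and iterating over the DMA-mapped segments with for_each_sgtable_dma_sg().
 * foo_program_desc() is a made-up driver helper.
 *
 *	struct scatterlist *sg;
 *	int i, ret;
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 *	if (ret)
 *		return ret;
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		foo_program_desc(foo, sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 *
 * The possible return values are the error codes documented above.
 */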

void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	trace_dma_unmap_sg(dev, sg, nents, dir, attrs);
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_sg_direct(dev, sg, nents))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (use_dma_iommu(dev))
		iommu_dma_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (use_dma_iommu(dev))
		addr = iommu_dma_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	trace_dma_map_resource(dev, phys_addr, addr, size, dir, attrs);
	debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
	return addr;
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		; /* nothing to do: uncached and no swiotlb */
	else if (use_dma_iommu(dev))
		iommu_dma_unmap_resource(dev, addr, size, dir, attrs);
	else if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	trace_dma_unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);

#ifdef CONFIG_DMA_NEED_SYNC
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (use_dma_iommu(dev))
		iommu_dma_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	trace_dma_sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(__dma_sync_single_for_cpu);

void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (use_dma_iommu(dev))
		iommu_dma_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	trace_dma_sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(__dma_sync_single_for_device);

void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (use_dma_iommu(dev))
		iommu_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	trace_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(__dma_sync_sg_for_cpu);

void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (use_dma_iommu(dev))
		iommu_dma_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	trace_dma_sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(__dma_sync_sg_for_device);

bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		/*
		 * dma_skip_sync could've been reset on first SWIOTLB buffer
		 * mapping, but @dma_addr is not necessarily an SWIOTLB buffer.
		 * In this case, fall back to more granular check.
		 */
		return dma_direct_need_sync(dev, dma_addr);
	return true;
}
EXPORT_SYMBOL_GPL(__dma_need_sync);
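
/*
 * Example (illustrative sketch, not part of this file): a driver caching the
 * result of dma_need_sync() so it can skip per-buffer syncs on coherent
 * setups.  "rx" and its fields are hypothetical.
 *
 *	rx->dma = dma_map_single(dev, rx->buf, rx->len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, rx->dma))
 *		return -ENOMEM;
 *	rx->need_sync = dma_need_sync(dev, rx->dma);
 *	...
 *	if (rx->need_sync)
 *		dma_sync_single_for_cpu(dev, rx->dma, rx->len, DMA_FROM_DEVICE);
 */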

static void dma_setup_need_sync(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops) || use_dma_iommu(dev))
		/*
		 * dma_skip_sync will be reset to %false on first SWIOTLB buffer
		 * mapping, if any. During the device initialization, it's
		 * enough to check only for the DMA coherence.
		 */
		dev->dma_skip_sync = dev_is_dma_coherent(dev);
	else if (!ops->sync_single_for_device && !ops->sync_single_for_cpu &&
		 !ops->sync_sg_for_device && !ops->sync_sg_for_cpu)
		/*
		 * Synchronization is not possible when none of DMA sync ops
		 * is set.
		 */
		dev->dma_skip_sync = true;
	else
		dev->dma_skip_sync = false;
}
#else /* !CONFIG_DMA_NEED_SYNC */
static inline void dma_setup_need_sync(struct device *dev) { }
#endif /* !CONFIG_DMA_NEED_SYNC */

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scattertable.  This presents a couple of problems:
 *  1. Not all memory allocated via the coherent DMA APIs is backed by
 *     a struct page
 *  2. Passing coherent DMA memory into the streaming APIs is not allowed
 *     as we will try to flush the memory through a different alias to that
 *     actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (use_dma_iommu(dev))
		return iommu_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (dev_is_dma_coherent(dev))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_can_mmap(dev);
	if (use_dma_iommu(dev))
		return true;
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);
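
/*
 * Example (illustrative sketch, not part of this file): checking for mmap
 * support before exposing a coherent buffer to user space.  "foo" and its
 * fields are hypothetical driver state.
 *
 *	if (!dma_can_mmap(dev))
 *		return -ENXIO;
 *	return dma_mmap_coherent(dev, vma, foo->buf, foo->buf_dma,
 *				 foo->buf_size);
 */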

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (use_dma_iommu(dev))
		return iommu_dma_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
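
/*
 * Example (illustrative sketch, not part of this file): a driver mmap()
 * handler forwarding a previously allocated coherent buffer to user space.
 * "struct foo_dev" and its fields are made up for illustration.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_attrs(foo->dev, vma, foo->buf, foo->buf_dma,
 *				      foo->buf_size, 0);
 *	}
 *
 * The buffer must stay allocated until the user mapping is torn down, as
 * noted in the kernel-doc above.
 */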

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_required_mask(dev);

	if (use_dma_iommu(dev))
		return DMA_BIT_MASK(32);

	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
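
/*
 * Example (illustrative sketch, not part of this file): a driver that supports
 * both 32-bit and 64-bit descriptor formats can consult the required mask to
 * decide whether the smaller format is sufficient.  "foo" is hypothetical.
 *
 *	if (dma_get_required_mask(dev) > DMA_BIT_MASK(32) &&
 *	    !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
 *		foo->use_64bit_desc = true;
 *	else if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */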

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	/*
	 * DMA allocations can never be turned back into a page pointer, so
	 * requesting compound pages doesn't make sense (and can't even be
	 * supported at all by various backends).
	 */
	if (WARN_ON_ONCE(flag & __GFP_COMP))
		return NULL;

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) {
		trace_dma_alloc(dev, cpu_addr, *dma_handle, size,
				DMA_BIDIRECTIONAL, flag, attrs);
		return cpu_addr;
	}

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_alloc_direct(dev, ops)) {
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	} else if (use_dma_iommu(dev)) {
		cpu_addr = iommu_dma_alloc(dev, size, dma_handle, flag, attrs);
	} else if (ops->alloc) {
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	} else {
		trace_dma_alloc(dev, NULL, 0, size, DMA_BIDIRECTIONAL, flag,
				attrs);
		return NULL;
	}

	trace_dma_alloc(dev, cpu_addr, *dma_handle, size, DMA_BIDIRECTIONAL,
			flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);
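
/*
 * Example (illustrative sketch, not part of this file): allocating and freeing
 * a coherent descriptor ring.  FOO_RING_BYTES is a made-up constant.
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, FOO_RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, FOO_RING_BYTES, ring, ring_dma);
 *
 * dma_alloc_coherent() is the common wrapper around dma_alloc_attrs() with
 * attrs == 0.
 */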

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
		       attrs);
	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (use_dma_iommu(dev))
		iommu_dma_free(dev, size, cpu_addr, dma_handle, attrs);
	else
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

static struct page *__dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;
	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
		return NULL;
	if (WARN_ON_ONCE(gfp & __GFP_COMP))
		return NULL;

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
	if (use_dma_iommu(dev))
		return dma_common_alloc_pages(dev, size, dma_handle, dir, gfp);
	if (!ops->alloc_pages_op)
		return NULL;
	return ops->alloc_pages_op(dev, size, dma_handle, dir, gfp);
}

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	if (page) {
		trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
				      size, dir, gfp, 0);
		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
	} else {
		trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0);
	}
	return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free_pages(dev, size, page, dma_handle, dir);
	else if (use_dma_iommu(dev))
		dma_common_free_pages(dev, size, page, dma_handle, dir);
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
}

void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	trace_dma_free_pages(dev, page_to_virt(page), dma_handle, size, dir, 0);
	debug_dma_unmap_page(dev, dma_handle, size, dir);
	__dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(page) + vma->vm_pgoff,
			       vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_pages);

static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp)
{
	struct sg_table *sgt;
	struct page *page;

	sgt = kmalloc(sizeof(*sgt), gfp);
	if (!sgt)
		return NULL;
	if (sg_alloc_table(sgt, 1, gfp))
		goto out_free_sgt;
	page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
	if (!page)
		goto out_free_table;
	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_len(sgt->sgl) = sgt->sgl->length;
	return sgt;
out_free_table:
	sg_free_table(sgt);
out_free_sgt:
	kfree(sgt);
	return NULL;
}

struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
	struct sg_table *sgt;

	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
		return NULL;
	if (WARN_ON_ONCE(gfp & __GFP_COMP))
		return NULL;

	if (use_dma_iommu(dev))
		sgt = iommu_dma_alloc_noncontiguous(dev, size, dir, gfp, attrs);
	else
		sgt = alloc_single_sgt(dev, size, dir, gfp);

	if (sgt) {
		sgt->nents = 1;
		trace_dma_alloc_sgt(dev, sgt, size, dir, gfp, attrs);
		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
	} else {
		trace_dma_alloc_sgt_err(dev, NULL, 0, size, dir, gfp, attrs);
	}
	return sgt;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);

static void free_single_sgt(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
			 dir);
	sg_free_table(sgt);
	kfree(sgt);
}

void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	trace_dma_free_sgt(dev, sgt, size, dir);
	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);

	if (use_dma_iommu(dev))
		iommu_dma_free_noncontiguous(dev, size, sgt, dir);
	else
		free_single_sgt(dev, size, sgt, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncontiguous);

void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	if (use_dma_iommu(dev))
		return iommu_dma_vmap_noncontiguous(dev, size, sgt);

	return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);

void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
	if (use_dma_iommu(dev))
		iommu_dma_vunmap_noncontiguous(dev, vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);

int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt)
{
	if (use_dma_iommu(dev))
		return iommu_dma_mmap_noncontiguous(dev, vma, size, sgt);
	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);

static int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (use_dma_iommu(dev)) {
		if (WARN_ON(ops))
			return false;
		return true;
	}

	/*
	 * ->dma_supported sets and clears the bypass flag, so ignore it here
	 * and always call into the method if there is one.
	 */
	if (ops) {
		if (!ops->dma_supported)
			return true;
		return ops->dma_supported(dev, mask);
	}

	return dma_direct_supported(dev, mask);
}

bool dma_pci_p2pdma_supported(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * Note: dma_ops_bypass is not checked here because P2PDMA should
	 * not be used with dma mapping ops that do not have support even
	 * if the specific device is bypassing them.
	 */

	/* if ops is not set, dma direct and default IOMMU support P2PDMA */
	return !ops;
}
EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	dma_setup_need_sync(dev);

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
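
/*
 * Example (illustrative sketch, not part of this file): the usual probe-time
 * pattern of requesting a wide mask and falling back to 32-bit.  "pdev" is a
 * hypothetical platform/PCI device.
 *
 *	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return dev_err_probe(&pdev->dev, ret, "no usable DMA mask\n");
 */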

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev:	device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false.  Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
bool dma_addressing_limited(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
			 dma_get_required_mask(dev))
		return true;

	if (unlikely(ops) || use_dma_iommu(dev))
		return false;
	return !dma_direct_all_ram_mapped(dev);
}
EXPORT_SYMBOL_GPL(dma_addressing_limited);
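
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * preallocates its own bounce buffers when the device cannot reach all of
 * memory.  foo_alloc_bounce_pool() is a made-up helper.
 *
 *	if (dma_addressing_limited(&pdev->dev)) {
 *		ret = foo_alloc_bounce_pool(foo);
 *		if (ret)
 *			return ret;
 *	}
 */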

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_map_direct(dev, ops))
		size = dma_direct_max_mapping_size(dev);
	else if (use_dma_iommu(dev))
		size = iommu_dma_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

size_t dma_opt_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (use_dma_iommu(dev))
		size = iommu_dma_opt_mapping_size();
	else if (ops && ops->opt_mapping_size)
		size = ops->opt_mapping_size();

	return min(dma_max_mapping_size(dev), size);
}
EXPORT_SYMBOL_GPL(dma_opt_mapping_size);

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (use_dma_iommu(dev))
		return iommu_dma_get_merge_boundary(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);