/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>

/*
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
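
/*
 * Example (illustrative sketch): the DMA_ATTR_* flags above are OR'ed into
 * the attrs argument of the *_attrs() interfaces declared later in this
 * header. "dev", "size" and "handle" are hypothetical driver variables; with
 * DMA_ATTR_NO_KERNEL_MAPPING the returned pointer is an opaque cookie rather
 * than a usable kernel virtual address.
 *
 *	void *cookie;
 *	dma_addr_t handle;
 *
 *	cookie = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL,
 *				 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
 *	...
 *	dma_free_attrs(dev, size, cookie, handle,
 *		       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
 */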

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
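
/*
 * Example (illustrative sketch): DMA_BIT_MASK(n) builds the address mask for
 * an n-bit-capable device, e.g. DMA_BIT_MASK(32) == 0x00000000ffffffffULL.
 * It is typically passed to dma_set_mask_and_coherent() at probe time (see
 * the example further down in this header).
 */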

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}
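
/*
 * Example (illustrative sketch): the handle returned by any dma_map_*()
 * interface must be checked with dma_mapping_error() before it is handed to
 * hardware, and a failed mapping must not be unmapped. "dev", "page" and
 * "size" are hypothetical driver variables.
 *
 *	dma_addr_t handle = dma_map_page_attrs(dev, page, 0, size,
 *					       DMA_TO_DEVICE, 0);
 *
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */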

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs);
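/*
 * Example (illustrative sketch): mapping a previously built sg_table;
 * dma_map_sgtable() returns 0 or a negative errno, and the mapped entries
 * are then walked with for_each_sgtable_dma_sg(). "dev", "sgt" and
 * "queue_to_hw()" are hypothetical.
 *
 *	struct scatterlist *sg;
 *	int i, ret;
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 *	if (ret)
 *		return ret;
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		queue_to_hw(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 */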
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
bool dma_addressing_limited(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
unsigned long dma_get_merge_boundary(struct device *dev);
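/*
 * Example (illustrative sketch): exposing a coherent buffer to user space
 * from a driver's mmap() handler via the dma_mmap_coherent() wrapper defined
 * later in this header. The "mydrv_*" names and fields are hypothetical.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *p = file->private_data;
 *
 *		if (!dma_can_mmap(p->dev))
 *			return -ENXIO;
 *		return dma_mmap_coherent(p->dev, vma, p->cpu_addr, p->handle,
 *					 vma->vm_end - vma->vm_start);
 *	}
 */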
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
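/*
 * Example (illustrative sketch): a non-contiguous allocation that the CPU
 * also needs to touch through a contiguous vmap. "dev" and "size" are
 * hypothetical.
 *
 *	struct sg_table *sgt;
 *	void *vaddr;
 *
 *	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
 *	if (!vaddr) {
 *		dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 *		return -ENOMEM;
 *	}
 *	...
 *	dma_vunmap_noncontiguous(dev, vaddr);
 *	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 */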
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
	return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline bool dma_addressing_limited(struct device *dev)
{
	return false;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline size_t dma_opt_mapping_size(struct device *dev)
{
	return 0;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
		struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
	return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */

#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);

static inline bool dma_dev_need_sync(const struct device *dev)
{
	/* Always call DMA sync operations when debugging is enabled */
	return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
}
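
/*
 * Example (illustrative sketch): a streaming buffer that stays mapped across
 * several transfers has to be handed back to the CPU before it is read, and
 * handed to the device again before the next transfer. "dev", "handle",
 * "len" and "process_rx_data()" are hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	process_rx_data();
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */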
#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
static inline bool dma_dev_need_sync(const struct device *dev)
{
	return false;
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}
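
/*
 * Example (illustrative sketch): non-coherent memory can be cheaper to
 * allocate, but the caller owns all cache maintenance and must bracket CPU
 * accesses with the dma_sync_single_*() helpers above. "dev" and "size" are
 * hypothetical.
 *
 *	dma_addr_t handle;
 *	void *vaddr = dma_alloc_noncoherent(dev, size, &handle,
 *					    DMA_FROM_DEVICE, GFP_KERNEL);
 *
 *	if (!vaddr)
 *		return -ENOMEM;
 *	...
 *	dma_free_noncoherent(dev, size, vaddr, handle, DMA_FROM_DEVICE);
 */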

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
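
/*
 * Example (illustrative sketch): the common streaming-DMA life cycle using
 * the attribute-less wrappers above. "dev", "skb_data", "len" and
 * "start_tx()" are hypothetical.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, skb_data, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	start_tx(handle, len);
 *	...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */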

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
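
/*
 * Example (illustrative sketch): a long-lived descriptor ring allocated as
 * coherent memory, so no explicit syncing is needed. "dev", "mydrv_desc",
 * RING_SIZE, "ioaddr" and RING_BASE are hypothetical.
 *
 *	struct mydrv_desc *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	writeq(ring_dma, ioaddr + RING_BASE);
 *	...
 *	dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */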

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
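
/*
 * Example (illustrative sketch): typical probe-time mask negotiation, trying
 * 64-bit addressing first and falling back to 32-bit. "dev" is the device
 * being probed.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return dev_err_probe(dev, -EIO, "no usable DMA mask\n");
 */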

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->max_segment_size = size;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}

static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->segment_boundary_mask = mask;
}
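
/*
 * Example (illustrative sketch): bus or platform code publishing mapping
 * restrictions through dev->dma_parms so the mapping layer never builds
 * segments the controller cannot handle. This assumes dev->dma_parms has
 * already been allocated by the bus code.
 *
 *	dma_set_max_seg_size(dev, SZ_64K);
 *	dma_set_seg_boundary(dev, SZ_4K - 1);
 */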

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}

static inline void dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->min_align_mask = min_align_mask;
}

#ifndef dma_get_cache_alignment
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_HAS_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
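
/*
 * Example (illustrative sketch): a frame buffer allocated write-combined and
 * later mapped to user space with dma_mmap_wc(). "dev", "fb_size" and the
 * "info" fields are hypothetical.
 *
 *	info->screen_base = dma_alloc_wc(dev, fb_size, &info->fb_dma,
 *					 GFP_KERNEL);
 *	if (!info->screen_base)
 *		return -ENOMEM;
 *	...
 *	ret = dma_mmap_wc(dev, vma, info->screen_base, info->fb_dma, fb_size);
 */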

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif /* CONFIG_NEED_DMA_MAP_STATE */
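
/*
 * Example (illustrative sketch): the DEFINE_DMA_UNMAP_* macros let a driver
 * store unmap information only when CONFIG_NEED_DMA_MAP_STATE is set, at no
 * cost otherwise. "mydrv_tx_buf", "buf", "handle" and "size" are
 * hypothetical.
 *
 *	struct mydrv_tx_buf {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(buf, mapping, handle);
 *	dma_unmap_len_set(buf, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(buf, mapping),
 *			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
 */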

#endif /* _LINUX_DMA_MAPPING_H */