#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       struct dma_attrs *attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     struct dma_attrs *attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
		    void *, dma_addr_t, size_t, struct dma_attrs *attrs);
	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, struct dma_attrs *attrs);
	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   struct dma_attrs *attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      struct dma_attrs *attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 struct dma_attrs *attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};

extern struct dma_map_ops dma_noop_ops;
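
/*
 * Illustrative sketch (not part of this header): an architecture or IOMMU
 * layer provides an instance of struct dma_map_ops and fills in the
 * operations it supports, for example:
 *
 *	static struct dma_map_ops my_arch_dma_ops = {
 *		.alloc		= my_arch_dma_alloc,
 *		.free		= my_arch_dma_free,
 *		.map_page	= my_arch_dma_map_page,
 *		.unmap_page	= my_arch_dma_unmap_page,
 *		.map_sg		= my_arch_dma_map_sg,
 *		.unmap_sg	= my_arch_dma_unmap_sg,
 *	};
 *
 * The my_arch_* names are hypothetical; get_dma_ops(), normally provided by
 * <asm/dma-mapping.h>, selects the instance used for a given device.
 */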

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL
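
/*
 * For example, DMA_BIT_MASK(32) evaluates to 0x00000000ffffffffULL and
 * DMA_BIT_MASK(64) to ~0ULL; the special case for 64 avoids the undefined
 * behaviour of shifting a 64-bit value by 64 bits.
 */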

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for dma allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, size_t size, int *ret);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
/*
 * Define the dma api to allow compilation but not linking of
 * dma dependent code.  Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern struct dma_map_ops bad_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
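
/*
 * Usage sketch (illustrative, not part of this header): a driver mapping a
 * scatterlist for a device read of memory might do:
 *
 *	int n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 *	if (n == 0)
 *		return -ENOMEM;
 *	... program the device with the n coalesced entries ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original nents, not the value returned
 * by dma_map_sg(). 'sgl' and 'nents' are hypothetical driver state; the
 * dma_map_sg()/dma_unmap_sg() wrappers are defined further below.
 */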

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
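
/*
 * Usage sketch (illustrative, not part of this header): for a buffer that
 * stays mapped while ownership bounces between CPU and device:
 *
 *	dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);
 *	... the CPU may now read the data the device wrote ...
 *	dma_sync_single_for_device(dev, handle, size, DMA_FROM_DEVICE);
 *	... the device may DMA into the buffer again ...
 *
 * 'handle' and 'size' are hypothetical values from an earlier mapping call.
 */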

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
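
/*
 * Usage sketch (illustrative, not part of this header): a typical streaming
 * mapping of a kernel buffer for a single device transfer:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... hand 'handle' to the device and wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * 'buf' and 'len' are hypothetical driver state; dma_mapping_error() is
 * defined further below.
 */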

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
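
/*
 * Usage sketch (illustrative, not part of this header): a driver's mmap
 * file operation exporting a coherent buffer to user space:
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_drv *drv = file->private_data;
 *
 *		return dma_mmap_coherent(drv->dev, vma, drv->cpu_addr,
 *					 drv->dma_handle, drv->size);
 *	}
 *
 * The my_drv names and fields are hypothetical; the buffer must have been
 * allocated with dma_alloc_coherent()/dma_alloc_attrs().
 */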

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
}
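
/*
 * Usage sketch (illustrative, not part of this header): allocating a
 * descriptor ring that both CPU and device access without explicit syncs:
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... tell the device about 'ring_dma', access 'ring' from the CPU ...
 *	dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 *
 * RING_SIZE is a hypothetical constant.
 */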

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}

#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
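
/*
 * Usage sketch (illustrative, not part of this header): a probe routine
 * declaring that its device can address 64 bits, falling back to 32:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *
 * 'pdev' is a hypothetical device being probed.
 */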

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	} else
		return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	} else
		return -EIO;
}
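
/*
 * Usage sketch (illustrative, not part of this header): a bus or driver
 * whose DMA engine cannot cross 64 KiB boundaries and is limited to 4 KiB
 * segments (assuming dev->dma_parms has been set up by the bus code):
 *
 *	dma_set_max_seg_size(dev, SZ_4K);
 *	dma_set_seg_boundary(dev, SZ_64K - 1);
 *
 * The limits shown are purely illustrative values.
 */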

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory api */
#define	DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
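
/*
 * Usage sketch (illustrative, not part of this header): a platform driver
 * exposing a dedicated on-device memory window as the backing store for its
 * own dma_alloc_coherent() calls; the call returns 0 on failure:
 *
 *	if (!dma_declare_coherent_memory(dev, sram_phys, sram_bus_addr,
 *					 sram_size,
 *					 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE))
 *		return -ENOMEM;
 *
 * The sram_* values are hypothetical platform data.
 */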

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, gfp_t gfp)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
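
/*
 * Usage sketch (illustrative, not part of this header): the dmam_* variants
 * are device-managed (devres); memory allocated in probe() is released
 * automatically when the device is unbound, so no explicit free is needed
 * on error or removal paths:
 *
 *	drv->ring = dmam_alloc_coherent(dev, RING_SIZE, &drv->ring_dma,
 *					GFP_KERNEL);
 *	if (!drv->ring)
 *		return -ENOMEM;
 *
 * 'drv' and RING_SIZE are hypothetical driver state.
 */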

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif
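
/*
 * Usage sketch (illustrative, not part of this header): the dma_unmap state
 * helpers defined below let a driver store unmap information only on
 * configurations that actually need it:
 *
 *	struct my_tx_desc {
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(desc, mapping, handle);
 *	dma_unmap_len_set(desc, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(desc, mapping),
 *			 dma_unmap_len(desc, len), DMA_TO_DEVICE);
 *
 * struct my_tx_desc, 'desc', 'handle' and 'size' are hypothetical.
 */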

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)           (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)             (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
#endif

#endif /* _LINUX_DMA_MAPPING_H */