#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm-generic/dma-coherent.h>
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
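/*
 * Example usage (editor's illustrative sketch, not part of the original
 * header): a driver streaming a kernel buffer to a device maps it, checks
 * the result with dma_mapping_error(), and unmaps it once the transfer is
 * done. "buf" and "len" are hypothetical driver-owned values.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... program the device with "dma" and start the transfer ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */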
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
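/*
 * Example usage (editor's illustrative sketch, not part of the original
 * header): mapping a scatterlist for a device-to-memory transfer. The
 * mapped entry count returned by dma_map_sg() may be smaller than "nents"
 * and is what the hardware should be programmed with; dma_unmap_sg() takes
 * the original "nents". "sgl" and "nents" are hypothetical.
 *
 *	int mapped;
 *
 *	mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	... program the device with the "mapped" entries ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */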
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}
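/*
 * Example usage (editor's illustrative sketch): the page variants are the
 * natural fit when the buffer is already a struct page, e.g. one freshly
 * allocated for a receive ring. "page" is hypothetical.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... hand "dma" to the device ...
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
 */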
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
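/*
 * Example usage (editor's illustrative sketch): if the CPU must look at a
 * streaming mapping while it stays mapped, ownership has to be bounced
 * back and forth with the sync calls. "dma" and "len" are hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer contents ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	... the device may now DMA into the buffer again ...
 */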
static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
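/*
 * Example usage (editor's illustrative sketch): the scatterlist sync calls
 * mirror the single-buffer ones; pass the same "nents" that was originally
 * given to dma_map_sg(). "sgl" and "nents" are hypothetical.
 *
 *	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);
 *	... the CPU inspects the data ...
 *	dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);
 */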
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
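/*
 * Example usage (editor's illustrative sketch): a driver exposing a
 * coherent buffer through its file_operations .mmap handler. "foo_priv"
 * is a hypothetical driver structure holding the values returned by
 * dma_alloc_coherent().
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->dma_handle, priv->size);
 *	}
 */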
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
}
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
}
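/*
 * Example usage (editor's illustrative sketch): allocating a long-lived,
 * coherent descriptor ring at probe time and freeing it at remove time.
 * "ring" and RING_BYTES are hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... use "ring" from the CPU and "ring_dma" from the device ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */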
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
}
static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}
#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif
#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif

#endif /* _ASM_GENERIC_DMA_MAPPING_H */
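/*
 * Example usage (editor's illustrative sketch): a probe routine asking for
 * 64-bit DMA addressing and falling back to 32-bit when the device or the
 * platform cannot support it. dma_set_mask() returns 0 on success.
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */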
;