/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for powerpc:
 * the pci and vio busses
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping. These allocate/free a region of uncached mapped
 * memory space for use with DMA devices. Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);
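
/*
 * A hedged sketch (not in the original header) of the "allocate the space
 * normally" alternative described above; "buf" and "len" are hypothetical
 * driver variables:
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);	// ordinary cacheable memory
 *	// ... CPU fills buf ...
 *	__dma_sync(buf, len, DMA_TO_DEVICE);	// make it visible to the device
 */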
#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */
#define __dma_alloc_coherent(gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */
static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}
/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
	dma_addr_t	(*map_page)(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_page)(struct device *dev,
				dma_addr_t dma_address, size_t size,
				enum dma_data_direction direction,
				struct dma_attrs *attrs);
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size,
				enum dma_data_direction direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size,
				enum dma_data_direction direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				enum dma_data_direction direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				enum dma_data_direction direction);
#endif
};
/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_mapping_ops dma_iommu_ops;
#endif
extern struct dma_mapping_ops dma_direct_ops;
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL) || dev->archdata.dma_ops == NULL) {
#ifdef CONFIG_PPC64
		return NULL;
#else
		/* Use default on 32-bit if dma_ops is not set up */
		/* TODO: Long term, we should fix drivers so that dev and
		 * archdata dma_ops are set up for all buses.
		 */
		return &dma_direct_ops;
#endif
	}

	return dev->archdata.dma_ops;
}
static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
{
	dev->archdata.dma_ops = ops;
}
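
/*
 * Hedged example (illustrative, not from this header): bus or platform
 * setup code points each device at a generic op table, e.g. for a
 * directly-mapped bus ("pdev" is a hypothetical device being probed):
 *
 *	set_dma_ops(&pdev->dev, &dma_direct_ops);
 */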
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}
/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
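
/*
 * Hedged usage sketch (hypothetical driver code): negotiate the widest
 * mask the device supports and fall back to 32 bit:
 *
 *	if (dma_set_mask(dev, DMA_64BIT_MASK) &&
 *	    dma_set_mask(dev, DMA_32BIT_MASK))
 *		return -EIO;	// no usable DMA addressing
 */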
/*
 * map_/unmap_single actually call through to map/unmap_page now that all the
 * dma_mapping_ops have been converted over. We just have to get the page and
 * offset to pass through to map_page
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *cpu_addr,
					      size_t size,
					      enum dma_data_direction direction,
					      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
				 (unsigned long)cpu_addr % PAGE_SIZE, size,
				 direction, attrs);
}
static inline void dma_unmap_single_attrs(struct device *dev,
					  dma_addr_t dma_addr,
					  size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
}
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    unsigned long offset, size_t size,
					    enum dma_data_direction direction,
					    struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, page, offset, size, direction, attrs);
}
static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction, attrs);
}
static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sg,
				      int nhwentries,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
}
static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
}
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
}
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
}
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}
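
/*
 * Hedged scatter/gather sketch (hypothetical driver code): map a table, use
 * the returned count to program the device, and unmap with the ORIGINAL
 * entry count, not the mapped count:
 *
 *	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (mapped == 0)
 *		return -EIO;
 *	// program device from sg_dma_address()/sg_dma_len() over "mapped"
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */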
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
					   size, direction);
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_device(dev, dma_handle,
					      0, size, direction);
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_cpu(dev, dma_handle,
					   offset, size, direction);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
					      size, direction);
}
#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}
#endif
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}
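
/*
 * Hedged streaming-mapping sketch (hypothetical "buf"/"len"): every mapping
 * should be checked with dma_mapping_error() before the handle is used:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -EIO;
 *	// ... device writes into buf ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 */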
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif
static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}
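
/*
 * Hedged example: buffers handed to the streaming API should not share a
 * cache line with unrelated data on non-coherent parts; a hypothetical
 * driver might round its allocation up accordingly:
 *
 *	size_t safe_len = ALIGN(len, dma_get_cache_alignment());
 */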
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}
#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */