/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for powerpc
 * (the pci and vio busses).
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>

#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/bug.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction);
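
/*
 * Illustrative sketch (not from this file): on a non-snooping core a
 * driver could instead allocate a buffer "normally" and keep it
 * consistent by hand with __dma_sync() around each transfer.  The
 * buffer and length names below are hypothetical.
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *
 *	... fill buf with data for the device ...
 *	__dma_sync(buf, len, DMA_TO_DEVICE);
 *	... start the DMA_TO_DEVICE transfer ...
 */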

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(size, handle, gfp)	NULL
#define __dma_free_coherent(size, addr)		do { } while (0)
#define __dma_sync(addr, size, rw)		do { } while (0)
#define __dma_sync_page(pg, off, sz, rw)	do { } while (0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

#ifdef CONFIG_PPC64

extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag);
extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle);
extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction direction);
extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction direction);
extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction);
extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
		size_t size, enum dma_data_direction direction);
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction direction);
extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nhwentries, enum dma_data_direction direction);

#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, gfp);
#else
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(ptr, size, direction);

	return virt_to_bus(ptr);
}

#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync_page(page, offset, size, direction);

	return page_to_bus(page) + offset;
}

#define dma_unmap_page(dev, handle, size, dir)	do { } while (0)

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
		sg->dma_address = page_to_bus(sg->page) + sg->offset;
	}

	return nents;
}

/* We don't do anything here. */
#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

#endif /* CONFIG_PPC64 */
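
/*
 * Usage sketch (assumed, not part of this header): a driver typically
 * allocates a descriptor ring with dma_alloc_coherent() and programs
 * the returned bus address into the device.  "pdev", "ring", "ring_dma"
 * and RING_BYTES are hypothetical names.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(&pdev->dev, RING_BYTES,
 *					&ring_dma, GFP_KERNEL);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	... write ring_dma to the device's base register ...
 *	dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
 */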

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
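
/*
 * Usage sketch (assumed): while a streaming mapping is live, the CPU
 * may only touch the buffer between a _for_cpu and a _for_device sync.
 * "busaddr" and "len" stand for the values from an earlier
 * dma_map_single().
 *
 *	dma_sync_single_for_cpu(dev, busaddr, len, DMA_FROM_DEVICE);
 *	... inspect the data the device wrote ...
 *	dma_sync_single_for_device(dev, busaddr, len, DMA_FROM_DEVICE);
 */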

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}
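
/*
 * Usage sketch (assumed): streaming mappings wrap one CPU buffer for
 * one transfer and should be checked with dma_mapping_error().  "buf"
 * and "len" are hypothetical.
 *
 *	dma_addr_t busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(busaddr))
 *		return -EIO;
 *	... point the device at busaddr and run the transfer ...
 *	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 */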

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d)	(0)
#else
#define dma_is_consistent(d)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}
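
/*
 * Usage sketch (assumed): callers carving several DMA buffers out of
 * one allocation can pad each piece to this alignment so buffers never
 * share a cache line.  "obj_size" is hypothetical.
 *
 *	size_t stride = ALIGN(obj_size, dma_get_cache_alignment());
 */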

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
				size_t size, enum dma_data_direction direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
				size_t size, enum dma_data_direction direction);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*dac_dma_supported)(struct device *dev, u64 mask);
};
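
/*
 * Illustrative sketch (assumed, not an existing implementation): a bus
 * back end would fill in a struct dma_mapping_ops with its own
 * handlers and attach it to its devices.  All "example_*" names are
 * hypothetical.
 *
 *	static struct dma_mapping_ops example_dma_ops = {
 *		.alloc_coherent	= example_alloc_coherent,
 *		.free_coherent	= example_free_coherent,
 *		.map_single	= example_map_single,
 *		.unmap_single	= example_unmap_single,
 *		.map_sg		= example_map_sg,
 *		.unmap_sg	= example_unmap_sg,
 *		.dma_supported	= example_dma_supported,
 *	};
 */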

#endif /* __KERNEL__ */
#endif	/* _ASM_DMA_MAPPING_H */