/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>
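
/*
 * Translate a device DMA address back to the kernel virtual address of
 * the underlying buffer, going through the platform's dma-coherence
 * hooks so bus-specific address translation is honoured.
 */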
static inline unsigned long dma_addr_to_virt(struct device *dev,
        dma_addr_t dma_addr)
{
        unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);

        return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

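/*
 * R10000 and R12000 execute loads speculatively, so they can refill cache
 * lines behind a running DMA transfer; non-coherent systems built around
 * these CPUs therefore need cache maintenance on the CPU side of a
 * transfer as well, not just when handing a buffer to the device.
 */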
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
        return !plat_device_is_coherent(dev) &&
               (current_cpu_type() == CPU_R10000 ||
                current_cpu_type() == CPU_R12000);
}

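/*
 * Clamp the allocation to a GFP zone the device's coherent_dma_mask can
 * actually reach: below 16MB (DMA_BIT_MASK(24)) means ZONE_DMA, below
 * 4GB means ZONE_DMA32 where that zone is configured.
 */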
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
        if (dev == NULL)
                gfp |= __GFP_DMA;
        else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
                gfp |= __GFP_DMA;
        else
#endif
#ifdef CONFIG_ZONE_DMA32
             if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
                gfp |= __GFP_DMA32;
        else
#endif
                ;

        /* Don't invoke OOM killer */
        gfp |= __GFP_NORETRY;

        return gfp;
}

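/*
 * "Noncoherent" here is the Linux DMA-API sense: the buffer is cached
 * and the caller is expected to synchronize it explicitly, e.g. through
 * dma_cache_sync(), around each transfer.
 */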
void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        gfp = massage_gfp_flags(dev, gfp);

        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
                return ret;

        gfp = massage_gfp_flags(dev, gfp);

        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);

                if (!plat_device_is_coherent(dev)) {
                        dma_cache_wback_inv((unsigned long) ret, size);
                        ret = UNCAC_ADDR(ret);
                }
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
        free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        unsigned long addr = (unsigned long) vaddr;
        int order = get_order(size);

        if (dma_release_from_coherent(dev, order, vaddr))
                return;

        plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

        if (!plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

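/*
 * Perform the cache maintenance a transfer direction requires: write back
 * dirty lines before the device reads memory, invalidate before the CPU
 * reads what the device wrote, and do both for bidirectional buffers.
 */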
static inline void __dma_sync(unsigned long addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }
}

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
        enum dma_data_direction direction)
{
        unsigned long addr = (unsigned long) ptr;

        if (!plat_device_is_coherent(dev))
                __dma_sync(addr, size, direction);

        return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        enum dma_data_direction direction)
{
        if (cpu_is_noncoherent_r10000(dev))
                __dma_sync(dma_addr_to_virt(dev, dma_addr), size,
                           direction);

        plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

EXPORT_SYMBOL(dma_unmap_single);

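/*
 * Map each scatterlist entry individually; on non-coherent platforms the
 * cache is synchronized per entry before its device address is filled in.
 */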
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                unsigned long addr;

                addr = (unsigned long) sg_virt(sg);
                if (!plat_device_is_coherent(dev) && addr)
                        __dma_sync(addr, sg->length, direction);
                sg->dma_address = plat_map_dma_mem(dev,
                                (void *)addr, sg->length);
        }

        return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = (unsigned long) page_address(page) + offset;
                __dma_sync(addr, size, direction);
        }

        return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

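/*
 * On unmap only DMA_FROM_DEVICE and DMA_BIDIRECTIONAL buffers need cache
 * maintenance: stale cache lines must not shadow data the device wrote.
 * A DMA_TO_DEVICE buffer was already written back when it was mapped.
 */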
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        enum dma_data_direction direction)
{
        unsigned long addr;
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nhwentries; i++, sg++) {
                if (!plat_device_is_coherent(dev) &&
                    direction != DMA_TO_DEVICE) {
                        addr = (unsigned long) sg_virt(sg);
                        if (addr)
                                __dma_sync(addr, sg->length, direction);
                }
                plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
        }
}

EXPORT_SYMBOL(dma_unmap_sg);

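/*
 * The sync_for_cpu/sync_for_device pairs hand buffer ownership back and
 * forth: call the *_for_cpu variant before the CPU touches a mapped
 * buffer again, and the *_for_device variant before the device does.
 */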
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dev, dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        plat_extra_sync_for_device(dev);
        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dev, dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dev, dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        plat_extra_sync_for_device(dev);
        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dev, dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

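/*
 * Scatterlist variants of the ownership-transfer helpers; each entry is
 * synchronized through its page's kernel virtual address.
 */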
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (cpu_is_noncoherent_r10000(dev))
                        __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (!plat_device_is_coherent(dev))
                        __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return plat_dma_mapping_error(dev, dma_addr);
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
        return plat_dma_supported(dev, mask);
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
        return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        plat_extra_sync_for_device(dev);
        if (!plat_device_is_coherent(dev))
                __dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);