/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

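/*
 * The R10000 and R12000 speculatively execute loads and stores, which can
 * dirty or refill cache lines behind an active DMA mapping; this is the
 * stated rationale for the check below - such CPUs need cache maintenance
 * on the unmap and sync-for-cpu paths as well.
 */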
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
		current_cpu_type() == CPU_R12000);
}

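/*
 * Rewrite the zone specifiers in the GFP flags so the allocation lands in
 * a zone the device can address: the caller's zone bits are discarded and
 * __GFP_DMA/__GFP_DMA32 are derived from the device's coherent_dma_mask.
 */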
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp;
}

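/*
 * Allocate "non-coherent" DMA memory: the buffer stays cached, so the
 * caller is responsible for cache maintenance via dma_cache_sync().
 */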
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

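/*
 * Allocate "coherent" DMA memory.  On hardware-coherent platforms this is
 * an ordinary cached allocation; otherwise the cache lines are written
 * back and invalidated and the buffer is handed out through an uncached
 * (UNCAC_ADDR) mapping.
 */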
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle);
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

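/*
 * Free memory obtained from dma_alloc_coherent().  On non-coherent
 * platforms the uncached address handed to the caller must first be
 * converted back (CAC_ADDR) to the cached kernel address that was
 * actually allocated.
 */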
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	plat_unmap_dma_mem(dev, dma_handle);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

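/*
 * Perform the cache maintenance required for a DMA transfer in the given
 * direction: write back for device reads (DMA_TO_DEVICE), invalidate for
 * device writes (DMA_FROM_DEVICE), and both for bidirectional transfers.
 */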
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dma_addr), size,
			   direction);

	plat_unmap_dma_mem(dev, dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);

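/*
 * Map a scatterlist for DMA: each entry is synced (if the device is not
 * hardware-coherent) and assigned its bus address.  Returns the number of
 * entries mapped.
 */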
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
				(void *)addr, sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		dma_cache_wback_inv(addr, size);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

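/*
 * Tear down a single-page mapping.  If the device may have written to
 * the buffer (direction != DMA_TO_DEVICE), the covering cache lines are
 * flushed first; note that the MIPS cache ops take a kernel virtual
 * address, hence the dma_addr_to_virt() conversion.
 */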
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_address);
		dma_cache_wback_inv(addr, size);
	}

	plat_unmap_dma_mem(dev, dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(dev, sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

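/*
 * The sync-for-cpu operations hand buffer ownership back to the CPU; only
 * speculating CPUs (see cpu_is_noncoherent_r10000()) need cache ops here,
 * since other non-coherent CPUs cannot have refilled the lines while the
 * device owned the buffer.
 */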
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

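/*
 * The sync-for-device operations hand buffer ownership to the device and
 * therefore need cache maintenance on every non-coherent platform.
 */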
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

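/*
 * Explicit cache synchronisation for buffers obtained from
 * dma_alloc_noncoherent().
 */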
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);