/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

#include <dma-coherence.h>
#ifdef CONFIG_DMA_MAYBE_COHERENT
int coherentio = 0;	/* User defined DMA coherency from command line. */
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */
static int __init setcoherentio(char *str)
{
	coherentio = 1;
	pr_info("Hardware DMA cache coherency (command line)\n");

	return 0;
}
early_param("coherentio", setcoherentio);
static int __init setnocoherentio(char *str)
{
	coherentio = 0;
	pr_info("Software DMA cache coherency (command line)\n");

	return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif /* CONFIG_DMA_MAYBE_COHERENT */
static inline struct page *dma_addr_to_page(struct device *dev,
	dma_addr_t dma_addr)
{
	return pfn_to_page(
		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}
/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
 * speculatively fill random cachelines with stale data at any time,
 * requiring an extra flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
static inline int cpu_needs_post_dma_flush(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (boot_cpu_type() == CPU_R10000 ||
		boot_cpu_type() == CPU_R12000 ||
		boot_cpu_type() == CPU_BMIPS5000);
}
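
/*
 * Illustrative sequence (added comment): without the post-DMA flush, the
 * following can happen on the CPUs listed above during a DMA read:
 *
 *	1. The buffer's cachelines are invalidated and the device starts
 *	   writing to memory.
 *	2. The CPU speculatively refills one of those lines with stale
 *	   pre-DMA data while the transfer is still in flight.
 *	3. The CPU then reads the stale cacheline instead of the data the
 *	   device wrote.
 *
 * Invalidating again once the transfer has completed closes this window.
 */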
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA;
	else
#endif
		dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}
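
/*
 * Worked example (added; the device is hypothetical): with
 * CONFIG_ZONE_DMA32 configured and a device whose coherent_dma_mask is
 * DMA_BIT_MASK(32), a caller's GFP_KERNEL | __GFP_HIGHMEM comes back as
 * GFP_KERNEL | __GFP_DMA32 | __GFP_NORETRY: caller-supplied region
 * specifiers are dropped, the zone matching the device's mask is chosen,
 * and allocation failure is preferred over invoking the OOM killer.
 */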
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}
EXPORT_SYMBOL(dma_alloc_noncoherent);
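
/*
 * Usage sketch (added; 'dev' and the size are hypothetical): a driver pairs
 * this with dma_free_noncoherent() and performs its own cache maintenance
 * on the cached mapping it gets back:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_noncoherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (buf) {
 *		... use buf, syncing explicitly as needed ...
 *		dma_free_noncoherent(dev, PAGE_SIZE, buf, handle);
 *	}
 */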
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			if (!hw_coherentio)
				ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}
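
/*
 * Added note: on a non-coherent device the freshly allocated buffer is
 * flushed from the cache and, unless the hardware itself maintains
 * coherency (hw_coherentio), the returned pointer is translated into the
 * uncached address segment with UNCAC_ADDR() so that subsequent CPU
 * accesses bypass the cache. mips_dma_free_coherent() undoes this with
 * CAC_ADDR() before freeing the pages.
 */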
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_noncoherent);
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev) && !hw_coherentio)
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}
static inline void __dma_sync_virtual(void *addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}
/*
 * A single sg entry may refer to multiple physically contiguous
 * pages. But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			__dma_sync_virtual(addr + offset, len, direction);
			kunmap_atomic(addr);
		} else
			__dma_sync_virtual(page_address(page) + offset,
					   len, direction);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}
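
/*
 * Worked example (added comment; the numbers are hypothetical): for a
 * highmem sg entry with offset = PAGE_SIZE + 128 and size = PAGE_SIZE, the
 * first iteration advances 'page' past the leading whole page, reduces
 * 'offset' to 128 and clamps 'len' to PAGE_SIZE - 128, then kmaps and syncs
 * that partial page; the second iteration maps the following page and syncs
 * the remaining 128 bytes. Lowmem pages skip kmap_atomic() entirely and are
 * synced through their linear address in a single pass.
 */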
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_addr),
			   dma_addr & ~PAGE_MASK, size, direction);

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}
static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
				  sg->offset;
	}

	return nents;
}
static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(page, offset, size, direction);

	return plat_map_dma_mem_page(dev, page) + offset;
}
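
/*
 * Added note: this is the ops backend behind the generic streaming API on
 * MIPS; drivers never call it directly. A call such as (hypothetical
 * buffer):
 *
 *	dma_addr_t d = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 * lands here via the dma_map_ops table below, writing back the cached
 * buffer on non-coherent devices before the bus address is handed to the
 * hardware.
 */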
static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	int nhwentries, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}
static void mips_dma_sync_single_for_cpu(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}
static void mips_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}
static void mips_dma_sync_sg_for_cpu(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	if (cpu_needs_post_dma_flush(dev))
		for (i = 0; i < nelems; i++, sg++)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
}
static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	if (!plat_device_is_coherent(dev))
		for (i = 0; i < nelems; i++, sg++)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
}
int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}
int mips_dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		__dma_sync_virtual(vaddr, size, direction);
}
EXPORT_SYMBOL(dma_cache_sync);
static struct dma_map_ops mips_default_dma_map_ops = {
	.alloc = mips_dma_alloc_coherent,
	.free = mips_dma_free_coherent,
	.map_page = mips_dma_map_page,
	.unmap_page = mips_dma_unmap_page,
	.map_sg = mips_dma_map_sg,
	.unmap_sg = mips_dma_unmap_sg,
	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
	.sync_single_for_device = mips_dma_sync_single_for_device,
	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
	.sync_sg_for_device = mips_dma_sync_sg_for_device,
	.mapping_error = mips_dma_mapping_error,
	.dma_supported = mips_dma_supported
};

struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);
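
/*
 * Added note: mips_dma_map_ops is the arch-wide default dispatch table;
 * platforms with special DMA windows can repoint it at their own
 * dma_map_ops during early init. For devices without a per-device
 * override, get_dma_ops() resolves to this pointer, so generic DMA API
 * calls end up in the mips_dma_* functions above.
 */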
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(mips_dma_init);