arch/mips/mm/dma-default.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/dma-contiguous.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

#include <dma-coherence.h>

#ifdef CONFIG_DMA_MAYBE_COHERENT
int coherentio = 0;	/* User defined DMA coherency from command line. */
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */

static int __init setcoherentio(char *str)
{
	coherentio = 1;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
	coherentio = 0;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif
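
/*
 * Example (illustrative, not from the original file): the early_param
 * hooks above are consumed from the kernel command line, e.g.
 *
 *	... root=/dev/sda1 coherentio	 -> hardware-maintained coherency
 *	... root=/dev/sda1 nocoherentio	 -> software cache maintenance
 *
 * Any other boot parameters shown are unrelated placeholders.
 */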

static inline struct page *dma_addr_to_page(struct device *dev,
	dma_addr_t dma_addr)
{
	return pfn_to_page(
		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
 * speculatively fill random cachelines with stale data at any time,
 * requiring an extra flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However this function is only called on non-I/O-coherent
 * systems and only the R10000 and R12000 are used in such systems: the
 * SGI IP28 Indigo² and the SGI IP32 aka O2, respectively.
 */
static inline int cpu_needs_post_dma_flush(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (boot_cpu_type() == CPU_R10000 ||
		boot_cpu_type() == CPU_R12000 ||
		boot_cpu_type() == CPU_BMIPS5000);
}

/*
 * Pick the GFP zone matching the device's coherent DMA mask: a device
 * that cannot address all of memory must have its buffers allocated
 * from ZONE_DMA or ZONE_DMA32, depending on which zones the kernel
 * was configured with.
 */
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
	if (dev == NULL)
		dma_flag = __GFP_DMA;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA;
	else
#endif
	dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}
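
/*
 * Worked example (added for clarity): on a kernel with both ZONE_DMA and
 * ZONE_DMA32, a device with coherent_dma_mask == DMA_BIT_MASK(32) is not
 * *below* DMA_BIT_MASK(32), so it falls through to the second test and
 * gets __GFP_DMA32; only a device whose mask is narrower than 32 bits is
 * pushed into ZONE_DMA.
 */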

void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}
EXPORT_SYMBOL(dma_alloc_noncoherent);
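
/*
 * Hypothetical usage sketch (illustrative only; the buffer size and flow
 * are made up, not part of this file).  A driver that manages its own
 * cache maintenance could pair this with dma_cache_sync():
 *
 *	dma_addr_t handle;
 *	void *buf;
 *
 *	buf = dma_alloc_noncoherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...fill buf...
 *	dma_cache_sync(dev, buf, PAGE_SIZE, DMA_TO_DEVICE);
 *	...hand "handle" to the device...
 *	dma_free_noncoherent(dev, PAGE_SIZE, buf, handle);
 */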

static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *ret;
	struct page *page = NULL;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))
		page = dma_alloc_from_contiguous(dev,
					count, get_order(size));
	if (!page)
		page = alloc_pages(gfp, get_order(size));

	if (!page)
		return NULL;

	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = plat_map_dma_mem(dev, ret, size);
	if (!plat_device_is_coherent(dev)) {
		dma_cache_wback_inv((unsigned long) ret, size);
		/* Return the uncached alias so CPU accesses bypass the cache. */
		if (!hw_coherentio)
			ret = UNCAC_ADDR(ret);
	}

	return ret;
}

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_noncoherent);

static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	/* Undo the UNCAC_ADDR translation done at allocation time. */
	if (!plat_device_is_coherent(dev) && !hw_coherentio)
		addr = CAC_ADDR(addr);

	page = virt_to_page((void *) addr);

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}
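
/*
 * Cache maintenance by DMA direction (summary, added for clarity):
 *
 *	DMA_TO_DEVICE	  - write back dirty lines so the device sees them
 *	DMA_FROM_DEVICE	  - invalidate so the CPU re-reads device data
 *	DMA_BIDIRECTIONAL - write back, then invalidate
 */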
static inline void __dma_sync_virtual(void *addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous
 * pages.  But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			__dma_sync_virtual(addr + offset, len, direction);
			kunmap_atomic(addr);
		} else
			__dma_sync_virtual(page_address(page) + offset,
					   size, direction);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_addr),
			   dma_addr & ~PAGE_MASK, size, direction);
	plat_post_dma_flush(dev);
	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
				  sg->offset;
	}

	return nents;
}

static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(page, offset, size, direction);

	return plat_map_dma_mem_page(dev, page) + offset;
}

static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	int nhwentries, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nhwentries; i++, sg++) {
		/*
		 * For DMA_TO_DEVICE the device only read the buffer, so
		 * no cache maintenance is needed on unmap.
		 */
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

static void mips_dma_sync_single_for_cpu(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
	plat_post_dma_flush(dev);
}

static void mips_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}

static void mips_dma_sync_sg_for_cpu(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	if (cpu_needs_post_dma_flush(dev))
		for (i = 0; i < nelems; i++, sg++)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
	plat_post_dma_flush(dev);
}

static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	if (!plat_device_is_coherent(dev))
		for (i = 0; i < nelems; i++, sg++)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
}

int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	/* Mappings on this platform are never reported as failed. */
	return 0;
}

int mips_dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		__dma_sync_virtual(vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);

static struct dma_map_ops mips_default_dma_map_ops = {
	.alloc = mips_dma_alloc_coherent,
	.free = mips_dma_free_coherent,
	.map_page = mips_dma_map_page,
	.unmap_page = mips_dma_unmap_page,
	.map_sg = mips_dma_map_sg,
	.unmap_sg = mips_dma_unmap_sg,
	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
	.sync_single_for_device = mips_dma_sync_single_for_device,
	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
	.sync_sg_for_device = mips_dma_sync_sg_for_device,
	.mapping_error = mips_dma_mapping_error,
	.dma_supported = mips_dma_supported
};

struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);
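
/*
 * Note (added for clarity): drivers never call the mips_dma_* functions
 * directly; they use the generic DMA API, which dispatches through the
 * ops table above.  A hypothetical streaming mapping for illustration:
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, bus))
 *		return -EIO;
 *	...start the device on "bus"...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 *
 * On this platform dma_map_single() ends up in mips_dma_map_page() and
 * dma_unmap_single() in mips_dma_unmap_page().
 */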

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(mips_dma_init);