[linux-2.6.32.60-moxart.git] / arch/mips/mm/dma-default.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>
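/*
 * Translate a device-visible DMA address back to a kernel virtual
 * address: the platform hook undoes any bus-to-physical offset and
 * phys_to_virt() maps the result into the kernel's direct mapping.
 */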
static inline unsigned long dma_addr_to_virt(struct device *dev,
        dma_addr_t dma_addr)
{
        unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);

        return (unsigned long)phys_to_virt(addr);
}
/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
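/*
 * R10000 and R12000 execute loads and stores speculatively, so cache
 * lines covering a DMA buffer can be refilled while the device still
 * owns it.  These parts therefore need cache maintenance when a buffer
 * is handed back to the CPU, where other non-coherent CPUs do not.
 */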
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
        return !plat_device_is_coherent(dev) &&
               (current_cpu_type() == CPU_R10000 ||
                current_cpu_type() == CPU_R12000);
}
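/*
 * Pick the GFP zone matching the device's coherent DMA mask: callers'
 * zone modifiers are discarded, ISA devices without a struct device fall
 * back to ZONE_DMA, and __GFP_NORETRY keeps a failed allocation from
 * invoking the OOM killer.
 */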
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
        gfp_t dma_flag;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
        if (dev == NULL)
                dma_flag = __GFP_DMA;
        else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
        if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
                dma_flag = __GFP_DMA;
        else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
                dma_flag = __GFP_DMA32;
        else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
        if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
                dma_flag = __GFP_DMA32;
        else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
        if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
                dma_flag = __GFP_DMA;
        else
#endif
                dma_flag = 0;

        /* Don't invoke OOM killer */
        gfp |= __GFP_NORETRY;

        return gfp | dma_flag;
}
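/*
 * Allocate "noncoherent" memory: an ordinary cached buffer.  On
 * non-coherent platforms the caller is responsible for cache
 * maintenance, typically via dma_cache_sync().
 */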
void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;

        gfp = massage_gfp_flags(dev, gfp);

        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);
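/*
 * Coherent allocation: first try any dedicated per-device coherent
 * memory pool.  Otherwise allocate normally; on non-coherent hardware
 * the buffer is flushed from the cache and an uncached alias of it
 * (UNCAC_ADDR(), the KSEG1/XKPHYS uncached window) is returned, so CPU
 * and device always see the same data.
 */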
void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;

        if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
                return ret;

        gfp = massage_gfp_flags(dev, gfp);

        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);

                if (!plat_device_is_coherent(dev)) {
                        dma_cache_wback_inv((unsigned long) ret, size);
                        ret = UNCAC_ADDR(ret);
                }
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
        free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);
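/*
 * Free a coherent buffer.  If it came from the device's coherent pool,
 * release it there; otherwise convert an uncached alias back to the
 * cached address (CAC_ADDR()) before returning the pages to the
 * allocator.
 */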
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        unsigned long addr = (unsigned long) vaddr;
        int order = get_order(size);

        if (dma_release_from_coherent(dev, order, vaddr))
                return;

        plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

        if (!plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);
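/*
 * Cache maintenance for one buffer: write back dirty lines before the
 * device reads (DMA_TO_DEVICE), invalidate stale lines before the CPU
 * reads what the device wrote (DMA_FROM_DEVICE), and do both when data
 * moves in both directions.
 */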
static inline void __dma_sync(unsigned long addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }
}
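/*
 * Streaming mapping of a single buffer: on non-coherent devices the
 * buffer is synced for the given direction before the bus address is
 * handed to the device.
 */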
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
        enum dma_data_direction direction)
{
        unsigned long addr = (unsigned long) ptr;

        if (!plat_device_is_coherent(dev))
                __dma_sync(addr, size, direction);

        return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);
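/*
 * On most CPUs no cache work is needed at unmap time; only the
 * speculating R10000/R12000 may have refilled lines covering the buffer
 * while the DMA was in flight, so sync again for those.
 */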
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        enum dma_data_direction direction)
{
        if (cpu_is_noncoherent_r10000(dev))
                __dma_sync(dma_addr_to_virt(dev, dma_addr), size,
                           direction);

        plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

EXPORT_SYMBOL(dma_unmap_single);
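/*
 * Map a scatterlist: each entry that has a kernel virtual address is
 * synced, then given its own bus address.
 */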
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                unsigned long addr;

                addr = (unsigned long) sg_virt(sg);
                if (!plat_device_is_coherent(dev) && addr)
                        __dma_sync(addr, sg->length, direction);
                sg->dma_address = plat_map_dma_mem(dev,
                                                   (void *)addr, sg->length);
        }

        return nents;
}

EXPORT_SYMBOL(dma_map_sg);
dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = (unsigned long) page_address(page) + offset;
                __dma_sync(addr, size, direction);
        }

        return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);
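/*
 * Unmap a scatterlist.  For DMA_TO_DEVICE the device never wrote to the
 * buffer, so the CPU's cached copy is still valid and only the other
 * directions need cache work before the entries are released.
 */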
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        enum dma_data_direction direction)
{
        unsigned long addr;
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nhwentries; i++, sg++) {
                if (!plat_device_is_coherent(dev) &&
                    direction != DMA_TO_DEVICE) {
                        addr = (unsigned long) sg_virt(sg);
                        if (addr)
                                __dma_sync(addr, sg->length, direction);
                }
                plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
        }
}

EXPORT_SYMBOL(dma_unmap_sg);
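/*
 * Note the asymmetry in the sync operations below: the *_for_cpu
 * variants only need cache work on the speculating R10000/R12000, while
 * the *_for_device variants must sync on every non-coherent platform
 * before the device touches the buffer again.
 */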
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dev, dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        plat_extra_sync_for_device(dev);
        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dev, dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_device);
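/* The _range variants sync only the sub-buffer at dma_handle + offset. */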
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dev, dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        plat_extra_sync_for_device(dev);
        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dev, dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (cpu_is_noncoherent_r10000(dev))
                        __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (!plat_device_is_coherent(dev))
                        __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_device);
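/* The remaining queries simply defer to the platform layer. */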
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return plat_dma_mapping_error(dev, dma_addr);
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
        return plat_dma_supported(dev, mask);
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
        return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);
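/*
 * Explicit cache maintenance for memory obtained with
 * dma_alloc_noncoherent(); a no-op on coherent hardware.
 */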
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        plat_extra_sync_for_device(dev);
        if (!plat_device_is_coherent(dev))
                __dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);