Linux 2.6.33-rc8
[linux-2.6/lguest.git] arch/mips/mm/dma-default.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>
static inline unsigned long dma_addr_to_virt(struct device *dev,
	dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);

	return (unsigned long)phys_to_virt(addr);
}
/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
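
/*
 * On most non-coherent CPUs it is enough to write back / invalidate the
 * caches before starting a DMA transfer.  The R10000 and R12000, however,
 * may speculatively refill cache lines while the transfer is in flight,
 * so buffers have to be invalidated again when ownership returns to the
 * CPU; cpu_is_noncoherent_r10000() flags exactly that case.
 */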
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
		current_cpu_type() == CPU_R12000);
}
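
/*
 * Pick the GFP zone that the device's coherent DMA mask actually allows:
 * the caller's zone modifiers are dropped and replaced with __GFP_DMA or
 * __GFP_DMA32 as needed, and __GFP_NORETRY keeps a failed allocation from
 * invoking the OOM killer.
 */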
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;	/* empty statement terminates the dangling else chain */

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp;
}
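
/*
 * dma_alloc_noncoherent() hands back ordinary cached kernel memory; per
 * the DMA-API, the caller is expected to maintain consistency explicitly,
 * e.g. with dma_cache_sync(), around device accesses.
 */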
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);
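
/*
 * dma_alloc_coherent() does the same allocation, but on platforms without
 * hardware-maintained coherence the pages are flushed out of the caches
 * and the returned pointer is rewritten to the uncached (UNCAC_ADDR)
 * alias, so CPU and device always see the same data.
 */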
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);
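
/*
 * __dma_sync() performs the cache maintenance matching the direction of a
 * streaming mapping: write back dirty lines before the device reads
 * (DMA_TO_DEVICE), invalidate before the CPU reads what the device wrote
 * (DMA_FROM_DEVICE), and do both for bidirectional buffers.
 */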
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}
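
/*
 * Streaming mappings: flush the buffer if the platform is not coherent,
 * then let the platform code translate the kernel address into a bus
 * address for the device.
 */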
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dev, dma_addr), size,
			   direction);

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

EXPORT_SYMBOL(dma_unmap_single);
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
						   (void *)addr, sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		__dma_sync(addr, size, direction);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);
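
/*
 * Ownership transfers for streaming mappings.  Handing a buffer back to
 * the CPU only needs extra work on the speculating R10000/R12000 (see
 * above); handing it to the device needs a flush on every non-coherent
 * platform, plus whatever barrier plat_extra_sync_for_device() provides.
 */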
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}

EXPORT_SYMBOL(dma_mapping_error);
int dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

EXPORT_SYMBOL(dma_supported);
int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);
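
/*
 * dma_cache_sync() is the explicit consistency hook for memory obtained
 * from dma_alloc_noncoherent(): it reuses the same per-direction cache
 * maintenance as the streaming API above.
 */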
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);