/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>

#include <asm/cache.h>
#include <asm/io.h>

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

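/*
 * Illustration (not part of the original file; names hypothetical): the
 * "coherent" memory handed out below is simply an uncached alias of
 * ordinary RAM.  On a 32-bit kernel, for instance,
 *
 *	void *cached = kmalloc(64, GFP_KERNEL);
 *	void *uncached = (void *) UNCAC_ADDR(cached);
 *
 * would leave both pointers naming the same physical bytes, with only
 * the second bypassing the CPU caches - which is what Linux means by a
 * consistent ("coherent") mapping on these systems.
 */
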
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
	if (ret) {
		/* flush the cached alias, then hand back an uncached mapping */
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	/* convert the uncached address back to its cached alias */
	addr = CAC_ADDR(addr);
	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

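/*
 * Usage sketch (illustrative, not part of the original file; "dev" and
 * BUF_SIZE are hypothetical): a driver pairs the two routines above as
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, BUF_SIZE, &handle, GFP_KERNEL);
 *	if (buf) {
 *		... program the device with "handle", touch "buf" from the CPU ...
 *		dma_free_coherent(dev, BUF_SIZE, buf, handle);
 *	}
 */
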
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	__dma_sync(addr, size, direction);

	return virt_to_phys(ptr);
}

EXPORT_SYMBOL(dma_map_single);

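/*
 * Usage sketch (illustrative, not part of the original file; "dev",
 * "buf" and LEN are hypothetical): streaming DMA around one transfer.
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, LEN, DMA_FROM_DEVICE);
 *	... let the device DMA into the buffer, wait for completion ...
 *	dma_unmap_single(dev, bus, LEN, DMA_FROM_DEVICE);
 *
 * Mapping with DMA_FROM_DEVICE invalidates the cached copy, so later
 * CPU reads see the device's data rather than stale cache lines.
 */
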
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr;
	addr = dma_addr + PAGE_OFFSET;

	//__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_unmap_single);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) page_address(sg->page);
		if (addr)
			__dma_sync(addr + sg->offset, sg->length, direction);
		sg->dma_address = (dma_addr_t)page_to_phys(sg->page)
		                  + sg->offset;
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

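/*
 * Usage sketch (illustrative, not part of the original file; "dev",
 * "sglist" and NENTS are hypothetical): scatter-gather mapping.
 *
 *	int count = dma_map_sg(dev, sglist, NENTS, DMA_TO_DEVICE);
 *	... feed each sg->dma_address / sg->length pair to the device ...
 *	dma_unmap_sg(dev, sglist, NENTS, DMA_TO_DEVICE);
 */
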
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = (unsigned long) page_address(page) + offset;
	dma_cache_wback_inv(addr, size);

	return page_to_phys(page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = dma_address + PAGE_OFFSET;
		dma_cache_wback_inv(addr, size);
	}
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
		return;

	for (i = 0; i < nhwentries; i++, sg++) {
		addr = (unsigned long) page_address(sg->page);
		if (addr)
			__dma_sync(addr + sg->offset, sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_device);

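/*
 * Usage sketch (illustrative, not part of the original file; "dev",
 * "bus" and LEN are hypothetical): for a long-lived mapping whose
 * ownership alternates, a driver brackets each CPU access with the two
 * sync calls above:
 *
 *	dma_sync_single_for_cpu(dev, bus, LEN, DMA_FROM_DEVICE);
 *	... CPU examines the data the device just wrote ...
 *	dma_sync_single_for_device(dev, bus, LEN, DMA_FROM_DEVICE);
 */
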
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + offset + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + offset + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
		           sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
		           sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
	/* mappings on this platform cannot fail */
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA..
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

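/*
 * Illustration (not part of the original file; "dev" is hypothetical):
 * drivers normally reach this routine through dma_set_mask(), which
 * fails for masks that dma_supported() rejects:
 *
 *	if (dma_set_mask(dev, 0xffffffffULL))
 *		printk(KERN_WARNING "no usable DMA configuration\n");
 */
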
int dma_is_consistent(dma_addr_t dma_addr)
{
	/* dma_alloc_coherent() hands out uncached mappings here */
	return 1;
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		return;

	dma_cache_wback_inv((unsigned long) vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);

/* The DAC routines are a PCIism.. */

#ifdef CONFIG_PCI

#include <linux/pci.h>

dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
	struct page *page, unsigned long offset, int direction)
{
	return (dma64_addr_t)page_to_phys(page) + offset;
}

EXPORT_SYMBOL(pci_dac_page_to_dma);

struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return mem_map + (dma_addr >> PAGE_SHIFT);
}

EXPORT_SYMBOL(pci_dac_dma_to_page);

unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return dma_addr & ~PAGE_MASK;
}

EXPORT_SYMBOL(pci_dac_dma_to_offset);

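/*
 * Illustration (not part of the original file; names hypothetical): for
 * a lowmem page the three DAC helpers above round-trip, i.e. with
 *
 *	dma64_addr_t dac = pci_dac_page_to_dma(pdev, page, off, dir);
 *
 * pci_dac_dma_to_page(pdev, dac) yields "page" again and
 * pci_dac_dma_to_offset(pdev, dac) yields "off" (for off < PAGE_SIZE).
 */
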
void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);

void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);

#endif /* CONFIG_PCI */