/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>

#include <asm/cache.h>
#include <asm/io.h>
/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);
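/*
 * A minimal usage sketch (hypothetical driver code; the device pointer,
 * buffer size and GFP flags are illustrative only):
 *
 *	dma_addr_t handle;
 *	void *buf;
 *
 *	buf = dma_alloc_noncoherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *
 * On success, `handle' is the bus address to program into the device and
 * `buf' is a *cached* CPU mapping, so CPU accesses must be bracketed with
 * the dma_sync_* routines below.
 */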
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
	if (ret) {
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
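/*
 * Note on UNCAC_ADDR/CAC_ADDR: on MIPS the same physical memory is
 * addressable through a cached segment (KSEG0) and an uncached one
 * (KSEG1).  dma_alloc_coherent() writes back and invalidates the cached
 * lines, then returns the uncached alias, so CPU accesses bypass the
 * cache and stay coherent with device DMA without further maintenance.
 */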
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	addr = CAC_ADDR(addr);
	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);
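/*
 * __dma_sync() below maps the DMA API transfer direction onto the MIPS
 * cache primitives: DMA_TO_DEVICE needs only a writeback (the device
 * reads memory), DMA_FROM_DEVICE needs only an invalidate (stale cached
 * lines must not mask data the device wrote), and DMA_BIDIRECTIONAL
 * needs both.
 */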
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	__dma_sync(addr, size, direction);

	return virt_to_phys(ptr);
}

EXPORT_SYMBOL(dma_map_single);
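/*
 * Streaming mapping sketch (hypothetical; `dev', `data' and `len' are
 * illustrative only):
 *
 *	dma_addr_t bus = dma_map_single(dev, data, len, DMA_TO_DEVICE);
 *	... point the device at `bus' and run the transfer ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */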
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr;

	addr = dma_addr + PAGE_OFFSET;

	//__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_unmap_single);
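/*
 * dma_unmap_single() above does no cache maintenance: the writeback or
 * invalidate was already done at map time, and a CPU that touches the
 * buffer between transfers must use dma_sync_single_for_cpu() instead.
 */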
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) page_address(sg->page);
		if (addr)
			__dma_sync(addr + sg->offset, sg->length, direction);
		sg->dma_address = (dma_addr_t)page_to_phys(sg->page)
		                  + sg->offset;
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);
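/*
 * Scatter-gather sketch (hypothetical; assumes the caller filled in the
 * `sg' array):
 *
 *	int i, n = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
 *
 *	for (i = 0; i < n; i++)
 *		... program one device descriptor from sg[i].dma_address
 *		    and sg[i].length ...
 *	dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
 */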
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = (unsigned long) page_address(page) + offset;
	dma_cache_wback_inv(addr, size);

	return page_to_phys(page) + offset;
}

EXPORT_SYMBOL(dma_map_page);
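/*
 * Unlike dma_map_single(), dma_map_page() above writes back and
 * invalidates unconditionally instead of dispatching on the direction;
 * that is stronger than strictly necessary but correct for all three
 * directions.
 */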
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = dma_address + PAGE_OFFSET;
		dma_cache_wback_inv(addr, size);
	}
}

EXPORT_SYMBOL(dma_unmap_page);
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
		return;

	for (i = 0; i < nhwentries; i++, sg++) {
		addr = (unsigned long) page_address(sg->page);
		if (addr)
			__dma_sync(addr + sg->offset, sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_device);
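/*
 * Ownership hand-off sketch for a long-lived mapping (hypothetical):
 *
 *	dma_sync_single_for_cpu(dev, bus, size, DMA_FROM_DEVICE);
 *	... CPU inspects the buffer ...
 *	dma_sync_single_for_device(dev, bus, size, DMA_FROM_DEVICE);
 *	... device may DMA into the buffer again ...
 *
 * On this architecture both halves of the hand-off reduce to the same
 * __dma_sync() call on the cached (dma_handle + PAGE_OFFSET) alias.
 */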
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + offset + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + offset + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
		           sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
		           sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_device);
int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);
int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA..
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);
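/*
 * Example: a device advertising a full 32-bit mask (0xffffffff) is
 * accepted, while anything below 0x00ffffff (the classic 24-bit ISA
 * limit) is rejected, since GFP_DMA cannot guarantee a tighter range.
 */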
int dma_is_consistent(dma_addr_t dma_addr)
{
	return 1;
}

EXPORT_SYMBOL(dma_is_consistent);
void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		return;

	dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);
/* The DAC routines are a PCIism.. */

#ifdef CONFIG_PCI

#include <linux/pci.h>
dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
	struct page *page, unsigned long offset, int direction)
{
	return (dma64_addr_t)page_to_phys(page) + offset;
}

EXPORT_SYMBOL(pci_dac_page_to_dma);
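/*
 * The DAC (dual address cycle) helpers deal in 64-bit PCI bus
 * addresses.  Since bus addresses here are plain physical addresses,
 * the page/offset conversions below are simple arithmetic on
 * dma64_addr_t values.
 */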
struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return mem_map + (dma_addr >> PAGE_SHIFT);
}

EXPORT_SYMBOL(pci_dac_dma_to_page);
unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return dma_addr & ~PAGE_MASK;
}

EXPORT_SYMBOL(pci_dac_dma_to_offset);
void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);
void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);
#endif /* CONFIG_PCI */