/*
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *  Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * Based on DMA code from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/cache.h>
#include <asm/cacheflush.h>
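
/*
 * Nios2 DMA is not cache-coherent: streaming mappings need explicit
 * data-cache maintenance around device accesses.
 */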
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	void *vaddr = phys_to_virt(paddr);

	switch (dir) {
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		/*
		 * We just need to flush the caches here, but Nios2 flush
		 * instruction will do both writeback and invalidate.
		 */
	case DMA_BIDIRECTIONAL: /* flush and invalidate */
		flush_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	default:
		BUG();
	}
}
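
/*
 * Make data written by the device visible to the CPU by invalidating
 * any stale cached copy of the buffer.
 */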
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	void *vaddr = phys_to_virt(paddr);

	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		break;
	default:
		BUG();
	}
}
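
/*
 * Coherent allocation: grab free pages, flush them out of the data
 * cache, and hand back an uncached alias so CPU and device agree on
 * the buffer contents.
 */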
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	void *ret;

	/* optimized page clearing */
	gfp |= __GFP_ZERO;

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	ret = (void *) __get_free_pages(gfp, get_order(size));
	if (ret != NULL) {
		*dma_handle = virt_to_phys(ret);
		flush_dcache_range((unsigned long) ret,
			(unsigned long) ret + size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}
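
/* Convert the uncached alias back to the cached address before freeing. */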
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);

	free_pages(addr, get_order(size));
}