/*
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * Based on DMA code from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/cache.h>
#include <asm/cacheflush.h>
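
/*
 * The Nios2 data cache is not coherent with DMA, so streaming mappings
 * have to maintain it by hand.  __dma_sync_for_device() runs before the
 * device touches the buffer: DMA_FROM_DEVICE only needs the range
 * invalidated, while DMA_TO_DEVICE and DMA_BIDIRECTIONAL need a
 * writeback (the Nios2 flush primitive writes back and invalidates in
 * one go, so flush_dcache_range() covers both).
 */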
static inline void __dma_sync_for_device(void *vaddr, size_t size,
				enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		/*
		 * We just need to flush the caches here, but Nios2 flush
		 * instruction will do both writeback and invalidate.
		 */
	case DMA_BIDIRECTIONAL: /* flush and invalidate */
		flush_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	default:
		BUG();
	}
}
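
/*
 * Counterpart run when ownership returns to the CPU: after a
 * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL transfer the cached copy may be
 * stale, so the range is invalidated before the CPU reads it.  Nothing
 * needs to be done for DMA_TO_DEVICE.
 */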
static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
				enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		break;
	default:
		BUG();
	}
}
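
/*
 * Coherent allocation: grab zeroed pages, write any cached lines back
 * to memory, and return an uncached alias of the buffer (UNCAC_ADDR) so
 * that CPU and device see the same data without further cache
 * maintenance.
 */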
static void *nios2_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;

	/* optimized page clearing */
	gfp |= __GFP_ZERO;

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	ret = (void *) __get_free_pages(gfp, get_order(size));
	if (ret != NULL) {
		*dma_handle = virt_to_phys(ret);
		flush_dcache_range((unsigned long) ret,
			(unsigned long) ret + size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}
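
/*
 * Undo nios2_dma_alloc(): convert the uncached alias back to the cached
 * kernel address (CAC_ADDR) before handing the pages back to the page
 * allocator.
 */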
static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);

	free_pages(addr, get_order(size));
}
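
/*
 * Map a scatterlist for streaming DMA: record the bus address of each
 * segment and, unless DMA_ATTR_SKIP_CPU_SYNC is set, make the segment
 * contents visible to the device.
 */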
static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	int i;

	for_each_sg(sg, sg, nents, i) {
		void *addr = sg_virt(sg);

		if (!addr)
			continue;

		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync_for_device(addr, sg->length, direction);
	}

	return nents;
}
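
/*
 * Map a single page for streaming DMA: sync the cache for the device
 * (unless DMA_ATTR_SKIP_CPU_SYNC is set) and return the physical
 * address of the page plus offset as the bus address.
 */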
static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction direction,
			unsigned long attrs)
{
	void *addr = page_address(page) + offset;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync_for_device(addr, size, direction);

	return page_to_phys(page) + offset;
}
static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				size_t size, enum dma_data_direction direction,
				unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}
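
/*
 * Tear down a scatterlist mapping.  Only DMA_FROM_DEVICE and
 * DMA_BIDIRECTIONAL transfers can leave stale cached data behind, so
 * DMA_TO_DEVICE (and DMA_ATTR_SKIP_CPU_SYNC) mappings are released
 * without any cache maintenance.
 */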
static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nhwentries, enum dma_data_direction direction,
		unsigned long attrs)
{
	void *addr;
	int i;

	if (direction == DMA_TO_DEVICE)
		return;

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	for_each_sg(sg, sg, nhwentries, i) {
		addr = sg_virt(sg);
		if (addr)
			__dma_sync_for_cpu(addr, sg->length, direction);
	}
}
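
/*
 * Explicit ownership transfers for a single streaming mapping: the bus
 * address is converted back to a kernel virtual address and the range
 * is invalidated (for the CPU) or written back (for the device).
 */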
static void nios2_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
}
static void nios2_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
}
static void nios2_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for_each_sg(sg, sg, nelems, i)
		__dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
}
static void nios2_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for_each_sg(sg, sg, nelems, i)
		__dma_sync_for_device(sg_virt(sg), sg->length, direction);
}
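
/*
 * All of the handlers above are published through this dma_map_ops
 * table, which the generic DMA mapping code dispatches to on Nios2.
 */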
const struct dma_map_ops nios2_dma_ops = {
	.alloc			= nios2_dma_alloc,
	.free			= nios2_dma_free,
	.map_page		= nios2_dma_map_page,
	.unmap_page		= nios2_dma_unmap_page,
	.map_sg			= nios2_dma_map_sg,
	.unmap_sg		= nios2_dma_unmap_sg,
	.sync_single_for_device	= nios2_dma_sync_single_for_device,
	.sync_single_for_cpu	= nios2_dma_sync_single_for_cpu,
	.sync_sg_for_cpu	= nios2_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= nios2_dma_sync_sg_for_device,
};
EXPORT_SYMBOL(nios2_dma_ops);
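
/*
 * Example usage (illustrative sketch only; "dev", "buf" and "len" are
 * placeholder names, not defined in this file).  A driver reaches the
 * code above through the generic DMA API:
 *
 *	dma_addr_t bus;
 *
 *	bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, bus))
 *		return -ENOMEM;
 *	... program the device with "bus" and start the transfer ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 *
 * Through the ops table, dma_map_single() ends up in
 * nios2_dma_map_page(), which writes the buffer back from the D-cache
 * before the device reads it, and dma_unmap_single() ends up in
 * nios2_dma_unmap_page().
 */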