/*
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * Based on DMA code from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/cache.h>
#include <asm/cacheflush.h>

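/*
 * Cache maintenance helpers for streaming DMA on the non-coherent Nios2
 * data cache.  "For device" runs before the device touches the buffer and
 * writes back and/or invalidates the affected lines; "for CPU" runs once
 * the device is done and invalidates any lines the device may have written,
 * so the CPU does not read stale cached data.
 */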
static inline void __dma_sync_for_device(void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		/*
		 * We just need to flush the caches here, but Nios2 flush
		 * instruction will do both writeback and invalidate.
		 */
	case DMA_BIDIRECTIONAL: /* flush and invalidate */
		flush_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	default:
		BUG();
	}
}

static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		break;
	default:
		BUG();
	}
}

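/*
 * Coherent allocation: grab zeroed pages, flush them out of the data cache
 * and hand back an uncached alias of the buffer (UNCAC_ADDR) so that CPU
 * and device always see the same data.
 */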
static void *nios2_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* optimized page clearing */
	gfp |= __GFP_ZERO;

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	ret = (void *) __get_free_pages(gfp, get_order(size));
	if (ret != NULL) {
		*dma_handle = virt_to_phys(ret);
		flush_dcache_range((unsigned long) ret,
			(unsigned long) ret + size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}

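/*
 * The caller holds the uncached alias returned by nios2_dma_alloc(); convert
 * it back to the cached kernel address before releasing the pages.
 */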
static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);

	free_pages(addr, get_order(size));
}

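/*
 * Streaming mappings: there is no IOMMU, so the DMA address is simply the
 * physical address of the buffer; the only work left is cache maintenance.
 */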
static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	int i;

	for_each_sg(sg, sg, nents, i) {
		void *addr = sg_virt(sg);

		if (!addr)
			continue;

		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync_for_device(addr, sg->length, direction);
	}

	return nents;
}

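/*
 * Single-page variant of the above: sync for the device unless the caller
 * asked to skip it, then return the bus address of the page fragment.
 */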
static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction,
		unsigned long attrs)
{
	void *addr = page_address(page) + offset;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync_for_device(addr, size, direction);

	return page_to_phys(page) + offset;
}

static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
		size_t size, enum dma_data_direction direction,
		unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}

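/*
 * For DMA_TO_DEVICE the device only read the buffer, so the cached copy is
 * still valid and no invalidation is needed on unmap.  For the other
 * directions, invalidate each entry unless DMA_ATTR_SKIP_CPU_SYNC is set.
 */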
static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nhwentries, enum dma_data_direction direction,
		unsigned long attrs)
{
	void *addr;
	int i;

	if (direction == DMA_TO_DEVICE)
		return;

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	for_each_sg(sg, sg, nhwentries, i) {
		addr = sg_virt(sg);
		if (addr)
			__dma_sync_for_cpu(addr, sg->length, direction);
	}
}

static void nios2_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
}

static void nios2_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
}

static void nios2_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body. */
	for_each_sg(sg, sg, nelems, i)
		__dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
}

static void nios2_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body. */
	for_each_sg(sg, sg, nelems, i)
		__dma_sync_for_device(sg_virt(sg), sg->length, direction);
}

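/*
 * DMA mapping operations table exported for use by the generic DMA API on
 * Nios2 devices.
 */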
const struct dma_map_ops nios2_dma_ops = {
	.alloc			= nios2_dma_alloc,
	.free			= nios2_dma_free,
	.map_page		= nios2_dma_map_page,
	.unmap_page		= nios2_dma_unmap_page,
	.map_sg			= nios2_dma_map_sg,
	.unmap_sg		= nios2_dma_unmap_sg,
	.sync_single_for_device	= nios2_dma_sync_single_for_device,
	.sync_single_for_cpu	= nios2_dma_sync_single_for_cpu,
	.sync_sg_for_cpu	= nios2_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= nios2_dma_sync_sg_for_device,
};
EXPORT_SYMBOL(nios2_dma_ops);