/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
 * implemented by accessing it using a kernel virtual address, with
 * Cache bit off in the TLB entry.
 *
 * The default DMA address == Phy address which is 0x8000_0000 based.
 */
#include <linux/dma-mapping.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
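/*
 * Illustrative usage sketch (not part of this file): a driver obtains a
 * coherent buffer through the generic DMA API, which lands in
 * arc_dma_alloc()/arc_dma_free() below via arc_dma_ops. "dev", "buf" and
 * "len" are placeholder names for this sketch only:
 *
 *      dma_addr_t dma;
 *      void *buf = dma_alloc_coherent(dev, len, &dma, GFP_KERNEL);
 *
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, len, buf, dma);
 */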
static void *arc_dma_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        unsigned long order = get_order(size);
        struct page *page;
        phys_addr_t paddr;
        void *kvaddr;
        int need_coh = 1, need_kvaddr = 0;

        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;

        /*
         * IOC relies on all data (even coherent DMA data) being in cache
         * Thus allocate normal cached memory
         *
         * The gains with IOC are two pronged:
         *   -For streaming data, elides need for cache maintenance, saving
         *    cycles in flush code and bus bandwidth, since otherwise every
         *    line of the buffer would have to be flushed out to memory
         *   -For coherent data, Read/Write to buffers terminate early in
         *    cache (vs. always going to memory - thus are faster)
         */
        if ((is_isa_arcv2() && ioc_enable) ||
            (attrs & DMA_ATTR_NON_CONSISTENT))
                need_coh = 0;

        /*
         * - A coherent buffer needs MMU mapping to enforce non-cachability
         * - A highmem page needs a virtual handle (hence MMU mapping)
         *   independent of cachability
         */
        if (PageHighMem(page) || need_coh)
                need_kvaddr = 1;

        /* This is linear addr (0x8000_0000 based) */
        paddr = page_to_phys(page);

        *dma_handle = paddr;

        /* This is kernel Virtual address (0x7000_0000 based) */
        if (need_kvaddr) {
                kvaddr = ioremap_nocache(paddr, size);
                if (kvaddr == NULL) {
                        __free_pages(page, order);
                        return NULL;
                }
        } else {
                kvaddr = (void *)(u32)paddr;
        }

        /*
         * Evict any existing L1 and/or L2 lines for the backing page
         * in case it was used earlier as a normal "cached" page.
         * Yeah this bit us - STAR 9000898266
         *
         * Although core does call flush_cache_vmap(), it gets kvaddr hence
         * can't be used to efficiently flush L1 and/or L2 which need paddr
         * Currently flush_cache_vmap nukes the L1 cache completely which
         * will be optimized as a separate commit
         */
        if (need_coh)
                dma_cache_wback_inv(paddr, size);

        return kvaddr;
}
static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle, unsigned long attrs)
{
        phys_addr_t paddr = dma_handle;
        struct page *page = virt_to_page(paddr);
        int is_non_coh;

        is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
                     (is_isa_arcv2() && ioc_enable);

        /* Only mappings created via ioremap_nocache() need an iounmap() */
        if (PageHighMem(page) || !is_non_coh)
                iounmap((void __force __iomem *)vaddr);

        __free_pages(page, get_order(size));
}
static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = __phys_to_pfn(dma_addr);
        unsigned long off = vma->vm_pgoff;
        int ret = -ENXIO;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off < count && user_count <= (count - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      user_count << PAGE_SHIFT,
                                      vma->vm_page_prot);
        }

        return ret;
}
/*
 * streaming DMA Mapping API...
 *
 * CPU accesses the page via its normal paddr, so it needs to be explicitly
 * made consistent before each use
 */
static void _dma_cache_sync(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_FROM_DEVICE:
                dma_cache_inv(paddr, size);
                break;
        case DMA_TO_DEVICE:
                dma_cache_wback(paddr, size);
                break;
        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(paddr, size);
                break;
        default:
                pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
        }
}
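/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * with a streaming mapping the CPU must bracket its accesses with the
 * sync calls that funnel into _dma_cache_sync() above, e.g. for a buffer
 * the device writes into and which is then reused. "dev", "dma" and "len"
 * are placeholder names:
 *
 *      dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *      ... CPU reads the freshly DMA'd data ...
 *      dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *      ... hand the buffer back to the device ...
 */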
/*
 * arc_dma_map_page - map a portion of a page for streaming DMA
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_page().
 *
 * Note: while it takes struct page as arg, caller can "abuse" it to pass
 * a region larger than PAGE_SIZE, provided it is physically contiguous
 * and this still works correctly
 */
static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        phys_addr_t paddr = page_to_phys(page) + offset;

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                _dma_cache_sync(paddr, size, dir);

        /* DMA address is the physical address on ARC */
        return paddr;
}
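/*
 * Illustrative sketch (hypothetical driver code): a single streaming
 * mapping as seen from a driver; "dev", "page", "off" and "len" are
 * placeholder names:
 *
 *      dma_addr_t dma = dma_map_page(dev, page, off, len, DMA_TO_DEVICE);
 *
 *      if (dma_mapping_error(dev, dma))
 *              return -ENOMEM;
 *      ... device reads the buffer ...
 *      dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
 */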
/*
 * arc_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 *
 * Note: historically this routine was not implemented for ARC
 */
static void arc_dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        phys_addr_t paddr = handle;

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                _dma_cache_sync(paddr, size, dir);
}
static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *s;
        int i;

        /* Map each segment individually via the page-level helper */
        for_each_sg(sg, s, nents, i)
                s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
                                              s->length, dir);

        return nents;
}
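/*
 * Illustrative sketch (hypothetical driver code): scatter-gather mapping
 * from a driver's point of view; "dev", "sgl" and "nents" are placeholder
 * names and setup_hw_desc() merely stands in for device-specific setup:
 *
 *      struct scatterlist *s;
 *      int i, mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *
 *      if (!mapped)
 *              return -ENOMEM;
 *      for_each_sg(sgl, s, mapped, i)
 *              setup_hw_desc(sg_dma_address(s), sg_dma_len(s));
 *      ...
 *      dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */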
static void arc_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir,
                unsigned long attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i)
                arc_dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir,
                                   attrs);
}
static void arc_dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
        /* Hand ownership to the CPU: invalidate so it sees fresh DMA data */
        _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
}
static void arc_dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
        /* Hand ownership to the device: write back any dirty CPU lines */
        _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
}
static void arc_dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sglist, int nelems,
                enum dma_data_direction dir)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nelems, i)
                _dma_cache_sync(sg_phys(sg), sg->length, dir);
}
static void arc_dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sglist, int nelems,
                enum dma_data_direction dir)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nelems, i)
                _dma_cache_sync(sg_phys(sg), sg->length, dir);
}
static int arc_dma_supported(struct device *dev, u64 dma_mask)
{
        /* Support 32 bit DMA mask exclusively */
        return dma_mask == DMA_BIT_MASK(32);
}
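/*
 * Illustrative sketch (hypothetical driver probe code): since only a full
 * 32-bit mask is accepted above, a driver would request exactly that;
 * "dev" is a placeholder name:
 *
 *      if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *              return -EIO;
 */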
const struct dma_map_ops arc_dma_ops = {
        .alloc                  = arc_dma_alloc,
        .free                   = arc_dma_free,
        .mmap                   = arc_dma_mmap,
        .map_page               = arc_dma_map_page,
        .unmap_page             = arc_dma_unmap_page,
        .map_sg                 = arc_dma_map_sg,
        .unmap_sg               = arc_dma_unmap_sg,
        .sync_single_for_device = arc_dma_sync_single_for_device,
        .sync_single_for_cpu    = arc_dma_sync_single_for_cpu,
        .sync_sg_for_cpu        = arc_dma_sync_sg_for_cpu,
        .sync_sg_for_device     = arc_dma_sync_sg_for_device,
        .dma_supported          = arc_dma_supported,
};
EXPORT_SYMBOL(arc_dma_ops);