/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
 * implemented by accessing it using a kernel virtual address, with
 * Cache bit off in the TLB entry.
 *
 * The default DMA address == Phy address which is 0x8000_0000 based.
 */
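
/*
 * Illustrative usage sketch (not part of this file): roughly how a driver
 * would obtain a coherent buffer that ends up being serviced by
 * arc_dma_alloc() below. "pdev" and the buffer size are hypothetical.
 *
 *      void *cpu_addr;
 *      dma_addr_t dma_handle;
 *
 *      cpu_addr = dma_alloc_coherent(&pdev->dev, SZ_4K, &dma_handle,
 *                                    GFP_KERNEL);
 *      if (!cpu_addr)
 *              return -ENOMEM;
 *
 *      (device is programmed with dma_handle; the CPU uses cpu_addr, which
 *       on ARC is an uncached kernel virtual mapping)
 *
 *      dma_free_coherent(&pdev->dev, SZ_4K, cpu_addr, dma_handle);
 */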

#include <linux/dma-mapping.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

static void *arc_dma_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
        unsigned long order = get_order(size);
        struct page *page;
        phys_addr_t paddr;
        void *kvaddr;
        int need_coh = 1, need_kvaddr = 0;

        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;

        /*
         * IOC relies on all data (even coherent DMA data) being in cache
         * Thus allocate normal cached memory
         *
         * The gains with IOC are two pronged:
         *   -For streaming data, elides need for cache maintenance, saving
         *    cycles in flush code, and bus bandwidth, as all the lines of a
         *    buffer would otherwise need to be flushed out to memory
         *   -For coherent data, Read/Write to buffers terminate early in cache
         *    (vs. always going to memory - thus are faster)
         *
         * (an illustrative non-consistent allocation sketch follows this
         *  function)
         */
        if ((is_isa_arcv2() && ioc_exists) ||
            dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
                need_coh = 0;

        /*
         * - A coherent buffer needs MMU mapping to enforce non-cachability
         * - A highmem page needs a virtual handle (hence MMU mapping)
         *   independent of cachability
         */
        if (PageHighMem(page) || need_coh)
                need_kvaddr = 1;

        /* This is linear addr (0x8000_0000 based) */
        paddr = page_to_phys(page);

        *dma_handle = plat_phys_to_dma(dev, paddr);

        /* This is kernel Virtual address (0x7000_0000 based) */
        if (need_kvaddr) {
                kvaddr = ioremap_nocache(paddr, size);
                if (kvaddr == NULL) {
                        __free_pages(page, order);
                        return NULL;
                }
        } else {
                kvaddr = (void *)(u32)paddr;
        }

        /*
         * Evict any existing L1 and/or L2 lines for the backing page
         * in case it was used earlier as a normal "cached" page.
         * Yeah this bit us - STAR 9000898266
         *
         * Although core does call flush_cache_vmap(), it gets kvaddr hence
         * can't be used to efficiently flush L1 and/or L2 which need paddr
         * Currently flush_cache_vmap nukes the L1 cache completely which
         * will be optimized as a separate commit
         */
        if (need_coh)
                dma_cache_wback_inv(paddr, size);

        return kvaddr;
}
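
/*
 * Illustrative sketch (not part of this file), assuming the struct
 * dma_attrs based API used above: a driver that wants cached, explicitly
 * managed memory (e.g. to benefit from IOC) could pass
 * DMA_ATTR_NON_CONSISTENT and take care of cache maintenance itself.
 * "pdev" and the size are hypothetical.
 *
 *      DEFINE_DMA_ATTRS(attrs);
 *      dma_addr_t handle;
 *      void *buf;
 *
 *      dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
 *      buf = dma_alloc_attrs(&pdev->dev, SZ_64K, &handle, GFP_KERNEL, &attrs);
 *
 *      (CPU and device share the cached buffer; the caller keeps it
 *       consistent)
 *
 *      dma_free_attrs(&pdev->dev, SZ_64K, buf, handle, &attrs);
 */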

static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle, struct dma_attrs *attrs)
{
        struct page *page = virt_to_page(dma_handle);
        int is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
                        (is_isa_arcv2() && ioc_exists);

        /* Undo the ioremap done at alloc time for coherent/highmem buffers */
        if (PageHighMem(page) || !is_non_coh)
                iounmap((void __force __iomem *)vaddr);

        __free_pages(page, get_order(size));
}

/*
 * streaming DMA Mapping API...
 * CPU accesses page via normal paddr, thus needs to be explicitly made
 * consistent before each use
 * (a usage sketch follows _dma_cache_sync() below)
 */
static void _dma_cache_sync(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_FROM_DEVICE:
                dma_cache_inv(paddr, size);
                break;
        case DMA_TO_DEVICE:
                dma_cache_wback(paddr, size);
                break;
        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(paddr, size);
                break;
        default:
                pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
        }
}
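
/*
 * Illustrative streaming usage sketch (not part of this file): a driver
 * maps a cached buffer for device access and brackets CPU accesses with
 * the sync hooks below. "pdev", "buf" and the size are hypothetical.
 *
 *      dma_addr_t handle;
 *
 *      handle = dma_map_single(&pdev->dev, buf, SZ_4K, DMA_FROM_DEVICE);
 *      if (dma_mapping_error(&pdev->dev, handle))
 *              return -ENOMEM;
 *
 *      (device writes into the buffer)
 *
 *      dma_sync_single_for_cpu(&pdev->dev, handle, SZ_4K, DMA_FROM_DEVICE);
 *      (CPU reads the data)
 *
 *      dma_unmap_single(&pdev->dev, handle, SZ_4K, DMA_FROM_DEVICE);
 */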

static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs)
{
        phys_addr_t paddr = page_to_phys(page) + offset;

        _dma_cache_sync(paddr, size, dir);

        return plat_phys_to_dma(dev, paddr);
}

static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i)
                s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
                                              s->length, dir);

        return nents;
}
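
/*
 * Illustrative scatter-gather sketch (not part of this file): mapping a
 * small table of buffers through arc_dma_map_sg() above. "pdev", "buf0"
 * and "buf1" are hypothetical.
 *
 *      struct scatterlist sgl[2];
 *      int count;
 *
 *      sg_init_table(sgl, 2);
 *      sg_set_buf(&sgl[0], buf0, SZ_1K);
 *      sg_set_buf(&sgl[1], buf1, SZ_2K);
 *
 *      count = dma_map_sg(&pdev->dev, sgl, 2, DMA_TO_DEVICE);
 *      if (!count)
 *              return -ENOMEM;
 *
 *      (hand sg_dma_address()/sg_dma_len() of each mapped entry to the
 *       device, then)
 *
 *      dma_unmap_sg(&pdev->dev, sgl, 2, DMA_TO_DEVICE);
 */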

static void arc_dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
        _dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_FROM_DEVICE);
}

static void arc_dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
        _dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_TO_DEVICE);
}

static void arc_dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sglist, int nelems,
                enum dma_data_direction dir)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nelems, i)
                _dma_cache_sync(sg_phys(sg), sg->length, dir);
}

static void arc_dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sglist, int nelems,
                enum dma_data_direction dir)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nelems, i)
                _dma_cache_sync(sg_phys(sg), sg->length, dir);
}

static int arc_dma_supported(struct device *dev, u64 dma_mask)
{
        /* Support 32 bit DMA mask exclusively */
        return dma_mask == DMA_BIT_MASK(32);
}
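
/*
 * Illustrative sketch (not part of this file): a driver probe routine
 * negotiating its DMA mask, which is validated by arc_dma_supported()
 * above. "pdev" is hypothetical.
 *
 *      if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *              return -EIO;
 */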

struct dma_map_ops arc_dma_ops = {
        .alloc                  = arc_dma_alloc,
        .free                   = arc_dma_free,
        .map_page               = arc_dma_map_page,
        .map_sg                 = arc_dma_map_sg,
        .sync_single_for_device = arc_dma_sync_single_for_device,
        .sync_single_for_cpu    = arc_dma_sync_single_for_cpu,
        .sync_sg_for_cpu        = arc_dma_sync_sg_for_cpu,
        .sync_sg_for_device     = arc_dma_sync_sg_for_device,
        .dma_supported          = arc_dma_supported,
};
EXPORT_SYMBOL(arc_dma_ops);
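
/*
 * These ops reach drivers through the generic DMA API: the arch header
 * (asm/dma-mapping.h) returns them from get_dma_ops(), roughly along the
 * lines of the sketch below, so a call such as dma_map_page() is
 * dispatched to arc_dma_map_page() and friends above.
 *
 *      static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 *      {
 *              return &arc_dma_ops;
 *      }
 */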