xtensa: support DMA buffers in high memory
arch/xtensa/kernel/pci-dma.c [cris-mirror.git]
/*
 * DMA coherent memory allocation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Copyright (C) 2002 - 2005 Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Based on version for i386.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */
#include <linux/dma-contiguous.h>
#include <linux/dma-direct.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
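
/*
 * Apply a cache maintenance callback to the memory behind a DMA handle.
 * Low-memory buffers are permanently mapped in KSEG, so the callback can
 * run directly on the kernel virtual address.  High-memory buffers have no
 * permanent mapping, so each page is temporarily mapped with kmap_atomic()
 * and processed one page (or partial page) at a time.
 */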
static void do_cache_op(dma_addr_t dma_handle, size_t size,
                        void (*fn)(unsigned long, unsigned long))
{
        unsigned long off = dma_handle & (PAGE_SIZE - 1);
        unsigned long pfn = PFN_DOWN(dma_handle);
        struct page *page = pfn_to_page(pfn);

        if (!PageHighMem(page))
                fn((unsigned long)bus_to_virt(dma_handle), size);
        else
                while (size > 0) {
                        size_t sz = min_t(size_t, size, PAGE_SIZE - off);
                        void *vaddr = kmap_atomic(page);

                        fn((unsigned long)vaddr + off, sz);
                        kunmap_atomic(vaddr);
                        off = 0;
                        ++page;
                        size -= sz;
                }
}
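
/*
 * Streaming-DMA sync callbacks.  Transfers from the device invalidate the
 * data cache before the CPU reads the buffer; transfers to the device
 * flush the data cache (when it is configured as write-back) so the device
 * sees the CPU's writes.  DMA_NONE is a caller bug.
 */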
static void xtensa_sync_single_for_cpu(struct device *dev,
                                       dma_addr_t dma_handle, size_t size,
                                       enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_BIDIRECTIONAL:
        case DMA_FROM_DEVICE:
                do_cache_op(dma_handle, size, __invalidate_dcache_range);
                break;

        case DMA_NONE:
                BUG();
                break;

        default:
                break;
        }
}
static void xtensa_sync_single_for_device(struct device *dev,
                                          dma_addr_t dma_handle, size_t size,
                                          enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_BIDIRECTIONAL:
        case DMA_TO_DEVICE:
                if (XCHAL_DCACHE_IS_WRITEBACK)
                        do_cache_op(dma_handle, size, __flush_dcache_range);
                break;

        case DMA_NONE:
                BUG();
                break;

        default:
                break;
        }
}
static void xtensa_sync_sg_for_cpu(struct device *dev,
                                   struct scatterlist *sg, int nents,
                                   enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                xtensa_sync_single_for_cpu(dev, sg_dma_address(s),
                                           sg_dma_len(s), dir);
        }
}
static void xtensa_sync_sg_for_device(struct device *dev,
                                      struct scatterlist *sg, int nents,
                                      enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                xtensa_sync_single_for_device(dev, sg_dma_address(s),
                                              sg_dma_len(s), dir);
        }
}
/*
 * Note: We assume that the full memory space is always mapped to 'kseg'
 *       Otherwise we have to use page attributes (not implemented).
 */
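
/*
 * Coherent allocations reach this hook through the generic DMA API, e.g.
 * (hypothetical driver code):
 *
 *      void *buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *
 * Pages come from CMA when blocking is allowed, otherwise from
 * alloc_pages().  High-memory pages are given a non-cached vmalloc-space
 * mapping with dma_common_contiguous_remap(); low-memory pages are handed
 * back through the uncached KSEG bypass alias instead.
 */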
static void *xtensa_dma_alloc(struct device *dev, size_t size,
                              dma_addr_t *handle, gfp_t flag,
                              unsigned long attrs)
{
        unsigned long ret;
        unsigned long uncached;
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page = NULL;

        /* ignore region specifiers */

        flag &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                flag |= GFP_DMA;

        if (gfpflags_allow_blocking(flag))
                page = dma_alloc_from_contiguous(dev, count, get_order(size),
                                                 flag);

        if (!page)
                page = alloc_pages(flag, get_order(size));

        if (!page)
                return NULL;

        *handle = phys_to_dma(dev, page_to_phys(page));

#ifdef CONFIG_MMU
        if (PageHighMem(page)) {
                void *p;

                p = dma_common_contiguous_remap(page, size, VM_MAP,
                                                pgprot_noncached(PAGE_KERNEL),
                                                __builtin_return_address(0));
                if (!p) {
                        if (!dma_release_from_contiguous(dev, page, count))
                                __free_pages(page, get_order(size));
                }
                return p;
        }
#endif
        ret = (unsigned long)page_address(page);
        BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
               ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);

        uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
        __invalidate_dcache_range(ret, size);

        return (void *)uncached;
}
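
/*
 * The free path mirrors the allocator: an address inside the KSEG bypass
 * window is converted back to its cached alias to find the struct page,
 * while a high-memory buffer has its vmalloc-space remapping torn down and
 * the page is recovered from the DMA handle.
 */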
static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
                            dma_addr_t dma_handle, unsigned long attrs)
{
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long addr = (unsigned long)vaddr;
        struct page *page;

        if (addr >= XCHAL_KSEG_BYPASS_VADDR &&
            addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE) {
                addr += XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
                page = virt_to_page(addr);
        } else {
#ifdef CONFIG_MMU
                dma_common_free_remap(vaddr, size, VM_MAP);
#endif
                page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
        }

        if (!dma_release_from_contiguous(dev, page, count))
                __free_pages(page, get_order(size));
}
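
/*
 * Streaming mappings (what drivers see as dma_map_single()/dma_map_page())
 * are direct: the DMA address is the physical address of the page plus the
 * offset, with cache maintenance performed unless the caller passed
 * DMA_ATTR_SKIP_CPU_SYNC.
 */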
static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t size,
                                  enum dma_data_direction dir,
                                  unsigned long attrs)
{
        dma_addr_t dma_handle = page_to_phys(page) + offset;

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                xtensa_sync_single_for_device(dev, dma_handle, size, dir);

        return dma_handle;
}
static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
                              size_t size, enum dma_data_direction dir,
                              unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
}
static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
                         int nents, enum dma_data_direction dir,
                         unsigned long attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                s->dma_address = xtensa_map_page(dev, sg_page(s), s->offset,
                                                 s->length, dir, attrs);
        }
        return nents;
}
static void xtensa_unmap_sg(struct device *dev,
                            struct scatterlist *sg, int nents,
                            enum dma_data_direction dir,
                            unsigned long attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                xtensa_unmap_page(dev, sg_dma_address(s),
                                  sg_dma_len(s), dir, attrs);
        }
}
int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}
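
/*
 * The ops table below bundles the callbacks above; it is exported so the
 * architecture can hand it to the generic DMA mapping layer as its default
 * dma_map_ops.
 */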
const struct dma_map_ops xtensa_dma_map_ops = {
        .alloc = xtensa_dma_alloc,
        .free = xtensa_dma_free,
        .map_page = xtensa_map_page,
        .unmap_page = xtensa_unmap_page,
        .map_sg = xtensa_map_sg,
        .unmap_sg = xtensa_unmap_sg,
        .sync_single_for_cpu = xtensa_sync_single_for_cpu,
        .sync_single_for_device = xtensa_sync_single_for_device,
        .sync_sg_for_cpu = xtensa_sync_sg_for_cpu,
        .sync_sg_for_device = xtensa_sync_sg_for_device,
        .mapping_error = xtensa_dma_mapping_error,
};
EXPORT_SYMBOL(xtensa_dma_map_ops);
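
/*
 * Size of the preallocated entry pool for the DMA debugging facility; the
 * pool is set up at fs_initcall time and is only used when DMA API
 * debugging is enabled in the kernel configuration.
 */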
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init xtensa_dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}
fs_initcall(xtensa_dma_init);