Linux 4.15.10
[linux/fpc-iii.git] / arch / microblaze / kernel / dma.c
blob: 990bf9ea0ec6b547c377105a4721131df3d205c0
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <asm/cacheflush.h>

#define NOT_COHERENT_CACHE
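
/*
 * MicroBlaze caches are not coherent with respect to DMA, so this port
 * always builds the NOT_COHERENT_CACHE paths below: coherent buffers come
 * from consistent_alloc() (an uncached mapping set up in
 * arch/microblaze/mm/consistent.c), and streaming mappings rely on the
 * explicit flush/invalidate in __dma_sync().
 */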

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret);

	return ret;
#endif
}
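
/*
 * The #else branch above is the cache-coherent fallback; since
 * NOT_COHERENT_CACHE is defined unconditionally at the top of this file,
 * that branch is never compiled on this platform.
 */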

static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static inline void __dma_sync(unsigned long paddr,
			      size_t size, enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		flush_dcache_range(paddr, paddr + size);
		break;
	case DMA_FROM_DEVICE:
		invalidate_dcache_range(paddr, paddr + size);
		break;
	default:
		BUG();
	}
}
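
/*
 * Cache maintenance for streaming DMA: flush (write back) before the device
 * reads memory so dirty cache lines reach RAM, and invalidate before the
 * CPU reads memory the device has written so stale lines are discarded.
 * DMA_BIDIRECTIONAL shares the flush path above.
 */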

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync(sg_phys(sg), sg->length, direction);
	}

	return nents;
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}
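
/*
 * Every DMA mask is reported as supported. Device addresses are identical
 * to physical addresses here, so the port simply claims support for any
 * mask, presumably on the assumption that all RAM is reachable by DMA on
 * these systems.
 */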

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset;
}
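
/*
 * Mapping is a 1:1 translation (there is no IOMMU on this platform): the
 * DMA address is the page's physical address plus the offset, with cache
 * maintenance performed first unless the caller asked to skip it.
 */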

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 unsigned long attrs)
{
	/*
	 * No further cache cleanup is strictly necessary here; dma_address
	 * is already a physical address, so it can be handed to __dma_sync()
	 * directly, without any phys_to_virt() round trip.
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(dma_address, size, direction);
}

static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
			       dma_addr_t dma_handle, size_t size,
			       enum dma_data_direction direction)
{
	/*
	 * Handing the region back to the CPU: a writeback flush would be
	 * pointless here, so only DMA_FROM_DEVICE needs cache work, and
	 * __dma_sync() will invalidate the affected lines.
	 */
	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_single_for_device(struct device *dev,
				  dma_addr_t dma_handle, size_t size,
				  enum dma_data_direction direction)
{
	/*
	 * It's pointless to invalidate the cache if the device isn't
	 * supposed to write to the relevant region
	 */
	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
			   struct scatterlist *sgl, int nents,
			   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_direct_sync_sg_for_device(struct device *dev,
			      struct scatterlist *sgl, int nents,
			      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static int dma_direct_mmap_coherent(struct device *dev,
				    struct vm_area_struct *vma,
				    void *cpu_addr, dma_addr_t handle,
				    size_t size, unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;

	if (off >= count || user_count > (count - off))
		return -ENXIO;

#ifdef NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = consistent_virt_to_pfn(cpu_addr);
#else
	pfn = virt_to_pfn(cpu_addr);
#endif
	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
	return -ENXIO;
#endif
}
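
/*
 * The mmap path lets user space map a coherent buffer: the requested range
 * must fit inside the allocation, and on this non-coherent platform the
 * user mapping is made uncached (pgprot_noncached) to match the
 * kernel-side consistent mapping.
 */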

const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc_coherent,
	.free			= dma_direct_free_coherent,
	.mmap			= dma_direct_mmap_coherent,
	.map_sg			= dma_direct_map_sg,
	.dma_supported		= dma_direct_dma_supported,
	.map_page		= dma_direct_map_page,
	.unmap_page		= dma_direct_unmap_page,
	.sync_single_for_cpu	= dma_direct_sync_single_for_cpu,
	.sync_single_for_device	= dma_direct_sync_single_for_device,
	.sync_sg_for_cpu	= dma_direct_sync_sg_for_cpu,
	.sync_sg_for_device	= dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);
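
/*
 * Usage sketch (hypothetical driver code, not part of this file): drivers
 * never call these ops directly; the generic DMA API dispatches to them.
 * Assuming a device "dev" and a kmalloc'ed buffer "buf" of "len" bytes:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... program the device with "handle", wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */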

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);