Linux 4.18.8
arch/microblaze/kernel/dma.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <asm/cacheflush.h>
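/*
 * Coherent (consistent) allocations use the MicroBlaze consistent_alloc()/
 * consistent_free() helpers: the caller gets back an uncached mapping of
 * the buffer and, via *dma_handle, the physical address to hand to the
 * device.
 */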
static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag,
				      unsigned long attrs)
{
	return consistent_alloc(flag, size, dma_handle);
}
static void dma_nommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle,
				    unsigned long attrs)
{
	consistent_free(size, vaddr);
}
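/*
 * Streaming (non-coherent) mappings are cached, so every transfer of
 * ownership needs explicit cache maintenance: flush the dcache before the
 * device reads the buffer (DMA_TO_DEVICE/DMA_BIDIRECTIONAL) and invalidate
 * it before the CPU reads data the device has written (DMA_FROM_DEVICE).
 */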
static inline void __dma_sync(unsigned long paddr,
			      size_t size, enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		flush_dcache_range(paddr, paddr + size);
		break;
	case DMA_FROM_DEVICE:
		invalidate_dcache_range(paddr, paddr + size);
		break;
	default:
		BUG();
	}
}
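/*
 * Scatterlist mapping is a straight 1:1 translation: a segment's DMA
 * address is its physical address.  Each segment is synced for the device
 * unless the caller passed DMA_ATTR_SKIP_CPU_SYNC.
 */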
static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
			    int nents, enum dma_data_direction direction,
			    unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync(sg_phys(sg), sg->length, direction);
	}

	return nents;
}
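/*
 * Single-page mappings work the same way: return the physical address of
 * page + offset, after the cache sync for the mapped region.
 */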
static inline dma_addr_t dma_nommu_map_page(struct device *dev,
					    struct page *page,
					    unsigned long offset,
					    size_t size,
					    enum dma_data_direction direction,
					    unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset;
}
static inline void dma_nommu_unmap_page(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					unsigned long attrs)
{
	/*
	 * No additional cache cleanup is necessary here: dma_address is
	 * already a physical address, so it can be passed straight to
	 * __dma_sync().
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(dma_address, size, direction);
}
static inline void
dma_nommu_sync_single_for_cpu(struct device *dev,
			      dma_addr_t dma_handle, size_t size,
			      enum dma_data_direction direction)
{
	/*
	 * Handing the buffer back to the CPU: only the DMA_FROM_DEVICE
	 * case needs cache maintenance (an invalidate); flushing for the
	 * other directions would be pointless.
	 */
	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}
static inline void
dma_nommu_sync_single_for_device(struct device *dev,
				 dma_addr_t dma_handle, size_t size,
				 enum dma_data_direction direction)
{
	/*
	 * It's pointless to invalidate the cache if the device isn't
	 * supposed to write to the relevant region.
	 */
	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}
static inline void
dma_nommu_sync_sg_for_cpu(struct device *dev,
			  struct scatterlist *sgl, int nents,
			  enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}
static inline void
dma_nommu_sync_sg_for_device(struct device *dev,
			     struct scatterlist *sgl, int nents,
			     enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}
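/*
 * Allow user space to mmap() a coherent buffer: the pages are remapped
 * with a non-cached protection so user space sees the same uncached view
 * of the buffer as the kernel.  Only possible with an MMU; otherwise
 * report -ENXIO.
 */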
static int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
				   void *cpu_addr, dma_addr_t handle, size_t size,
				   unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;

	if (off >= count || user_count > (count - off))
		return -ENXIO;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = consistent_virt_to_pfn(cpu_addr);
	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
	return -ENXIO;
#endif
}
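/*
 * The dma_map_ops table for directly mapped busses; MicroBlaze wires this
 * up as its default set of DMA callbacks.
 */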
const struct dma_map_ops dma_nommu_ops = {
	.alloc = dma_nommu_alloc_coherent,
	.free = dma_nommu_free_coherent,
	.mmap = dma_nommu_mmap_coherent,
	.map_sg = dma_nommu_map_sg,
	.map_page = dma_nommu_map_page,
	.unmap_page = dma_nommu_unmap_page,
	.sync_single_for_cpu = dma_nommu_sync_single_for_cpu,
	.sync_single_for_device = dma_nommu_sync_single_for_device,
	.sync_sg_for_cpu = dma_nommu_sync_sg_for_cpu,
	.sync_sg_for_device = dma_nommu_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_nommu_ops);
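/*
 * Illustrative only: a typical streaming-DMA receive sequence in a driver
 * that ends up in the callbacks above (the device, buffer and length are
 * placeholders, not taken from this file):
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... program the device with 'handle' and wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now safely read buf ...
 */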