arch/microblaze/kernel/dma.c
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>
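/*
 * The MicroBlaze data cache is not coherent with respect to DMA, so
 * coherent allocations are served from an uncached mapping via
 * consistent_alloc() whenever NOT_COHERENT_CACHE is defined.
 */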
#define NOT_COHERENT_CACHE
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag,
                                       struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
        return consistent_alloc(flag, size, dma_handle);
#else
        void *ret;
        struct page *page;
        int node = dev_to_node(dev);

        /* ignore region specifiers */
        flag &= ~(__GFP_HIGHMEM);

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = virt_to_phys(ret);

        return ret;
#endif
}
static void dma_direct_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle,
                                     struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
        consistent_free(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}
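/*
 * Scatterlist entries are direct-mapped: each entry's DMA address is its
 * physical address, and the underlying buffer is cache-synced for the
 * given transfer direction before being handed to the device.
 */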
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        /* FIXME: this part of the code is untested */
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
                __dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
                           sg->length, direction);
        }

        return nents;
}
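/* With a direct mapping, every device is assumed to reach all of memory. */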
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
        return 1;
}
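/*
 * Mapping a page only requires a cache sync; the returned DMA address is
 * simply the page's physical address plus the offset.
 */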
static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction direction,
                                             struct dma_attrs *attrs)
{
        __dma_sync(page_to_phys(page) + offset, size, direction);
        return page_to_phys(page) + offset;
}
static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        /*
         * No further cache cleanup is necessary here.
         *
         * dma_address is a physical address, which is what __dma_sync()
         * expects, so it is passed through unchanged.
         */
        __dma_sync(dma_address, size, direction);
}
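/*
 * Ownership-transfer helpers: syncing for the CPU only matters when the
 * device may have written to the buffer (DMA_FROM_DEVICE), and syncing for
 * the device only matters when the CPU may have dirtied cache lines the
 * device is about to read (DMA_TO_DEVICE).
 */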
static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
                               dma_addr_t dma_handle, size_t size,
                               enum dma_data_direction direction)
{
        /*
         * It's pointless to flush the cache as the memory segment
         * is given to the CPU
         */
        if (direction == DMA_FROM_DEVICE)
                __dma_sync(dma_handle, size, direction);
}
static inline void
dma_direct_sync_single_for_device(struct device *dev,
                                  dma_addr_t dma_handle, size_t size,
                                  enum dma_data_direction direction)
{
        /*
         * It's pointless to invalidate the cache if the device isn't
         * supposed to write to the relevant region
         */
        if (direction == DMA_TO_DEVICE)
                __dma_sync(dma_handle, size, direction);
}
static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
                           struct scatterlist *sgl, int nents,
                           enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        /* FIXME: this part of the code is untested */
        if (direction == DMA_FROM_DEVICE)
                for_each_sg(sgl, sg, nents, i)
                        __dma_sync(sg->dma_address, sg->length, direction);
}
static inline void
dma_direct_sync_sg_for_device(struct device *dev,
                              struct scatterlist *sgl, int nents,
                              enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        /* FIXME: this part of the code is untested */
        if (direction == DMA_TO_DEVICE)
                for_each_sg(sgl, sg, nents, i)
                        __dma_sync(sg->dma_address, sg->length, direction);
}
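/*
 * Map a coherent buffer into user space. On a non-coherent cache the user
 * mapping is made uncached so that it matches the kernel-side coherent
 * mapping.
 */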
static
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                             void *cpu_addr, dma_addr_t handle, size_t size,
                             struct dma_attrs *attrs)
{
#ifdef CONFIG_MMU
        unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;
        unsigned long pfn;

        if (off >= count || user_count > (count - off))
                return -ENXIO;

#ifdef NOT_COHERENT_CACHE
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pfn = consistent_virt_to_pfn(cpu_addr);
#else
        pfn = virt_to_pfn(cpu_addr);
#endif
        return remap_pfn_range(vma, vma->vm_start, pfn + off,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
        return -ENXIO;
#endif
}
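/*
 * DMA operations used for directly mapped busses on MicroBlaze. Callbacks
 * that are not listed here (e.g. unmap_sg) are simply left unset.
 */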
struct dma_map_ops dma_direct_ops = {
        .alloc                  = dma_direct_alloc_coherent,
        .free                   = dma_direct_free_coherent,
        .mmap                   = dma_direct_mmap_coherent,
        .map_sg                 = dma_direct_map_sg,
        .dma_supported          = dma_direct_dma_supported,
        .map_page               = dma_direct_map_page,
        .unmap_page             = dma_direct_unmap_page,
        .sync_single_for_cpu    = dma_direct_sync_single_for_cpu,
        .sync_single_for_device = dma_direct_sync_single_for_device,
        .sync_sg_for_cpu        = dma_direct_sync_sg_for_cpu,
        .sync_sg_for_device     = dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(dma_init);