/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>

#define NOT_COHERENT_CACHE
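
/*
 * Coherent allocations: on a cache-incoherent configuration
 * (NOT_COHERENT_CACHE) the buffer comes from the uncached consistent
 * mapping; otherwise plain pages are used and the DMA handle is simply
 * the buffer's physical address.
 */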
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag,
                                       struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
        return consistent_alloc(flag, size, dma_handle);
#else
        void *ret;
        struct page *page;
        int node = dev_to_node(dev);

        /* ignore region specifiers */
        flag &= ~(__GFP_HIGHMEM);

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = virt_to_phys(ret);

        return ret;
#endif
}
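
/* Free a buffer obtained from dma_direct_alloc_coherent(). */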
static void dma_direct_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle,
                                     struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
        consistent_free(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        /* FIXME this part of code is untested */
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
                __dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
                           sg->length, direction);
        }

        return nents;
}
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
        return 1;
}
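
/*
 * Map a single page for streaming DMA: sync the cache for the range
 * and return its physical address as the DMA address.
 */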
static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction direction,
                                             struct dma_attrs *attrs)
{
        __dma_sync(page_to_phys(page) + offset, size, direction);
        return page_to_phys(page) + offset;
}
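
/* Tear down a mapping created by dma_direct_map_page(). */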
static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        /*
         * No extra cache cleanup is necessary here: dma_address is
         * already a physical address, so it is passed to __dma_sync
         * directly instead of through __dma_sync_page, which expects
         * a virtual address and converts it with __virt_to_phys.
         */
        __dma_sync(dma_address, size, direction);
}
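
/* Make a streaming buffer visible to the CPU after device DMA. */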
static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
                               dma_addr_t dma_handle, size_t size,
                               enum dma_data_direction direction)
{
        /*
         * It's pointless to flush the cache as the memory segment
         * is given to the CPU; only DMA_FROM_DEVICE buffers need a
         * cache sync so the CPU sees the device's writes.
         */
        if (direction == DMA_FROM_DEVICE)
                __dma_sync(dma_handle, size, direction);
}
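
/* Hand a streaming buffer to the device for DMA. */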
static inline void
dma_direct_sync_single_for_device(struct device *dev,
                                  dma_addr_t dma_handle, size_t size,
                                  enum dma_data_direction direction)
{
        /*
         * It's pointless to invalidate the cache if the device isn't
         * supposed to write to the relevant region; only DMA_TO_DEVICE
         * buffers need a sync so the device sees the CPU's writes.
         */
        if (direction == DMA_TO_DEVICE)
                __dma_sync(dma_handle, size, direction);
}
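
/* Scatterlist variant of dma_direct_sync_single_for_cpu(). */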
static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
                           struct scatterlist *sgl, int nents,
                           enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        /* FIXME this part of code is untested */
        if (direction == DMA_FROM_DEVICE)
                for_each_sg(sgl, sg, nents, i)
                        __dma_sync(sg->dma_address, sg->length, direction);
}
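
/* Scatterlist variant of dma_direct_sync_single_for_device(). */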
static inline void
dma_direct_sync_sg_for_device(struct device *dev,
                              struct scatterlist *sgl, int nents,
                              enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        /* FIXME this part of code is untested */
        if (direction == DMA_TO_DEVICE)
                for_each_sg(sgl, sg, nents, i)
                        __dma_sync(sg->dma_address, sg->length, direction);
}
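
/*
 * Map a coherent buffer into user space.  On a cache-incoherent
 * configuration the user mapping must be uncached as well, and the
 * pfn is looked up through the consistent-mapping helpers.
 */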
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                             void *cpu_addr, dma_addr_t handle, size_t size,
                             struct dma_attrs *attrs)
{
#ifdef CONFIG_MMU
        unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;
        unsigned long pfn;

        if (off >= count || user_count > (count - off))
                return -ENXIO;

#ifdef NOT_COHERENT_CACHE
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pfn = consistent_virt_to_pfn(cpu_addr);
#else
        pfn = virt_to_pfn(cpu_addr);
#endif
        return remap_pfn_range(vma, vma->vm_start, pfn + off,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
        return -ENXIO;
#endif
}
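
/* The dma_map_ops table wired up for all directly mapped busses. */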
struct dma_map_ops dma_direct_ops = {
        .alloc                  = dma_direct_alloc_coherent,
        .free                   = dma_direct_free_coherent,
        .mmap                   = dma_direct_mmap_coherent,
        .map_sg                 = dma_direct_map_sg,
        .dma_supported          = dma_direct_dma_supported,
        .map_page               = dma_direct_map_page,
        .unmap_page             = dma_direct_unmap_page,
        .sync_single_for_cpu    = dma_direct_sync_single_for_cpu,
        .sync_single_for_device = dma_direct_sync_single_for_device,
        .sync_sg_for_cpu        = dma_direct_sync_sg_for_cpu,
        .sync_sg_for_device     = dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(dma_init);