/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
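/*
 * Sketch (not part of this file) of how platform code could install such an
 * offset before drivers start mapping buffers. The helper name is made up,
 * and the cast assumes archdata.dma_data is a pointer-sized slot, as the
 * cast in get_dma_direct_offset() below suggests:
 *
 *	static void __init plat_set_dma_offset(struct device *dev,
 *					       unsigned long offset)
 *	{
 *		dev->archdata.dma_data = (void *)offset;
 *	}
 */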
static unsigned long get_dma_direct_offset(struct device *dev)
{
	if (likely(dev))
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */
}
/*
 * With NOT_COHERENT_CACHE defined, coherent allocations below are served
 * by consistent_alloc()/consistent_free() (uncached mappings) instead of
 * the page allocator.
 */
#define NOT_COHERENT_CACHE
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}
static void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
						sg->length, direction);
	}

	return nents;
}
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	/* nothing to tear down for a direct mapping */
}
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	/* any DMA mask is accepted */
	return 1;
}
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}
static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/*
	 * No cache cleanup is necessary here: dma_address is already a
	 * physical address, which is exactly what __dma_sync() expects.
	 */
	__dma_sync(dma_address, size, direction);
}
static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
			       dma_addr_t dma_handle, size_t size,
			       enum dma_data_direction direction)
{
	/*
	 * It's pointless to flush the cache as the memory segment
	 * is given to the CPU
	 */
	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}
static inline void
dma_direct_sync_single_for_device(struct device *dev,
				  dma_addr_t dma_handle, size_t size,
				  enum dma_data_direction direction)
{
	/*
	 * It's pointless to invalidate the cache if the device isn't
	 * supposed to write to the relevant region
	 */
	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}
static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
			   struct scatterlist *sgl, int nents,
			   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}
static inline void
dma_direct_sync_sg_for_device(struct device *dev,
			      struct scatterlist *sgl, int nents,
			      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}
struct dma_map_ops dma_direct_ops = {
	.alloc_coherent		= dma_direct_alloc_coherent,
	.free_coherent		= dma_direct_free_coherent,
	.map_sg			= dma_direct_map_sg,
	.unmap_sg		= dma_direct_unmap_sg,
	.dma_supported		= dma_direct_dma_supported,
	.map_page		= dma_direct_map_page,
	.unmap_page		= dma_direct_unmap_page,
	.sync_single_for_cpu	= dma_direct_sync_single_for_cpu,
	.sync_single_for_device	= dma_direct_sync_single_for_device,
	.sync_sg_for_cpu	= dma_direct_sync_sg_for_cpu,
	.sync_sg_for_device	= dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);
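/*
 * Streaming mappings go through the same ops table via the generic helpers;
 * a minimal sketch, assuming dma_direct_ops is the device's dma_map_ops
 * (variable names are illustrative):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -EIO;
 *	...point the device at "handle" and start the transfer...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */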
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);