#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm/memory.h>

#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;
static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	return &arm_dma_ops;
}

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (xen_initial_domain())
		return xen_dma_ops;
	else
		return __generic_dma_ops(dev);
}
static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	BUG_ON(!dev);
	dev->archdata.dma_ops = ops;
}
#define HAVE_ARCH_DMA_SUPPORTED 1
extern int dma_supported(struct device *dev, u64 mask);
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	if (dev)
		pfn -= dev->dma_pfn_offset;
	return (dma_addr_t)__pfn_to_bus(pfn);
}
static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	unsigned long pfn = __bus_to_pfn(addr);

	if (dev)
		pfn += dev->dma_pfn_offset;

	return pfn;
}
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	if (dev) {
		unsigned long pfn = dma_to_pfn(dev, addr);

		return phys_to_virt(__pfn_to_phys(pfn));
	}

	return (void *)__bus_to_virt((unsigned long)addr);
}
static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	if (dev)
		return pfn_to_dma(dev, virt_to_pfn(addr));

	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}

#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif /* __arch_pfn_to_dma */
/* The ARM override for dma_max_pfn() */
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return dma_to_pfn(dev, *dev->dma_mask);
}
#define dma_max_pfn(dev) dma_max_pfn(dev)
#define arch_setup_dma_ops arch_setup_dma_ops
extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			       const struct iommu_ops *iommu, bool coherent);

#define arch_teardown_dma_ops arch_teardown_dma_ops
extern void arch_teardown_dma_ops(struct device *dev);
/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)
{
	return dev->archdata.dma_coherent;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	unsigned int offset = paddr & ~PAGE_MASK;
	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	unsigned int offset = dev_addr & ~PAGE_MASK;
	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
}
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	u64 limit, mask;

	if (!dev->dma_mask)
		return 0;

	mask = *dev->dma_mask;

	limit = (mask + 1) & ~mask;
	if (limit && size > limit)
		return 0;

	if ((addr | (addr + size - 1)) & ~mask)
		return 0;

	return 1;
}

static inline void dma_mark_clean(void *addr, size_t size) { }
/**
 * arm_dma_alloc - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 * @attrs: optional attributes that specify mapping properties
 *
 * Allocate some memory for a device for performing DMA. This function
 * allocates pages, and will return the CPU-viewed address, and sets @handle
 * to be the device-viewed address.
 */
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp, unsigned long attrs);
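
/*
 * Illustrative sketch (not part of the original header): arm_dma_alloc() is
 * normally reached through the generic dma_alloc_coherent() wrapper rather
 * than called directly.  A hypothetical driver-side use might look roughly
 * like this; "dev" and the buffer size are placeholders:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	... program the device with "handle", access "buf" from the CPU ...
 *	dma_free_coherent(dev, SZ_4K, buf, handle);
 */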
/**
 * arm_dma_free - free memory allocated by arm_dma_alloc
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by arm_dma_alloc().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call are illegal.
 */
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle, unsigned long attrs);
/**
 * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);
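
/*
 * Illustrative sketch (not part of the original header): drivers normally
 * reach arm_dma_mmap() through dma_mmap_coherent() from their own mmap
 * handler.  The buffer is assumed to have come from dma_alloc_coherent();
 * all names below are hypothetical:
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(my_dev, vma, my_cpu_addr,
 *					 my_dma_handle, my_size);
 *	}
 */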
/*
 * This can be called during early boot to increase the size of the atomic
 * coherent DMA pool above the default value of 256KiB. It must be called
 * before postcore_initcall.
 */
extern void __init init_dma_coherent_pool_size(unsigned long size);
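
/*
 * Illustrative sketch (not part of the original header): a platform could
 * grow the atomic pool from an early machine-init hook, well before
 * postcore_initcall.  The hook name and pool size are invented for the
 * example:
 *
 *	static void __init my_board_reserve(void)
 *	{
 *		init_dma_coherent_pool_size(SZ_1M);
 *	}
 */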
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 */
/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
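
/*
 * Illustrative sketch (not part of the original header): platform code might
 * register a device whose DMA window is limited, supplying a needs_bounce
 * callback.  The 64MB limit and the pool sizes are invented for the example:
 *
 *	static int my_needs_bounce(struct device *dev, dma_addr_t addr,
 *				   size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	ret = dmabounce_register_dev(dev, SZ_2K, SZ_64K, my_needs_bounce);
 *	if (ret)
 *		dev_err(dev, "failed to register with dmabounce\n");
 */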
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);
/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, unsigned long attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, unsigned long attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
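
/*
 * Illustrative sketch (not part of the original header): these are the
 * implementations behind the generic dma_map_sg()/dma_unmap_sg() calls a
 * driver would normally use.  Scatterlist setup is omitted; "sg", "nents"
 * and the transfer direction are placeholders:
 *
 *	int count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *
 *	if (!count)
 *		return -ENOMEM;
 *	... hand the mapped list to the hardware ...
 *	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
 */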
#endif /* __KERNEL__ */
#endif