/* Copyright (C) 2002 by James.Bottomley@HansenPartnership.com
 *
 * Implements the generic device dma API via the existing pci_ one
 * for unconverted architectures
 */
7 #ifndef _ASM_GENERIC_DMA_MAPPING_H
8 #define _ASM_GENERIC_DMA_MAPPING_H
10 #include <linux/config.h>
/* we implement the API below in terms of the existing PCI one,
 * so include it */
16 #include <linux/pci.h>
17 /* need struct page definitions */
21 dma_supported(struct device
*dev
, u64 mask
)
23 BUG_ON(dev
->bus
!= &pci_bus_type
);
25 return pci_dma_supported(to_pci_dev(dev
), mask
);
29 dma_set_mask(struct device
*dev
, u64 dma_mask
)
31 BUG_ON(dev
->bus
!= &pci_bus_type
);
33 return pci_set_dma_mask(to_pci_dev(dev
), dma_mask
);
37 dma_alloc_coherent(struct device
*dev
, size_t size
, dma_addr_t
*dma_handle
,
40 BUG_ON(dev
->bus
!= &pci_bus_type
);
42 return pci_alloc_consistent(to_pci_dev(dev
), size
, dma_handle
);
46 dma_free_coherent(struct device
*dev
, size_t size
, void *cpu_addr
,
47 dma_addr_t dma_handle
)
49 BUG_ON(dev
->bus
!= &pci_bus_type
);
51 pci_free_consistent(to_pci_dev(dev
), size
, cpu_addr
, dma_handle
);
54 static inline dma_addr_t
55 dma_map_single(struct device
*dev
, void *cpu_addr
, size_t size
,
56 enum dma_data_direction direction
)
58 BUG_ON(dev
->bus
!= &pci_bus_type
);
60 return pci_map_single(to_pci_dev(dev
), cpu_addr
, size
, (int)direction
);
64 dma_unmap_single(struct device
*dev
, dma_addr_t dma_addr
, size_t size
,
65 enum dma_data_direction direction
)
67 BUG_ON(dev
->bus
!= &pci_bus_type
);
69 pci_unmap_single(to_pci_dev(dev
), dma_addr
, size
, (int)direction
);
72 static inline dma_addr_t
73 dma_map_page(struct device
*dev
, struct page
*page
,
74 unsigned long offset
, size_t size
,
75 enum dma_data_direction direction
)
77 BUG_ON(dev
->bus
!= &pci_bus_type
);
79 return pci_map_page(to_pci_dev(dev
), page
, offset
, size
, (int)direction
);
83 dma_unmap_page(struct device
*dev
, dma_addr_t dma_address
, size_t size
,
84 enum dma_data_direction direction
)
86 BUG_ON(dev
->bus
!= &pci_bus_type
);
88 pci_unmap_page(to_pci_dev(dev
), dma_address
, size
, (int)direction
);
92 dma_map_sg(struct device
*dev
, struct scatterlist
*sg
, int nents
,
93 enum dma_data_direction direction
)
95 BUG_ON(dev
->bus
!= &pci_bus_type
);
97 return pci_map_sg(to_pci_dev(dev
), sg
, nents
, (int)direction
);
101 dma_unmap_sg(struct device
*dev
, struct scatterlist
*sg
, int nhwentries
,
102 enum dma_data_direction direction
)
104 BUG_ON(dev
->bus
!= &pci_bus_type
);
106 pci_unmap_sg(to_pci_dev(dev
), sg
, nhwentries
, (int)direction
);
110 dma_sync_single_for_cpu(struct device
*dev
, dma_addr_t dma_handle
, size_t size
,
111 enum dma_data_direction direction
)
113 BUG_ON(dev
->bus
!= &pci_bus_type
);
115 pci_dma_sync_single_for_cpu(to_pci_dev(dev
), dma_handle
,
116 size
, (int)direction
);
120 dma_sync_single_for_device(struct device
*dev
, dma_addr_t dma_handle
, size_t size
,
121 enum dma_data_direction direction
)
123 BUG_ON(dev
->bus
!= &pci_bus_type
);
125 pci_dma_sync_single_for_device(to_pci_dev(dev
), dma_handle
,
126 size
, (int)direction
);
130 dma_sync_sg_for_cpu(struct device
*dev
, struct scatterlist
*sg
, int nelems
,
131 enum dma_data_direction direction
)
133 BUG_ON(dev
->bus
!= &pci_bus_type
);
135 pci_dma_sync_sg_for_cpu(to_pci_dev(dev
), sg
, nelems
, (int)direction
);
139 dma_sync_sg_for_device(struct device
*dev
, struct scatterlist
*sg
, int nelems
,
140 enum dma_data_direction direction
)
142 BUG_ON(dev
->bus
!= &pci_bus_type
);
144 pci_dma_sync_sg_for_device(to_pci_dev(dev
), sg
, nelems
, (int)direction
);
148 dma_mapping_error(dma_addr_t dma_addr
)
150 return pci_dma_mapping_error(dma_addr
);
157 dma_supported(struct device
*dev
, u64 mask
)
163 dma_set_mask(struct device
*dev
, u64 dma_mask
)
170 dma_alloc_coherent(struct device
*dev
, size_t size
, dma_addr_t
*dma_handle
,
178 dma_free_coherent(struct device
*dev
, size_t size
, void *cpu_addr
,
179 dma_addr_t dma_handle
)
184 static inline dma_addr_t
185 dma_map_single(struct device
*dev
, void *cpu_addr
, size_t size
,
186 enum dma_data_direction direction
)
193 dma_unmap_single(struct device
*dev
, dma_addr_t dma_addr
, size_t size
,
194 enum dma_data_direction direction
)
199 static inline dma_addr_t
200 dma_map_page(struct device
*dev
, struct page
*page
,
201 unsigned long offset
, size_t size
,
202 enum dma_data_direction direction
)
209 dma_unmap_page(struct device
*dev
, dma_addr_t dma_address
, size_t size
,
210 enum dma_data_direction direction
)
216 dma_map_sg(struct device
*dev
, struct scatterlist
*sg
, int nents
,
217 enum dma_data_direction direction
)
224 dma_unmap_sg(struct device
*dev
, struct scatterlist
*sg
, int nhwentries
,
225 enum dma_data_direction direction
)
231 dma_sync_single_for_cpu(struct device
*dev
, dma_addr_t dma_handle
, size_t size
,
232 enum dma_data_direction direction
)
238 dma_sync_single_for_device(struct device
*dev
, dma_addr_t dma_handle
, size_t size
,
239 enum dma_data_direction direction
)
245 dma_sync_sg_for_cpu(struct device
*dev
, struct scatterlist
*sg
, int nelems
,
246 enum dma_data_direction direction
)
252 dma_sync_sg_for_device(struct device
*dev
, struct scatterlist
*sg
, int nelems
,
253 enum dma_data_direction direction
)
259 dma_error(dma_addr_t dma_addr
)
266 /* Now for the API extensions over the pci_ one */
/* Noncoherent allocations are served by the coherent path here, so
 * memory handed out is always consistent and dma_is_consistent() can
 * unconditionally report true. */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d) (1)
273 dma_get_cache_alignment(void)
275 /* no easy way to get cache size on all processors, so return
276 * the maximum possible, to be safe */
277 return (1 << L1_CACHE_SHIFT_MAX
);
281 dma_sync_single_range_for_cpu(struct device
*dev
, dma_addr_t dma_handle
,
282 unsigned long offset
, size_t size
,
283 enum dma_data_direction direction
)
285 /* just sync everything, that's all the pci API can do */
286 dma_sync_single_for_cpu(dev
, dma_handle
, offset
+size
, direction
);
290 dma_sync_single_range_for_device(struct device
*dev
, dma_addr_t dma_handle
,
291 unsigned long offset
, size_t size
,
292 enum dma_data_direction direction
)
294 /* just sync everything, that's all the pci API can do */
295 dma_sync_single_for_device(dev
, dma_handle
, offset
+size
, direction
);
299 dma_cache_sync(void *vaddr
, size_t size
,
300 enum dma_data_direction direction
)
302 /* could define this in terms of the dma_cache ... operations,
303 * but if you get this on a platform, you should convert the platform
304 * to using the generic device DMA API */