[pv_ops_mirror.git] / include / asm-sh64 / dma-mapping.h
#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

struct pci_dev;
extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
                              dma_addr_t *dma_handle);
extern void consistent_free(struct pci_dev *hwdev, size_t size,
                            void *vaddr, dma_addr_t dma_handle);

#define dma_supported(dev, mask)        (1)
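
/*
 * dma_supported() unconditionally reports success, so dma_set_mask()
 * only has to check that the device actually has a dma_mask to update.
 */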
static inline int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag)
{
        return consistent_alloc(NULL, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle)
{
        consistent_free(NULL, size, vaddr, dma_handle);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
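
/*
 * DMA buffers are treated as non-coherent here: dma_cache_sync() writes
 * back and invalidates the buffer's cache lines regardless of direction.
 */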
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                                  enum dma_data_direction dir)
{
        dma_cache_wback_inv((unsigned long)vaddr, size);
}

static inline dma_addr_t dma_map_single(struct device *dev,
                                        void *ptr, size_t size,
                                        enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        /* Coherent PCI DMA: no cache maintenance is needed. */
        if (dev->bus == &pci_bus_type)
                return virt_to_phys(ptr);
#endif
        dma_cache_sync(dev, ptr, size, dir);

        return virt_to_phys(ptr);
}

/* Mappings are plain physical addresses, so there is nothing to undo. */
#define dma_unmap_single(dev, addr, size, dir)  do { } while (0)
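
/*
 * Scatterlist entries are mapped one at a time: on non-coherent
 * configurations each segment is flushed, and its bus address is the
 * physical address of its page plus the segment offset.
 */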
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
                             int nents, enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
                dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
                               sg[i].length, dir);
#endif
                sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
        }

        return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)       do { } while (0)

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction dir)
{
        return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                  size_t size, enum dma_data_direction dir)
{
        dma_unmap_single(dev, dma_address, size, dir);
}

static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        if (dev->bus == &pci_bus_type)
                return;
#endif
        dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        if (dev->bus == &pci_bus_type)
                return;
#endif
        dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
                               int nelems, enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
                dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
                               sg[i].length, dir);
#endif
                sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
        }
}
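
/*
 * The _for_cpu and _for_device variants map to the same cache operation
 * on this port; both are thin wrappers around the common sync helpers.
 */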
static inline void dma_sync_single_for_cpu(struct device *dev,
                                           dma_addr_t dma_handle, size_t size,
                                           enum dma_data_direction dir)
{
        dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                                              dma_addr_t dma_handle, size_t size,
                                              enum dma_data_direction dir)
{
        dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
                                                 dma_addr_t dma_handle,
                                                 unsigned long offset,
                                                 size_t size,
                                                 enum dma_data_direction direction)
{
        dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                                                    dma_addr_t dma_handle,
                                                    unsigned long offset,
                                                    size_t size,
                                                    enum dma_data_direction direction)
{
        dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
                                       struct scatterlist *sg, int nelems,
                                       enum dma_data_direction dir)
{
        dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
                                          struct scatterlist *sg, int nelems,
                                          enum dma_data_direction dir)
{
        dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
        /*
         * Each processor family will define its own L1_CACHE_SHIFT,
         * L1_CACHE_BYTES wraps to this, so this is always safe.
         */
        return L1_CACHE_BYTES;
}

/* A DMA handle of zero denotes a failed mapping. */
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
        return dma_addr == 0;
}

#endif /* __ASM_SH_DMA_MAPPING_H */
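
For context, here is a minimal sketch of how a driver might use the streaming API declared above. It is not part of the header; the function name example_start_dma, the device pointer, buffer, and length are hypothetical, and error handling is limited to the dma_mapping_error() check this header provides.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical helper: hand a driver-owned buffer to a device for DMA. */
static int example_start_dma(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        /* Flushes the CPU cache where needed and returns the bus address. */
        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(handle))
                return -EIO;

        /* ... program the device with 'handle' and start the transfer ... */

        /* A no-op on this port, but keeps the driver portable. */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}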