#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/io.h>

struct pci_dev;

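/*
 * consistent_alloc()/consistent_free() are the platform's uncached
 * ("consistent") buffer allocator; dma_alloc_coherent() and
 * dma_free_coherent() below are thin wrappers that pass a NULL pci_dev.
 */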
extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
			      dma_addr_t *dma_handle);
extern void consistent_free(struct pci_dev *hwdev, size_t size,
			    void *vaddr, dma_addr_t dma_handle);

#define dma_supported(dev, mask) (1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return consistent_alloc(NULL, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	consistent_free(NULL, size, vaddr, dma_handle);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

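/*
 * Write back and invalidate the data cache lines covering [vaddr,
 * vaddr + size).  "ocbp" is the SH-5 operand cache block purge
 * instruction, so the range is walked one L1 cache line at a time.
 */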
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	unsigned long start = (unsigned long) vaddr;
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbp %0, 0" : : "r" (s));
}

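/*
 * Streaming mappings: when PCI DMA is coherent, mapping is just a
 * virt_to_phys() translation; on noncoherent configurations the buffer
 * is purged from the cache first.
 */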
static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_phys(ptr);
#endif
	dma_cache_sync(dev, ptr, size, dir);

	return virt_to_phys(ptr);
}

#define dma_unmap_single(dev, addr, size, dir) do { } while (0)

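/*
 * Map a scatterlist for DMA: each segment gets the same treatment as
 * dma_map_single(), and its bus address is recorded in sg[i].dma_address.
 */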
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
		sg[i].dma_address = sg_phys(&sg[i]);
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0)

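/* Page mappings simply reuse the single-buffer paths above. */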
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}

static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
		sg[i].dma_address = sg_phys(&sg[i]);
	}
}

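/*
 * The *_for_cpu and *_for_device variants make no distinction here;
 * both directions go through the same cache purge helpers above.
 */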
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle, size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t dma_handle,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

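/* A bus address of 0 is treated as the "mapping failed" marker. */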
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

#endif /* __ASM_SH_DMA_MAPPING_H */