#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>		/* page_address() */
#include <linux/scatterlist.h>
#include <asm/io.h>		/* virt_to_phys()/phys_to_virt() */

extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
			      dma_addr_t *dma_handle);
extern void consistent_free(struct pci_dev *hwdev, size_t size,
			    void *vaddr, dma_addr_t dma_handle);

#define dma_supported(dev, mask)	(1)

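/*
 * dma_supported() is hardwired to 1, so dma_set_mask() accepts any
 * mask as long as the device has a dma_mask pointer to store it in.
 */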
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

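/*
 * Coherent allocations are delegated to consistent_alloc() and
 * consistent_free(); the struct device and gfp arguments are ignored
 * and a NULL pci_dev is passed down.
 */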
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return consistent_alloc(NULL, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	consistent_free(NULL, size, vaddr, dma_handle);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

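/*
 * dma_cache_sync() walks the buffer one L1 cache line at a time and
 * issues "ocbp" (operand cache block purge), writing each line back to
 * memory and invalidating it regardless of the DMA direction.
 */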
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	unsigned long s = (unsigned long) vaddr & L1_CACHE_ALIGN_MASK;
	unsigned long e = ((unsigned long) vaddr + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbp %0, 0" : : "r" (s));
}

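/*
 * Streaming mappings: when PCI DMA is coherent (CONFIG_PCI set and
 * CONFIG_SH_PCIDMA_NONCOHERENT unset) devices on the PCI bus skip the
 * cache purge; everything else is purged via dma_cache_sync() before
 * the physical address is returned as the DMA handle.
 *
 * Illustrative driver usage (a sketch only; "dev", "buf" and "len" are
 * hypothetical, not part of this header):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -ENOMEM;
 *	... run the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */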
static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_phys(ptr);
#endif
	dma_cache_sync(dev, ptr, size, dir);

	return virt_to_phys(ptr);
}

#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

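/*
 * dma_map_sg() purges the cache for each segment on non-coherent
 * configurations and records the physical address of every segment in
 * sg[i].dma_address; there is no remapping, so nents is returned
 * unchanged.
 */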
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
		sg[i].dma_address = sg_phys(&sg[i]);
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}

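/*
 * The sync operations are no-ops for coherent PCI devices; otherwise
 * they purge the caches over the buffer (or sub-range) that the handle
 * maps, whatever the direction.
 */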
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
		sg[i].dma_address = sg_phys(&sg[i]);
	}
}

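/*
 * This port does not distinguish syncing for the CPU from syncing for
 * the device: the directional helpers below all defer to
 * dma_sync_single() and dma_sync_sg() above.
 */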
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle, size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t dma_handle,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

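/* A DMA handle of 0 is treated as a failed mapping on this port. */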
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

#endif /* __ASM_SH_DMA_MAPPING_H */