/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
#ifndef _BLACKFIN_DMA_MAPPING_H
#define _BLACKFIN_DMA_MAPPING_H

#include <asm/cacheflush.h>
struct scatterlist;
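/*
 * Coherent allocations: the returned buffer is usable by both CPU and
 * device without further cache maintenance (on Blackfin it is expected
 * to come from an uncached region).
 */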
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		       dma_addr_t dma_handle);
/*
 * Now for the API extensions over the pci_ one
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_supported(d, m)		(1)
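/* dma_supported() unconditionally succeeds, so any mask a driver asks for is accepted. */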
static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
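/* Streaming mappings cannot fail on this architecture, so there is never an error to report. */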
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}
extern void
__dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir);
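/*
 * Cache maintenance for streaming DMA: Blackfin is not DMA-coherent, so
 * buffers are written back before the device reads them and invalidated
 * before the CPU reads data the device has written.
 */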
static inline void
__dma_sync_inline(dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_NONE:
		BUG();
	case DMA_TO_DEVICE:		/* writeback only */
		flush_dcache_range(addr, addr + size);
		break;
	case DMA_FROM_DEVICE:		/* invalidate only */
	case DMA_BIDIRECTIONAL:		/* flush and invalidate */
		/* Blackfin has no dedicated invalidate (it includes a flush) */
		invalidate_dcache_range(addr, addr + size);
		break;
	}
}
static inline void
_dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	/* Resolve the cache operation at compile time when possible. */
	if (__builtin_constant_p(dir))
		__dma_sync_inline(addr, size, dir);
	else
		__dma_sync(addr, size, dir);
}
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction dir)
{
	_dma_sync((dma_addr_t)ptr, size, dir);
	return (dma_addr_t)ptr;
}
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}
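/* Unmapping only sanity-checks the direction; no remapping was done, so there is nothing to undo. */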
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
	       enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_addr, size, dir);
}
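/* Scatter/gather variants of the streaming API; dma_map_sg() syncs each list entry in turn. */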
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir);
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	     int nhwentries, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}
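/*
 * Partial and whole-buffer sync helpers.  Only the _for_device variants
 * perform cache maintenance; handing a buffer back to the CPU needs no
 * extra work in this implementation.
 */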
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}
static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir)
{
	_dma_sync(handle + offset, size, dir);
}
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
			enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
			   enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		    enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}
extern void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nents, enum dma_data_direction dir);
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	_dma_sync((dma_addr_t)vaddr, size, dir);
}
#endif				/* _BLACKFIN_DMA_MAPPING_H */