/*
 * include/asm-xtensa/dma-mapping.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_DMA_MAPPING_H
#define _XTENSA_DMA_MAPPING_H

#include <asm/scatterlist.h>
#include <asm/cache.h>

/*
 * DMA-consistent mapping functions.
 */

extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long);
extern void consistent_free(void*, size_t, dma_addr_t);
extern void consistent_sync(void*, size_t, int);
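
/*
 * Note: the consistent_*() routines above are the port-internal
 * primitives (defined elsewhere in the Xtensa port) that the inline
 * wrappers below delegate to; consistent_sync() performs the cache
 * maintenance for a virtual address range in the given direction.
 */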

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);
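
/*
 * Example (a sketch only; "dev", "buf" and BUF_SIZE are hypothetical):
 * a driver typically allocates a coherent buffer once and later frees
 * it with the same size/handle pair:
 *
 *	void *buf;
 *	dma_addr_t buf_dma;
 *
 *	buf = dma_alloc_coherent(dev, BUF_SIZE, &buf_dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, BUF_SIZE, buf, buf_dma);
 */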

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	consistent_sync(ptr, size, direction);
	return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
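
/*
 * Example (a sketch; "dev", "skb_data" and "len" are hypothetical):
 * a streaming mapping brackets the device's access, with the direction
 * chosen from the device's point of view:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, skb_data, len, DMA_TO_DEVICE);
 *	... point the device at "handle" and let it run ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */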

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		BUG_ON(!sg->page);

		sg->dma_address = page_to_phys(sg->page) + sg->offset;
		consistent_sync(page_address(sg->page) + sg->offset,
				sg->length, direction);
	}

	return nents;
}
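
/*
 * dma_map_sg() walks the scatterlist once: each entry's bus address is
 * derived directly from its page and offset (there is no IOMMU on this
 * port), and the CPU cache is made consistent for each segment before
 * the device sees it. It returns "nents" unchanged, since no entries
 * are merged.
 */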

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}
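
/*
 * Example (a sketch; "dev", "handle" and "len" are hypothetical): for a
 * long-lived streaming mapping, ownership bounces between CPU and
 * device via the sync calls instead of unmapping and remapping:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU reads the freshly DMA'd data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */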

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle) + offset, size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle) + offset, size, direction);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++, sg++)
		consistent_sync(page_address(sg->page) + sg->offset,
				sg->length, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++, sg++)
		consistent_sync(page_address(sg->page) + sg->offset,
				sg->length, dir);
}

static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
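
/*
 * Example (a sketch; the 0xffffffff literal stands in for a driver's
 * real addressing limit): a driver declares how many address bits its
 * device can drive before mapping anything:
 *
 *	if (dma_set_mask(dev, 0xffffffffULL))
 *		return -EIO;
 */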

static inline int
dma_get_cache_alignment(void)
{
	return L1_CACHE_BYTES;
}

#define dma_is_consistent(d) (1)

static inline void
dma_cache_sync(void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	consistent_sync(vaddr, size, direction);
}

#endif	/* _XTENSA_DMA_MAPPING_H */