/*
 * include/asm-xtensa/dma-mapping.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_DMA_MAPPING_H
#define _XTENSA_DMA_MAPPING_H

#include <asm/cache.h>
#include <asm/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/*
 * DMA-consistent mapping functions.
 */

extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long);
extern void consistent_free(void *, size_t, dma_addr_t);
extern void consistent_sync(void *, size_t, int);
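
/*
 * consistent_sync() is the arch-private cache-maintenance primitive that
 * backs the streaming helpers below; callers pass an
 * enum dma_data_direction value as the int argument.
 */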
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);
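
/*
 * Usage sketch (illustrative only; "my_dev" and the PAGE_SIZE length are
 * assumptions, not taken from this header):
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(my_dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *
 *	if (buf) {
 *		... program the device with "handle", access the memory
 *		through "buf" ...
 *		dma_free_coherent(my_dev, PAGE_SIZE, buf, handle);
 *	}
 */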
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	consistent_sync(ptr, size, direction);
	return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
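
/*
 * Streaming sketch (hedged example; "my_dev", "buf" and "len" are
 * illustrative names, not defined here):
 *
 *	dma_addr_t dma = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *	... start the transfer and wait for it to complete ...
 *	dma_unmap_single(my_dev, dma, len, DMA_TO_DEVICE);
 */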
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		BUG_ON(!sg_page(sg));

		sg->dma_address = sg_phys(sg);
		consistent_sync(sg_virt(sg), sg->length, direction);
	}

	return nents;
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	/* Note: unlike dma_map_single(), no cache maintenance is done here. */
	return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
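
/*
 * Scatter-gather sketch (hedged; "my_dev", "sgl" and "count" are
 * illustrative). dma_map_sg() above always maps every entry:
 *
 *	int mapped = dma_map_sg(my_dev, sgl, count, DMA_FROM_DEVICE);
 *	... feed sg_dma_address()/sg_dma_len() of each entry to the device ...
 *	dma_unmap_sg(my_dev, sgl, mapped, DMA_FROM_DEVICE);
 */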
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle) + offset, size,
			direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	consistent_sync((void *)bus_to_virt(dma_handle) + offset, size,
			direction);
}
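
/*
 * Ownership hand-off sketch (hedged; names are illustrative). A driver
 * that inspects a buffer between device transfers syncs instead of
 * remapping:
 *
 *	dma_sync_single_for_cpu(my_dev, dma, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer ...
 *	dma_sync_single_for_device(my_dev, dma, len, DMA_FROM_DEVICE);
 */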
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++, sg++)
		consistent_sync(sg_virt(sg), sg->length, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++, sg++)
		consistent_sync(sg_virt(sg), sg->length, dir);
}

static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	/* Mappings on this port cannot fail, so there is no error cookie. */
	return 0;
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
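
/*
 * Mask sketch (hedged; "my_dev" is illustrative; DMA_BIT_MASK() comes from
 * <linux/dma-mapping.h>):
 *
 *	if (dma_set_mask(my_dev, DMA_BIT_MASK(32)))
 *		dev_warn(my_dev, "no suitable DMA mask available\n");
 */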
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	consistent_sync(vaddr, size, direction);
}

#endif	/* _XTENSA_DMA_MAPPING_H */