/*
 *  Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
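
/*
 * DMA mapping support for AVR32: coherent buffers are normally handed
 * out through the uncached P2 segment, or mapped into P3 with
 * write-combining (via __ioremap()) when DMA_ATTR_WRITE_COMBINE is
 * requested.  Streaming mappings rely on the explicit cache
 * maintenance done by dma_cache_sync().
 */
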
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/addrspace.h>
void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
        /*
         * No need to sync an uncached area
         */
        if (PXSEG(vaddr) == P2SEG)
                return;

        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
                invalidate_dcache_region(vaddr, size);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                clean_dcache_region(vaddr, size);
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                flush_dcache_region(vaddr, size);
                break;
        default:
                BUG();
        }
}
EXPORT_SYMBOL(dma_cache_sync);
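
/*
 * Allocate the backing pages for a coherent buffer.  alloc_pages()
 * rounds the request up to a power-of-two order, so the pages between
 * the page-aligned size and 1 << order are split off and given back to
 * the page allocator below.
 */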
static struct page *__dma_alloc(struct device *dev, size_t size,
                                dma_addr_t *handle, gfp_t gfp)
{
        struct page *page, *free, *end;
        int order;

        /* Following is a work-around (a.k.a. hack) to prevent pages
         * with __GFP_COMP being passed to split_page() which cannot
         * handle them.  The real problem is that this flag probably
         * should be 0 on AVR32 as it is not supported on this
         * platform--see CONFIG_HUGETLB_PAGE. */
        gfp &= ~(__GFP_COMP);

        size = PAGE_ALIGN(size);
        order = get_order(size);

        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;
        split_page(page, order);

        /*
         * When accessing physical memory with valid cache data, we
         * get a cache hit even if the virtual memory region is marked
         * as uncached.
         *
         * Since the memory is newly allocated, there is no point in
         * doing a writeback. If the previous owner cares, he should
         * have flushed the cache before releasing the memory.
         */
        invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

        *handle = page_to_bus(page);
        free = page + (size >> PAGE_SHIFT);
        end = page + (1 << order);

        /*
         * Free any unused pages
         */
        while (free < end) {
                __free_page(free);
                free++;
        }

        return page;
}
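
/*
 * Return every page of a buffer obtained from __dma_alloc() to the
 * page allocator.
 */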
static void __dma_free(struct device *dev, size_t size,
                       struct page *page, dma_addr_t handle)
{
        struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

        while (page < end)
                __free_page(page++);
}
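
/*
 * dma_map_ops .alloc hook: allocate the pages and return the CPU
 * pointer the driver will use: an uncached P2 alias of the buffer by
 * default, or a write-combining P3 mapping when DMA_ATTR_WRITE_COMBINE
 * is set in attrs.
 */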
static void *avr32_dma_alloc(struct device *dev, size_t size,
                dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
        struct page *page;
        dma_addr_t phys;

        page = __dma_alloc(dev, size, handle, gfp);
        if (!page)
                return NULL;
        phys = page_to_phys(page);

        if (attrs & DMA_ATTR_WRITE_COMBINE) {
                /* Now, map the page into P3 with write-combining turned on */
                *handle = phys;
                return __ioremap(phys, size, _PAGE_BUFFER);
        } else {
                return phys_to_uncached(phys);
        }
}
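
/*
 * Driver-side usage sketch (illustrative only; the device pointer and
 * the buffer size are made up):
 *
 *      dma_addr_t dma;
 *      void *buf = dma_alloc_attrs(dev, 4096, &dma, GFP_KERNEL,
 *                                  DMA_ATTR_WRITE_COMBINE);
 *
 * Such a request is routed to avr32_dma_alloc() above, and the
 * matching dma_free_attrs() call ends up in avr32_dma_free() below.
 */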
static void avr32_dma_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
        struct page *page;

        if (attrs & DMA_ATTR_WRITE_COMBINE) {
                iounmap(cpu_addr);

                page = phys_to_page(handle);
        } else {
                void *addr = phys_to_cached(uncached_to_phys(cpu_addr));

                pr_debug("avr32_dma_free addr %p (phys %08lx) size %u\n",
                         cpu_addr, (unsigned long)handle, (unsigned)size);

                BUG_ON(!virt_addr_valid(addr));
                page = virt_to_page(addr);
        }

        __dma_free(dev, size, page, handle);
}
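
/*
 * Streaming mapping of a single page: perform the cache maintenance
 * required by the transfer direction, then hand the device the bus
 * address of the page.
 */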
static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
                enum dma_data_direction direction, unsigned long attrs)
{
        void *cpu_addr = page_address(page) + offset;

        dma_cache_sync(dev, cpu_addr, size, direction);
        return virt_to_bus(cpu_addr);
}
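
/*
 * Streaming mapping of a scatterlist: fill in the bus address of each
 * element and do the cache maintenance entry by entry.
 */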
static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                int nents, enum dma_data_direction direction,
                unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nents, i) {
                char *virt;

                sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
                virt = sg_virt(sg);
                dma_cache_sync(dev, virt, sg->length, direction);
        }

        return nents;
}
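
/*
 * The sync_*_for_device hooks redo the cache maintenance so that a
 * mapped buffer can be handed back to the device after the CPU has
 * touched it.
 */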
static void avr32_dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
}
static void avr32_dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sglist, int nents,
                enum dma_data_direction direction)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nents, i)
                dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
}
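
/*
 * The set of DMA operations used on AVR32; exported so the
 * architecture's asm/dma-mapping.h can hand it out as the default
 * dma_map_ops for all devices.
 */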
struct dma_map_ops avr32_dma_ops = {
        .alloc                  = avr32_dma_alloc,
        .free                   = avr32_dma_free,
        .map_page               = avr32_dma_map_page,
        .map_sg                 = avr32_dma_map_sg,
        .sync_single_for_device = avr32_dma_sync_single_for_device,
        .sync_sg_for_device     = avr32_dma_sync_sg_for_device,
};
EXPORT_SYMBOL(avr32_dma_ops);