/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/export.h>

#include <asm/addrspace.h>
#include <asm/cacheflush.h>
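/*
 * Background on the AVR32 address segments used below: P1 maps
 * physical memory cached, P2 maps the same physical memory uncached,
 * and P3 is the TLB-mapped kernel segment that __ioremap() allocates
 * from. Hence an address in P2SEG never needs a cache sync, and
 * phys_to_uncached() yields a coherent CPU view of a page.
 */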
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    int direction)
{
	/*
	 * No need to sync an uncached area
	 */
	if (PXSEG(vaddr) == P2SEG)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		invalidate_dcache_region(vaddr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_region(vaddr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		flush_dcache_region(vaddr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);
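/*
 * Usage sketch (illustrative, not part of this file): a driver doing
 * streaming DMA on a kernel buffer it owns. The buffer, length and
 * start_dma_to_device() names are hypothetical.
 *
 *	// Device will read the buffer: write dirty lines back first.
 *	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
 *	start_dma_to_device(dev, virt_to_bus(buf), len);
 *
 *	// Device has written the buffer: drop stale cache lines
 *	// before the CPU reads the new data.
 *	dma_cache_sync(dev, buf, len, DMA_FROM_DEVICE);
 *
 * DMA_FROM_DEVICE only invalidates, because a writeback at that
 * point could overwrite data the device just placed in memory.
 */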
static struct page *__dma_alloc(struct device *dev, size_t size,
				dma_addr_t *handle, gfp_t gfp)
{
	struct page *page, *free, *end;
	int order;

	/* Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on AVR32 as it is not supported on this
	 * platform--see CONFIG_HUGETLB_PAGE. */
	gfp &= ~(__GFP_COMP);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;
	split_page(page, order);

	/*
	 * When accessing physical memory with valid cache data, we
	 * get a cache hit even if the virtual memory region is marked
	 * as uncached.
	 *
	 * Since the memory is newly allocated, there is no point in
	 * doing a writeback. If the previous owner cares, he should
	 * have flushed the cache before releasing the memory.
	 */
	invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

	*handle = page_to_bus(page);
	free = page + (size >> PAGE_SHIFT);
	end = page + (1 << order);

	/*
	 * Free any unused pages
	 */
	while (free < end) {
		__free_page(free);
		free++;
	}

	return page;
}
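/*
 * Worked example (illustrative, not part of this file): how the
 * trimming above plays out for a non-power-of-two request, assuming
 * 4 KiB pages (PAGE_SHIFT == 12) and a 12 KiB allocation:
 *
 *	size  = PAGE_ALIGN(12288) -> 12288 (3 pages)
 *	order = get_order(12288)  -> 2     (alloc_pages returns 4 pages)
 *	free  = page + 3, end = page + 4
 *
 * split_page() turns the order-2 allocation into four independent
 * order-0 pages, so the loop can hand the fourth, unused page back
 * to the allocator; without the split, a tail page of a high-order
 * allocation could not be freed on its own.
 */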
static void __dma_free(struct device *dev, size_t size,
		       struct page *page, dma_addr_t handle)
{
	struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

	/* __dma_alloc() split the allocation into order-0 pages,
	 * so free them back one at a time. */
	while (page < end)
		__free_page(page++);
}
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	void *ret = NULL;

	page = __dma_alloc(dev, size, handle, gfp);
	if (page)
		ret = phys_to_uncached(page_to_phys(page));

	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_coherent(struct device *dev, size_t size,
		       void *cpu_addr, dma_addr_t handle)
{
	void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
	struct page *page;

	pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
		 cpu_addr, (unsigned long)handle, (unsigned)size);
	BUG_ON(!virt_addr_valid(addr));
	page = virt_to_page(addr);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_coherent);
void *dma_alloc_writecombine(struct device *dev, size_t size,
			     dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	dma_addr_t phys;

	page = __dma_alloc(dev, size, handle, gfp);
	if (!page)
		return NULL;

	phys = page_to_phys(page);
	*handle = phys;

	/* Now, map the page into P3 with write-combining turned on */
	return __ioremap(phys, size, _PAGE_BUFFER);
}
EXPORT_SYMBOL(dma_alloc_writecombine);
void dma_free_writecombine(struct device *dev, size_t size,
			   void *cpu_addr, dma_addr_t handle)
{
	struct page *page;

	/* Undo the P3 mapping set up by dma_alloc_writecombine() */
	iounmap(cpu_addr);

	page = phys_to_page(handle);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_writecombine);