arch/m68k/kernel/dma_mm.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#undef DEBUG

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/pgalloc.h>
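
/*
 * Allocate a coherent DMA buffer: grab a high-order block of pages, hand
 * the caller its physical address, then remap the pages into vmalloc
 * space with caching disabled so the CPU never holds stale cache lines
 * for the buffer.  Pages beyond the page-aligned size are handed back to
 * the allocator after split_page().
 */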
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *handle, gfp_t flag)
{
	struct page *page, **map;
	pgprot_t pgprot;
	void *addr;
	int i, order;

	pr_debug("dma_alloc_coherent: %d,%x\n", size, flag);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(flag, order);
	if (!page)
		return NULL;

	*handle = page_to_phys(page);
	map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
	if (!map) {
		__free_pages(page, order);
		return NULL;
	}
	split_page(page, order);

	order = 1 << order;
	size >>= PAGE_SHIFT;
	map[0] = page;
	for (i = 1; i < size; i++)
		map[i] = page + i;
	for (; i < order; i++)
		__free_page(page + i);
	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
	if (CPU_IS_040_OR_060)
		pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
	else
		pgprot_val(pgprot) |= _PAGE_NOCACHE030;
	addr = vmap(map, size, VM_MAP, pgprot);
	kfree(map);

	return addr;
}
EXPORT_SYMBOL(dma_alloc_coherent);
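
/*
 * Release a buffer obtained from dma_alloc_coherent() by tearing down
 * the non-cached mapping set up by vmap().
 */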
void dma_free_coherent(struct device *dev, size_t size,
		       void *addr, dma_addr_t handle)
{
	pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
	vfree(addr);
}
EXPORT_SYMBOL(dma_free_coherent);
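
/*
 * Make a streaming buffer visible to the device: push (write back) dirty
 * cache lines for DMA_TO_DEVICE transfers, and clear (invalidate) cached
 * lines for DMA_FROM_DEVICE transfers so the CPU re-reads memory after
 * the device has written it.
 */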
void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
				size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		cache_push(handle, size);
		break;
	case DMA_FROM_DEVICE:
		cache_clear(handle, size);
		break;
	default:
		if (printk_ratelimit())
			printk("dma_sync_single_for_device: unsupported dir %u\n", dir);
		break;
	}
}
EXPORT_SYMBOL(dma_sync_single_for_device);
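
/*
 * Scatterlist variant: apply the single-entry cache sync to every
 * element in turn.
 */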
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
			    enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; sg++, i++)
		dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
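
/*
 * Map a kernel virtual buffer for streaming DMA.  The bus address is
 * simply the buffer's physical address (virt_to_bus()), so mapping
 * reduces to the cache maintenance done by the sync helper.
 */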
dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
			  enum dma_data_direction dir)
{
	dma_addr_t handle = virt_to_bus(addr);

	dma_sync_single_for_device(dev, handle, size, dir);
	return handle;
}
EXPORT_SYMBOL(dma_map_single);
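
/*
 * Like dma_map_single(), but the buffer is given as a struct page plus
 * an offset within it.
 */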
dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	dma_sync_single_for_device(dev, handle, size, dir);
	return handle;
}
EXPORT_SYMBOL(dma_map_page);
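
/*
 * Map a scatterlist: fill in each entry's bus address from sg_phys()
 * and perform the cache maintenance for the transfer direction.
 */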
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	       enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; sg++, i++) {
		sg->dma_address = sg_phys(sg);
		dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
	}
	return nents;
}
EXPORT_SYMBOL(dma_map_sg);