/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include <asm/pgalloc.h>

#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
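
/*
 * With an MMU (and not on ColdFire), a coherent buffer is built by
 * allocating ordinary pages and giving them a second, caching-disabled
 * kernel mapping with vmap().
 */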
static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		gfp_t flag, struct dma_attrs *attrs)
{
	struct page *page, **map;
	pgprot_t pgprot;
	void *addr;
	int i, order;

	pr_debug("dma_alloc_coherent: %d,%x\n", size, flag);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(flag, order);
	if (!page)
		return NULL;

	*handle = page_to_phys(page);
	map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
	if (!map) {
		__free_pages(page, order);
		return NULL;
	}
	split_page(page, order);

	order = 1 << order;
	size >>= PAGE_SHIFT;
	map[0] = page;
	for (i = 1; i < size; i++)
		map[i] = page + i;
	for (; i < order; i++)
		__free_page(page + i);
	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
	if (CPU_IS_040_OR_060)
		pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
	else
		pgprot_val(pgprot) |= _PAGE_NOCACHE030;
	addr = vmap(map, size, VM_MAP, pgprot);
	kfree(map);

	return addr;
}
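
/* Tear down the uncached vmap() alias set up by m68k_dma_alloc(). */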
static void m68k_dma_free(struct device *dev, size_t size, void *addr,
		dma_addr_t handle, struct dma_attrs *attrs)
{
	pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
	vfree(addr);
}

#else

#include <asm/cacheflush.h>
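
/*
 * On nommu or ColdFire, coherent buffers are ordinary pages handed out
 * directly; the DMA handle is the physical address from virt_to_phys().
 */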
static void *m68k_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *ret;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}

static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
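
/*
 * Streaming mappings rely on explicit cache maintenance: write back
 * (push) dirty lines before a device reads memory, and invalidate
 * (clear) lines before the CPU reads data a device has written.
 */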
static void m68k_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		cache_push(handle, size);
		break;
	case DMA_FROM_DEVICE:
		cache_clear(handle, size);
		break;
	default:
		if (printk_ratelimit())
			printk("dma_sync_single_for_device: unsupported dir %u\n", dir);
		break;
	}
}
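
/* Apply the single-buffer sync to each scatterlist entry in turn. */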
static void m68k_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nents, enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		dma_sync_single_for_device(dev, sg->dma_address, sg->length,
					   dir);
	}
}
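
/*
 * There is no IOMMU on m68k, so mapping a page for DMA is just a
 * physical-address translation plus a cache sync.
 */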
static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	dma_sync_single_for_device(dev, handle, size, dir);
	return handle;
}
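
/* As map_page, per scatterlist entry; entries are not merged. */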
static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		dma_sync_single_for_device(dev, sg->dma_address, sg->length,
					   dir);
	}
	return nents;
}
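
/*
 * No unmap or *_for_cpu hooks are provided; the generic DMA code
 * skips hooks that are left unset, and unmapping needs no work here.
 */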
struct dma_map_ops m68k_dma_ops = {
	.alloc			= m68k_dma_alloc,
	.free			= m68k_dma_free,
	.map_page		= m68k_dma_map_page,
	.map_sg			= m68k_dma_map_sg,
	.sync_single_for_device	= m68k_dma_sync_single_for_device,
	.sync_sg_for_device	= m68k_dma_sync_sg_for_device,
};
EXPORT_SYMBOL(m68k_dma_ops);