/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include <asm/pgalloc.h>
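
/*
 * Allocate "size" bytes for DMA and return the bus address of the
 * buffer in *handle.  The backing pages are remapped through vmap()
 * with caching disabled so the CPU and the device see coherent data.
 */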
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *handle, gfp_t flag)
{
	struct page *page, **map;
	pgprot_t pgprot;
	void *addr;
	int i, order;

	pr_debug("dma_alloc_coherent: %d,%x\n", size, flag);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(flag, order);
	if (!page)
		return NULL;

	*handle = page_to_phys(page);
	map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
	if (!map) {
		__free_pages(page, order);
		return NULL;
	}
	split_page(page, order);

	order = 1 << order;
	size >>= PAGE_SHIFT;
	map[0] = page;
	for (i = 1; i < size; i++)
		map[i] = page + i;
	/* give back the pages beyond the rounded-up request */
	for (; i < order; i++)
		__free_page(page + i);
	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
	if (CPU_IS_040_OR_060)
		pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
	else
		pgprot_val(pgprot) |= _PAGE_NOCACHE030;
	addr = vmap(map, size, VM_MAP, pgprot);
	kfree(map);

	return addr;
}
EXPORT_SYMBOL(dma_alloc_coherent);
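
/* Release a coherent buffer previously obtained from dma_alloc_coherent(). */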
void dma_free_coherent(struct device *dev, size_t size,
		       void *addr, dma_addr_t handle)
{
	pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
	vfree(addr);
}
EXPORT_SYMBOL(dma_free_coherent);
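
/*
 * Make a streaming buffer visible to the device: data headed to the
 * device is pushed out of the cache, and cache lines covering a buffer
 * the device will write are cleared so stale data is not read back.
 */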
void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
				size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		cache_push(handle, size);
		break;
	case DMA_FROM_DEVICE:
		cache_clear(handle, size);
		break;
	default:
		if (printk_ratelimit())
			printk("dma_sync_single_for_device: unsupported dir %u\n", dir);
		break;
	}
}
EXPORT_SYMBOL(dma_sync_single_for_device);
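
/* Sync each entry of a scatterlist for the device. */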
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; sg++, i++)
		dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
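
/*
 * Map a kernel virtual address for streaming DMA: the bus address is
 * derived with virt_to_bus() and the cache is synced for the transfer.
 */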
dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
			  enum dma_data_direction dir)
{
	dma_addr_t handle = virt_to_bus(addr);

	dma_sync_single_for_device(dev, handle, size, dir);
	return handle;
}
EXPORT_SYMBOL(dma_map_single);
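
/* Map part of a page for streaming DMA, starting at "offset" within it. */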
dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	dma_sync_single_for_device(dev, handle, size, dir);
	return handle;
}
EXPORT_SYMBOL(dma_map_page);
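
/*
 * Map a scatterlist for streaming DMA: fill in the bus address of each
 * entry and sync its cache lines for the transfer direction.
 */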
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	       enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; sg++, i++) {
		sg->dma_address = sg_phys(sg);
		dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
	}

	return nents;
}
EXPORT_SYMBOL(dma_map_sg);