/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/dma-noncoherent.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include <asm/pgalloc.h>

#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
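/*
 * With an MMU (and outside of ColdFire), coherent memory is built by
 * allocating ordinary pages and remapping them into a non-cacheable
 * virtual mapping with vmap(), as arch_dma_alloc() below does.
 */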
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		gfp_t flag, unsigned long attrs)
{
	struct page *page, **map;
	pgprot_t pgprot;
	void *addr;
	int i, order;

	pr_debug("dma_alloc_coherent: %zu,%x\n", size, flag);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(flag, order);
	if (!page)
		return NULL;

	*handle = page_to_phys(page);
	map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
	if (!map) {
		__free_pages(page, order);
		return NULL;
	}
	/* Turn the high-order allocation into individually owned pages. */
	split_page(page, order);

	order = 1 << order;
	size >>= PAGE_SHIFT;
	map[0] = page;
	for (i = 1; i < size; i++)
		map[i] = page + i;
	/* Give back the pages we over-allocated beyond the aligned size. */
	for (; i < order; i++)
		__free_page(page + i);
	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
	if (CPU_IS_040_OR_060)
		pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
	else
		pgprot_val(pgprot) |= _PAGE_NOCACHE030;
	addr = vmap(map, size, VM_MAP, pgprot);
	kfree(map);

	return addr;
}
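/*
 * For reference, a minimal driver-side sketch (not part of this file;
 * "dev" and "buf" are hypothetical): callers reach arch_dma_alloc()
 * through the generic DMA API rather than calling it directly:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (buf)
 *		dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */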
void arch_dma_free(struct device *dev, size_t size, void *addr,
		dma_addr_t handle, unsigned long attrs)
{
	pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
	vfree(addr);
}
#else

#include <asm/cacheflush.h>
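/*
 * Without the MMU remapping trick (nommu and all ColdFire builds),
 * coherent allocations come straight from the page allocator and the
 * DMA address is simply the physical address of the buffer.
 */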
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	void *ret;

	/* Devices that cannot address all of RAM must allocate from ZONE_DMA. */
	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
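/*
 * Streaming sync: before the device touches a buffer, dirty cache lines
 * covering it are pushed back to memory (cache_push()), while buffers the
 * device will only write to merely have their cached copies discarded
 * (cache_clear()).
 */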
void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		cache_push(handle, size);
		break;
	case DMA_FROM_DEVICE:
		cache_clear(handle, size);
		break;
	default:
		pr_err_ratelimited("dma_sync_single_for_device: unsupported dir %u\n",
				   dir);
		break;
	}
}
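/*
 * Sketch of how this hook is reached (hypothetical "dev"/"buf"/"len"):
 *
 *	dma_addr_t d = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (!dma_mapping_error(dev, d)) {
 *		... start the transfer ...
 *		dma_unmap_single(dev, d, len, DMA_TO_DEVICE);
 *	}
 */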
void arch_setup_pdev_archdata(struct platform_device *pdev)
{
	/*
	 * Platform devices registered without any DMA mask get a default
	 * 32-bit coherent mask, with dma_mask pointed at it as well.
	 */
	if (pdev->dev.coherent_dma_mask == DMA_MASK_NONE &&
	    pdev->dev.dma_mask == NULL) {
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
	}
}