/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */
9 #include <linux/dma-noncoherent.h>
10 #include <linux/device.h>
11 #include <linux/kernel.h>
12 #include <linux/platform_device.h>
13 #include <linux/scatterlist.h>
14 #include <linux/slab.h>
15 #include <linux/vmalloc.h>
16 #include <linux/export.h>
18 #include <asm/pgalloc.h>
20 #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
21 void arch_dma_prep_coherent(struct page
*page
, size_t size
)
23 cache_push(page_to_phys(page
), size
);
26 pgprot_t
arch_dma_mmap_pgprot(struct device
*dev
, pgprot_t prot
,
29 if (CPU_IS_040_OR_060
) {
30 pgprot_val(prot
) &= ~_PAGE_CACHE040
;
31 pgprot_val(prot
) |= _PAGE_GLOBAL040
| _PAGE_NOCACHE_S
;
33 pgprot_val(prot
) |= _PAGE_NOCACHE030
;
39 #include <asm/cacheflush.h>
41 void *arch_dma_alloc(struct device
*dev
, size_t size
, dma_addr_t
*dma_handle
,
42 gfp_t gfp
, unsigned long attrs
)
46 if (dev
== NULL
|| (*dev
->dma_mask
< 0xffffffff))
48 ret
= (void *)__get_free_pages(gfp
, get_order(size
));
52 *dma_handle
= virt_to_phys(ret
);
57 void arch_dma_free(struct device
*dev
, size_t size
, void *vaddr
,
58 dma_addr_t dma_handle
, unsigned long attrs
)
60 free_pages((unsigned long)vaddr
, get_order(size
));
63 #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
65 void arch_sync_dma_for_device(struct device
*dev
, phys_addr_t handle
,
66 size_t size
, enum dma_data_direction dir
)
69 case DMA_BIDIRECTIONAL
:
71 cache_push(handle
, size
);
74 cache_clear(handle
, size
);
77 pr_err_ratelimited("dma_sync_single_for_device: unsupported dir %u\n",
83 void arch_setup_pdev_archdata(struct platform_device
*pdev
)
85 if (pdev
->dev
.coherent_dma_mask
== DMA_MASK_NONE
&&
86 pdev
->dev
.dma_mask
== NULL
) {
87 pdev
->dev
.coherent_dma_mask
= DMA_BIT_MASK(32);
88 pdev
->dev
.dma_mask
= &pdev
->dev
.coherent_dma_mask
;