/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007 Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
13 #include <linux/init.h>
14 #include <linux/platform_device.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/dma-debug.h>
18 #include <linux/module.h>
19 #include <linux/gfp.h>
20 #include <asm/cacheflush.h>
21 #include <asm/addrspace.h>
23 #define PREALLOC_DMA_DEBUG_ENTRIES 4096
/*
 * Per-platform DMA mapping operations vector; assigned during arch
 * setup and consulted by the generic DMA mapping layer. Exported so
 * modular drivers can reach it.
 */
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
28 static int __init
dma_init(void)
30 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES
);
33 fs_initcall(dma_init
);
35 void *dma_generic_alloc_coherent(struct device
*dev
, size_t size
,
36 dma_addr_t
*dma_handle
, gfp_t gfp
,
39 void *ret
, *ret_nocache
;
40 int order
= get_order(size
);
44 ret
= (void *)__get_free_pages(gfp
, order
);
49 * Pages from the page allocator may have data present in
50 * cache. So flush the cache before using uncached memory.
52 dma_cache_sync(dev
, ret
, size
, DMA_BIDIRECTIONAL
);
54 ret_nocache
= (void __force
*)ioremap_nocache(virt_to_phys(ret
), size
);
56 free_pages((unsigned long)ret
, order
);
60 split_page(pfn_to_page(virt_to_phys(ret
) >> PAGE_SHIFT
), order
);
62 *dma_handle
= virt_to_phys(ret
);
67 void dma_generic_free_coherent(struct device
*dev
, size_t size
,
68 void *vaddr
, dma_addr_t dma_handle
,
71 int order
= get_order(size
);
72 unsigned long pfn
= dma_handle
>> PAGE_SHIFT
;
75 for (k
= 0; k
< (1 << order
); k
++)
76 __free_pages(pfn_to_page(pfn
+ k
), 0);
81 void dma_cache_sync(struct device
*dev
, void *vaddr
, size_t size
,
82 enum dma_data_direction direction
)
86 addr
= __in_29bit_mode() ?
87 (void *)CAC_ADDR((unsigned long)vaddr
) : vaddr
;
90 case DMA_FROM_DEVICE
: /* invalidate only */
91 __flush_invalidate_region(addr
, size
);
93 case DMA_TO_DEVICE
: /* writeback only */
94 __flush_wback_region(addr
, size
);
96 case DMA_BIDIRECTIONAL
: /* writeback and invalidate */
97 __flush_purge_region(addr
, size
);
103 EXPORT_SYMBOL(dma_cache_sync
);
105 static int __init
memchunk_setup(char *str
)
107 return 1; /* accept anything that begins with "memchunk." */
109 __setup("memchunk.", memchunk_setup
);
111 static void __init
memchunk_cmdline_override(char *name
, unsigned long *sizep
)
113 char *p
= boot_command_line
;
114 int k
= strlen(name
);
116 while ((p
= strstr(p
, "memchunk."))) {
117 p
+= 9; /* strlen("memchunk.") */
118 if (!strncmp(name
, p
, k
) && p
[k
] == '=') {
120 *sizep
= memparse(p
, NULL
);
121 pr_info("%s: forcing memory chunk size to 0x%08lx\n",
128 int __init
platform_resource_setup_memory(struct platform_device
*pdev
,
129 char *name
, unsigned long memsize
)
132 dma_addr_t dma_handle
;
135 r
= pdev
->resource
+ pdev
->num_resources
- 1;
137 pr_warning("%s: unable to find empty space for resource\n",
142 memchunk_cmdline_override(name
, &memsize
);
146 buf
= dma_alloc_coherent(NULL
, memsize
, &dma_handle
, GFP_KERNEL
);
148 pr_warning("%s: unable to allocate memory\n", name
);
152 memset(buf
, 0, memsize
);
154 r
->flags
= IORESOURCE_MEM
;
155 r
->start
= dma_handle
;
156 r
->end
= r
->start
+ memsize
- 1;