/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007 Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
23 #define PREALLOC_DMA_DEBUG_ENTRIES 4096
/*
 * Architecture-wide DMA mapping operations; assigned by platform setup
 * code and consumed by the generic dma-mapping layer.
 */
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
28 static int __init
dma_init(void)
30 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES
);
33 fs_initcall(dma_init
);
35 void *dma_generic_alloc_coherent(struct device
*dev
, size_t size
,
36 dma_addr_t
*dma_handle
, gfp_t gfp
)
38 void *ret
, *ret_nocache
;
39 int order
= get_order(size
);
43 ret
= (void *)__get_free_pages(gfp
, order
);
48 * Pages from the page allocator may have data present in
49 * cache. So flush the cache before using uncached memory.
51 dma_cache_sync(dev
, ret
, size
, DMA_BIDIRECTIONAL
);
53 ret_nocache
= (void __force
*)ioremap_nocache(virt_to_phys(ret
), size
);
55 free_pages((unsigned long)ret
, order
);
59 split_page(pfn_to_page(virt_to_phys(ret
) >> PAGE_SHIFT
), order
);
61 *dma_handle
= virt_to_phys(ret
);
66 void dma_generic_free_coherent(struct device
*dev
, size_t size
,
67 void *vaddr
, dma_addr_t dma_handle
)
69 int order
= get_order(size
);
70 unsigned long pfn
= dma_handle
>> PAGE_SHIFT
;
73 for (k
= 0; k
< (1 << order
); k
++)
74 __free_pages(pfn_to_page(pfn
+ k
), 0);
79 void dma_cache_sync(struct device
*dev
, void *vaddr
, size_t size
,
80 enum dma_data_direction direction
)
84 addr
= __in_29bit_mode() ?
85 (void *)P1SEGADDR((unsigned long)vaddr
) : vaddr
;
88 case DMA_FROM_DEVICE
: /* invalidate only */
89 __flush_invalidate_region(addr
, size
);
91 case DMA_TO_DEVICE
: /* writeback only */
92 __flush_wback_region(addr
, size
);
94 case DMA_BIDIRECTIONAL
: /* writeback and invalidate */
95 __flush_purge_region(addr
, size
);
101 EXPORT_SYMBOL(dma_cache_sync
);
103 static int __init
memchunk_setup(char *str
)
105 return 1; /* accept anything that begins with "memchunk." */
107 __setup("memchunk.", memchunk_setup
);
109 static void __init
memchunk_cmdline_override(char *name
, unsigned long *sizep
)
111 char *p
= boot_command_line
;
112 int k
= strlen(name
);
114 while ((p
= strstr(p
, "memchunk."))) {
115 p
+= 9; /* strlen("memchunk.") */
116 if (!strncmp(name
, p
, k
) && p
[k
] == '=') {
118 *sizep
= memparse(p
, NULL
);
119 pr_info("%s: forcing memory chunk size to 0x%08lx\n",
126 int __init
platform_resource_setup_memory(struct platform_device
*pdev
,
127 char *name
, unsigned long memsize
)
130 dma_addr_t dma_handle
;
133 r
= pdev
->resource
+ pdev
->num_resources
- 1;
135 pr_warning("%s: unable to find empty space for resource\n",
140 memchunk_cmdline_override(name
, &memsize
);
144 buf
= dma_alloc_coherent(NULL
, memsize
, &dma_handle
, GFP_KERNEL
);
146 pr_warning("%s: unable to allocate memory\n", name
);
150 memset(buf
, 0, memsize
);
152 r
->flags
= IORESOURCE_MEM
;
153 r
->start
= dma_handle
;
154 r
->end
= r
->start
+ memsize
- 1;