/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007  Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
#include <asm/io.h>
/* Per-device pool set up by dma_declare_coherent_memory() */
struct dma_coherent_mem {
	void		*virt_base;
	u32		device_base;
	int		size;		/* region size, in pages */
	int		flags;
	unsigned long	*bitmap;	/* one bit per page */
};
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret, *ret_nocache;
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	/* Prefer the device's declared coherent pool, if it has one */
	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(ret, 0, size);
			return ret;
		}
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			return NULL;
	}

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	memset(ret, 0, size);
	/*
	 * Pages from the page allocator may have data present in
	 * cache. So flush the cache before using uncached memory.
	 */
	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);

	ret_nocache = ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	*dma_handle = virt_to_phys(ret);
	return ret_nocache;
}
EXPORT_SYMBOL(dma_alloc_coherent);
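
/*
 * Illustrative sketch (not part of this file): how a driver might pair
 * dma_alloc_coherent() with dma_free_coherent() for a small device-visible
 * buffer. The function name and buffer size are made-up for the example.
 */
#if 0
static int example_probe(struct device *dev)
{
	dma_addr_t bus;
	/* One uncached, physically contiguous page the device can DMA into */
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... program `bus` into the device's DMA address register ... */
	dma_free_coherent(dev, PAGE_SIZE, buf, bus);
	return 0;
}
#endif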
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem && vaddr >= mem->virt_base &&
	    vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		/* Came from the declared pool; just clear the bitmap bits */
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
	} else {
		WARN_ON(irqs_disabled());	/* for portability */
		BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
		free_pages((unsigned long)phys_to_virt(dma_handle), order);
		iounmap(vaddr);
	}
}
EXPORT_SYMBOL(dma_free_coherent);
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap_nocache(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
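
/*
 * Illustrative sketch (not part of this file): a platform setup path might
 * dedicate an on-chip SRAM window to one device so its coherent allocations
 * come from there. The addresses and size below are made-up placeholders.
 */
#if 0
static int example_setup(struct device *dev)
{
	/* 64KB of device-local memory at bus address 0xa0000000 */
	int rc = dma_declare_coherent_memory(dev, 0xa0000000, 0xa0000000,
					     0x10000, DMA_MEMORY_MAP);

	/* On success this returns the requested DMA_MEMORY_MAP, 0 on failure */
	return (rc == DMA_MEMORY_MAP) ? 0 : -ENODEV;
}
#endif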
void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int pos, err;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
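
/*
 * Illustrative sketch (not part of this file): pinning one page of a
 * declared region (say, one a boot loader already initialized) so that
 * dma_alloc_coherent() never hands it out. The address is a placeholder.
 */
#if 0
static void *example_reserve(struct device *dev)
{
	/* Reserve the page at device address 0xa0001000 */
	return dma_mark_declared_memory_occupied(dev, 0xa0001000, PAGE_SIZE);
}
#endif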
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
#ifdef CONFIG_CPU_SH5
	void *p1addr = vaddr;
#else
	/* Operate on the P1 identity-mapped alias of the buffer */
	void *p1addr = (void *)P1SEGADDR((unsigned long)vaddr);
#endif

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(p1addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(p1addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(p1addr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);
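
/*
 * Illustrative sketch (not part of this file): syncing a cached buffer
 * before a device reads it. The function name and parameters are made-up;
 * the point is that CPU-written data must be written back before DMA starts.
 */
#if 0
static void example_tx(struct device *dev, void *buf, size_t len)
{
	/* CPU filled `buf`; push dirty cache lines out before the device reads */
	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
	/* ... kick off the DMA transfer ... */
}
#endif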