/*
 * Dynamic DMA mapping support.
 *
 * On cris there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h>
18 struct dma_coherent_mem
{
23 unsigned long *bitmap
;
26 void *dma_alloc_coherent(struct device
*dev
, size_t size
,
27 dma_addr_t
*dma_handle
, gfp_t gfp
)
30 struct dma_coherent_mem
*mem
= dev
? dev
->dma_mem
: NULL
;
31 int order
= get_order(size
);
32 /* ignore region specifiers */
33 gfp
&= ~(__GFP_DMA
| __GFP_HIGHMEM
);
36 int page
= bitmap_find_free_region(mem
->bitmap
, mem
->size
,
39 *dma_handle
= mem
->device_base
+ (page
<< PAGE_SHIFT
);
40 ret
= mem
->virt_base
+ (page
<< PAGE_SHIFT
);
44 if (mem
->flags
& DMA_MEMORY_EXCLUSIVE
)
48 if (dev
== NULL
|| (dev
->coherent_dma_mask
< 0xffffffff))
51 ret
= (void *)__get_free_pages(gfp
, order
);
55 *dma_handle
= virt_to_phys(ret
);
60 void dma_free_coherent(struct device
*dev
, size_t size
,
61 void *vaddr
, dma_addr_t dma_handle
)
63 struct dma_coherent_mem
*mem
= dev
? dev
->dma_mem
: NULL
;
64 int order
= get_order(size
);
66 if (mem
&& vaddr
>= mem
->virt_base
&& vaddr
< (mem
->virt_base
+ (mem
->size
<< PAGE_SHIFT
))) {
67 int page
= (vaddr
- mem
->virt_base
) >> PAGE_SHIFT
;
69 bitmap_release_region(mem
->bitmap
, page
, order
);
71 free_pages((unsigned long)vaddr
, order
);
74 int dma_declare_coherent_memory(struct device
*dev
, dma_addr_t bus_addr
,
75 dma_addr_t device_addr
, size_t size
, int flags
)
77 void __iomem
*mem_base
;
78 int pages
= size
>> PAGE_SHIFT
;
79 int bitmap_size
= (pages
+ 31)/32;
81 if ((flags
& (DMA_MEMORY_MAP
| DMA_MEMORY_IO
)) == 0)
88 /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
90 mem_base
= ioremap(bus_addr
, size
);
94 dev
->dma_mem
= kmalloc(sizeof(struct dma_coherent_mem
), GFP_KERNEL
);
97 memset(dev
->dma_mem
, 0, sizeof(struct dma_coherent_mem
));
98 dev
->dma_mem
->bitmap
= kmalloc(bitmap_size
, GFP_KERNEL
);
99 if (!dev
->dma_mem
->bitmap
)
101 memset(dev
->dma_mem
->bitmap
, 0, bitmap_size
);
103 dev
->dma_mem
->virt_base
= mem_base
;
104 dev
->dma_mem
->device_base
= device_addr
;
105 dev
->dma_mem
->size
= pages
;
106 dev
->dma_mem
->flags
= flags
;
108 if (flags
& DMA_MEMORY_MAP
)
109 return DMA_MEMORY_MAP
;
111 return DMA_MEMORY_IO
;
114 kfree(dev
->dma_mem
->bitmap
);
118 EXPORT_SYMBOL(dma_declare_coherent_memory
);
120 void dma_release_declared_memory(struct device
*dev
)
122 struct dma_coherent_mem
*mem
= dev
->dma_mem
;
127 iounmap(mem
->virt_base
);
131 EXPORT_SYMBOL(dma_release_declared_memory
);
133 void *dma_mark_declared_memory_occupied(struct device
*dev
,
134 dma_addr_t device_addr
, size_t size
)
136 struct dma_coherent_mem
*mem
= dev
->dma_mem
;
137 int pages
= (size
+ (device_addr
& ~PAGE_MASK
) + PAGE_SIZE
- 1) >> PAGE_SHIFT
;
141 return ERR_PTR(-EINVAL
);
143 pos
= (device_addr
- mem
->device_base
) >> PAGE_SHIFT
;
144 err
= bitmap_allocate_region(mem
->bitmap
, pos
, get_order(pages
));
147 return mem
->virt_base
+ (pos
<< PAGE_SHIFT
);
149 EXPORT_SYMBOL(dma_mark_declared_memory_occupied
);