/*
 * Dynamic DMA mapping support.
 *
 * On i386 there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h>
struct dma_coherent_mem {
        void *virt_base;
        u32 device_base;
        int size;
        int flags;
        unsigned long *bitmap;
};
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (mem) {
                int page = bitmap_find_free_region(mem->bitmap, mem->size,
                                                   order);
                if (page >= 0) {
                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
                        ret = mem->virt_base + (page << PAGE_SHIFT);
                        memset(ret, 0, size);
                        return ret;
                }
                if (mem->flags & DMA_MEMORY_EXCLUSIVE)
                        return NULL;
        }

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;

        ret = (void *)__get_free_pages(gfp, order);

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = virt_to_phys(ret);
        }
        return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
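
/*
 * Example usage (an illustrative sketch, not part of this file; "my_pdev"
 * and "MY_BUF_SIZE" are hypothetical driver names): a driver obtains a
 * coherent buffer and receives both the CPU and the bus address:
 *
 *      dma_addr_t bus;
 *      void *cpu;
 *
 *      cpu = dma_alloc_coherent(&my_pdev->dev, MY_BUF_SIZE,
 *                               &bus, GFP_KERNEL);
 *      if (!cpu)
 *              return -ENOMEM;
 *
 * "cpu" is used by the driver; "bus" is programmed into the device.
 */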
void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);

        WARN_ON(irqs_disabled());       /* for portability */
        if (mem && vaddr >= mem->virt_base &&
            vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

                bitmap_release_region(mem->bitmap, page, order);
        } else
                free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);
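
/*
 * Example usage (illustrative, continuing the hypothetical names above):
 * the buffer is returned with the same size and both addresses, and, per
 * the WARN_ON above, not with interrupts disabled:
 *
 *      dma_free_coherent(&my_pdev->dev, MY_BUF_SIZE, cpu, bus);
 */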
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        void __iomem *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;
        if (!size)
                goto out;
        if (dev->dma_mem)
                goto out;

        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        mem_base = ioremap(bus_addr, size);
        if (!mem_base)
                goto out;

        dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
        dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;

        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;

        if (flags & DMA_MEMORY_MAP)
                return DMA_MEMORY_MAP;

        return DMA_MEMORY_IO;

 free1_out:
        kfree(dev->dma_mem);
        dev->dma_mem = NULL;
 out:
        if (mem_base)
                iounmap(mem_base);
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
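
/*
 * Example usage (a sketch; "MY_SRAM_BUS", "MY_SRAM_DEV" and "MY_SRAM_SIZE"
 * are hypothetical, device-specific constants): a driver with device-local
 * memory would declare it once at probe time:
 *
 *      if (dma_declare_coherent_memory(&pdev->dev, MY_SRAM_BUS,
 *                                      MY_SRAM_DEV, MY_SRAM_SIZE,
 *                                      DMA_MEMORY_MAP) != DMA_MEMORY_MAP)
 *              return -ENOMEM;
 *
 * Subsequent dma_alloc_coherent() calls on this device are then satisfied
 * from the declared region; dma_release_declared_memory() below undoes the
 * declaration at remove time.
 */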
void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dev->dma_mem = NULL;
        iounmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);
void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1)
                        >> PAGE_SHIFT;
        int pos, err;

        if (!mem)
                return ERR_PTR(-EINVAL);

        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
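
/*
 * Example usage (illustrative; "MY_FW_DEV_ADDR" and "MY_FW_LEN" are
 * hypothetical): a driver can reserve a fixed sub-range of the declared
 * memory, e.g. one already occupied by firmware, so the allocator skips it:
 *
 *      void *virt = dma_mark_declared_memory_occupied(&pdev->dev,
 *                                      MY_FW_DEV_ADDR, MY_FW_LEN);
 *      if (IS_ERR(virt))
 *              return PTR_ERR(virt);
 */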
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

int forbid_dac;
EXPORT_SYMBOL(forbid_dac);

static __devinit void via_no_dac(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
                printk(KERN_INFO "PCI: VIA PCI bridge detected. Disabling DAC.\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
static int check_iommu(char *s)
{
        if (!strcmp(s, "usedac")) {
                forbid_dac = -1;
                return 1;
        }
        return 0;
}
__setup("iommu=", check_iommu);
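
/*
 * Example: booting with "iommu=usedac" on the kernel command line sets
 * forbid_dac to -1, which keeps DAC (double address cycle) enabled even
 * when a VIA PCI bridge is detected by the fixup above.
 */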