#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>

#include <asm/calgary.h>

int forbid_dac __read_mostly;
EXPORT_SYMBOL(forbid_dac);
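
/*
 * Active DMA mapping backend; the IOMMU detection/initialisation code
 * later in this file (GART, Calgary, swiotlb, ...) installs the
 * implementation that is actually used.
 */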
const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;
int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;
/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);
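
/*
 * Sentinel handle returned by the mapping code when no DMA address
 * could be set up; dma_alloc_coherent() below compares against it to
 * detect mapping failures.
 */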
dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);
/* Dummy device used for NULL arguments (normally ISA). Better would
   be probably a smaller DMA mask, but this is bug-to-bug compatible
   to older i386. */
struct device fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &fallback_dev.coherent_dma_mask,
};
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
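
/*
 * On machines with memory above 4GB, reserve a chunk of memory early
 * at boot (size tunable via dma32_size=); pci_iommu_alloc() frees it
 * again just before the IOMMU/swiotlb setup runs, so that setup can
 * still find a contiguous range below 4GB.
 */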
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	dma32_bootmem_size = memparse(p, &p);
	return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;

	if (end_pfn <= MAX_DMA32_PFN)
		return;

	align = 64ULL<<20;
	size = round_up(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
				 __pa(MAX_DMA_ADDRESS));
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;
}
static void __init dma32_free_bootmem(void)
{
	int node;

	if (end_pfn <= MAX_DMA32_PFN)
		return;

	if (!dma32_bootmem_ptr)
		return;

	for_each_online_node(node)
		free_bootmem_node(NODE_DATA(node), __pa(dma32_bootmem_ptr),
				  dma32_bootmem_size);

	dma32_bootmem_ptr = NULL;
	dma32_bootmem_size = 0;
}
void __init pci_iommu_alloc(void)
{
	/* free the range so iommu could get some range less than 4G */
	dma32_free_bootmem();

	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
#ifdef CONFIG_GART_IOMMU
	gart_iommu_hole_init();
#endif

#ifdef CONFIG_CALGARY_IOMMU
	detect_calgary();
#endif

	detect_intel_iommu();

#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
}
/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
 * documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_bio_merge = 4096;
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = -1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

#ifdef CONFIG_GART_IOMMU
		gart_parse_options(p);
#endif

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
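
/*
 * Declare a device-private region of coherent memory.  The range is
 * ioremap()ed and tracked with a bitmap so that dma_alloc_coherent()
 * can hand out pages from it instead of going to the page allocator.
 */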
#ifdef CONFIG_X86_32
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
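
/*
 * Undo dma_declare_coherent_memory(): unmap the region and free the
 * bookkeeping.
 */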
void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pos, err;
	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);

	pages >>= PAGE_SHIFT;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
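
/*
 * Try to satisfy a coherent allocation from the device's declared
 * region.  A nonzero return means the device has such a region and the
 * caller must not fall back to the regular page allocator.
 */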
static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			*ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(*ret, 0, size);
		}
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			*ret = NULL;
	}
	return (mem != NULL);
}
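
/*
 * If vaddr came from the device's declared coherent region, release
 * its pages back to the region's bitmap and report that the caller
 * need not free them through the page allocator.
 */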
static int dma_release_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
		return 1;
	}
	return 0;
}
#else
#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
#define dma_release_coherent(dev, order, vaddr) (0)
#endif /* CONFIG_X86_32 */
int dma_supported(struct device *dev, u64 mask)
{
	if (mask > 0xffffffff && forbid_dac > 0) {
		printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
				 dev->bus_id);
		return 0;
	}

	if (dma_ops->dma_supported)
		return dma_ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_24BIT_MASK)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
		printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
				 dev->bus_id, mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);
/* Allocate DMA memory on node near device */
noinline struct page *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	int node;

	node = dev_to_node(dev);

	return alloc_pages_node(node, gfp, order);
}
/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	void *memory = NULL;
	struct page *page;
	unsigned long dma_mask = 0;
	dma_addr_t bus;
	int noretry = 0;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &fallback_dev;
		gfp |= GFP_DMA;
	}
	dma_mask = dev->coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	/* Device not DMA able */
	if (dev->dma_mask == NULL)
		return NULL;

	/* Don't invoke OOM killer or retry in lower 16MB DMA zone */
	if (gfp & __GFP_DMA)
		noretry = 1;

	/* Why <=? Even when the mask is smaller than 4GB it is often
	   larger than 16MB and in this case we have a chance of
	   finding fitting memory in the next higher zone first. If
	   not retry with true GFP_DMA. -AK */
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
		gfp |= GFP_DMA32;
		if (dma_mask < DMA_32BIT_MASK)
			noretry = 1;
	}

 again:
	page = dma_alloc_pages(dev,
		noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
	if (page == NULL)
		return NULL;

	{
		int high, mmu;
		bus = page_to_phys(page);
		memory = page_address(page);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		else if (high) {
			free_pages((unsigned long)memory,
				   get_order(size));

			/* Don't use the 16MB ZONE_DMA unless absolutely
			   needed. It's better to use remapping first. */
			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}

			/* Let low level make its own zone decisions */
			gfp &= ~(GFP_DMA32|GFP_DMA);

			if (dma_ops->alloc_coherent)
				return dma_ops->alloc_coherent(dev, size,
							       dma_handle, gfp);
			return NULL;
		}

		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = bus;
			return memory;
		}
	}

	if (dma_ops->alloc_coherent) {
		free_pages((unsigned long)memory, get_order(size));
		gfp &= ~(GFP_DMA|GFP_DMA32);
		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
	}

	if (dma_ops->map_simple) {
		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
						  size,
						  PCI_DMA_BIDIRECTIONAL);
		if (*dma_handle != bad_dma_address)
			return memory;
	}

	if (panic_on_overflow)
		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
		      (unsigned long)size);
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);
/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t bus)
{
	int order = get_order(size);

	WARN_ON(irqs_disabled());	/* for portability */
	if (dma_release_coherent(dev, order, vaddr))
		return;
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, bus, size, 0);
	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);
static int __init pci_iommu_init(void)
{
#ifdef CONFIG_CALGARY_IOMMU
	calgary_iommu_init();
#endif

#ifdef CONFIG_GART_IOMMU
	gart_iommu_init();
#endif

	return 0;
}

void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		printk(KERN_INFO "PCI: VIA PCI bridge detected."
				 "Disabling DAC.\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);