/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows the use of PCI devices that only support 32bit addresses
 * on systems with more than 4GB.
 *
 * See Documentation/DMA-API-HOWTO.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

static dma_addr_t bad_dma_addr;
/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;
/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;
#define GPTE_VALID	1
#define GPTE_COHERENT	2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
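
/*
 * Illustrative self-check (not in the original source): GPTE_ENCODE() keeps
 * bits 12..31 of the physical address in place and folds bits 32..39 into
 * PTE bits 4..11; GPTE_DECODE() reverses that, so the pair round-trips any
 * page-aligned address below 1TB. The helper below is hypothetical.
 */
static inline bool gpte_roundtrip_ok(u64 phys)
{
	u32 pte = GPTE_ENCODE(phys);

	/* Decoding must recover the page-aligned part of the address. */
	return GPTE_DECODE(pte) == (phys & ~0xfffULL);
}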
#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif
/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR	(1ULL << 40)
/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;
static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state. set for each gart wrap */
static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}
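
/*
 * Illustrative note (not in the original source): for a device with the
 * default 4GB segment boundary, dma_get_seg_boundary() returns 0xffffffff,
 * so boundary_size above evaluates to 0x100000000 >> PAGE_SHIFT = 1M pages;
 * iommu_area_alloc() then avoids handing out a range that would straddle
 * such a boundary in bus-address space.
 */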
static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		amd_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
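
/*
 * Illustrative note (not in the original source): with iommu_fullflush == 0,
 * need_flush is only raised when the allocator in alloc_iommu() wraps around
 * the bitmap and may hand out recently freed, still-TLB-cached entries, so
 * flush_gart() is a no-op on the fast path. With iommu_fullflush == 1 every
 * allocation sets need_flush and each mapping pays for a GART-TLB flush.
 */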
#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL);
	debug_dma_dump_mappings(NULL);
}
#endif
static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped, prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space,
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic(KERN_ERR
				"PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size);
}
/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page;
	int i;

	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
		return bad_dma_addr;

	iommu_page = alloc_iommu(dev, npages, align_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_addr;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}
/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	}
	free_iommu(iommu_page, npages);
}
/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
	}
}
/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	pr_debug("dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == bad_dma_addr) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, NULL);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}
/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}
static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return nelems;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}
/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out		= 0;
	start		= 0;
	start_sg	= sg;
	sgmap		= sg;
	seg_size	= 0;
	max_seg_size	= dma_get_max_seg_size(dev);
	ps		= NULL; /* shut up gcc */

	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset (see the worked example after this
			 * function).
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;

				seg_size	= 0;
				sgmap		= sg_next(sgmap);
				pages		= 0;
				start		= i;
				start_sg	= s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, NULL);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_addr;
	return 0;
}
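
/*
 * Illustrative worked example (not in the original source): assume 4K pages
 * and two scatterlist entries, A = offset 0, length 4096 and B = offset 0,
 * length 2048. A ends exactly on a page boundary and B has no offset, so
 * with iommu_merge enabled both land in one contiguous aperture range: A
 * occupies GART page N, B the start of page N+1, and the merged entry
 * reports dma_length = 6144. Had B carried a nonzero offset, or had A ended
 * mid-page, the (ps->offset + ps->length) % PAGE_SIZE test above would have
 * forced a separate mapping.
 */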
/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag, struct dma_attrs *attrs)
{
	dma_addr_t paddr;
	unsigned long align_mask;
	struct page *page;

	if (force_iommu && !(flag & GFP_DMA)) {
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
		if (!page)
			return NULL;

		align_mask = (1UL << get_order(size)) - 1;
		paddr = dma_map_area(dev, page_to_phys(page), size,
				     DMA_BIDIRECTIONAL, align_mask);

		flush_gart();
		if (paddr != bad_dma_addr) {
			*dma_addr = paddr;
			return page_address(page);
		}
		__free_pages(page, get_order(size));
	} else
		return dma_generic_alloc_coherent(dev, size, dma_addr, flag,
						  attrs);

	return NULL;
}
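
/*
 * Illustrative note (not in the original source): the align_mask above is
 * the natural alignment of the allocation in pages. E.g. for size = 16KB,
 * get_order(16384) = 2, so align_mask = 3 and the aperture range starts on
 * a 4-page boundary, mirroring the alignment alloc_pages() gave the backing
 * memory.
 */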
/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr, struct dma_attrs *attrs)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
	dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
}
static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == bad_dma_addr);
}
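
/*
 * Illustrative sketch (not part of the original driver): how a client driver
 * would reach the ops above through the generic DMA API once gart_dma_ops is
 * installed. The device and buffer here are hypothetical; dma_map_page()
 * resolves to gart_map_page() and dma_mapping_error() to gart_mapping_error().
 */
static int __maybe_unused example_map_buffer(struct pci_dev *pdev,
					     struct page *page, size_t len,
					     dma_addr_t *handle)
{
	dma_addr_t bus = dma_map_page(&pdev->dev, page, 0, len, DMA_TO_DEVICE);

	/* bad_dma_addr comes back when the aperture is exhausted */
	if (dma_mapping_error(&pdev->dev, bus))
		return -ENOMEM;

	*handle = bus;
	return 0;
}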
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		pr_warning(
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
				iommu_size >> 20);
	}

	return iommu_size;
}
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
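
/*
 * Illustrative arithmetic (not in the original source): the base register
 * holds physical address bits 39:25, hence the << 25 above, and the order
 * field selects a power-of-two size starting at 32MB. E.g. aper_order = 3
 * gives a (32MB << 3) = 256MB aperture, and a base field of 0x40 decodes
 * to 0x40 << 25 = 2GB physical.
 */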
static void enable_gart_translations(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}

	/* Flush the GART-TLB to remove stale entries */
	amd_flush_garts();
}
/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}
static void gart_fixup_northbridges(void)
{
	int i;

	if (!fix_up_north_bridges)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		/*
		 * Don't enable translations just yet. That is the next
		 * step. Restore the pre-suspend aperture settings.
		 */
		gart_set_size_and_enable(dev, aperture_order);
		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
	}
}
static void gart_resume(void)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges();

	enable_gart_translations();
}
static struct syscore_ops gart_syscore_ops = {
	.resume		= gart_resume,
};
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	pr_info("PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < amd_nb_num(); i++) {
		dev = node_to_amd_nb(i)->misc;
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	register_syscore_ops(&gart_syscore_ops);

	flush_gart();

	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
		aper_base, aper_size >> 10);

	return 0;

 nommu:
	/* Should not happen anymore */
	pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
		   "falling back to iommu=soft.\n");
	return -1;
}
static struct dma_map_ops gart_dma_ops = {
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.map_page			= gart_map_page,
	.unmap_page			= gart_unmap_page,
	.alloc				= gart_alloc_coherent,
	.free				= gart_free_coherent,
	.mapping_error			= gart_mapping_error,
};
static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	/* don't shut it down if AGP is installed */
	if (!no_agp)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		u32 ctl;

		dev = node_to_amd_nb(i)->misc;
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}
int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;
	long i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other AMD AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_amd_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
			pr_warning("falling back to iommu=soft.\n");
		}
		return 0;
	}

	/* need to map that range */
	aper_size	= info.aper_size << 20;
	aper_base	= info.aper_base;
	end_pfn		= (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

	start_pfn = PFN_DOWN(aper_base);
	if (!pfn_range_is_mapped(start_pfn, end_pfn))
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);

	pr_info("PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		int ret;

		ret = dma_debug_resize_entries(iommu_pages);
		if (ret)
			pr_debug("PCI-DMA: Cannot trace all the entries\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
		iommu_size >> 20);

	agp_memory_reserved	= iommu_size;
	iommu_start		= aper_size - iommu_size;
	iommu_bus_base		= info.aper_base + iommu_start;
	bad_dma_addr		= iommu_bus_base;
	iommu_gatt_base		= agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware. Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * errors.
	 */
	enable_gart_translations();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;

	return 0;
}
void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
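
/*
 * Illustrative usage (not in the original source): the string parsed above
 * is the GART-specific tail of the iommu= boot parameter, so for example
 * "iommu=fullflush" selects the flush-on-every-mapping mode and
 * "iommu=memaper=3" forces a fallback aperture of 32MB << 3 = 256MB.
 */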
IOMMU_INIT_POST(gart_iommu_hole_init);