arch/x86/kernel/amd_gart_64.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows the use of PCI devices that only support 32bit addresses on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-API-HOWTO.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <linux/dma-direct.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/set_memory.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static unsigned long iommu_bus_base;    /* GART remapping area (physical) */
static unsigned long iommu_size;        /* size of remapping area bytes */
static unsigned long iommu_pages;       /* .. and in pages */

static u32 *iommu_gatt_base;            /* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it set to true the GART
 * is flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

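/*
 * GART PTE layout, as derived from the macros below: bits 31:12 hold the
 * low 32 bits of the page's physical address, bits 11:4 hold physical
 * address bits 39:32, bit 1 marks the entry coherent and bit 0 valid.
 */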
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
        (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR      (1ULL << 40)

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
static bool need_flush;         /* global flush state. set for each gart wrap */

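/*
 * Allocate a run of 'size' GART pages from the bitmap. Allocation is
 * next-fit starting at next_bit; on wrap-around (or when iommu_fullflush
 * is set) a GART TLB flush is requested via need_flush and performed
 * later by flush_gart(). Returns the page offset, or -1 on failure.
 */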
static unsigned long alloc_iommu(struct device *dev, int size,
                                 unsigned long align_mask)
{
        unsigned long offset, flags;
        unsigned long boundary_size;
        unsigned long base_index;

        base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
                           PAGE_SIZE) >> PAGE_SHIFT;
        boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
                              PAGE_SIZE) >> PAGE_SHIFT;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
                                  size, base_index, boundary_size, align_mask);
        if (offset == -1) {
                need_flush = true;
                offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
                                          size, base_index, boundary_size,
                                          align_mask);
        }
        if (offset != -1) {
                next_bit = offset + size;
                if (next_bit >= iommu_pages) {
                        next_bit = 0;
                        need_flush = true;
                }
        }
        if (iommu_fullflush)
                need_flush = true;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

        return offset;
}

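/* Return a run of GART pages allocated by alloc_iommu() to the bitmap. */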
static void free_iommu(unsigned long offset, int size)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        bitmap_clear(iommu_gart_bitmap, offset, size);
        if (offset >= next_bit)
                next_bit = offset + size;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        if (need_flush) {
                amd_flush_garts();
                need_flush = false;
        }
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static void dump_leak(void)
{
        static int dump;

        if (dump)
                return;
        dump = 1;

        show_stack(NULL, NULL);
        debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this operation properly.
         * Return some non-mapped, prereserved space in the aperture and
         * let the Northbridge deal with it. This will result in garbage
         * in the IO operation. When the size exceeds the prereserved space,
         * memory corruption will occur or random memory will be DMAed
         * out. Hopefully no network devices use single mappings that big.
         */

        dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);
#ifdef CONFIG_IOMMU_LEAK
        dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
        return force_iommu || !dma_capable(dev, addr, size, true);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
        return !dma_capable(dev, addr, size, true);
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
                               size_t size, int dir, unsigned long align_mask)
{
        unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
        unsigned long iommu_page;
        int i;

        if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
                return DMA_MAPPING_ERROR;

        iommu_page = alloc_iommu(dev, npages, align_mask);
        if (iommu_page == -1) {
                if (!nonforced_iommu(dev, phys_mem, size))
                        return phys_mem;
                if (panic_on_overflow)
                        panic("dma_map_area overflow %lu bytes\n", size);
                iommu_full(dev, size, dir);
                return DMA_MAPPING_ERROR;
        }

        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
                phys_mem += PAGE_SIZE;
        }
        return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir,
                                unsigned long attrs)
{
        unsigned long bus;
        phys_addr_t paddr = page_to_phys(page) + offset;

        if (!need_iommu(dev, paddr, size))
                return paddr;

        bus = dma_map_area(dev, paddr, size, dir, 0);
        flush_gart();

        return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
                            size_t size, enum dma_data_direction dir,
                            unsigned long attrs)
{
        unsigned long iommu_page;
        int npages;
        int i;

        if (WARN_ON_ONCE(dma_addr == DMA_MAPPING_ERROR))
                return;

        /*
         * This driver will not always use a GART mapping, but might have
         * created a direct mapping instead. If that is the case there is
         * nothing to unmap here.
         */
        if (dma_addr < iommu_bus_base ||
            dma_addr >= iommu_bus_base + iommu_size)
                return;

        iommu_page = (dma_addr - iommu_bus_base) >> PAGE_SHIFT;
        npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
        }
        free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                          enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (!s->dma_length || !s->length)
                        break;
                gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
        }
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
                               int nents, int dir)
{
        struct scatterlist *s;
        int i;

#ifdef CONFIG_IOMMU_DEBUG
        pr_debug("dma_map_sg overflow\n");
#endif

        for_each_sg(sg, s, nents, i) {
                unsigned long addr = sg_phys(s);

                if (nonforced_iommu(dev, addr, s->length)) {
                        addr = dma_map_area(dev, addr, s->length, dir, 0);
                        if (addr == DMA_MAPPING_ERROR) {
                                if (i > 0)
                                        gart_unmap_sg(dev, sg, i, dir, 0);
                                nents = 0;
                                sg[0].dma_length = 0;
                                break;
                        }
                }
                s->dma_address = addr;
                s->dma_length = s->length;
        }
        flush_gart();

        return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
                          int nelems, struct scatterlist *sout,
                          unsigned long pages)
{
        unsigned long iommu_start = alloc_iommu(dev, pages, 0);
        unsigned long iommu_page = iommu_start;
        struct scatterlist *s;
        int i;

        if (iommu_start == -1)
                return -1;

        for_each_sg(start, s, nelems, i) {
                unsigned long pages, addr;
                unsigned long phys_addr = s->dma_address;

                BUG_ON(s != start && s->offset);
                if (s == start) {
                        sout->dma_address = iommu_bus_base;
                        sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
                        sout->dma_length = s->length;
                } else {
                        sout->dma_length += s->length;
                }

                addr = phys_addr;
                pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
                while (pages--) {
                        iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
                        addr += PAGE_SIZE;
                        iommu_page++;
                }
        }
        BUG_ON(iommu_page - iommu_start != pages);

        return 0;
}

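/*
 * Map a merged segment: when no remapping is needed the single entry keeps
 * its physical address, otherwise __dma_map_cont() programs the GART.
 */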
static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
             struct scatterlist *sout, unsigned long pages, int need)
{
        if (!need) {
                BUG_ON(nelems != 1);
                sout->dma_address = start->dma_address;
                sout->dma_length = start->length;
                return 0;
        }
        return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                       enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *s, *ps, *start_sg, *sgmap;
        int need = 0, nextneed, i, out, start;
        unsigned long pages = 0;
        unsigned int seg_size;
        unsigned int max_seg_size;

        if (nents == 0)
                return 0;

        out             = 0;
        start           = 0;
        start_sg        = sg;
        sgmap           = sg;
        seg_size        = 0;
        max_seg_size    = dma_get_max_seg_size(dev);
        ps              = NULL; /* shut up gcc */

        for_each_sg(sg, s, nents, i) {
                dma_addr_t addr = sg_phys(s);

                s->dma_address = addr;
                BUG_ON(s->length == 0);

                nextneed = need_iommu(dev, addr, s->length);

                /* Handle the previous not yet processed entries */
                if (i > start) {
                        /*
                         * Can only merge when the last chunk ends on a
                         * page boundary and the new one doesn't have an
                         * offset.
                         */
                        if (!iommu_merge || !nextneed || !need || s->offset ||
                            (s->length + seg_size > max_seg_size) ||
                            (ps->offset + ps->length) % PAGE_SIZE) {
                                if (dma_map_cont(dev, start_sg, i - start,
                                                 sgmap, pages, need) < 0)
                                        goto error;
                                out++;

                                seg_size        = 0;
                                sgmap           = sg_next(sgmap);
                                pages           = 0;
                                start           = i;
                                start_sg        = s;
                        }
                }

                seg_size += s->length;
                need = nextneed;
                pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
                ps = s;
        }
        if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
                goto error;
        out++;
        flush_gart();
        if (out < nents) {
                sgmap = sg_next(sgmap);
                sgmap->dma_length = 0;
        }
        return out;

error:
        flush_gart();
        gart_unmap_sg(dev, sg, out, dir, 0);

        /* When it was forced or merged try again in a dumb way */
        if (force_iommu || iommu_merge) {
                out = dma_map_sg_nonforce(dev, sg, nents, dir);
                if (out > 0)
                        return out;
        }
        if (panic_on_overflow)
                panic("dma_map_sg: overflow on %lu pages\n", pages);

        iommu_full(dev, pages << PAGE_SHIFT, dir);
        for_each_sg(sg, s, nents, i)
                s->dma_address = DMA_MAPPING_ERROR;
        return 0;
}

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
                    gfp_t flag, unsigned long attrs)
{
        void *vaddr;

        vaddr = dma_direct_alloc_pages(dev, size, dma_addr, flag, attrs);
        if (!vaddr ||
            !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
                return vaddr;

        *dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
                        DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
        flush_gart();
        if (unlikely(*dma_addr == DMA_MAPPING_ERROR))
                goto out_free;
        return vaddr;
out_free:
        dma_direct_free_pages(dev, size, vaddr, *dma_addr, attrs);
        return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
                   dma_addr_t dma_addr, unsigned long attrs)
{
        gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
        dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs);
}

static int no_agp;

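/*
 * Decide how much of the aperture to use as the IOMMU remapping area:
 * the whole aperture by default, or half of it when the AGP driver also
 * uses the aperture. Warn when less than 64MB is left for the IOMMU.
 */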
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
        unsigned long a;

        if (!iommu_size) {
                iommu_size = aper_size;
                if (!no_agp)
                        iommu_size /= 2;
        }

        a = aper + iommu_size;
        iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

        if (iommu_size < 64*1024*1024) {
                pr_warn("PCI-DMA: Warning: Small IOMMU %luMB."
                        " Consider increasing the AGP aperture in BIOS\n",
                        iommu_size >> 20);
        }

        return iommu_size;
}

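/*
 * Read the GART aperture base and size from a northbridge: the base
 * register holds physical address bits 39:25, the control register encodes
 * the size as 32MB << order. A base of 0 is returned when no usable
 * aperture is found.
 */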
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
        unsigned aper_size = 0, aper_base_32, aper_order;
        u64 aper_base;

        pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
        pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
        aper_order = (aper_order >> 1) & 7;

        aper_base = aper_base_32 & 0x7fff;
        aper_base <<= 25;

        aper_size = (32 * 1024 * 1024) << aper_order;
        if (aper_base + aper_size > 0x100000000UL || !aper_size)
                aper_base = 0;

        *size = aper_size;
        return aper_base;
}

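/* Point every northbridge at the GATT and turn on GART translation. */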
static void enable_gart_translations(void)
{
        int i;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        for (i = 0; i < amd_nb_num(); i++) {
                struct pci_dev *dev = node_to_amd_nb(i)->misc;

                enable_gart_translation(dev, __pa(agp_gatt_table));
        }

        /* Flush the GART-TLB to remove stale entries */
        amd_flush_garts();
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
        fix_up_north_bridges = true;
        aperture_order = aper_order;
        aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(void)
{
        int i;

        if (!fix_up_north_bridges)
                return;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        pr_info("PCI-DMA: Restoring GART aperture settings\n");

        for (i = 0; i < amd_nb_num(); i++) {
                struct pci_dev *dev = node_to_amd_nb(i)->misc;

                /*
                 * Don't enable translations just yet. That is the next
                 * step. Restore the pre-suspend aperture settings.
                 */
                gart_set_size_and_enable(dev, aperture_order);
                pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
        }
}

static void gart_resume(void)
{
        pr_info("PCI-DMA: Resuming GART IOMMU\n");

        gart_fixup_northbridges();

        enable_gart_translations();
}

static struct syscore_ops gart_syscore_ops = {
        .resume         = gart_resume,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
        unsigned aper_size, gatt_size, new_aper_size;
        unsigned aper_base, new_aper_base;
        struct pci_dev *dev;
        void *gatt;
        int i;

        pr_info("PCI-DMA: Disabling AGP.\n");

        aper_size = aper_base = info->aper_size = 0;
        dev = NULL;
        for (i = 0; i < amd_nb_num(); i++) {
                dev = node_to_amd_nb(i)->misc;
                new_aper_base = read_aperture(dev, &new_aper_size);
                if (!new_aper_base)
                        goto nommu;

                if (!aper_base) {
                        aper_size = new_aper_size;
                        aper_base = new_aper_base;
                }
                if (aper_size != new_aper_size || aper_base != new_aper_base)
                        goto nommu;
        }
        if (!aper_base)
                goto nommu;

        info->aper_base = aper_base;
        info->aper_size = aper_size >> 20;

        gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
        gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(gatt_size));
        if (!gatt)
                panic("Cannot allocate GATT table");
        if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
                panic("Could not set GART PTEs to uncacheable pages");

        agp_gatt_table = gatt;

        register_syscore_ops(&gart_syscore_ops);

        flush_gart();

        pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
                aper_base, aper_size >> 10);

        return 0;

nommu:
        /* Should not happen anymore */
        pr_warn("PCI-DMA: More than 4GB of RAM and no IOMMU - falling back to iommu=soft.\n");
        return -1;
}

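/*
 * dma_map_ops installed by gart_iommu_init(); mmap, get_sgtable,
 * dma_supported and get_required_mask are handled by the generic
 * dma-direct/dma-common helpers.
 */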
static const struct dma_map_ops gart_dma_ops = {
        .map_sg                 = gart_map_sg,
        .unmap_sg               = gart_unmap_sg,
        .map_page               = gart_map_page,
        .unmap_page             = gart_unmap_page,
        .alloc                  = gart_alloc_coherent,
        .free                   = gart_free_coherent,
        .mmap                   = dma_common_mmap,
        .get_sgtable            = dma_common_get_sgtable,
        .dma_supported          = dma_direct_supported,
        .get_required_mask      = dma_direct_get_required_mask,
};

static void gart_iommu_shutdown(void)
{
        struct pci_dev *dev;
        int i;

        /* don't shutdown it if there is AGP installed */
        if (!no_agp)
                return;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        for (i = 0; i < amd_nb_num(); i++) {
                u32 ctl;

                dev = node_to_amd_nb(i)->misc;
                pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

                ctl &= ~GARTEN;

                pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
        }
}

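/*
 * Main GART IOMMU setup: locate the aperture (through the AGP driver or
 * directly from the northbridges), reserve part of it as the remapping
 * area and install gart_dma_ops.
 */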
int __init gart_iommu_init(void)
{
        struct agp_kern_info info;
        unsigned long iommu_start;
        unsigned long aper_base, aper_size;
        unsigned long start_pfn, end_pfn;
        unsigned long scratch;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return 0;

#ifndef CONFIG_AGP_AMD64
        no_agp = 1;
#else
        /* Makefile puts PCI initialization via subsys_initcall first. */
        /* Add other AMD AGP bridge drivers here */
        no_agp = no_agp ||
                (agp_amd64_init() < 0) ||
                (agp_copy_info(agp_bridge, &info) < 0);
#endif

        if (no_iommu ||
            (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
            !gart_iommu_aperture ||
            (no_agp && init_amd_gatt(&info) < 0)) {
                if (max_pfn > MAX_DMA32_PFN) {
                        pr_warn("More than 4GB of memory but GART IOMMU not available.\n");
                        pr_warn("falling back to iommu=soft.\n");
                }
                return 0;
        }

        /* need to map that range */
        aper_size       = info.aper_size << 20;
        aper_base       = info.aper_base;
        end_pfn         = (aper_base >> PAGE_SHIFT) + (aper_size >> PAGE_SHIFT);

        start_pfn = PFN_DOWN(aper_base);
        if (!pfn_range_is_mapped(start_pfn, end_pfn))
                init_memory_mapping(start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);

        pr_info("PCI-DMA: using GART IOMMU.\n");
        iommu_size = check_iommu_size(info.aper_base, aper_size);
        iommu_pages = iommu_size >> PAGE_SHIFT;

        iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                     get_order(iommu_pages/8));
        if (!iommu_gart_bitmap)
                panic("Cannot allocate iommu bitmap\n");

        pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
                iommu_size >> 20);

        agp_memory_reserved     = iommu_size;
        iommu_start             = aper_size - iommu_size;
        iommu_bus_base          = info.aper_base + iommu_start;
        iommu_gatt_base         = agp_gatt_table + (iommu_start >> PAGE_SHIFT);

        /*
         * Unmap the IOMMU part of the GART. The alias of the page is
         * always mapped with cache enabled and there is no full cache
         * coherency across the GART remapping. The unmapping avoids
         * automatic prefetches from the CPU allocating cache lines in
         * there. All CPU accesses are done via the direct mapping to
         * the backing memory. The GART address is only used by PCI
         * devices.
         */
        set_memory_np((unsigned long)__va(iommu_bus_base),
                      iommu_size >> PAGE_SHIFT);
        /*
         * Tricky. The GART table remaps the physical memory range,
         * so the CPU won't notice potential aliases and if the memory
         * is remapped to UC later on, we might surprise the PCI devices
         * with a stray writeout of a cacheline. So play it safe and
         * do an explicit, full-scale wbinvd() _after_ having marked all
         * the pages as Not-Present:
         */
        wbinvd();

        /*
         * Now all caches are flushed and we can safely enable
         * GART hardware. Doing it early leaves the possibility
         * of stale cache entries that can lead to GART PTE
         * errors.
         */
        enable_gart_translations();

        /*
         * Try to work around a bug (thanks to BenH):
         * Set unmapped entries to a scratch page instead of 0.
         * Any prefetches that hit unmapped entries won't get a bus abort
         * then. (P2P bridge may be prefetching on DMA reads).
         */
        scratch = get_zeroed_page(GFP_KERNEL);
        if (!scratch)
                panic("Cannot allocate iommu scratch page");
        gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));

        flush_gart();
        dma_ops = &gart_dma_ops;
        x86_platform.iommu_shutdown = gart_iommu_shutdown;
        swiotlb = 0;

        return 0;
}

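/*
 * Parse GART-specific boot options: a leading number sets the IOMMU size,
 * plus [no]fullflush, noagp, noaperture, force, allowed and memaper[=order].
 */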
void __init gart_parse_options(char *p)
{
        int arg;

        if (isdigit(*p) && get_option(&p, &arg))
                iommu_size = arg;
        if (!strncmp(p, "fullflush", 9))
                iommu_fullflush = 1;
        if (!strncmp(p, "nofullflush", 11))
                iommu_fullflush = 0;
        if (!strncmp(p, "noagp", 5))
                no_agp = 1;
        if (!strncmp(p, "noaperture", 10))
                fix_aperture = 0;
        /* duplicated from pci-dma.c */
        if (!strncmp(p, "force", 5))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "allowed", 7))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "memaper", 7)) {
                fallback_aper_force = 1;
                p += 7;
                if (*p == '=') {
                        ++p;
                        if (get_option(&p, &arg))
                                fallback_aper_order = arg;
                }
        }
}

IOMMU_INIT_POST(gart_iommu_hole_init);