// SPDX-License-Identifier: GPL-2.0
/*
 * Firmware replacement code.
 *
 * Work around broken BIOSes that don't set an aperture, only set the
 * aperture in the AGP bridge, or set too small aperture.
 *
 * If all fails map the aperture over some low memory. This is cheaper than
 * doing bounce buffering. The memory is lost. This is done at early boot
 * because only the bootmem allocator can allocate 32+MB.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */
14 #define pr_fmt(fmt) "AGP: " fmt
16 #include <linux/kernel.h>
17 #include <linux/kcore.h>
18 #include <linux/types.h>
19 #include <linux/init.h>
20 #include <linux/memblock.h>
21 #include <linux/mmzone.h>
22 #include <linux/pci_ids.h>
23 #include <linux/pci.h>
24 #include <linux/bitops.h>
25 #include <linux/suspend.h>
26 #include <asm/e820/api.h>
28 #include <asm/iommu.h>
30 #include <asm/pci-direct.h>
32 #include <asm/amd_nb.h>
33 #include <asm/x86_init.h>
34 #include <linux/crash_dump.h>
37 * Using 512M as goal, in case kexec will load kernel_big
38 * that will do the on-position decompress, and could overlap with
39 * the gart aperture that is used.
42 * ==> kexec (with kdump trigger path or gart still enabled)
43 * ==> kernel_small (gart area become e820_reserved)
44 * ==> kexec (with kdump trigger path or gart still enabled)
45 * ==> kerne_big (uncompressed size will be big than 64M or 128M)
46 * So don't use 512M below as gart iommu, leave the space for kernel
49 #define GART_MIN_ADDR (512ULL << 20)
50 #define GART_MAX_ADDR (1ULL << 32)
52 int gart_iommu_aperture
;
53 int gart_iommu_aperture_disabled __initdata
;
54 int gart_iommu_aperture_allowed __initdata
;
56 int fallback_aper_order __initdata
= 1; /* 64MB */
57 int fallback_aper_force __initdata
;
59 int fix_aperture __initdata
= 1;
61 #if defined(CONFIG_PROC_VMCORE) || defined(CONFIG_PROC_KCORE)
63 * If the first kernel maps the aperture over e820 RAM, the kdump kernel will
64 * use the same range because it will remain configured in the northbridge.
65 * Trying to dump this area via /proc/vmcore may crash the machine, so exclude
68 static unsigned long aperture_pfn_start
, aperture_page_count
;
70 static int gart_mem_pfn_is_ram(unsigned long pfn
)
72 return likely((pfn
< aperture_pfn_start
) ||
73 (pfn
>= aperture_pfn_start
+ aperture_page_count
));
76 #ifdef CONFIG_PROC_VMCORE
77 static bool gart_oldmem_pfn_is_ram(struct vmcore_cb
*cb
, unsigned long pfn
)
79 return !!gart_mem_pfn_is_ram(pfn
);
82 static struct vmcore_cb gart_vmcore_cb
= {
83 .pfn_is_ram
= gart_oldmem_pfn_is_ram
,
87 static void __init
exclude_from_core(u64 aper_base
, u32 aper_order
)
89 aperture_pfn_start
= aper_base
>> PAGE_SHIFT
;
90 aperture_page_count
= (32 * 1024 * 1024) << aper_order
>> PAGE_SHIFT
;
91 #ifdef CONFIG_PROC_VMCORE
92 register_vmcore_cb(&gart_vmcore_cb
);
94 #ifdef CONFIG_PROC_KCORE
95 WARN_ON(register_mem_pfn_is_ram(&gart_mem_pfn_is_ram
));
99 static void exclude_from_core(u64 aper_base
, u32 aper_order
)
104 /* This code runs before the PCI subsystem is initialized, so just
105 access the northbridge directly. */
107 static u32 __init
allocate_aperture(void)
112 /* aper_size should <= 1G */
113 if (fallback_aper_order
> 5)
114 fallback_aper_order
= 5;
115 aper_size
= (32 * 1024 * 1024) << fallback_aper_order
;
118 * Aperture has to be naturally aligned. This means a 2GB aperture
119 * won't have much chance of finding a place in the lower 4GB of
120 * memory. Unfortunately we cannot move it up because that would
121 * make the IOMMU useless.
123 addr
= memblock_phys_alloc_range(aper_size
, aper_size
,
124 GART_MIN_ADDR
, GART_MAX_ADDR
);
126 pr_err("Cannot allocate aperture memory hole [mem %#010lx-%#010lx] (%uKB)\n",
127 addr
, addr
+ aper_size
- 1, aper_size
>> 10);
130 pr_info("Mapping aperture over RAM [mem %#010lx-%#010lx] (%uKB)\n",
131 addr
, addr
+ aper_size
- 1, aper_size
>> 10);
132 register_nosave_region(addr
>> PAGE_SHIFT
,
133 (addr
+aper_size
) >> PAGE_SHIFT
);
139 /* Find a PCI capability */
140 static u32 __init
find_cap(int bus
, int slot
, int func
, int cap
)
145 if (!(read_pci_config_16(bus
, slot
, func
, PCI_STATUS
) &
146 PCI_STATUS_CAP_LIST
))
149 pos
= read_pci_config_byte(bus
, slot
, func
, PCI_CAPABILITY_LIST
);
150 for (bytes
= 0; bytes
< 48 && pos
>= 0x40; bytes
++) {
154 id
= read_pci_config_byte(bus
, slot
, func
, pos
+PCI_CAP_LIST_ID
);
159 pos
= read_pci_config_byte(bus
, slot
, func
,
160 pos
+PCI_CAP_LIST_NEXT
);
165 /* Read a standard AGPv3 bridge header */
166 static u32 __init
read_agp(int bus
, int slot
, int func
, int cap
, u32
*order
)
171 u32 aper_low
, aper_hi
;
175 pr_info("pci 0000:%02x:%02x:%02x: AGP bridge\n", bus
, slot
, func
);
176 apsizereg
= read_pci_config_16(bus
, slot
, func
, cap
+ 0x14);
177 if (apsizereg
== 0xffffffff) {
178 pr_err("pci 0000:%02x:%02x.%d: APSIZE unreadable\n",
183 /* old_order could be the value from NB gart setting */
186 apsize
= apsizereg
& 0xfff;
187 /* Some BIOS use weird encodings not in the AGPv3 table. */
190 nbits
= hweight16(apsize
);
192 if ((int)*order
< 0) /* < 32MB */
195 aper_low
= read_pci_config(bus
, slot
, func
, 0x10);
196 aper_hi
= read_pci_config(bus
, slot
, func
, 0x14);
197 aper
= (aper_low
& ~((1<<22)-1)) | ((u64
)aper_hi
<< 32);
200 * On some sick chips, APSIZE is 0. It means it wants 4G
201 * so let double check that order, and lets trust AMD NB settings:
203 pr_info("pci 0000:%02x:%02x.%d: AGP aperture [bus addr %#010Lx-%#010Lx] (old size %uMB)\n",
204 bus
, slot
, func
, aper
, aper
+ (32ULL << (old_order
+ 20)) - 1,
206 if (aper
+ (32ULL<<(20 + *order
)) > 0x100000000ULL
) {
207 pr_info("pci 0000:%02x:%02x.%d: AGP aperture size %uMB (APSIZE %#x) is not right, using settings from NB\n",
208 bus
, slot
, func
, 32 << *order
, apsizereg
);
212 pr_info("pci 0000:%02x:%02x.%d: AGP aperture [bus addr %#010Lx-%#010Lx] (%uMB, APSIZE %#x)\n",
213 bus
, slot
, func
, aper
, aper
+ (32ULL << (*order
+ 20)) - 1,
214 32 << *order
, apsizereg
);
216 if (!aperture_valid(aper
, (32*1024*1024) << *order
, 32<<20))
222 * Look for an AGP bridge. Windows only expects the aperture in the
223 * AGP bridge and some BIOS forget to initialize the Northbridge too.
224 * Work around this here.
226 * Do an PCI bus scan by hand because we're running before the PCI
229 * All AMD AGP bridges are AGPv3 compliant, so we can do this scan
230 * generically. It's probably overkill to always scan all slots because
231 * the AGP bridges should be always an own bus on the HT hierarchy,
232 * but do it here for future safety.
234 static u32 __init
search_agp_bridge(u32
*order
, int *valid_agp
)
238 /* Poor man's PCI discovery */
239 for (bus
= 0; bus
< 256; bus
++) {
240 for (slot
= 0; slot
< 32; slot
++) {
241 for (func
= 0; func
< 8; func
++) {
244 class = read_pci_config(bus
, slot
, func
,
246 if (class == 0xffffffff)
249 switch (class >> 16) {
250 case PCI_CLASS_BRIDGE_HOST
:
251 case PCI_CLASS_BRIDGE_OTHER
: /* needed? */
253 cap
= find_cap(bus
, slot
, func
,
258 return read_agp(bus
, slot
, func
, cap
,
262 type
= read_pci_config_byte(bus
, slot
, func
,
264 if (!(type
& PCI_HEADER_TYPE_MFD
))
269 pr_info("No AGP bridge found\n");
274 static bool gart_fix_e820 __initdata
= true;
276 static int __init
parse_gart_mem(char *p
)
278 return kstrtobool(p
, &gart_fix_e820
);
280 early_param("gart_fix_e820", parse_gart_mem
);
283 * With kexec/kdump, if the first kernel doesn't shut down the GART and the
284 * second kernel allocates a different GART region, there might be two
285 * overlapping GART regions present:
287 * - the first still used by the GART initialized in the first kernel.
288 * - (sub-)set of it used as normal RAM by the second kernel.
290 * which leads to memory corruptions and a kernel panic eventually.
292 * This can also happen if the BIOS has forgotten to mark the GART region
295 * Try to update the e820 map to mark that new region as reserved.
297 void __init
early_gart_iommu_check(void)
299 u32 agp_aper_order
= 0;
300 int i
, fix
, slot
, valid_agp
= 0;
302 u32 aper_size
= 0, aper_order
= 0, last_aper_order
= 0;
303 u64 aper_base
= 0, last_aper_base
= 0;
304 int aper_enabled
= 0, last_aper_enabled
= 0, last_valid
= 0;
306 if (!amd_gart_present())
309 if (!early_pci_allowed())
312 /* This is mostly duplicate of iommu_hole_init */
313 search_agp_bridge(&agp_aper_order
, &valid_agp
);
316 for (i
= 0; amd_nb_bus_dev_ranges
[i
].dev_limit
; i
++) {
318 int dev_base
, dev_limit
;
320 bus
= amd_nb_bus_dev_ranges
[i
].bus
;
321 dev_base
= amd_nb_bus_dev_ranges
[i
].dev_base
;
322 dev_limit
= amd_nb_bus_dev_ranges
[i
].dev_limit
;
324 for (slot
= dev_base
; slot
< dev_limit
; slot
++) {
325 if (!early_is_amd_nb(read_pci_config(bus
, slot
, 3, 0x00)))
328 ctl
= read_pci_config(bus
, slot
, 3, AMD64_GARTAPERTURECTL
);
329 aper_enabled
= ctl
& GARTEN
;
330 aper_order
= (ctl
>> 1) & 7;
331 aper_size
= (32 * 1024 * 1024) << aper_order
;
332 aper_base
= read_pci_config(bus
, slot
, 3, AMD64_GARTAPERTUREBASE
) & 0x7fff;
336 if ((aper_order
!= last_aper_order
) ||
337 (aper_base
!= last_aper_base
) ||
338 (aper_enabled
!= last_aper_enabled
)) {
344 last_aper_order
= aper_order
;
345 last_aper_base
= aper_base
;
346 last_aper_enabled
= aper_enabled
;
351 if (!fix
&& !aper_enabled
)
354 if (!aper_base
|| !aper_size
|| aper_base
+ aper_size
> 0x100000000UL
)
357 if (gart_fix_e820
&& !fix
&& aper_enabled
) {
358 if (e820__mapped_any(aper_base
, aper_base
+ aper_size
,
360 /* reserve it, so we can reuse it in second kernel */
361 pr_info("e820: reserve [mem %#010Lx-%#010Lx] for GART\n",
362 aper_base
, aper_base
+ aper_size
- 1);
363 e820__range_add(aper_base
, aper_size
, E820_TYPE_RESERVED
);
364 e820__update_table_print();
371 /* disable them all at first */
372 for (i
= 0; i
< amd_nb_bus_dev_ranges
[i
].dev_limit
; i
++) {
374 int dev_base
, dev_limit
;
376 bus
= amd_nb_bus_dev_ranges
[i
].bus
;
377 dev_base
= amd_nb_bus_dev_ranges
[i
].dev_base
;
378 dev_limit
= amd_nb_bus_dev_ranges
[i
].dev_limit
;
380 for (slot
= dev_base
; slot
< dev_limit
; slot
++) {
381 if (!early_is_amd_nb(read_pci_config(bus
, slot
, 3, 0x00)))
384 ctl
= read_pci_config(bus
, slot
, 3, AMD64_GARTAPERTURECTL
);
386 write_pci_config(bus
, slot
, 3, AMD64_GARTAPERTURECTL
, ctl
);
392 static int __initdata printed_gart_size_msg
;
394 void __init
gart_iommu_hole_init(void)
396 u32 agp_aper_base
= 0, agp_aper_order
= 0;
397 u32 aper_size
, aper_alloc
= 0, aper_order
= 0, last_aper_order
= 0;
398 u64 aper_base
, last_aper_base
= 0;
399 int fix
, slot
, valid_agp
= 0;
402 if (!amd_gart_present())
405 if (gart_iommu_aperture_disabled
|| !fix_aperture
||
406 !early_pci_allowed())
409 pr_info("Checking aperture...\n");
411 if (!fallback_aper_force
)
412 agp_aper_base
= search_agp_bridge(&agp_aper_order
, &valid_agp
);
416 for (i
= 0; i
< amd_nb_bus_dev_ranges
[i
].dev_limit
; i
++) {
418 int dev_base
, dev_limit
;
421 bus
= amd_nb_bus_dev_ranges
[i
].bus
;
422 dev_base
= amd_nb_bus_dev_ranges
[i
].dev_base
;
423 dev_limit
= amd_nb_bus_dev_ranges
[i
].dev_limit
;
425 for (slot
= dev_base
; slot
< dev_limit
; slot
++) {
426 if (!early_is_amd_nb(read_pci_config(bus
, slot
, 3, 0x00)))
430 gart_iommu_aperture
= 1;
431 x86_init
.iommu
.iommu_init
= gart_iommu_init
;
433 ctl
= read_pci_config(bus
, slot
, 3,
434 AMD64_GARTAPERTURECTL
);
437 * Before we do anything else disable the GART. It may
438 * still be enabled if we boot into a crash-kernel here.
439 * Reconfiguring the GART while it is enabled could have
440 * unknown side-effects.
443 write_pci_config(bus
, slot
, 3, AMD64_GARTAPERTURECTL
, ctl
);
445 aper_order
= (ctl
>> 1) & 7;
446 aper_size
= (32 * 1024 * 1024) << aper_order
;
447 aper_base
= read_pci_config(bus
, slot
, 3, AMD64_GARTAPERTUREBASE
) & 0x7fff;
450 pr_info("Node %d: aperture [bus addr %#010Lx-%#010Lx] (%uMB)\n",
451 node
, aper_base
, aper_base
+ aper_size
- 1,
455 if (!aperture_valid(aper_base
, aper_size
, 64<<20)) {
456 if (valid_agp
&& agp_aper_base
&&
457 agp_aper_base
== aper_base
&&
458 agp_aper_order
== aper_order
) {
459 /* the same between two setting from NB and agp */
461 max_pfn
> MAX_DMA32_PFN
&&
462 !printed_gart_size_msg
) {
463 pr_err("you are using iommu with agp, but GART size is less than 64MB\n");
464 pr_err("please increase GART size in your BIOS setup\n");
465 pr_err("if BIOS doesn't have that option, contact your HW vendor!\n");
466 printed_gart_size_msg
= 1;
474 if ((last_aper_order
&& aper_order
!= last_aper_order
) ||
475 (last_aper_base
&& aper_base
!= last_aper_base
)) {
479 last_aper_order
= aper_order
;
480 last_aper_base
= aper_base
;
485 if (!fix
&& !fallback_aper_force
) {
486 if (last_aper_base
) {
488 * If this is the kdump kernel, the first kernel
489 * may have allocated the range over its e820 RAM
490 * and fixed up the northbridge
492 exclude_from_core(last_aper_base
, last_aper_order
);
497 if (!fallback_aper_force
) {
498 aper_alloc
= agp_aper_base
;
499 aper_order
= agp_aper_order
;
503 /* Got the aperture from the AGP bridge */
504 } else if ((!no_iommu
&& max_pfn
> MAX_DMA32_PFN
) ||
507 fallback_aper_force
) {
508 pr_info("Your BIOS doesn't leave an aperture memory hole\n");
509 pr_info("Please enable the IOMMU option in the BIOS setup\n");
510 pr_info("This costs you %dMB of RAM\n",
511 32 << fallback_aper_order
);
513 aper_order
= fallback_aper_order
;
514 aper_alloc
= allocate_aperture();
517 * Could disable AGP and IOMMU here, but it's
518 * probably not worth it. But the later users
519 * cannot deal with bad apertures and turning
520 * on the aperture over memory causes very
521 * strange problems, so it's better to panic
524 panic("Not enough memory for aperture");
531 * If this is the kdump kernel _and_ the first kernel did not
532 * configure the aperture in the northbridge, this range may
533 * overlap with the first kernel's memory. We can't access the
534 * range through vmcore even though it should be part of the dump.
536 exclude_from_core(aper_alloc
, aper_order
);
538 /* Fix up the north bridges */
539 for (i
= 0; i
< amd_nb_bus_dev_ranges
[i
].dev_limit
; i
++) {
540 int bus
, dev_base
, dev_limit
;
543 * Don't enable translation yet but enable GART IO and CPU
544 * accesses and set DISTLBWALKPRB since GART table memory is UC.
546 u32 ctl
= aper_order
<< 1;
548 bus
= amd_nb_bus_dev_ranges
[i
].bus
;
549 dev_base
= amd_nb_bus_dev_ranges
[i
].dev_base
;
550 dev_limit
= amd_nb_bus_dev_ranges
[i
].dev_limit
;
551 for (slot
= dev_base
; slot
< dev_limit
; slot
++) {
552 if (!early_is_amd_nb(read_pci_config(bus
, slot
, 3, 0x00)))
555 write_pci_config(bus
, slot
, 3, AMD64_GARTAPERTURECTL
, ctl
);
556 write_pci_config(bus
, slot
, 3, AMD64_GARTAPERTUREBASE
, aper_alloc
>> 25);
560 set_up_gart_resume(aper_order
, aper_alloc
);