// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Multiprocessor Specification 1.1 and 1.4
 * compliant MP-table parsing routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 * (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
 */
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/smp.h>
#include <linux/pci.h>

#include <asm/irqdomain.h>
#include <asm/mpspec.h>
#include <asm/pgalloc.h>
#include <asm/io_apic.h>
#include <asm/proto.h>
#include <asm/bios_ebda.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
/*
 * Checksum an MP configuration block.
 */
static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}
int __init default_mpc_apic_id(struct mpc_cpu *m)
{
	return m->apicid;
}
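/*
 * Register one processor entry from the MP table: disabled CPUs are
 * only counted, and the entry flagged as the boot processor also
 * records its APIC ID in boot_cpu_physical_apicid.
 */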
static void __init MP_processor_info(struct mpc_cpu *m)
{
	int apicid;
	char *bootup_cpu = "";

	if (!(m->cpuflag & CPU_ENABLED)) {
		disabled_cpus++;
		return;
	}

	apicid = x86_init.mpparse.mpc_apic_id(m);

	if (m->cpuflag & CPU_BOOTPROCESSOR) {
		bootup_cpu = " (Bootup-CPU)";
		boot_cpu_physical_apicid = m->apicid;
	}

	pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
	generic_processor_info(apicid, m->apicver);
}
#ifdef CONFIG_X86_IO_APIC
void __init default_mpc_oem_bus_info(struct mpc_bus *m, char *str)
{
	memcpy(str, m->bustype, 6);
	str[6] = 0;
	apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
}
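/*
 * Record the type of each bus entry (ISA, PCI or EISA) so that later
 * interrupt routing can tell PCI buses apart from legacy ones.
 */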
static void __init MP_bus_info(struct mpc_bus *m)
{
	char str[7];

	x86_init.mpparse.mpc_oem_bus_info(m, str);

#if MAX_MP_BUSSES < 256
	if (m->busid >= MAX_MP_BUSSES) {
		pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
			m->busid, str, MAX_MP_BUSSES - 1);
		return;
	}
#endif

	set_bit(m->busid, mp_bus_not_pci);
	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
		if (x86_init.mpparse.mpc_oem_pci_bus)
			x86_init.mpparse.mpc_oem_pci_bus(m);

		clear_bit(m->busid, mp_bus_not_pci);
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
#endif
	} else
		pr_warn("Unknown bustype %s - ignoring\n", str);
}
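/*
 * Register a usable I/O APIC entry with the legacy interrupt domain.
 */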
static void __init MP_ioapic_info(struct mpc_ioapic *m)
{
	struct ioapic_domain_cfg cfg = {
		.type = IOAPIC_DOMAIN_LEGACY,
		.ops = &mp_ioapic_irqdomain_ops,
	};

	if (m->flags & MPC_APIC_USABLE)
		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
}
static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
{
	apic_printk(APIC_VERBOSE,
		"Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
		mp_irq->irqtype, mp_irq->irqflag & 3,
		(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
}
#else /* CONFIG_X86_IO_APIC */
static inline void __init MP_bus_info(struct mpc_bus *m) {}
static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
#endif /* CONFIG_X86_IO_APIC */
static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
{
	apic_printk(APIC_VERBOSE,
		"Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
		m->srcbusirq, m->destapic, m->destapiclint);
}
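/*
 * Validate an MP configuration table header: signature, checksum,
 * spec revision and local APIC address, then log OEM and product IDs.
 */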
static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
{
	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
		pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
		       mpc->signature[0], mpc->signature[1],
		       mpc->signature[2], mpc->signature[3]);
		return 0;
	}
	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
		pr_err("MPTABLE: checksum error!\n");
		return 0;
	}
	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
		pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
		return 0;
	}
	if (!mpc->lapic) {
		pr_err("MPTABLE: null local APIC address!\n");
		return 0;
	}
	memcpy(oem, mpc->oem, 8);
	oem[8] = 0;
	pr_info("MPTABLE: OEM ID: %s\n", oem);

	memcpy(str, mpc->productid, 12);
	str[12] = 0;
	pr_info("MPTABLE: Product ID: %s\n", str);

	pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);

	return 1;
}
static void skip_entry(unsigned char **ptr, int *count, int size)
{
	*ptr += size;
	*count += size;
}
static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
{
	pr_err("Your mptable is wrong, contact your HW vendor!\n");
	pr_cont("type %x\n", *mpt);
	print_hex_dump(KERN_ERR, "  ", DUMP_PREFIX_ADDRESS, 16,
			1, mpc, mpc->length, 1);
}

void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }
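/*
 * Walk the MP configuration table and hand each entry to the parser
 * for its type. Returns the number of processors found, 0 on error.
 */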
static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
{
	char str[16];
	char oem[10];

	int count = sizeof(*mpc);
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	if (!smp_check_mpc(mpc, oem, str))
		return 0;

	/* Initialize the lapic mapping */
	if (!acpi_lapic)
		register_lapic_address(mpc->lapic);

	if (early)
		return 1;

	if (mpc->oemptr)
		x86_init.mpparse.smp_read_mpc_oem(mpc);

	/*
	 * Now process the configuration blocks.
	 */
	x86_init.mpparse.mpc_record(0);
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			/* ACPI may have already provided this data */
			if (!acpi_lapic)
				MP_processor_info((struct mpc_cpu *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			MP_bus_info((struct mpc_bus *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			MP_ioapic_info((struct mpc_ioapic *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			mp_save_irq((struct mpc_intsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			MP_lintsrc_info((struct mpc_lintsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			count = mpc->length;
			break;
		}
		x86_init.mpparse.mpc_record(1);
	}

	if (!num_processors)
		pr_err("MPTABLE: no processors registered!\n");
	return num_processors;
}
#ifdef CONFIG_X86_IO_APIC

static int __init ELCR_trigger(unsigned int irq)
{
	unsigned int port;

	port = 0x4d0 + (irq >> 3);
	return (inb(port) >> (irq & 7)) & 1;
}
static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
	struct mpc_intsrc intsrc;
	int i;
	int ELCR_fallback = 0;

	intsrc.type = MP_INTSRC;
	intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	intsrc.srcbus = 0;
	intsrc.dstapic = mpc_ioapic_id(0);

	intsrc.irqtype = mp_INT;

	/*
	 *  If true, we have an ISA/PCI system with no IRQ entries
	 *  in the MP table. To prevent the PCI interrupts from being set up
	 *  incorrectly, we try to use the ELCR. The sanity check to see if
	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
	 *  never be level sensitive, so we simply see if the ELCR agrees.
	 *  If it does, we assume it's valid.
	 */
	if (mpc_default_type == 5) {
		pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
		    ELCR_trigger(13))
			pr_err("ELCR contains invalid data... not using ELCR\n");
		else {
			pr_info("Using ELCR to identify PCI interrupts\n");
			ELCR_fallback = 1;
		}
	}

	for (i = 0; i < 16; i++) {
		switch (mpc_default_type) {
		case 2:
			if (i == 0 || i == 13)
				continue;	/* IRQ0 & IRQ13 not connected */
			/* fall through */
		default:
			if (i == 2)
				continue;	/* IRQ2 is never connected */
		}

		if (ELCR_fallback) {
			/*
			 *  If the ELCR indicates a level-sensitive interrupt, we
			 *  copy that information over to the MP table in the
			 *  irqflag field (level sensitive, active high polarity).
			 */
			if (ELCR_trigger(i)) {
				intsrc.irqflag = MP_IRQTRIG_LEVEL |
						 MP_IRQPOL_ACTIVE_HIGH;
			} else {
				intsrc.irqflag = MP_IRQTRIG_DEFAULT |
						 MP_IRQPOL_DEFAULT;
			}
		}

		intsrc.srcbusirq = i;
		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
		mp_save_irq(&intsrc);
	}

	intsrc.irqtype = mp_ExtINT;
	intsrc.srcbusirq = 0;
	intsrc.dstirq = 0;	/* 8259A to INTIN0 */
	mp_save_irq(&intsrc);
}
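/*
 * Build the bus and I/O APIC entries implied by one of the MPS
 * default configurations, then add the default IRQ entries.
 */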
static void __init construct_ioapic_table(int mpc_default_type)
{
	struct mpc_ioapic ioapic;
	struct mpc_bus bus;

	bus.type = MP_BUS;
	bus.busid = 0;
	switch (mpc_default_type) {
	default:
		pr_err("???\nUnknown standard configuration %d\n",
		       mpc_default_type);
		/* fall through */
	case 1:
	case 5:
		memcpy(bus.bustype, "ISA   ", 6);
		break;
	case 2:
	case 6:
	case 3:
		memcpy(bus.bustype, "EISA  ", 6);
		break;
	}
	MP_bus_info(&bus);
	if (mpc_default_type > 4) {
		bus.busid = 1;
		memcpy(bus.bustype, "PCI   ", 6);
		MP_bus_info(&bus);
	}

	ioapic.type = MP_IOAPIC;
	ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	ioapic.flags = MPC_APIC_USABLE;
	ioapic.apicaddr = IO_APIC_DEFAULT_PHYS_BASE;
	MP_ioapic_info(&ioapic);

	/*
	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
	 */
	construct_default_ioirq_mptable(mpc_default_type);
}
#else /* !CONFIG_X86_IO_APIC */
static inline void __init construct_ioapic_table(int mpc_default_type) { }
#endif /* CONFIG_X86_IO_APIC */
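/*
 * Synthesize a complete MP configuration for the MPS default types:
 * two CPUs at the default local APIC address, an I/O APIC and the
 * two local interrupt (ExtINT/NMI) entries.
 */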
static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
	struct mpc_cpu processor;
	struct mpc_lintsrc lintsrc;
	int linttypes[2] = { mp_ExtINT, mp_NMI };
	int i;

	/*
	 * local APIC has default address
	 */
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/*
	 * 2 CPUs, numbered 0 & 1.
	 */
	processor.type = MP_PROCESSOR;
	/* Either an integrated APIC or a discrete 82489DX. */
	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.cpuflag = CPU_ENABLED;
	processor.cpufeature = (boot_cpu_data.x86 << 8) |
	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
	processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
	processor.reserved[0] = 0;
	processor.reserved[1] = 0;
	for (i = 0; i < 2; i++) {
		processor.apicid = i;
		MP_processor_info(&processor);
	}

	construct_ioapic_table(mpc_default_type);

	lintsrc.type = MP_LINTSRC;
	lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	lintsrc.srcbusid = 0;
	lintsrc.srcbusirq = 0;
	lintsrc.destapic = MP_APIC_ALL;
	for (i = 0; i < 2; i++) {
		lintsrc.irqtype = linttypes[i];
		lintsrc.destapiclint = i;
		MP_lintsrc_info(&lintsrc);
	}
}
static unsigned long mpf_base;
static bool mpf_found;
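/*
 * Temporarily map the configuration table header just to read its
 * length field.
 */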
static unsigned long __init get_mpc_size(unsigned long physptr)
{
	struct mpc_table *mpc;
	unsigned long size;

	mpc = early_memremap(physptr, PAGE_SIZE);
	size = mpc->length;
	early_memunmap(mpc, PAGE_SIZE);
	apic_printk(APIC_VERBOSE, "  mpc: %lx-%lx\n", physptr, physptr + size);

	return size;
}
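/*
 * Map and parse the configuration table referenced by the floating
 * pointer structure; fall back to default ISA IRQ entries if the
 * table carries none.
 */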
static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
{
	struct mpc_table *mpc;
	unsigned long size;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);

	/*
	 * Read the physical hardware table.  Anything here will
	 * override the defaults.
	 */
	if (!smp_read_mpc(mpc, early)) {
#ifdef CONFIG_X86_LOCAL_APIC
		smp_found_config = 0;
#endif
		pr_err("BIOS bug, MP table errors detected!...\n");
		pr_cont("... disabling SMP support. (tell your hw vendor)\n");
		early_memunmap(mpc, size);
		return -1;
	}

	early_memunmap(mpc, size);

	if (early)
		return -1;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * If there are no explicit MP IRQ entries, then we are
	 * broken.  We set up most of the low 16 IO-APIC pins to
	 * ISA defaults and hope it will work.
	 */
	if (!mp_irq_entries) {
		struct mpc_bus bus;

		pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

		bus.type = MP_BUS;
		bus.busid = 0;
		memcpy(bus.bustype, "ISA   ", 6);
		MP_bus_info(&bus);

		construct_default_ioirq_mptable(0);
	}
#endif

	return 0;
}
/*
 * Scan the memory blocks for an SMP configuration block.
 */
void __init default_get_smp_config(unsigned int early)
{
	struct mpf_intel *mpf;

	if (!smp_found_config)
		return;

	if (!mpf_found)
		return;

	if (acpi_lapic && early)
		return;

	/*
	 * MPS doesn't support hyperthreading, aka only have
	 * thread 0 apic id in MPS table
	 */
	if (acpi_lapic && acpi_ioapic)
		return;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: error mapping MP table\n");
		return;
	}

	pr_info("Intel MultiProcessor Specification v1.%d\n",
		mpf->specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
	if (mpf->feature2 & (1 << 7)) {
		pr_info("    IMCR and PIC compatibility mode.\n");
		pic_mode = 1;
	} else {
		pr_info("    Virtual Wire compatibility mode.\n");
		pic_mode = 0;
	}
#endif
	/*
	 * Now see if we need to read further.
	 */
	if (mpf->feature1) {
		if (early) {
			/*
			 * local APIC has default address
			 */
			mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
			goto out;
		}

		pr_info("Default MP configuration #%d\n", mpf->feature1);
		construct_default_ISA_mptable(mpf->feature1);

	} else if (mpf->physptr) {
		if (check_physptr(mpf, early)) {
			early_memunmap(mpf, sizeof(*mpf));
			return;
		}
	} else
		BUG();

	if (!early)
		pr_info("Processors: %d\n", num_processors);
	/*
	 * Only use the first configuration found.
	 */
out:
	early_memunmap(mpf, sizeof(*mpf));
}
static void __init smp_reserve_memory(struct mpf_intel *mpf)
{
	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
}
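/*
 * Look for the 16-byte MP floating pointer structure in a physical
 * memory range and reserve both it and the table it points to.
 */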
static int __init smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned int *bp;
	struct mpf_intel *mpf;
	int ret = 0;

	apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
		    base, base + length - 1);
	BUILD_BUG_ON(sizeof(*mpf) != 16);

	while (length > 0) {
		bp = early_memremap(base, length);
		mpf = (struct mpf_intel *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
		    (mpf->length == 1) &&
		    !mpf_checksum((unsigned char *)bp, 16) &&
		    ((mpf->specification == 1)
		     || (mpf->specification == 4))) {
#ifdef CONFIG_X86_LOCAL_APIC
			smp_found_config = 1;
#endif
			mpf_base = base;
			mpf_found = true;

			pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n",
				base, base + sizeof(*mpf) - 1, mpf);

			memblock_reserve(base, sizeof(*mpf));
			if (mpf->physptr)
				smp_reserve_memory(mpf);

			ret = 1;
		}
		early_memunmap(bp, length);
		base += 16;
		length -= 16;
	}
	return ret;
}
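/*
 * Scan the conventional locations defined by the MP specification
 * for the floating pointer structure.
 */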
void __init default_find_smp_config(void)
{
	unsigned int address;

	/*
	 * FIXME: Linux assumes you have 640K of base ram..
	 * this continues the error...
	 *
	 * 1) Scan the bottom 1K for a signature
	 * 2) Scan the top 1K of base RAM
	 * 3) Scan the 64K of bios
	 */
	if (smp_scan_config(0x0, 0x400) ||
	    smp_scan_config(639 * 0x400, 0x400) ||
	    smp_scan_config(0xF0000, 0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA bus machine with an
	 * extended bios data area.
	 *
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E, calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 *
	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
	 */

	address = get_bios_ebda();
	if (address)
		smp_scan_config(address, 0x400);
}
#ifdef CONFIG_X86_IO_APIC
static u8 __initdata irq_used[MAX_IRQ_SOURCES];
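/*
 * Find the mp_irqs[] slot matching a table interrupt entry. Returns
 * the index on a match, 0 for entries that are not level/active-low
 * mp_INT, -1 if nothing matches and -2 if the slot is already claimed.
 */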
static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
{
	int i;

	if (m->irqtype != mp_INT)
		return 0;

	if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
		return 0;

	for (i = 0; i < mp_irq_entries; i++) {
		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (mp_irqs[i].srcbus != m->srcbus)
			continue;
		if (mp_irqs[i].srcbusirq != m->srcbusirq)
			continue;
		if (irq_used[i]) {
			/* already claimed */
			return -2;
		}
		irq_used[i] = 1;
		return i;
	}

	/* not found */
	return -1;
}
#define SPARE_SLOT_NUM 20

static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
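/*
 * Compare a table interrupt entry against mp_irqs[]: refresh entries
 * that match and remember stale ones as spare slots for later reuse.
 */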
static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
{
	int i;

	apic_printk(APIC_VERBOSE, "OLD ");
	print_mp_irq_info(m);

	i = get_MP_intsrc_index(m);
	if (i > 0) {
		memcpy(m, &mp_irqs[i], sizeof(*m));
		apic_printk(APIC_VERBOSE, "NEW ");
		print_mp_irq_info(&mp_irqs[i]);
		return;
	}
	if (!i) {
		/* legacy, do nothing */
		return;
	}
	if (*nr_m_spare < SPARE_SLOT_NUM) {
		/*
		 * not found (-1), or duplicated (-2) are invalid entries,
		 * we need to use the slot later
		 */
		m_spare[*nr_m_spare] = m;
		*nr_m_spare += 1;
	}
}
static int __init
check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
{
	if (!mpc_new_phys || count <= mpc_new_length) {
		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
		return -1;
	}

	return 0;
}
#else /* CONFIG_X86_IO_APIC */
static
inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
#endif /* CONFIG_X86_IO_APIC */
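/*
 * Rewrite the interrupt source entries of an MP table from the current
 * mp_irqs[] array, reusing spare slots or appending new entries, and
 * fix up the table checksum afterwards.
 */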
static int __init replace_intsrc_all(struct mpc_table *mpc,
				     unsigned long mpc_new_phys,
				     unsigned long mpc_new_length)
{
#ifdef CONFIG_X86_IO_APIC
	int i;
#endif
	int count = sizeof(*mpc);
	int nr_m_spare = 0;
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	pr_info("mpc_length %x\n", mpc->length);
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			goto out;
		}
	}

#ifdef CONFIG_X86_IO_APIC
	for (i = 0; i < mp_irq_entries; i++) {
		if (irq_used[i])
			continue;

		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (nr_m_spare > 0) {
			apic_printk(APIC_VERBOSE, "*NEW* found\n");
			nr_m_spare--;
			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
			m_spare[nr_m_spare] = NULL;
		} else {
			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
			count += sizeof(struct mpc_intsrc);
			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
				goto out;
			memcpy(m, &mp_irqs[i], sizeof(*m));
			mpc->length = count;
			mpt += sizeof(struct mpc_intsrc);
		}
		print_mp_irq_info(&mp_irqs[i]);
	}
#endif
out:
	/* update checksum */
	mpc->checksum = 0;
	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);

	return 0;
}
int enable_update_mptable;

static int __init update_mptable_setup(char *str)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	return 0;
}
early_param("update_mptable", update_mptable_setup);
static unsigned long __initdata mpc_new_phys;
static unsigned long mpc_new_length __initdata = 4096;

/* alloc_mptable or alloc_mptable=4k */
static int __initdata alloc_mptable;
static int __init parse_alloc_mptable_opt(char *p)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	alloc_mptable = 1;
	if (!p)
		return 0;
	mpc_new_length = memparse(p, &p);
	return 0;
}
early_param("alloc_mptable", parse_alloc_mptable_opt);
void __init e820__memblock_alloc_reserved_mpc_new(void)
{
	if (enable_update_mptable && alloc_mptable)
		mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4);
}
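/*
 * Late initcall driven by the "update_mptable" option: remap the MP
 * table (in place, or into the area reserved via "alloc_mptable") and
 * rewrite its interrupt entries from mp_irqs[].
 */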
static int __init update_mp_table(void)
{
	char str[16];
	char oem[10];
	struct mpf_intel *mpf;
	struct mpc_table *mpc, *mpc_new;
	unsigned long size;

	if (!enable_update_mptable)
		return 0;

	if (!mpf_found)
		return 0;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: mpf early_memremap() failed\n");
		return 0;
	}

	/*
	 * Now see if we need to go further.
	 */
	if (mpf->feature1)
		goto do_unmap_mpf;

	if (!mpf->physptr)
		goto do_unmap_mpf;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);
	if (!mpc) {
		pr_err("MPTABLE: mpc early_memremap() failed\n");
		goto do_unmap_mpf;
	}

	if (!smp_check_mpc(mpc, oem, str))
		goto do_unmap_mpc;

	pr_info("mpf: %llx\n", (u64)mpf_base);
	pr_info("physptr: %x\n", mpf->physptr);

	if (mpc_new_phys && mpc->length > mpc_new_length) {
		mpc_new_phys = 0;
		pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
			mpc_new_length);
	}

	if (!mpc_new_phys) {
		unsigned char old, new;
		/* check if we can change the position */
		mpc->checksum = 0;
		old = mpf_checksum((unsigned char *)mpc, mpc->length);
		mpc->checksum = 0xff;
		new = mpf_checksum((unsigned char *)mpc, mpc->length);
		if (old == new) {
			pr_info("mpc is readonly, please try alloc_mptable instead\n");
			goto do_unmap_mpc;
		}
		pr_info("use in-position replacing\n");
	} else {
		mpc_new = early_memremap(mpc_new_phys, mpc_new_length);
		if (!mpc_new) {
			pr_err("MPTABLE: new mpc early_memremap() failed\n");
			goto do_unmap_mpc;
		}
		mpf->physptr = mpc_new_phys;
		memcpy(mpc_new, mpc, mpc->length);
		early_memunmap(mpc, size);
		mpc = mpc_new;
		size = mpc_new_length;
		/* check if we can modify that */
		if (mpc_new_phys - mpf->physptr) {
			struct mpf_intel *mpf_new;
			/* steal 16 bytes from [0, 1k) */
			mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new));
			if (!mpf_new) {
				pr_err("MPTABLE: new mpf early_memremap() failed\n");
				goto do_unmap_mpc;
			}
			pr_info("mpf new: %x\n", 0x400 - 16);
			memcpy(mpf_new, mpf, 16);
			early_memunmap(mpf, sizeof(*mpf));
			mpf = mpf_new;
			mpf->physptr = mpc_new_phys;
		}
		mpf->checksum = 0;
		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
		pr_info("physptr new: %x\n", mpf->physptr);
	}

	/*
	 * only replace the one with mp_INT and
	 * MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW,
	 * already in mp_irqs , stored by ... and mp_config_acpi_gsi,
	 * may need pci=routeirq for all coverage
	 */
	replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);

do_unmap_mpc:
	early_memunmap(mpc, size);

do_unmap_mpf:
	early_memunmap(mpf, sizeof(*mpf));

	return 0;
}

late_initcall(update_mp_table);