// SPDX-License-Identifier: GPL-2.0
/*
 *  prepare to run common code
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */
#define DISABLE_BRANCH_PROFILING

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>

#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820/api.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>
/*
 * Manage page tables very early on.
 */
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt;
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
#ifdef CONFIG_X86_5LEVEL
unsigned int __pgtable_l5_enabled __ro_after_init;
unsigned int pgdir_shift __ro_after_init = 39;
EXPORT_SYMBOL(pgdir_shift);
unsigned int ptrs_per_p4d __ro_after_init = 1;
EXPORT_SYMBOL(ptrs_per_p4d);
#endif

#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
EXPORT_SYMBOL(vmemmap_base);
#endif
#define __head __section(.head.text)

static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
{
        return ptr - (void *)_text + (void *)physaddr;
}

static unsigned long __head *fixup_long(void *ptr, unsigned long physaddr)
{
        return fixup_pointer(ptr, physaddr);
}
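
/*
 * The helpers above rebase a link-time address into the range the CPU is
 * currently executing from: for a global linked at (_text + off),
 * fixup_pointer() returns (physaddr + off), which is safe to dereference
 * while we still run from the early identity mapping, e.g.:
 *
 *      pgd = fixup_pointer(&early_top_pgt, physaddr);
 */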
#ifdef CONFIG_X86_5LEVEL
static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr)
{
        return fixup_pointer(ptr, physaddr);
}

static bool __head check_la57_support(unsigned long physaddr)
{
        /*
         * 5-level paging is detected and enabled at kernel decompression
         * stage. Only check if it has been enabled there.
         */
        if (!(native_read_cr4() & X86_CR4_LA57))
                return false;

        *fixup_int(&__pgtable_l5_enabled, physaddr) = 1;
        *fixup_int(&pgdir_shift, physaddr) = 48;
        *fixup_int(&ptrs_per_p4d, physaddr) = 512;
        *fixup_long(&page_offset_base, physaddr) = __PAGE_OFFSET_BASE_L5;
        *fixup_long(&vmalloc_base, physaddr) = __VMALLOC_BASE_L5;
        *fixup_long(&vmemmap_base, physaddr) = __VMEMMAP_BASE_L5;

        return true;
}
#else
static bool __head check_la57_support(unsigned long physaddr)
{
        return false;
}
#endif
/* Code in __startup_64() can be relocated during execution, but the compiler
 * doesn't have to generate PC-relative relocations when accessing globals from
 * that function. Clang actually does not generate them, which leads to
 * boot-time crashes. To work around this problem, every global pointer must
 * be adjusted using fixup_pointer().
 */
unsigned long __head __startup_64(unsigned long physaddr,
                                  struct boot_params *bp)
{
        unsigned long load_delta, *p;
        unsigned long pgtable_flags;
        pgdval_t *pgd;
        p4dval_t *p4d;
        pudval_t *pud;
        pmdval_t *pmd, pmd_entry;
        pteval_t *mask_ptr;
        bool la57;
        int i;
        unsigned int *next_pgt_ptr;

        la57 = check_la57_support(physaddr);
        /* Is the address too large? */
        if (physaddr >> MAX_PHYSMEM_BITS)
                for (;;);

        /*
         * Compute the delta between the address I am compiled to run at
         * and the address I am actually running at.
         */
        load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);

        /* Is the address not 2M aligned? */
        if (load_delta & ~PMD_PAGE_MASK)
                for (;;);

        /* Activate Secure Memory Encryption (SME) if supported and enabled */
        sme_enable(bp);

        /* Include the SME encryption mask in the fixup value */
        load_delta += sme_get_me_mask();
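
        /*
         * For example, a kernel linked to start at physical 0x1000000
         * (_text == __START_KERNEL_map + 0x1000000) but loaded at
         * physaddr == 0x5000000 gives load_delta == 0x4000000; every
         * physical address baked into the static page tables below must
         * be shifted by that amount (plus the SME mask, if active).
         */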
        /* Fixup the physical addresses in the page table */

        pgd = fixup_pointer(&early_top_pgt, physaddr);
        p = pgd + pgd_index(__START_KERNEL_map);
        if (la57)
                *p = (unsigned long)level4_kernel_pgt;
        else
                *p = (unsigned long)level3_kernel_pgt;
        *p += _PAGE_TABLE_NOENC - __START_KERNEL_map + load_delta;

        if (la57) {
                p4d = fixup_pointer(&level4_kernel_pgt, physaddr);
                p4d[511] += load_delta;
        }

        pud = fixup_pointer(&level3_kernel_pgt, physaddr);
        pud[510] += load_delta;
        pud[511] += load_delta;

        pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
        pmd[506] += load_delta;
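
        /*
         * The fixed slots patched above mirror the static layout built in
         * arch/x86/kernel/head_64.S: level4_kernel_pgt[511] points at
         * level3_kernel_pgt, whose entries 510/511 map the kernel image and
         * the fixmap area, and level2_fixmap_pgt[506] points at the level1
         * fixmap table.
         */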
        /*
         * Set up the identity mapping for the switchover.  These
         * entries should *NOT* have the global bit set!  This also
         * creates a bunch of nonsense entries but that is fine --
         * it avoids problems around wraparound.
         */

        next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr);
        pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
        pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);

        pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
        if (la57) {
                p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++],
                                    physaddr);

                i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
                pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
                pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;

                i = (physaddr >> P4D_SHIFT) % PTRS_PER_P4D;
                p4d[i + 0] = (pgdval_t)pud + pgtable_flags;
                p4d[i + 1] = (pgdval_t)pud + pgtable_flags;
        } else {
                i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
                pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
                pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
        }

        i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD;
        pud[i + 0] = (pudval_t)pmd + pgtable_flags;
        pud[i + 1] = (pudval_t)pmd + pgtable_flags;
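
        /*
         * Two adjacent entries are written at each level so that the
         * identity mapping still covers the kernel image when it straddles
         * a PGD/P4D/PUD boundary.
         */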
        pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
        /* Filter out unsupported __PAGE_KERNEL_* bits: */
        mask_ptr = fixup_pointer(&__supported_pte_mask, physaddr);
        pmd_entry &= *mask_ptr;
        pmd_entry += sme_get_me_mask();
        pmd_entry += physaddr;

        for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
                int idx = i + (physaddr >> PMD_SHIFT) % PTRS_PER_PMD;
                pmd[idx] = pmd_entry + i * PMD_SIZE;
        }
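
        /*
         * The loop above identity-maps the whole image (_text .. _end) at
         * its current physical address with 2MB pages, one pmd entry per
         * PMD_SIZE chunk.
         */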
        /*
         * Fixup the kernel text+data virtual addresses. Note that
         * we might write invalid pmds, when the kernel is relocated
         * cleanup_highmap() fixes this up along with the mappings
         * beyond _end.
         */

        pmd = fixup_pointer(level2_kernel_pgt, physaddr);
        for (i = 0; i < PTRS_PER_PMD; i++) {
                if (pmd[i] & _PAGE_PRESENT)
                        pmd[i] += load_delta;
        }
        /*
         * Fixup phys_base - remove the memory encryption mask to obtain
         * the true physical address.
         */
        *fixup_long(&phys_base, physaddr) += load_delta - sme_get_me_mask();

        /* Encrypt the kernel and related (if SME is active) */
        sme_encrypt_kernel(bp);

        /*
         * Return the SME encryption mask (if SME is active) to be used as a
         * modifier for the initial pgdir entry programmed into CR3.
         */
        return sme_get_me_mask();
}
unsigned long __startup_secondary_64(void)
{
        /*
         * Return the SME encryption mask (if SME is active) to be used as a
         * modifier for the initial pgdir entry programmed into CR3.
         */
        return sme_get_me_mask();
}
/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
        memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
        next_early_pgt = 0;
        write_cr3(__sme_pa_nodebug(early_top_pgt));
}
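
/*
 * Clearing all but the last PGD slot above drops the early identity
 * mapping while leaving the final slot (entry 511, which maps the kernel
 * at __START_KERNEL_map) intact, so execution can continue from kernel
 * virtual addresses.
 */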
/* Create a new PMD entry */
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
{
        unsigned long physaddr = address - __PAGE_OFFSET;
        pgdval_t pgd, *pgd_p;
        p4dval_t p4d, *p4d_p;
        pudval_t pud, *pud_p;
        pmdval_t *pmd_p;

        /* Invalid address or early pgt is done ? */
        if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
                return -1;

again:
        pgd_p = &early_top_pgt[pgd_index(address)].pgd;
        pgd = *pgd_p;
        /*
         * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
         * critical -- __PAGE_OFFSET would point us back into the dynamic
         * range and we might end up looping forever...
         */
        if (!pgtable_l5_enabled())
                p4d_p = pgd_p;
        else if (pgd)
                p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
                *pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        p4d_p += p4d_index(address);
        p4d = *p4d_p;
        if (p4d)
                pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
                *p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        pud_p += pud_index(address);
        pud = *pud_p;
        if (pud)
                pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
                *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        pmd_p[pmd_index(address)] = pmd;

        return 0;
}
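
/*
 * early_make_pgtable() below is invoked from the early page-fault path
 * (see the early IDT setup and head_64.S): direct-map addresses touched
 * before the real page tables exist are faulted in on demand as 2MB
 * mappings built from early_pmd_flags.
 */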
int __init early_make_pgtable(unsigned long address)
{
        unsigned long physaddr = address - __PAGE_OFFSET;
        pmdval_t pmd;

        pmd = (physaddr & PMD_MASK) + early_pmd_flags;

        return __early_make_pgtable(address, pmd);
}
/*
 * Don't add a printk in there. printk relies on the PDA which is not
 * initialized yet.
 */
static void __init clear_bss(void)
{
        memset(__bss_start, 0,
               (unsigned long) __bss_stop - (unsigned long) __bss_start);
}
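
/*
 * The boot protocol splits the 64-bit command line pointer across two
 * fields: the low 32 bits live in boot_params.hdr.cmd_line_ptr and the
 * high 32 bits in boot_params.ext_cmd_line_ptr, so the helper below
 * stitches them back together.
 */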
static unsigned long get_cmd_line_ptr(void)
{
        unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

        cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

        return cmd_line_ptr;
}
static void __init copy_bootdata(char *real_mode_data)
{
        char *command_line;
        unsigned long cmd_line_ptr;

        /*
         * If SME is active, this will create decrypted mappings of the
         * boot data in advance of the copy operations.
         */
        sme_map_bootdata(real_mode_data);

        memcpy(&boot_params, real_mode_data, sizeof boot_params);
        sanitize_boot_params(&boot_params);
        cmd_line_ptr = get_cmd_line_ptr();
        if (cmd_line_ptr) {
                command_line = __va(cmd_line_ptr);
                memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
        }

        /*
         * The old boot data is no longer needed and won't be reserved,
         * freeing up that memory for use by the system. If SME is active,
         * we need to remove the mappings that were created so that the
         * memory doesn't remain mapped as decrypted.
         */
        sme_unmap_bootdata(real_mode_data);
}
asmlinkage __visible void __init x86_64_start_kernel(char *real_mode_data)
{
        /*
         * Build-time sanity checks on the kernel image and module
         * area mappings. (these are purely build-time and produce no code)
         */
        BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
        BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
        BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
        BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
        BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
        BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
        MAYBE_BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
                                (__START_KERNEL & PGDIR_MASK)));
        BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

        cr4_init_shadow();

        /* Kill off the identity-map trampoline */
        reset_early_page_tables();

        clear_bss();

        clear_page(init_top_pgt);

        /*
         * SME support may update early_pmd_flags to include the memory
         * encryption mask, so it needs to be called before anything
         * that may generate a page fault.
         */
        sme_early_init();

        kasan_early_init();

        idt_setup_early_handler();

        copy_bootdata(__va(real_mode_data));

        /*
         * Load microcode early on BSP.
         */
        load_ucode_bsp();

        /* set init_top_pgt kernel high mapping */
        init_top_pgt[511] = early_top_pgt[511];

        x86_64_start_reservations(real_mode_data);
}
void __init x86_64_start_reservations(char *real_mode_data)
{
        /* version is always not zero if it is copied */
        if (!boot_params.hdr.version)
                copy_bootdata(__va(real_mode_data));

        x86_early_init_platform_quirks();

        switch (boot_params.hdr.hardware_subarch) {
        case X86_SUBARCH_INTEL_MID:
                x86_intel_mid_early_setup();
                break;
        default:
                break;
        }

        start_kernel();
}