/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)
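
/*
 * Worked example (illustrative, using the well-known constant
 * __START_KERNEL_map = 0xffffffff80000000):
 *	l4_index(0xffffffff80000000) = (0xffffffff80000000 >> 39) & 511 = 511
 * so L4_START_KERNEL selects the last slot of the top-level page table;
 * l4_index() simply extracts address bits 39..47.
 */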

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	UNWIND_HINT_EMPTY
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Set up the stack for verify_cpu(), similar to initial_stack below */
	leaq	(__end_init_task - SIZEOF_PTREGS)(%rip), %rsp

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	__startup_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax
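
	/*
	 * Illustration (not authoritative): __startup_64() returns the CR3
	 * modifier (the SME encryption mask, or 0) in %rax.  Adding the
	 * link-time offset of early_top_pgt relative to __START_KERNEL_map
	 * turns that into "physical offset of the pgdir + modifier";
	 * phys_base is added below, just before the write to %cr3.
	 */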
	jmp	1f

ENTRY(secondary_startup_64)
	UNWIND_HINT_EMPTY
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
	pushq	%rsi
	call	__startup_secondary_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode, PGE and LA57 */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testl	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	movq	%rcx, %cr4
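
	/*
	 * For reference (architectural constants, not taken from this file):
	 * X86_CR4_PAE is bit 5 (0x20), X86_CR4_PGE is bit 7 (0x80) and
	 * X86_CR4_LA57 is bit 12 (0x1000), so a 5-level boot writes 0x10a0
	 * to %cr4 here and a 4-level boot writes 0xa0.
	 */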

	/* Setup early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_EMPTY

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */
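
	/*
	 * For reference: CPUID leaf 0x80000001 reports the NX (Execute
	 * Disable) capability in EDX bit 20, which is what the btl above
	 * tests; EFER is MSR 0xc0000080, with SCE at bit 0 and NXE at
	 * bit 11 (architectural values, stated here for illustration).
	 */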

	/* Setup cr0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq	initial_stack(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access any more to the userspace
	 * addresses we are currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl	%eax,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses the
	 * init data section until the per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr
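
	/*
	 * Illustration: wrmsr writes the 64-bit value %edx:%eax to the MSR
	 * selected by %ecx, so the two 32-bit loads above supply the low and
	 * high halves of the initial_gs pointer (MSR_GS_BASE is 0xc0000101).
	 */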

	/* rsi is pointer to real mode structure with interesting info.
	   pass it to C */
	movq	%rsi, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump.  In addition we need to ensure %cs is set, so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorq	%rbp, %rbp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
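	/*
	 * Stack at this point, from the top down (a sketch for clarity):
	 *	[%rsp]		initial_code	<- RIP popped by the far return
	 *	[%rsp+8]	__KERNEL_CS	<- CS popped by the far return
	 *	[%rsp+16]	.Lafter_lret	<- fake return address, unwinder only
	 * The lretq below pops RIP and CS, emulating the 64-bit far jump
	 * that cannot be encoded directly (see the note above).
	 */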
	lretq
.Lafter_lret:
END(secondary_startup_64)

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except the stack. We just set up the stack here. Then call
 * start_secondary() via .Ljump_to_C_code.
 */
ENTRY(start_cpu0)
	movq	initial_stack(%rip), %rsp
	UNWIND_HINT_EMPTY
	jmp	.Ljump_to_C_code
ENDPROC(start_cpu0)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)
	GLOBAL(initial_stack)
	/*
	 * The SIZEOF_PTREGS gap is a convention which helps the in-kernel
	 * unwinder reliably detect the end of the stack.
	 */
	.quad	init_thread_union + THREAD_SIZE - SIZEOF_PTREGS
	__FINITDATA

	__INIT
ENTRY(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
	UNWIND_HINT_IRET_REGS offset=16
END(early_idt_handler_array)
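
/*
 * For illustration, one iteration of the .rept above for a vector without
 * a hardware error code (e.g. #DB, vector 1) assembles to roughly:
 *
 *	pushq $0		# fake error code
 *	pushq $1		# vector number
 *	jmp early_idt_handler_common
 *	(0xcc padding up to EARLY_IDT_HANDLER_SIZE)
 *
 * so every vector gets a fixed-size stub and a uniform stack layout.
 */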

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS

	cmpq $14,%rsi		/* Page fault? */
	jnz 10f
	GET_CR2_INTO(%rdi)	/* Can clobber any volatile register if pv */
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			/* All good */

10:
	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call early_fixup_exception

20:
	decl early_recursion_flag(%rip)
	jmp restore_regs_and_return_to_kernel
END(early_idt_handler_common)

	__INITDATA

	.balign 4
GLOBAL(early_recursion_flag)
	.long 0

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define NEXT_PGD_PAGE(name) \
	.balign 2 * PAGE_SIZE; \
GLOBAL(name)
#else
#define NEXT_PGD_PAGE(name) NEXT_PAGE(name)
#define PTI_USER_PGD_FILL	0
#endif

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
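
/*
 * Example expansion (illustrative): PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2)
 * emits two 2MB PMD entries,
 *	.quad 0 + (0 << PMD_SHIFT) + __PAGE_KERNEL_IDENT_LARGE_EXEC
 *	.quad 0 + (1 << PMD_SHIFT) + __PAGE_KERNEL_IDENT_LARGE_EXEC
 * i.e. consecutive large pages starting at physical address 0.
 */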

	__INITDATA
NEXT_PGD_PAGE(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
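
/*
 * Sizing note (for illustration): each page table is 512 entries of
 * 8 bytes, i.e. exactly one 4k page, so this reserves one page for each
 * of the EARLY_DYNAMIC_PAGE_TABLES tables used for early page-table
 * construction.
 */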

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH)
NEXT_PGD_PAGE(init_top_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether the CPU
	 * supports it or whether it is enabled.  But, the CPU should
	 * ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#else
NEXT_PGD_PAGE(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
#endif

#ifdef CONFIG_X86_5LEVEL
NEXT_PAGE(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
#endif

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 *
	 *  This table is eventually used by the kernel during normal
	 *  runtime.  Care must be taken to clear out undesired bits
	 *  later, like _PAGE_RW or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)
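
/*
 * Worked sizing (illustrative, assuming a configuration where
 * KERNEL_IMAGE_SIZE is 512MB and PMD_SIZE is 2MB; KERNEL_IMAGE_SIZE can
 * be configured larger): 512MB / 2MB = 256 PMD entries, i.e. half of
 * this 512-entry page table actually maps the kernel image.
 */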

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)
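
/*
 * Layout note (architectural, for illustration): in long mode lgdt
 * consumes a 10-byte descriptor -- a 16-bit limit (size of the GDT in
 * bytes minus one, hence GDT_ENTRIES*8-1) followed by a 64-bit linear
 * base address, which is why a .word and a .quad sit back to back here.
 */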

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)