/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * relocate_kernel.S - put the kernel image in place to boot
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 */
7 #include <linux/linkage.h>
8 #include <asm/page_types.h>
10 #include <asm/processor-flags.h>
11 #include <asm/pgtable_types.h>
12 #include <asm/nospec-branch.h>
13 #include <asm/unwind_hints.h>
/*
 * Must be relocatable PIC code callable as a C function
 */
/*
 * PTR(x): byte offset of the x-th 64-bit entry in the page_list array
 * passed in from C (each slot is 8 bytes, hence the shift by 3).
 */
19 #define PTR(x) (x << 3)
/* Attribute bits for page-table entries built by this code. */
20 #define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * control_page + KEXEC_CONTROL_CODE_MAX_SIZE
 * ~ control_page + PAGE_SIZE are used as data storage and stack for
 * the relocation code.
 * NOTE(review): the tail of this sentence is missing from this excerpt —
 * confirm the exact wording against the full file.
 */
27 #define DATA(offset) (KEXEC_CONTROL_CODE_MAX_SIZE+(offset))

/* Minimal CPU state saved into the control page's data area. */
32 #define CR3 DATA(0x10)
33 #define CR4 DATA(0x18)

/* Physical addresses stashed so the code can jump back later. */
36 #define CP_PA_TABLE_PAGE DATA(0x20)
37 #define CP_PA_SWAP_PAGE DATA(0x28)
38 #define CP_PA_BACKUP_PAGES_MAP DATA(0x30)
/*
 * relocate_kernel — entry point, called as a C function; stages the new
 * kernel and transfers control to identity-mapped code.
 *
 * NOTE(review): this listing is an incomplete excerpt (residual line
 * numbers, gaps) — the register saves, CR0/CR3/CR4 reads, the actual
 * page-table switch and the final jump are not visible here; confirm
 * against the full file.
 */
43 SYM_CODE_START_NOALIGN(relocate_kernel)
	/*
	 * Register arguments:
	 * %rdi indirection_page
	 * %rcx preserve_context
	 * (%rsi is used below as the page_list pointer; the comment lines
	 *  documenting %rsi/%rdx are missing from this excerpt)
	 */
	/* Save the CPU context, used for jumping back */
62 movq PTR(VA_CONTROL_PAGE)(%rsi), %r11	/* %r11 = control page (virtual address) */
	/* Save CR4. Required to enable the right paging mode later. */
	/* zero out flags, and disable interrupts */
	/* Save SME active flag */
	/*
	 * get physical address of control page now
	 * this is impossible after page table switch
	 */
85 movq PTR(PA_CONTROL_PAGE)(%rsi), %r8	/* %r8 = control page (physical) */
	/* get physical address of page table now too */
88 movq PTR(PA_TABLE_PAGE)(%rsi), %r9	/* %r9 = page table (physical) */
	/* get physical address of swap page now */
91 movq PTR(PA_SWAP_PAGE)(%rsi), %r10	/* %r10 = swap page (physical) */
	/* save some information for jumping back */
94 movq %r9, CP_PA_TABLE_PAGE(%r11)
95 movq %r10, CP_PA_SWAP_PAGE(%r11)
96 movq %rdi, CP_PA_BACKUP_PAGES_MAP(%r11)
	/* Switch to the identity mapped page tables */
	/* setup a new stack at the end of the physical control page */
102 lea PAGE_SIZE(%r8), %rsp	/* stack grows down from the top of the control page */
	/* jump to identity mapped page */
105 addq $(identity_mapped - relocate_kernel), %r8	/* PIC: physical addr of identity_mapped */
108 SYM_CODE_END(relocate_kernel)
/*
 * identity_mapped — runs from the identity-mapped control page; forces
 * CR0/CR4 into a known state, then (in code not visible in this excerpt)
 * flushes caches/TLB, copies the kernel and jumps to it, or re-enters
 * the original kernel when preserving context.
 *
 * NOTE(review): incomplete excerpt — the CR writes, cache/TLB flushes,
 * register clearing, the copy call and the jumps are missing; confirm
 * against the full file.
 */
110 SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
	/* set return address to 0 if not preserving context */
	/* store the start address on the stack */
	/*
	 * Set cr0 to a known state:
	 * - Alignment check disabled
	 * - Write protect disabled
	 * - Don't do FP software emulation.
	 * - Protected mode enabled
	 */
127 andq $~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax	/* clear AM/WP/TS/EM */
128 orl $(X86_CR0_PG | X86_CR0_PE), %eax	/* keep paging + protected mode set */
	/*
	 * Set cr4 to a known state:
	 * - physical address extension enabled
	 * - 5-level paging, if it was enabled before
	 */
136 movl $X86_CR4_PAE, %eax
137 testq $X86_CR4_LA57, %r13	/* %r13: presumably the CR4 saved earlier — TODO confirm */
139 orl $X86_CR4_LA57, %eax	/* carry 5-level paging over if it was on */
	/* Flush the TLB (needed?) */
	/*
	 * If SME is active, there could be old encrypted cache line
	 * entries that will conflict with the now unencrypted memory
	 * used by kexec. Flush the caches before copying the kernel.
	 */
	/*
	 * To be certain of avoiding problems with self-modifying code
	 * I need to execute a serializing instruction here.
	 * So I flush the TLB by reloading %cr3 here, it's handy,
	 * and not processor dependent.
	 */
	/*
	 * set all of the registers to known values
	 */
198 leaq PAGE_SIZE(%r10), %rsp	/* stack at the top of the page held in %r10 */
199 ANNOTATE_RETPOLINE_SAFE	/* objtool: the following indirect branch is deliberate */
	/* get the re-entry point of the peer system */
204 leaq relocate_kernel(%rip), %r8	/* PIC: runtime address of this code */
205 movq CP_PA_SWAP_PAGE(%r8), %r10	/* reload values stashed by relocate_kernel */
206 movq CP_PA_BACKUP_PAGES_MAP(%r8), %rdi
207 movq CP_PA_TABLE_PAGE(%r8), %rax
209 lea PAGE_SIZE(%r8), %rsp	/* fresh stack at the top of the control page */
211 movq $virtual_mapped, %rax	/* continue at virtual_mapped (linked virtual address) */
214 SYM_CODE_END(identity_mapped)
/*
 * virtual_mapped — continuation once the original (virtual) page tables
 * are back in use.
 * NOTE(review): the entire body is missing from this excerpt; only the
 * start/end markers are visible — consult the full file.
 */
216 SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
235 SYM_CODE_END(virtual_mapped)
/*
 * swap_pages — walk the kexec indirection page list and copy each source
 * page to its destination.
 *
 * Tag bits tested on each list entry (low bits of the word, see the
 * testb instructions below); the low 12 bits are masked off to recover
 * the page-aligned address:
 *	0x1  destination page
 *	0x2  indirection page
 *	0x4  done indicator
 *	0x8  source page
 *
 * NOTE(review): incomplete excerpt — the entry loads, conditional
 * branches and the copy loop between these lines are not visible here.
 */
238 SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
240 movq %rdi, %rcx /* Put the page_list in %rcx */
245 0: /* top, read another word for the indirection page */
250 testb $0x1, %cl /* is it a destination page? */
253 andq $0xfffffffffffff000, %rdi	/* strip tag bits: %rdi = destination address */
256 testb $0x2, %cl /* is it an indirection page? */
259 andq $0xfffffffffffff000, %rbx	/* strip tag bits: %rbx = indirection page address */
262 testb $0x4, %cl /* is it the done indicator? */
266 testb $0x8, %cl /* is it the source indicator? */
267 jz 0b /* Ignore it otherwise */
268 movq %rcx, %rsi /* For every source page do a copy */
269 andq $0xfffffffffffff000, %rsi	/* strip tag bits: %rsi = source address */
288 lea PAGE_SIZE(%rax), %rsi
292 SYM_CODE_END(swap_pages)
/*
 * Total size of the relocation code, measured from relocate_kernel to
 * this point; exported so C code can validate/copy the control page.
 * NOTE(review): consumers are outside this excerpt — confirm usage there.
 */
294 .globl kexec_control_code_size
295 .set kexec_control_code_size, . - relocate_kernel