1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Copyright (C) 2012 Regents of the University of California
6 #include <asm/asm-offsets.h>
8 #include <linux/init.h>
9 #include <linux/linkage.h>
10 #include <asm/thread_info.h>
12 #include <asm/pgtable.h>
14 #include <asm/hwcap.h>
15 #include <asm/image.h>
17 #include <asm/xip_fixup.h>
18 #include "efi-header.S"
/*
 * _start: kernel entry point, doubling as the Linux/RISC-V boot image
 * header that bootloaders parse (layout defined in asm/image.h). The
 * first instruction is chosen to decode as ASCII "MZ" so UEFI's PE/COFF
 * loader accepts the image; pe_head_start's offset is recorded below.
 * NOTE(review): this listing is elided — the actual header data
 * directives between these comments are missing from view, and each
 * line carries a stray leading number from extraction (kept verbatim);
 * only comments were changed here.
 */
21 SYM_CODE_START(_start)
23 * Image header expected by Linux boot-loaders. The image header data
24 * structure is described in asm/image.h.
25 * Do not modify it without modifying the structure and all bootloaders
26 * that expect this header format!!
30 * This instruction decodes to "MZ" ASCII required by UEFI.
35 /* jump to start kernel */
41 #ifdef CONFIG_RISCV_M_MODE
42 /* Image load offset (0MB) from start of RAM for M-mode */
45 #if __riscv_xlen == 64
46 /* Image load offset (2MB) from start of RAM */
49 /* Image load offset (4MB) from start of RAM */
53 /* Effective size of kernel image */
56 .word RISCV_HEADER_VERSION
59 .ascii RISCV_IMAGE_MAGIC
61 .ascii RISCV_IMAGE_MAGIC2
63 .word pe_head_start - _start
/*
 * relocate_enable_mmu: switch this hart from physical to virtual
 * addressing. Relocates the return address into the kernel virtual
 * map (via KERNEL_MAP_VIRT_ADDR), builds satp values (PPN = PA >>
 * PAGE_SHIFT) for the trampoline and kernel page tables, and relies on
 * stvec to catch the first virtual-address fetch after the satp write
 * when VA != PA. NOTE(review): interior lines (the actual csrw/satp
 * writes and fences) are elided in this listing; stray leading numbers
 * are extraction artifacts kept verbatim.
 */
73 .global relocate_enable_mmu
75 /* Relocate return address */
78 REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
83 /* Point stvec to virtual address of instruction after satp write */
88 /* Compute satp for kernel page tables, but don't load it yet */
89 srl a2, a0, PAGE_SHIFT
96 * Load trampoline page directory, which will cause us to trap to
97 * stvec if VA != PA, or simply fall through if VA == PA. We need a
98 * full fence here because setup_vm() just wrote these PTEs and we need
99 * to ensure the new translations are in use.
101 la a0, trampoline_pg_dir
103 srl a0, a0, PAGE_SHIFT
109 /* Set trap vector to spin forever to help debug */
110 la a0, .Lsecondary_park
113 /* Reload the global pointer */
117 * Switch to kernel page tables. A full fence is necessary in order to
118 * avoid using the trampoline translations, which are only correct for
119 * the first superpage. Fetching the fence is guaranteed to work
120 * because that first superpage is translated the same way.
126 #endif /* CONFIG_MMU */
/*
 * secondary_start_sbi: SBI HSM entry point for secondary harts.
 * In: a0 = hartid, a1 = opaque boot data; per-hart task and stack
 * pointers are fetched via SBI_HART_BOOT_TASK_PTR_OFFSET /
 * SBI_HART_BOOT_STACK_PTR_OFFSET. Falls into
 * .Lsecondary_start_common, which enables the MMU (swapper_pg_dir)
 * and installs the real trap vector. NOTE(review): interior lines
 * are elided in this listing; stray leading numbers are extraction
 * artifacts kept verbatim.
 */
128 .global secondary_start_sbi
130 /* Mask all interrupts */
134 /* Load the global pointer */
138 * Disable FPU & VECTOR to detect illegal usage of
139 * floating point or vector in kernel space
144 /* Set trap vector to spin forever to help debug */
145 la a3, .Lsecondary_park
148 /* a0 contains the hartid & a1 contains boot data */
149 li a2, SBI_HART_BOOT_TASK_PTR_OFFSET
153 li a3, SBI_HART_BOOT_STACK_PTR_OFFSET
158 .Lsecondary_start_common:
161 /* Enable virtual memory and relocate to virtual address */
162 la a0, swapper_pg_dir
164 call relocate_enable_mmu
166 call .Lsetup_trap_vector
169 #endif /* CONFIG_SMP */
/*
 * .Lsecondary_park / .Lsetup_trap_vector (elided listing).
 * Park spins a hart that cannot continue booting; setup_trap_vector
 * installs handle_exception as the trap vector and zeroes CSR_SCRATCH
 * to tell the exception entry code "currently in kernel".
 * Stray leading numbers are extraction artifacts kept verbatim.
 */
174 * Park this hart if we:
175 * - have too many harts on CONFIG_RISCV_BOOT_SPINWAIT
176 * - receive an early trap, before setup_trap_vector finished
177 * - fail in smp_callin(), as a successful one wouldn't return
184 /* Set trap vector to exception handler */
185 la a0, handle_exception
189 * Set sup0 scratch register to 0, indicating to exception vector that
190 * we are presently executing in kernel.
192 csrw CSR_SCRATCH, zero
/*
 * _start_kernel: boot-CPU entry proper, jumped to from the image
 * header. Masks interrupts; on M-mode flushes the i-cache, resets
 * registers (reset_regs) and opens a blanket NAPOT PMP entry; under
 * CONFIG_RISCV_BOOT_SPINWAIT runs the hart "lottery" (AMO on a shared
 * word) so exactly one hart proceeds; clears BSS, records
 * boot_cpu_hartid, sets up the early stack in init_thread_union,
 * enables the MMU, then installs the real trap vector and calls into C.
 * NOTE(review): this listing is heavily elided — branch targets such as
 * .Lgood_cores/.Lclear_bss and the final call into start_kernel are on
 * missing lines; stray leading numbers are extraction artifacts kept
 * verbatim.
 */
197 SYM_CODE_START(_start_kernel)
198 /* Mask all interrupts */
202 #ifdef CONFIG_RISCV_M_MODE
203 /* flush the instruction cache */
206 /* Reset all registers except ra, a0, a1 */
210 * Setup a PMP to permit access to all of memory. Some machines may
211 * not implement PMPs, so we set up a quick trap handler to just skip
212 * touching the PMPs on any trap.
218 csrw CSR_PMPADDR0, a0
219 li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
225 * The hartid in a0 is expected later on, and we have no firmware
229 #endif /* CONFIG_RISCV_M_MODE */
231 /* Load the global pointer */
235 * Disable FPU & VECTOR to detect illegal usage of
236 * floating point or vector in kernel space
241 #ifdef CONFIG_RISCV_BOOT_SPINWAIT
242 li t0, CONFIG_NR_CPUS
243 blt a0, t0, .Lgood_cores
244 tail .Lsecondary_park
247 /* The lottery system is only required for spinwait booting method */
248 #ifndef CONFIG_XIP_KERNEL
249 /* Pick one hart to run the main boot sequence */
252 amoadd.w a3, a2, (a3)
253 bnez a3, .Lsecondary_start
256 /* hart_lottery in flash contains a magic number */
260 XIP_FIXUP_FLASH_OFFSET a3
262 amoswap.w t0, t1, (a2)
263 /* first time here if hart_lottery in RAM is not set */
264 beq t0, t1, .Lsecondary_start
266 #endif /* CONFIG_XIP */
267 #endif /* CONFIG_RISCV_BOOT_SPINWAIT */
269 #ifdef CONFIG_XIP_KERNEL
270 la sp, _end + THREAD_SIZE
276 /* Restore a0 & a1 copy */
281 #ifndef CONFIG_XIP_KERNEL
282 /* Clear BSS for flat non-ELF images */
285 ble a4, a3, .Lclear_bss_done
288 add a3, a3, RISCV_SZPTR
289 blt a3, a4, .Lclear_bss
292 la a2, boot_cpu_hartid
296 /* Initialize page tables and relocate to virtual addresses */
298 la sp, init_thread_union + THREAD_SIZE
300 addi sp, sp, -PT_SIZE_ON_STACK
302 #ifdef CONFIG_BUILTIN_DTB
307 #endif /* CONFIG_BUILTIN_DTB */
308 /* Set trap vector to spin forever to help debug */
309 la a3, .Lsecondary_park
315 call relocate_enable_mmu
316 #endif /* CONFIG_MMU */
318 call .Lsetup_trap_vector
319 /* Restore C environment */
321 la sp, init_thread_union + THREAD_SIZE
322 addi sp, sp, -PT_SIZE_ON_STACK
326 call kasan_early_init
328 /* Start the kernel */
332 #ifdef CONFIG_RISCV_BOOT_SPINWAIT
334 /* Set trap vector to spin forever to help debug */
335 la a3, .Lsecondary_park
339 la a1, __cpu_spinwait_stack_pointer
341 la a2, __cpu_spinwait_task_pointer
347 * This hart didn't win the lottery, so we wait for the winning hart to
348 * get far enough along the boot process that it should continue.
351 /* FIXME: We should WFI to save some energy here. */
354 beqz sp, .Lwait_for_cpu_up
355 beqz tp, .Lwait_for_cpu_up
358 tail .Lsecondary_start_common
359 #endif /* CONFIG_RISCV_BOOT_SPINWAIT */
361 SYM_CODE_END(_start_kernel)
363 #ifdef CONFIG_RISCV_M_MODE
/*
 * reset_regs: bring CPU state to a known baseline on M-mode boot
 * (local symbol; callers within this file only). Clears FP registers
 * when F/D is reported in the hwcap mask and vector state when V is
 * present; in both cases the CALLER must clear SR_FS / SR_VS
 * afterwards, as noted below. NOTE(review): interior lines (the
 * actual register-clearing sequences) are elided in this listing;
 * stray leading numbers are extraction artifacts kept verbatim.
 */
364 SYM_CODE_START_LOCAL(reset_regs)
397 andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
398 beqz t0, .Lreset_regs_done_fpu
435 /* note that the caller must clear SR_FS */
436 .Lreset_regs_done_fpu:
437 #endif /* CONFIG_FPU */
439 #ifdef CONFIG_RISCV_ISA_V
441 li t1, COMPAT_HWCAP_ISA_V
443 beqz t0, .Lreset_regs_done_vector
446 * Clear vector registers and reset vcsr.
447 * VLMAX has a defined value, VLEN is a constant,
448 * and this form of vsetvli is defined to set vl to VLMAX.
453 vsetvli t1, x0, e8, m8, ta, ma
458 /* note that the caller must clear SR_VS */
459 .Lreset_regs_done_vector:
460 #endif /* CONFIG_RISCV_ISA_V */
462 SYM_CODE_END(reset_regs)
463 #endif /* CONFIG_RISCV_M_MODE */