/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
/*
 * Prepares to enter a system call or exception by saving all registers to the
 * stack.
 */
	.macro SAVE_ALL
	LOCAL _restore_kernel_tpsp
	LOCAL _save_context

	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
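
	/*
	 * Illustrative note (not in the original file): the CSR_SCRATCH
	 * convention assumed by the swap above is
	 *
	 *	running in user mode:   CSR_SCRATCH = kernel tp, tp = user tp
	 *	running in kernel mode: CSR_SCRATCH = 0,         tp = kernel tp
	 *
	 * so after "csrrw tp, CSR_SCRATCH, tp", tp is zero exactly when the
	 * trap was taken from kernel mode.
	 */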
	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)
	.endm
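
/*
 * Illustrative note: csrrc atomically reads CSR_STATUS into s1 and clears
 * the SR_SUM | SR_FS bits named in t0, so the saved PT_STATUS still records
 * the state at trap time while user-memory access and the FPU are disabled
 * for the kernel from here on.
 */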
/*
 * Prepares to return from a system call or exception by restoring all
 * registers from the stack.
 */
	.macro RESTORE_ALL
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L  a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)
	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2
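
	/*
	 * Sketch (not part of the original file) of the branch-around-SC CAS
	 * the comment above refers to; on a failed compare, the reservation
	 * acquired by lr.w is still live when we branch out:
	 *
	 *	1: lr.w t0, (a0)        # acquires a reservation on *a0
	 *	   bne  t0, a1, 2f      # compare fails: branch around the SC
	 *	   sc.w t1, a2, (a0)
	 *	   bnez t1, 1b
	 *	2:                      # reservation may still be dangling here
	 *
	 * The dummy store-conditional above is what clears it.
	 */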
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	.endm

#if !IS_ENABLED(CONFIG_PREEMPTION)
.set resume_kernel, restore_all
#endif
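
/*
 * Illustrative note: without kernel preemption there is no resume_kernel
 * slow path, so the symbol is simply aliased to restore_all and a trap
 * taken from kernel mode returns through a plain register restore.
 */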
ENTRY(handle_exception)
	SAVE_ALL

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	la ra, ret_from_exception
	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	/* Handle interrupts */
	move a0, sp /* pt_regs */
	tail do_IRQ
1:
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of SR_PIE in m/sstatus.
	 */
	andi t0, s1, SR_PIE
	beqz t0, 1f
	csrs CSR_STATUS, SR_IE

1:
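	/*
	 * Illustrative note: SR_PIE is the previous interrupt-enable bit,
	 * stashed by the hardware when the trap was taken, so this re-enables
	 * interrupts only if the interrupted context had them enabled.
	 */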
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall
	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown
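
	/*
	 * Sketch of the arithmetic above (illustrative): each table entry is
	 * one pointer wide, so for exception cause N the handler lives at
	 *
	 *	excp_vect_table + (N << RISCV_LGPTR)
	 *
	 * with RISCV_LGPTR being 2 on rv32 and 3 on rv64; anything at or past
	 * excp_vect_table_end falls back to do_trap_unknown.
	 */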
handle_syscall:
	/* save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret
	 */
	addi s2, s2, 0x4
	REG_S s2, PT_EPC(sp)
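	/*
	 * Illustrative note: the fixed +4 is safe because ECALL has no
	 * compressed encoding; the trapping instruction is always 4 bytes.
	 */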
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/*
	 * Syscall number held in a7.
	 * If syscall number is above allowed value, redirect to ni_syscall.
	 */
	bge a7, t0, 1f
	/*
	 * Check if syscall is rejected by tracer, i.e., a7 == -1.
	 * If yes, we pretend it was executed.
	 */
	li t1, -1
	beq a7, t1, ret_from_syscall_rejected
	blt a7, t1, 1f

	/* Call syscall */
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
1:
	jalr s0
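
	/*
	 * Illustrative summary of the dispatch above: s0 ends up holding
	 * either sys_ni_syscall (out-of-range a7) or the sys_call_table
	 * entry indexed by a7, and the jalr invokes it with the user's
	 * a0..a5 still live; the return value is stored back below.
	 */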
ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/*
	 * We didn't execute the actual syscall.
	 * Seccomp already set return value for the current task pt_regs.
	 * (If it was configured with SECCOMP_RET_ERRNO/TRACE)
	 */
ret_from_syscall_rejected:
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit
ret_from_exception:
	REG_L s0, PT_STATUS(sp)
	csrc CSR_STATUS, SR_IE
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for addi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, resume_kernel
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending
	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp

restore_all:
	RESTORE_ALL
#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif
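
/*
 * Illustrative note: the "csrw CSR_SCRATCH, tp" above re-arms the entry
 * convention sketched in SAVE_ALL -- while the hart runs in user mode, the
 * scratch CSR once again holds the kernel thread pointer.
 */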
#if IS_ENABLED(CONFIG_PREEMPTION)
resume_kernel:
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j restore_all
#endif
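
/*
 * Illustrative note: kernel preemption is taken here only when the preempt
 * count is zero and _TIF_NEED_RESCHED is set; in every other case the trap
 * branches back to restore_all and returns without rescheduling.
 */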
work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched

	/* Handle pending signals and notify-resume requests */
	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule
	/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	move t0, a0
	REG_L a0, PT_A0(sp)
	bnez t0, ret_from_syscall_rejected
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception
END(handle_exception)

ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)
ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)
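
/*
 * Illustrative note: for a kernel thread, copy_thread() stashes the thread
 * function in s0 and its argument in s1, which is what the
 * "move a0, s1; jr s0" pair above consumes.
 */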
/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *   a0: previous task_struct (must be preserved across the switch)
 *   a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li    a4,  TASK_THREAD_RA
	add   a3, a0, a4
	add   a4, a1, a4
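	/*
	 * Illustrative note: TASK_THREAD_RA is the offset of thread.ra within
	 * task_struct, and the TASK_THREAD_*_RA offsets below are relative to
	 * thread.ra, so a3 and a4 point into prev->thread and next->thread
	 * respectively.
	 */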
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
#if TASK_TI != 0
#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
	addi tp, a1, TASK_TI
#else
	move tp, a1
#endif
	ret
ENDPROC(__switch_to)
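
/*
 * Illustrative note: the lw/sw pairs above exchange the two tasks'
 * thread_info.cpu fields, so the incoming task (a1) records the hart it is
 * about to run on, namely the one the outgoing task (a0) was using.
 */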

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif
	.section ".rodata"
	/* Exception vector table */
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	RISCV_PTR do_trap_insn_fault
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	RISCV_PTR do_page_fault   /* instruction page fault */
	RISCV_PTR do_page_fault   /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault   /* store page fault */
excp_vect_table_end:
END(excp_vect_table)

#ifndef CONFIG_MMU
ENTRY(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	scall
END(__user_rt_sigreturn)
#endif