/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#if !IS_ENABLED(CONFIG_PREEMPTION)
.set resume_kernel, restore_all
#endif
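/*
 * Note: with CONFIG_PREEMPTION disabled there is no kernel preemption
 * path, so the resume_kernel label above simply aliases to restore_all.
 */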
ENTRY(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer.  If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
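	/*
	 * csrrw swaps tp and CSR_SCRATCH in a single instruction, roughly
	 * { tmp = scratch; scratch = tp; tp = tmp; }, so the user tp is
	 * parked in the CSR while we test the previous scratch value.
	 */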
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context
_restore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
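	/*
	 * sp now points at a struct pt_regs frame carved out of the kernel
	 * stack; the trapped context's registers are spilled into it.
	 */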
	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)
	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel.
	 */
	csrw CSR_SCRATCH, x0
	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop
	la ra, ret_from_exception
	/*
	 * The MSB of the cause register differentiates between interrupts
	 * (MSB set) and exceptions (MSB clear).
	 */
	bge s4, zero, 1f

	/* Handle interrupts */
	move a0, sp /* pt_regs */
	tail do_IRQ
1:
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of SR_PIE in m/sstatus.
	 */
	andi t0, s1, SR_PIE
	beqz t0, 1f
	csrs CSR_STATUS, SR_IE

1:
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall
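	/*
	 * An ecall from userspace traps with cause EXC_SYSCALL and is
	 * intercepted here rather than dispatched through excp_vect_table
	 * (see the do_trap_ecall_u entry in the table below).
	 */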
	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown
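	/*
	 * In rough C terms, the dispatch above is (illustrative sketch
	 * only, not kernel code):
	 *
	 *	if (cause < NR_ENTRIES)
	 *		excp_vect_table[cause](regs);
	 *	else
	 *		do_trap_unknown(regs);
	 *
	 * where each table entry is one RISCV_PTR wide, hence the
	 * shift-by-RISCV_LGPTR to form a byte offset.
	 */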
handle_syscall:
	/* Save the initial a0 value (needed in signal handlers). */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret
	 */
	addi s2, s2, 0x4
	REG_S s2, PT_EPC(sp)
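	/*
	 * The scall/ecall instruction is 4 bytes wide, hence the fixed
	 * increment: resuming at sepc + 4 skips it on return instead of
	 * re-issuing the syscall.
	 */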
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/*
	 * Syscall number held in a7.
	 * If syscall number is above allowed value, redirect to ni_syscall.
	 */
	bge a7, t0, 1f
	/*
	 * Check if syscall is rejected by tracer, i.e., a7 == -1.
	 * If yes, we pretend it was executed.
	 */
	li t1, -1
	beq a7, t1, ret_from_syscall_rejected
	blt a7, t1, 1f
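	/*
	 * The table walk below is, in rough C terms (illustrative only):
	 *
	 *	syscall_fn = sys_call_table[a7];
	 *
	 * Each entry is one pointer wide, so the byte offset is the
	 * syscall number shifted left by RISCV_LGPTR.
	 */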
	/* Call syscall */
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
1:
	jalr s0
ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/*
	 * We didn't execute the actual syscall.
	 * Seccomp already set the return value for the current task's
	 * pt_regs (if it was configured with SECCOMP_RET_ERRNO/TRACE).
	 */
ret_from_syscall_rejected:
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit
ret_from_exception:
	REG_L s0, PT_STATUS(sp)
	csrc CSR_STATUS, SR_IE
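	/*
	 * SR_SPP (or SR_MPP when the kernel runs in M-mode) records the
	 * privilege level the trap was taken from; a non-zero value means
	 * we are returning to kernel context rather than to userspace.
	 */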
#ifdef CONFIG_RISCV_M_MODE
	/* The MPP value is too large to be used as an immediate arg for addi. */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, resume_kernel
resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically. */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending
	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)
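	/*
	 * The next trap taken from userspace will reload this value from
	 * TASK_TI_KERNEL_SP and start from an empty kernel stack.
	 */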
	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp

restore_all:
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts.  We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS.  As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context.  While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
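	/*
	 * For illustration, the LR/SC compare-and-swap pattern the comment
	 * above refers to (a sketch, not code used here):
	 *
	 *	1: lr.w t0, (a0)       # load-reserve the old value
	 *	   bne  t0, a1, 2f     # mismatch: branch over the SC,
	 *	                       # leaving the reservation dangling
	 *	   sc.w t1, a2, (a0)   # store-conditional the new value
	 *	   bnez t1, 1b         # retry if the reservation was lost
	 *	2:
	 */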
	REG_L  a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif
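/*
 * Kernel preemption path: only reached when returning to kernel context
 * with CONFIG_PREEMPTION enabled (otherwise resume_kernel aliases to
 * restore_all, see the top of this file).
 */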
#if IS_ENABLED(CONFIG_PREEMPTION)
resume_kernel:
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j restore_all
#endif
work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched
work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule
	/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	move t0, a0
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	bnez t0, ret_from_syscall_rejected
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception
END(handle_exception)
ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)
ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)
/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *   a0: previous task_struct (must be preserved across the switch)
 *   a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
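/*
 * In rough C terms the contract is (illustrative prototype only):
 *
 *	struct task_struct *__switch_to(struct task_struct *prev,
 *					struct task_struct *next);
 *
 * Execution resumes on next's kernel stack, at the ra value restored
 * from next->thread below.
 */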
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li    a4,  TASK_THREAD_RA
	add   a3, a0, a4
	add   a4, a1, a4
	REG_S ra,  TASK_THREAD_RA_RA(a3)
	REG_S sp,  TASK_THREAD_SP_RA(a3)
	REG_S s0,  TASK_THREAD_S0_RA(a3)
	REG_S s1,  TASK_THREAD_S1_RA(a3)
	REG_S s2,  TASK_THREAD_S2_RA(a3)
	REG_S s3,  TASK_THREAD_S3_RA(a3)
	REG_S s4,  TASK_THREAD_S4_RA(a3)
	REG_S s5,  TASK_THREAD_S5_RA(a3)
	REG_S s6,  TASK_THREAD_S6_RA(a3)
	REG_S s7,  TASK_THREAD_S7_RA(a3)
	REG_S s8,  TASK_THREAD_S8_RA(a3)
	REG_S s9,  TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra,  TASK_THREAD_RA_RA(a4)
	REG_L sp,  TASK_THREAD_SP_RA(a4)
	REG_L s0,  TASK_THREAD_S0_RA(a4)
	REG_L s1,  TASK_THREAD_S1_RA(a4)
	REG_L s2,  TASK_THREAD_S2_RA(a4)
	REG_L s3,  TASK_THREAD_S3_RA(a4)
	REG_L s4,  TASK_THREAD_S4_RA(a4)
	REG_L s5,  TASK_THREAD_S5_RA(a4)
	REG_L s6,  TASK_THREAD_S6_RA(a4)
	REG_L s7,  TASK_THREAD_S7_RA(a4)
	REG_L s8,  TASK_THREAD_S8_RA(a4)
	REG_L s9,  TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
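	/*
	 * After the swap, next's thread_info::cpu holds this CPU's id
	 * (taken from prev), while prev keeps next's old value until it is
	 * switched in again somewhere.
	 */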
#if TASK_TI != 0
#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
#endif
	move tp, a1
	ret
ENDPROC(__switch_to)
#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif
	.section ".rodata"
	/* Exception vector table */
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	RISCV_PTR do_trap_insn_fault
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	RISCV_PTR do_page_fault   /* instruction page fault */
	RISCV_PTR do_page_fault   /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault   /* store page fault */
excp_vect_table_end:
END(excp_vect_table)
#ifndef CONFIG_MMU
ENTRY(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	scall
END(__user_rt_sigreturn)
#endif
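/*
 * Note: "scall" is the original mnemonic for the instruction the RISC-V
 * spec now calls "ecall"; assemblers accept both.
 */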