1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2005-2017 Andes Technology Corporation
4 #include <linux/linkage.h>
5 #include <asm/unistd.h>
6 #include <asm/assembler.h>
8 #include <asm/asm-offsets.h>
9 #include <asm/thread_info.h>
10 #include <asm/current.h>
! restore_user_regs_first: first half of the register-restore sequence
! used on every return to user space. NOTE(review): several interior
! lines and the closing .endm are elided in this excerpt.
23 .macro restore_user_regs_first
26 #if defined(CONFIG_FPU)
! FPU-enabled kernels: position $sp at the saved-OSP slot of the
! pt_regs frame, then pop $r12..$r25 with one load-multiple.
27 addi $sp, $sp, OSP_OFFSET
28 lmw.adm $r12, [$sp], $r25, #0x0
! Load the 'has_fpu' byte flag via a hi20/lo12 symbol-address pair;
! presumably gates an FPU-state restore — the test/branch consuming
! $p0 is elided in this excerpt, confirm against the full file.
29 sethi $p0, hi20(has_fpu)
30 lbsi $p0, [$p0+lo12(has_fpu)]
! Non-FPU variant (the interleaved #else is elided): restore
! $r12..$r24 starting at the frame's fucop_ctl slot.
35 addi $sp, $sp, FUCOP_CTL_OFFSET
36 lmw.adm $r12, [$sp], $r24, #0x0
! Pop special registers selected by the #0xe enable bits.
! NOTE(review): exact register set per the lmw.adm encoding — confirm
! in the AndeStar ISA manual.
49 lmw.adm $sp, [$sp], $sp, #0xe
! restore_user_regs_last: second half of the restore sequence (the
! final iret lives here in the full file). Its body and .endm are not
! visible in this excerpt.
52 .macro restore_user_regs_last
! restore_user_regs: full restore used by the slow return path.
! Reloads $r0..$r25 from the frame (so $r0 — the syscall return value
! respilled by the slow path — is restored too), then steps over the
! saved-OSP slot. NOTE(review): closing .endm elided in this excerpt.
61 .macro restore_user_regs
62 restore_user_regs_first
63 lmw.adm $r0, [$sp], $r25, #0x0
64 addi $sp, $sp, OSP_OFFSET
65 restore_user_regs_last
! fast_restore_user_regs: fast-syscall variant. Starts the reload at
! $r1 so $r0 (the live syscall return value) is left untouched; the
! stack adjustment is therefore 4 bytes smaller than in
! restore_user_regs. NOTE(review): closing .endm elided.
68 .macro fast_restore_user_regs
69 restore_user_regs_first
70 lmw.adm $r1, [$sp], $r25, #0x0
71 addi $sp, $sp, OSP_OFFSET-4
72 restore_user_regs_last
75 #ifdef CONFIG_PREEMPTION
! Without kernel preemption a return into kernel mode has nothing to
! do, so resume_kernel aliases straight to no_work_pending.
! NOTE(review): the intervening #else between these two lines is
! elided in this excerpt.
83 #define resume_kernel no_work_pending
! Common exception/interrupt return: decide whether we are returning
! to kernel or to user context. (Prologue lines elided in this
! excerpt.)
86 ENTRY(ret_from_exception)
91 * judge Kernel or user mode
! Nonzero interrupt-nesting level in the saved IPSW means we are
! returning into the kernel: take resume_kernel (a plain
! no_work_pending unless CONFIG_PREEMPTION is set).
94 lwi $p0, [$sp+(#IPSW_OFFSET)] ! Check if in nested interrupt
95 andi $p0, $p0, #PSW_mskINTL
96 bnez $p0, resume_kernel ! done with iret
101 * This is the fast syscall return path. We do as little as
102 * possible here, and this includes saving $r0 back into the SVC
104 * fixed: tsk - $r25, syscall # - $r7, syscall table pointer - $r8
! (Comment block partially elided above; 'tsk' is register $r25 per
! the register-assignment note.)
106 ENTRY(ret_fast_syscall)
! If any work flag is set in thread_info->flags, divert to the slow
! path; otherwise restore registers without touching $r0 and iret.
108 lwi $r1, [tsk+#TSK_TI_FLAGS]
109 andi $p1, $r1, #_TIF_WORK_MASK
110 bnez $p1, fast_work_pending
111 fast_restore_user_regs ! iret
114 * Ok, we need to do extra processing:
115 * enter the slow path returning from syscall, while work is pending.
! fast_work_pending (label elided): spill $r0 into the frame so the
! slow path can reload the syscall return value later.
118 swi $r0, [$sp+(#R0_OFFSET)] ! what is different from ret_from_exception
! work_pending (label elided): dispatch on the pending-work bits in
! $r1 (loaded from thread_info->flags by the caller path).
120 andi $p1, $r1, #_TIF_NEED_RESCHED
121 bnez $p1, work_resched
123 andi $p1, $r1, #_TIF_SIGPENDING|#_TIF_NOTIFY_RESUME
124 beqz $p1, no_work_pending
! Signal / notify-resume handling: pass pt_regs in $r0. NOTE(review):
! the actual call (presumably do_notify_resume) is elided here.
126 move $r0, $sp ! 'regs'
! work_resched (label elided): reschedule, then retake the slow path.
131 bal schedule ! path, return to user mode
134 * "slow" syscall return path.
! Shared by resume_userspace and the slow syscall exit.
136 ENTRY(resume_userspace)
137 ENTRY(ret_slow_syscall)
! Returning into a nested interrupt: skip all work checks and leave.
139 lwi $p0, [$sp+(#IPSW_OFFSET)] ! Check if in nested interrupt
140 andi $p0, $p0, #PSW_mskINTL
141 bnez $p0, no_work_pending ! done with iret
! Handle any pending work (reschedule, signal, notify-resume) before
! dropping back to user mode; otherwise fall through to
! no_work_pending (label not visible in this excerpt).
142 lwi $r1, [tsk+#TSK_TI_FLAGS]
143 andi $p1, $r1, #_TIF_WORK_MASK
144 bnez $p1, work_pending ! handle work_resched, sig_pend
! no_work_pending (label elided): optionally trace the final hardirq
! state, then restore the full user register set and iret.
147 #ifdef CONFIG_TRACE_IRQFLAGS
148 lwi $p0, [$sp+(#IPSW_OFFSET)]
! Select the hardirqs-on/off tracepoint matching the interrupt-enable
! bit of the saved IPSW. NOTE(review): the test/branch/call consuming
! $r9/$r10 (and the matching #endif) are elided in this excerpt.
150 la $r10, __trace_hardirqs_off
151 la $r9, __trace_hardirqs_on
155 restore_user_regs ! return from iret
161 #ifdef CONFIG_PREEMPTION
! resume_kernel (label elided): preempt the interrupted kernel context
! only when it is safe — preempt_count is zero, NEED_RESCHED is set,
! and interrupts were enabled in the interrupted context.
164 lwi $t0, [tsk+#TSK_TI_PREEMPT]
165 bnez $t0, no_work_pending
167 lwi $t0, [tsk+#TSK_TI_FLAGS]
168 andi $p1, $t0, #_TIF_NEED_RESCHED
169 beqz $p1, no_work_pending
! NOTE(review): the mask applied to the loaded IPSW before this
! zero-test is elided in this excerpt — confirm against the full file.
171 lwi $t0, [$sp+(#IPSW_OFFSET)] ! Interrupts off?
173 beqz $t0, no_work_pending
175 jal preempt_schedule_irq
180 * This is how we return from a fork.
! ret_from_fork (ENTRY elided): a kernel thread carries its function
! in $r6 and its argument in $r7; for a user-space fork $r6 is zero
! and we skip straight to the post-fork label.
184 beqz $r6, 1f ! r6 stores fn for kernel thread
185 move $r0, $r7 ! prepare kernel thread arg
! 1: (numeric label elided) — if the child is being syscall-traced,
! report syscall exit before taking the normal slow return path.
188 lwi $r1, [tsk+#TSK_TI_FLAGS] ! check for syscall tracing
189 andi $p1, $r1, #_TIF_WORK_SYSCALL_LEAVE ! are we tracing syscalls?
190 beqz $p1, ret_slow_syscall
192 bal syscall_trace_leave