1 /* SPDX-License-Identifier: GPL-2.0 */
3 * rtrap.S: Return from Sparc trap low-level code.
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
9 #include <asm/ptrace.h>
13 #include <asm/contregs.h>
14 #include <asm/winmacro.h>
15 #include <asm/asmmacro.h>
16 #include <asm/thread_info.h>
26 /* 7 WINDOW SPARC PATCH INSTRUCTIONS */
27 .globl rtrap_7win_patch1, rtrap_7win_patch2, rtrap_7win_patch3
28 .globl rtrap_7win_patch4, rtrap_7win_patch5
	/* Replacement instructions for CPUs with 7 register windows
	 * instead of 8: the WIM rotate amount becomes 6 (= nwindows - 1)
	 * and the window mask becomes 0x7f (7 valid bits) rather than
	 * 0xff.  Presumably these are patched over the rtrap_patch1..5
	 * sites below at boot once nwindows is known -- the patching
	 * code itself is outside this excerpt.
	 */
29 rtrap_7win_patch1: srl %t_wim, 0x6, %glob_tmp
30 rtrap_7win_patch2: and %glob_tmp, 0x7f, %glob_tmp
31 rtrap_7win_patch3: srl %g1, 7, %g2
32 rtrap_7win_patch4: srl %g2, 6, %g2
33 rtrap_7win_patch5: and %g1, 0x7f, %g1
34 /* END OF PATCH INSTRUCTIONS */
36 /* We need to check for a few things which are:
37 * 1) The need to call schedule() because this
38 * process's quantum is up.
39 * 2) Pending signals for this process, if any
40 * exist we need to call do_signal() to do
43 * Else we just check if the rett would land us
44 * in an invalid window, if so we need to grab
45 * it off the user/kernel stack first.
48 .globl ret_trap_entry, rtrap_patch1, rtrap_patch2
49 .globl rtrap_patch3, rtrap_patch4, rtrap_patch5
50 .globl ret_trap_lockless_ipi
52 ret_trap_lockless_ipi:
	/* Did we trap from supervisor mode?  PSR_PS is the saved
	 * previous-supervisor bit of %t_psr.  The branch that consumes
	 * this condition code is in lines not visible in this excerpt.
	 */
53 andcc %t_psr, PSR_PS, %g0
	/* Strip the syscall marker bit from the saved %psr. */
54 sethi %hi(PSR_SYSCALL), %g1
56 andn %t_psr, %g1, %t_psr
	/* Check 1: does this task need to reschedule?  (The branch to
	 * the schedule() call is outside the visible lines.) */
63 ld [%curptr + TI_FLAGS], %g2
64 andcc %g2, (_TIF_NEED_RESCHED), %g0
	/* Check 2: pending signal / notify-resume work.  If none, skip
	 * ahead to ret_trap_continue; the annulled delay slot reloads
	 * the saved %psr and executes only when the branch is taken. */
71 ld [%curptr + TI_FLAGS], %g2
73 andcc %g2, _TIF_DO_NOTIFY_RESUME_MASK, %g0
74 bz,a ret_trap_continue
75 ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr
80 add %sp, STACKFRAME_SZ, %o0 ! pt_regs ptr
	/* The call that consumes %o0 (presumably the signal /
	 * notify-resume helper) is in lines not shown here. */
83 ld [%curptr + TI_FLAGS], %g2
	/* Strip the syscall marker bit again after the helper ran. */
86 sethi %hi(PSR_SYSCALL), %g1
87 andn %t_psr, %g1, %t_psr
	/* Are any user windows still parked in the thread's window-save
	 * buffer?  (The branch on this condition is not visible here.) */
91 ld [%curptr + TI_W_SAVED], %twin_tmp1
92 orcc %g0, %twin_tmp1, %g0
	/* wr rs1, imm, %psr writes rs1 XOR imm, so this toggles PSR_ET
	 * (enable traps) before calling into C. */
96 wr %t_psr, PSR_ET, %psr
	/* Flush the buffered user windows out to the user stack;
	 * pt_regs pointer is set up in the call's delay slot. */
100 call try_to_clear_window_buffer
101 add %sp, STACKFRAME_SZ, %o0
104 ld [%curptr + TI_FLAGS], %g2
107 /* Load up the user's out registers so we can pull
108 * a window from the stack, if necessary.
112 /* If there are already live user windows in the
113 * set we can return from trap safely.
	/* Nonzero TI_UWINMASK => at least one user window is resident in
	 * the register file, so the rett will not underflow. */
115 ld [%curptr + TI_UWINMASK], %twin_tmp1
116 orcc %g0, %twin_tmp1, %twin_tmp1
117 bne ret_trap_userwins_ok
120 /* Calculate new %wim, we have to pull a register
121 * window from the users stack.
123 ret_trap_pull_one_window:
	/* Rotate the window-invalid mask left by one position:
	 *   new_wim = ((wim << 1) | (wim >> (NW - 1))) & ((1 << NW) - 1)
	 * with NW = 8 as written; the srl/and at rtrap_patch1/2 are the
	 * sites repatched for 7-window CPUs (see rtrap_7win_patch*). */
125 sll %t_wim, 0x1, %twin_tmp1
126 rtrap_patch1: srl %t_wim, 0x7, %glob_tmp
127 or %glob_tmp, %twin_tmp1, %glob_tmp
128 rtrap_patch2: and %glob_tmp, 0xff, %glob_tmp
130 wr %glob_tmp, 0x0, %wim
132 /* Here comes the architecture specific
133 * branch to the user stack checking routine
134 * for return from traps.
136 b srmmu_rett_stackchk
139 ret_trap_userwins_ok:
	/* Reload the privileged trap state (%psr, %pc, %npc) from the
	 * pt_regs frame on the kernel stack. */
140 LOAD_PT_PRIV(sp, t_psr, t_pc, t_npc)
	/* OR pc and npc together so a single test can detect a
	 * misaligned low bit in either; the andcc/conditional branch
	 * consuming %g2 is in lines not visible in this excerpt. */
141 or %t_pc, %t_npc, %g2
143 sethi %hi(PSR_SYSCALL), %g2
145 andn %t_psr, %g2, %t_psr
	/* Delay slot passes the pt_regs pointer as arg 0. */
147 b ret_trap_unaligned_pc
148 add %sp, STACKFRAME_SZ, %o0
160 ret_trap_unaligned_pc:
	/* Arguments 1..3 for do_memaccess_unaligned: the saved user
	 * %pc, %npc and %psr (arg 0, pt_regs, is presumably set up in a
	 * line not shown here). */
161 ld [%sp + STACKFRAME_SZ + PT_PC], %o1
162 ld [%sp + STACKFRAME_SZ + PT_NPC], %o2
163 ld [%sp + STACKFRAME_SZ + PT_PSR], %o3
	/* Restore the trap-time %wim before re-enabling traps, so any
	 * window overflow inside the handler uses a valid mask. */
165 wr %t_wim, 0x0, %wim ! or else...
	/* wr XORs its operands: toggle PSR_ET to enable traps for C. */
167 wr %t_psr, PSR_ET, %psr
	/* The call's delay-slot instruction is outside this excerpt. */
170 call do_memaccess_unaligned
174 ld [%curptr + TI_FLAGS], %g2
177 /* Will the rett land us in the invalid window? */
	/* %g1 is prepared in lines not shown -- presumably a one-hot
	 * mask for the window rett will enter; the srl/or pair folds a
	 * rotate, and rtrap_patch3/4/5 are the sites repatched on
	 * 7-window CPUs (shift 7, mask 0x7f). */
180 rtrap_patch3: srl %g1, 8, %g2
184 be 1f ! Nope, just return from the trap
187 /* We have to grab a window before returning. */
188 rtrap_patch4: srl %g2, 7, %g2
190 rtrap_patch5: and %g1, 0xff, %g1
194 /* Grrr, make sure we load from the right %sp... */
195 LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
	/* Move down one window (no register movement: %g0 operands),
	 * into the frame the rett will return to. */
197 restore %g0, %g0, %g0
202 /* Reload the entire frame in case this is from a
203 * kernel system call or whatever...
206 LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
	/* Drop the syscall marker bit from the freshly loaded %psr. */
208 sethi %hi(PSR_SYSCALL), %twin_tmp1
209 andn %t_psr, %twin_tmp1, %t_psr
216 ret_trap_user_stack_is_bolixed:
	/* The user stack we must pull a window from is unusable
	 * (misaligned or unmapped).  Re-enable traps (wr XORs PSR_ET
	 * into %psr) and hand off to C; pt_regs pointer goes in the
	 * call's delay slot. */
219 wr %t_psr, PSR_ET, %psr
222 call window_ret_fault
223 add %sp, STACKFRAME_SZ, %o0
226 ld [%curptr + TI_FLAGS], %g2
228 .globl srmmu_rett_stackchk
	/* Validate the user %sp before pulling a register window from
	 * it.  The andcc feeding this first bne (presumably the stack
	 * alignment test) is in lines not visible in this excerpt.
	 * LEON_PI/SUN_PI_ select the LEON vs Sun-MMU ASI variant of
	 * each access -- TODO confirm against the macro definitions. */
230 bne ret_trap_user_stack_is_bolixed
231 sethi %hi(PAGE_OFFSET), %g1
	/* The user stack must lie below PAGE_OFFSET (a user address);
	 * note the unsigned compare (bleu). */
233 bleu ret_trap_user_stack_is_bolixed
	/* Probe the MMU via alternate-space loads; presumably this pair
	 * and the ones below save the MMU control state and switch to
	 * no-fault mode so a bad user page does not re-trap here. */
235 LEON_PI(lda [%g1] ASI_LEON_MMUREGS, %g0)
236 SUN_PI_(lda [%g1] ASI_M_MMUREGS, %g0)
238 LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %g1)
239 SUN_PI_(lda [%g0] ASI_M_MMUREGS, %g1)
241 LEON_PI(sta %g1, [%g0] ASI_LEON_MMUREGS)
242 SUN_PI_(sta %g1, [%g0] ASI_M_MMUREGS)
	/* Slide into the window being restored; the actual user-stack
	 * loads are in lines not shown here. */
244 restore %g0, %g0, %g0
	/* Restore the saved MMU control value... */
251 LEON_PI(sta %g1, [%g0] ASI_LEON_MMUREGS)
252 SUN_PI_(sta %g1, [%g0] ASI_M_MMUREGS)
	/* ...then read back what is presumably the fault status /
	 * address pair to see whether the probe loads faulted. */
255 LEON_PI(lda [%g2] ASI_LEON_MMUREGS, %g2)
256 SUN_PI_(lda [%g2] ASI_M_MMUREGS, %g2)
259 LEON_PI(lda [%g1] ASI_LEON_MMUREGS, %g1)
260 SUN_PI_(lda [%g1] ASI_M_MMUREGS, %g1)
	/* No fault recorded: the window load succeeded, return path is
	 * safe; otherwise fall through to the bolixed handler. */
262 be ret_trap_userwins_ok
265 b,a ret_trap_user_stack_is_bolixed