// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
#include <linux/freezer.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpu.h>

#include <asm/ptrace.h>
#include <asm/vdso.h>

struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};
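/*
 * FPU signal-frame helpers: save the current FPU state into, and restore
 * it from, the sigcontext.  Only built when the kernel has FPU support.
 */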
#if IS_ENABLED(CONFIG_FPU)
static inline int restore_sigcontext_fpu(struct pt_regs *regs,
					 struct sigcontext __user *sc)
{
	struct task_struct *tsk = current;
	unsigned long used_math_flag;
	int ret = 0;

	clear_used_math();
	__get_user_error(used_math_flag, &sc->used_math_flag, ret);
	if (!used_math_flag)
		return 0;
	set_used_math();

#if IS_ENABLED(CONFIG_LAZY_FPU)
	preempt_disable();
	if (current == last_task_used_math) {
		last_task_used_math = NULL;
		disable_ptreg_fpu(regs);
	}
	preempt_enable();
#endif

	return __copy_from_user(&tsk->thread.fpu, &sc->fpu,
				sizeof(struct fpu_struct));
}
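/*
 * Save the current task's FPU state into the sigcontext so the signal
 * handler sees a consistent FPU context.
 */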
static inline int setup_sigcontext_fpu(struct pt_regs *regs,
				       struct sigcontext __user *sc)
{
	struct task_struct *tsk = current;
	int ret = 0;

	__put_user_error(used_math(), &sc->used_math_flag, ret);

	if (!used_math())
		return ret;

	preempt_disable();
#if IS_ENABLED(CONFIG_LAZY_FPU)
	if (last_task_used_math == tsk)
		save_fpu(last_task_used_math);
#endif
	ret = __copy_to_user(&sc->fpu, &tsk->thread.fpu,
			     sizeof(struct fpu_struct));
	preempt_enable();

	return ret;
}
#endif
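/*
 * Rebuild the kernel's view of the user registers from the ucontext that
 * sys_rt_sigreturn() finds on the user stack.
 */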
static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user * sf)
{
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0) {
		set_current_blocked(&set);
	}
	__get_user_error(regs->uregs[0], &sf->uc.uc_mcontext.nds32_r0, err);
	__get_user_error(regs->uregs[1], &sf->uc.uc_mcontext.nds32_r1, err);
	__get_user_error(regs->uregs[2], &sf->uc.uc_mcontext.nds32_r2, err);
	__get_user_error(regs->uregs[3], &sf->uc.uc_mcontext.nds32_r3, err);
	__get_user_error(regs->uregs[4], &sf->uc.uc_mcontext.nds32_r4, err);
	__get_user_error(regs->uregs[5], &sf->uc.uc_mcontext.nds32_r5, err);
	__get_user_error(regs->uregs[6], &sf->uc.uc_mcontext.nds32_r6, err);
	__get_user_error(regs->uregs[7], &sf->uc.uc_mcontext.nds32_r7, err);
	__get_user_error(regs->uregs[8], &sf->uc.uc_mcontext.nds32_r8, err);
	__get_user_error(regs->uregs[9], &sf->uc.uc_mcontext.nds32_r9, err);
	__get_user_error(regs->uregs[10], &sf->uc.uc_mcontext.nds32_r10, err);
	__get_user_error(regs->uregs[11], &sf->uc.uc_mcontext.nds32_r11, err);
	__get_user_error(regs->uregs[12], &sf->uc.uc_mcontext.nds32_r12, err);
	__get_user_error(regs->uregs[13], &sf->uc.uc_mcontext.nds32_r13, err);
	__get_user_error(regs->uregs[14], &sf->uc.uc_mcontext.nds32_r14, err);
	__get_user_error(regs->uregs[15], &sf->uc.uc_mcontext.nds32_r15, err);
	__get_user_error(regs->uregs[16], &sf->uc.uc_mcontext.nds32_r16, err);
	__get_user_error(regs->uregs[17], &sf->uc.uc_mcontext.nds32_r17, err);
	__get_user_error(regs->uregs[18], &sf->uc.uc_mcontext.nds32_r18, err);
	__get_user_error(regs->uregs[19], &sf->uc.uc_mcontext.nds32_r19, err);
	__get_user_error(regs->uregs[20], &sf->uc.uc_mcontext.nds32_r20, err);
	__get_user_error(regs->uregs[21], &sf->uc.uc_mcontext.nds32_r21, err);
	__get_user_error(regs->uregs[22], &sf->uc.uc_mcontext.nds32_r22, err);
	__get_user_error(regs->uregs[23], &sf->uc.uc_mcontext.nds32_r23, err);
	__get_user_error(regs->uregs[24], &sf->uc.uc_mcontext.nds32_r24, err);
	__get_user_error(regs->uregs[25], &sf->uc.uc_mcontext.nds32_r25, err);

	__get_user_error(regs->fp, &sf->uc.uc_mcontext.nds32_fp, err);
	__get_user_error(regs->gp, &sf->uc.uc_mcontext.nds32_gp, err);
	__get_user_error(regs->lp, &sf->uc.uc_mcontext.nds32_lp, err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.nds32_sp, err);
	__get_user_error(regs->ipc, &sf->uc.uc_mcontext.nds32_ipc, err);
#if defined(CONFIG_HWZOL)
	__get_user_error(regs->lc, &sf->uc.uc_mcontext.zol.nds32_lc, err);
	__get_user_error(regs->le, &sf->uc.uc_mcontext.zol.nds32_le, err);
	__get_user_error(regs->lb, &sf->uc.uc_mcontext.zol.nds32_lb, err);
#endif
#if IS_ENABLED(CONFIG_FPU)
	err |= restore_sigcontext_fpu(regs, &sf->uc.uc_mcontext);
#endif
	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	return err;
}
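/*
 * Unwind a signal frame.  Userspace reaches this syscall through the
 * rt_sigtramp code in the VDSO once the signal handler returns.
 */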
asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be two-word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->sp & 7)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->uregs[0];

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
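/*
 * Copy the interrupted user register state into the sigcontext of the
 * frame being built on the user stack.
 */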
static int
setup_sigframe(struct rt_sigframe __user * sf, struct pt_regs *regs,
	       sigset_t * set)
{
	int err = 0;

	__put_user_error(regs->uregs[0], &sf->uc.uc_mcontext.nds32_r0, err);
	__put_user_error(regs->uregs[1], &sf->uc.uc_mcontext.nds32_r1, err);
	__put_user_error(regs->uregs[2], &sf->uc.uc_mcontext.nds32_r2, err);
	__put_user_error(regs->uregs[3], &sf->uc.uc_mcontext.nds32_r3, err);
	__put_user_error(regs->uregs[4], &sf->uc.uc_mcontext.nds32_r4, err);
	__put_user_error(regs->uregs[5], &sf->uc.uc_mcontext.nds32_r5, err);
	__put_user_error(regs->uregs[6], &sf->uc.uc_mcontext.nds32_r6, err);
	__put_user_error(regs->uregs[7], &sf->uc.uc_mcontext.nds32_r7, err);
	__put_user_error(regs->uregs[8], &sf->uc.uc_mcontext.nds32_r8, err);
	__put_user_error(regs->uregs[9], &sf->uc.uc_mcontext.nds32_r9, err);
	__put_user_error(regs->uregs[10], &sf->uc.uc_mcontext.nds32_r10, err);
	__put_user_error(regs->uregs[11], &sf->uc.uc_mcontext.nds32_r11, err);
	__put_user_error(regs->uregs[12], &sf->uc.uc_mcontext.nds32_r12, err);
	__put_user_error(regs->uregs[13], &sf->uc.uc_mcontext.nds32_r13, err);
	__put_user_error(regs->uregs[14], &sf->uc.uc_mcontext.nds32_r14, err);
	__put_user_error(regs->uregs[15], &sf->uc.uc_mcontext.nds32_r15, err);
	__put_user_error(regs->uregs[16], &sf->uc.uc_mcontext.nds32_r16, err);
	__put_user_error(regs->uregs[17], &sf->uc.uc_mcontext.nds32_r17, err);
	__put_user_error(regs->uregs[18], &sf->uc.uc_mcontext.nds32_r18, err);
	__put_user_error(regs->uregs[19], &sf->uc.uc_mcontext.nds32_r19, err);
	__put_user_error(regs->uregs[20], &sf->uc.uc_mcontext.nds32_r20, err);
	__put_user_error(regs->uregs[21], &sf->uc.uc_mcontext.nds32_r21, err);
	__put_user_error(regs->uregs[22], &sf->uc.uc_mcontext.nds32_r22, err);
	__put_user_error(regs->uregs[23], &sf->uc.uc_mcontext.nds32_r23, err);
	__put_user_error(regs->uregs[24], &sf->uc.uc_mcontext.nds32_r24, err);
	__put_user_error(regs->uregs[25], &sf->uc.uc_mcontext.nds32_r25, err);
	__put_user_error(regs->fp, &sf->uc.uc_mcontext.nds32_fp, err);
	__put_user_error(regs->gp, &sf->uc.uc_mcontext.nds32_gp, err);
	__put_user_error(regs->lp, &sf->uc.uc_mcontext.nds32_lp, err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.nds32_sp, err);
	__put_user_error(regs->ipc, &sf->uc.uc_mcontext.nds32_ipc, err);
#if defined(CONFIG_HWZOL)
	__put_user_error(regs->lc, &sf->uc.uc_mcontext.zol.nds32_lc, err);
	__put_user_error(regs->le, &sf->uc.uc_mcontext.zol.nds32_le, err);
	__put_user_error(regs->lb, &sf->uc.uc_mcontext.zol.nds32_lb, err);
#endif
#if IS_ENABLED(CONFIG_FPU)
	err |= setup_sigcontext_fpu(regs, &sf->uc.uc_mcontext);
#endif

	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no,
			 err);
	__put_user_error(current->thread.error_code,
			 &sf->uc.uc_mcontext.error_code, err);
	__put_user_error(current->thread.address,
			 &sf->uc.uc_mcontext.fault_address, err);
	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	return err;
}
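/*
 * Choose where on the user stack (normal or alternate signal stack) the
 * signal frame should live, leaving room for 'framesize' bytes.
 */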
static inline void __user *get_sigframe(struct ksignal *ksig,
					struct pt_regs *regs, int framesize)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = regs->sp;

	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */
	if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
		return (void __user __force *)(-1UL);

	/* This is the X/Open sanctioned signal stack switching. */
	sp = (sigsp(sp, ksig) - framesize);

	/*
	 * nds32 mandates 8-byte alignment
	 */
	sp &= ~0x7UL;

	return (void __user *)sp;
}
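/*
 * Point the saved register state at the signal handler: r0 carries the
 * signal number, sp is the new frame, and the return path goes back
 * through the VDSO rt_sigtramp trampoline.
 */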
static int
setup_return(struct pt_regs *regs, struct ksignal *ksig, void __user * frame)
{
	unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
	unsigned long retcode;

	retcode = VDSO_SYMBOL(current->mm->context.vdso, rt_sigtramp);
	regs->uregs[0] = ksig->sig;
	regs->sp = (unsigned long)frame;
	regs->lp = retcode;
	regs->ipc = handler;

	return 0;
}
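/*
 * Build the complete rt signal frame on the user stack and redirect
 * execution to the handler.
 */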
static int
setup_rt_frame(struct ksignal *ksig, sigset_t * set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame =
	    get_sigframe(ksig, regs, sizeof(*frame));
	int err = 0;

	if (!access_ok(frame, sizeof(*frame)))
		return -EFAULT;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(frame, regs, set);
	if (err == 0) {
		setup_return(regs, ksig, frame);
		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
			regs->uregs[1] = (unsigned long)&frame->info;
			regs->uregs[2] = (unsigned long)&frame->uc;
		}
	}
	return err;
}
/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	int ret;
	sigset_t *oldset = sigmask_to_save();

	if (in_syscall(regs)) {
		/* Avoid additional syscall restarting via ret_slow_syscall. */
		forget_syscall(regs);

		switch (regs->uregs[0]) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->uregs[0] = -EINTR;
			break;
		case -ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->uregs[0] = -EINTR;
				break;
			}
			/* fallthrough */
		case -ERESTARTNOINTR:
			regs->uregs[0] = regs->orig_r0;
			regs->ipc -= 4;
			break;
		}
	}
	/*
	 * Set up the stack frame
	 */
	ret = setup_rt_frame(ksig, oldset, regs);

	signal_setup_done(ret, ksig, 0);
}
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static void do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (in_syscall(regs)) {
		/* Restart the system call - no handlers present */

		/* Avoid additional syscall restarting via ret_slow_syscall. */
		forget_syscall(regs);

		switch (regs->uregs[0]) {
		case -ERESTART_RESTARTBLOCK:
			regs->uregs[15] = __NR_restart_syscall;
			/* fallthrough */
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			regs->uregs[0] = regs->orig_r0;
			regs->ipc -= 4;
			break;
		}
	}
	restore_saved_sigmask();
}
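/*
 * Called on the return-to-userspace path whenever work is pending:
 * deliver signals and run any queued TIF_NOTIFY_RESUME callbacks.
 */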
asmlinkage void
do_notify_resume(struct pt_regs *regs, unsigned int thread_flags)
{
	if (thread_flags & _TIF_SIGPENDING)
		do_signal(regs);

	if (thread_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}
}