// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2009 Russell King
 */
7 #include <linux/errno.h>
8 #include <linux/random.h>
9 #include <linux/signal.h>
10 #include <linux/personality.h>
11 #include <linux/uaccess.h>
12 #include <linux/tracehook.h>
13 #include <linux/uprobes.h>
14 #include <linux/syscalls.h>
17 #include <asm/cacheflush.h>
18 #include <asm/traps.h>
19 #include <asm/unistd.h>
/*
 * Sigreturn trampoline instructions (ARM/Thumb and FDPIC variants),
 * defined in assembly elsewhere in this directory — NOTE(review):
 * presumably sigreturn_codes.S; verify against the build.
 */
extern const unsigned long sigreturn_codes[17];

/* Randomised offset of the trampolines within the per-mm signal page. */
static unsigned long signal_return_offset;
29 static int preserve_crunch_context(struct crunch_sigframe __user
*frame
)
31 char kbuf
[sizeof(*frame
) + 8];
32 struct crunch_sigframe
*kframe
;
34 /* the crunch context must be 64 bit aligned */
35 kframe
= (struct crunch_sigframe
*)((unsigned long)(kbuf
+ 8) & ~7);
36 kframe
->magic
= CRUNCH_MAGIC
;
37 kframe
->size
= CRUNCH_STORAGE_SIZE
;
38 crunch_task_copy(current_thread_info(), &kframe
->storage
);
39 return __copy_to_user(frame
, kframe
, sizeof(*frame
));
42 static int restore_crunch_context(char __user
**auxp
)
44 struct crunch_sigframe __user
*frame
=
45 (struct crunch_sigframe __user
*)*auxp
;
46 char kbuf
[sizeof(*frame
) + 8];
47 struct crunch_sigframe
*kframe
;
49 /* the crunch context must be 64 bit aligned */
50 kframe
= (struct crunch_sigframe
*)((unsigned long)(kbuf
+ 8) & ~7);
51 if (__copy_from_user(kframe
, frame
, sizeof(*frame
)))
53 if (kframe
->magic
!= CRUNCH_MAGIC
||
54 kframe
->size
!= CRUNCH_STORAGE_SIZE
)
56 *auxp
+= CRUNCH_STORAGE_SIZE
;
57 crunch_task_restore(current_thread_info(), &kframe
->storage
);
64 static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user
*frame
)
66 char kbuf
[sizeof(*frame
) + 8];
67 struct iwmmxt_sigframe
*kframe
;
70 /* the iWMMXt context must be 64 bit aligned */
71 kframe
= (struct iwmmxt_sigframe
*)((unsigned long)(kbuf
+ 8) & ~7);
73 if (test_thread_flag(TIF_USING_IWMMXT
)) {
74 kframe
->magic
= IWMMXT_MAGIC
;
75 kframe
->size
= IWMMXT_STORAGE_SIZE
;
76 iwmmxt_task_copy(current_thread_info(), &kframe
->storage
);
79 * For bug-compatibility with older kernels, some space
80 * has to be reserved for iWMMXt even if it's not used.
81 * Set the magic and size appropriately so that properly
82 * written userspace can skip it reliably:
84 *kframe
= (struct iwmmxt_sigframe
) {
86 .size
= IWMMXT_STORAGE_SIZE
,
90 err
= __copy_to_user(frame
, kframe
, sizeof(*kframe
));
95 static int restore_iwmmxt_context(char __user
**auxp
)
97 struct iwmmxt_sigframe __user
*frame
=
98 (struct iwmmxt_sigframe __user
*)*auxp
;
99 char kbuf
[sizeof(*frame
) + 8];
100 struct iwmmxt_sigframe
*kframe
;
102 /* the iWMMXt context must be 64 bit aligned */
103 kframe
= (struct iwmmxt_sigframe
*)((unsigned long)(kbuf
+ 8) & ~7);
104 if (__copy_from_user(kframe
, frame
, sizeof(*frame
)))
108 * For non-iWMMXt threads: a single iwmmxt_sigframe-sized dummy
109 * block is discarded for compatibility with setup_sigframe() if
110 * present, but we don't mandate its presence. If some other
111 * magic is here, it's not for us:
113 if (!test_thread_flag(TIF_USING_IWMMXT
) &&
114 kframe
->magic
!= DUMMY_MAGIC
)
117 if (kframe
->size
!= IWMMXT_STORAGE_SIZE
)
120 if (test_thread_flag(TIF_USING_IWMMXT
)) {
121 if (kframe
->magic
!= IWMMXT_MAGIC
)
124 iwmmxt_task_restore(current_thread_info(), &kframe
->storage
);
127 *auxp
+= IWMMXT_STORAGE_SIZE
;
135 static int preserve_vfp_context(struct vfp_sigframe __user
*frame
)
137 struct vfp_sigframe kframe
;
140 memset(&kframe
, 0, sizeof(kframe
));
141 kframe
.magic
= VFP_MAGIC
;
142 kframe
.size
= VFP_STORAGE_SIZE
;
144 err
= vfp_preserve_user_clear_hwstate(&kframe
.ufp
, &kframe
.ufp_exc
);
148 return __copy_to_user(frame
, &kframe
, sizeof(kframe
));
151 static int restore_vfp_context(char __user
**auxp
)
153 struct vfp_sigframe frame
;
156 err
= __copy_from_user(&frame
, *auxp
, sizeof(frame
));
160 if (frame
.magic
!= VFP_MAGIC
|| frame
.size
!= VFP_STORAGE_SIZE
)
163 *auxp
+= sizeof(frame
);
164 return vfp_restore_user_hwstate(&frame
.ufp
, &frame
.ufp_exc
);
170 * Do a signal return; undo the signal stack. These are aligned to 64-bit.
173 static int restore_sigframe(struct pt_regs
*regs
, struct sigframe __user
*sf
)
175 struct sigcontext context
;
180 err
= __copy_from_user(&set
, &sf
->uc
.uc_sigmask
, sizeof(set
));
182 set_current_blocked(&set
);
184 err
|= __copy_from_user(&context
, &sf
->uc
.uc_mcontext
, sizeof(context
));
186 regs
->ARM_r0
= context
.arm_r0
;
187 regs
->ARM_r1
= context
.arm_r1
;
188 regs
->ARM_r2
= context
.arm_r2
;
189 regs
->ARM_r3
= context
.arm_r3
;
190 regs
->ARM_r4
= context
.arm_r4
;
191 regs
->ARM_r5
= context
.arm_r5
;
192 regs
->ARM_r6
= context
.arm_r6
;
193 regs
->ARM_r7
= context
.arm_r7
;
194 regs
->ARM_r8
= context
.arm_r8
;
195 regs
->ARM_r9
= context
.arm_r9
;
196 regs
->ARM_r10
= context
.arm_r10
;
197 regs
->ARM_fp
= context
.arm_fp
;
198 regs
->ARM_ip
= context
.arm_ip
;
199 regs
->ARM_sp
= context
.arm_sp
;
200 regs
->ARM_lr
= context
.arm_lr
;
201 regs
->ARM_pc
= context
.arm_pc
;
202 regs
->ARM_cpsr
= context
.arm_cpsr
;
205 err
|= !valid_user_regs(regs
);
207 aux
= (char __user
*) sf
->uc
.uc_regspace
;
210 err
|= restore_crunch_context(&aux
);
214 err
|= restore_iwmmxt_context(&aux
);
218 err
|= restore_vfp_context(&aux
);
224 asmlinkage
int sys_sigreturn(struct pt_regs
*regs
)
226 struct sigframe __user
*frame
;
228 /* Always make any pending restarted system calls return -EINTR */
229 current
->restart_block
.fn
= do_no_restart_syscall
;
232 * Since we stacked the signal on a 64-bit boundary,
233 * then 'sp' should be word aligned here. If it's
234 * not, then the user is trying to mess with us.
236 if (regs
->ARM_sp
& 7)
239 frame
= (struct sigframe __user
*)regs
->ARM_sp
;
241 if (!access_ok(frame
, sizeof (*frame
)))
244 if (restore_sigframe(regs
, frame
))
254 asmlinkage
int sys_rt_sigreturn(struct pt_regs
*regs
)
256 struct rt_sigframe __user
*frame
;
258 /* Always make any pending restarted system calls return -EINTR */
259 current
->restart_block
.fn
= do_no_restart_syscall
;
262 * Since we stacked the signal on a 64-bit boundary,
263 * then 'sp' should be word aligned here. If it's
264 * not, then the user is trying to mess with us.
266 if (regs
->ARM_sp
& 7)
269 frame
= (struct rt_sigframe __user
*)regs
->ARM_sp
;
271 if (!access_ok(frame
, sizeof (*frame
)))
274 if (restore_sigframe(regs
, &frame
->sig
))
277 if (restore_altstack(&frame
->sig
.uc
.uc_stack
))
288 setup_sigframe(struct sigframe __user
*sf
, struct pt_regs
*regs
, sigset_t
*set
)
290 struct aux_sigframe __user
*aux
;
291 struct sigcontext context
;
294 context
= (struct sigcontext
) {
295 .arm_r0
= regs
->ARM_r0
,
296 .arm_r1
= regs
->ARM_r1
,
297 .arm_r2
= regs
->ARM_r2
,
298 .arm_r3
= regs
->ARM_r3
,
299 .arm_r4
= regs
->ARM_r4
,
300 .arm_r5
= regs
->ARM_r5
,
301 .arm_r6
= regs
->ARM_r6
,
302 .arm_r7
= regs
->ARM_r7
,
303 .arm_r8
= regs
->ARM_r8
,
304 .arm_r9
= regs
->ARM_r9
,
305 .arm_r10
= regs
->ARM_r10
,
306 .arm_fp
= regs
->ARM_fp
,
307 .arm_ip
= regs
->ARM_ip
,
308 .arm_sp
= regs
->ARM_sp
,
309 .arm_lr
= regs
->ARM_lr
,
310 .arm_pc
= regs
->ARM_pc
,
311 .arm_cpsr
= regs
->ARM_cpsr
,
313 .trap_no
= current
->thread
.trap_no
,
314 .error_code
= current
->thread
.error_code
,
315 .fault_address
= current
->thread
.address
,
316 .oldmask
= set
->sig
[0],
319 err
|= __copy_to_user(&sf
->uc
.uc_mcontext
, &context
, sizeof(context
));
321 err
|= __copy_to_user(&sf
->uc
.uc_sigmask
, set
, sizeof(*set
));
323 aux
= (struct aux_sigframe __user
*) sf
->uc
.uc_regspace
;
326 err
|= preserve_crunch_context(&aux
->crunch
);
330 err
|= preserve_iwmmxt_context(&aux
->iwmmxt
);
334 err
|= preserve_vfp_context(&aux
->vfp
);
336 err
|= __put_user(0, &aux
->end_magic
);
341 static inline void __user
*
342 get_sigframe(struct ksignal
*ksig
, struct pt_regs
*regs
, int framesize
)
344 unsigned long sp
= sigsp(regs
->ARM_sp
, ksig
);
348 * ATPCS B01 mandates 8-byte alignment
350 frame
= (void __user
*)((sp
- framesize
) & ~7);
353 * Check that we can actually write to the signal frame.
355 if (!access_ok(frame
, framesize
))
362 setup_return(struct pt_regs
*regs
, struct ksignal
*ksig
,
363 unsigned long __user
*rc
, void __user
*frame
)
365 unsigned long handler
= (unsigned long)ksig
->ka
.sa
.sa_handler
;
366 unsigned long handler_fdpic_GOT
= 0;
367 unsigned long retcode
;
368 unsigned int idx
, thumb
= 0;
369 unsigned long cpsr
= regs
->ARM_cpsr
& ~(PSR_f
| PSR_E_BIT
);
370 bool fdpic
= IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC
) &&
371 (current
->personality
& FDPIC_FUNCPTRS
);
374 unsigned long __user
*fdpic_func_desc
=
375 (unsigned long __user
*)handler
;
376 if (__get_user(handler
, &fdpic_func_desc
[0]) ||
377 __get_user(handler_fdpic_GOT
, &fdpic_func_desc
[1]))
381 cpsr
|= PSR_ENDSTATE
;
384 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
386 if (ksig
->ka
.sa
.sa_flags
& SA_THIRTYTWO
)
387 cpsr
= (cpsr
& ~MODE_MASK
) | USR_MODE
;
389 #ifdef CONFIG_ARM_THUMB
390 if (elf_hwcap
& HWCAP_THUMB
) {
392 * The LSB of the handler determines if we're going to
393 * be using THUMB or ARM mode for this signal handler.
398 * Clear the If-Then Thumb-2 execution state. ARM spec
399 * requires this to be all 000s in ARM mode. Snapdragon
400 * S4/Krait misbehaves on a Thumb=>ARM signal transition
403 * We must do this whenever we are running on a Thumb-2
404 * capable CPU, which includes ARMv6T2. However, we elect
405 * to always do this to simplify the code; this field is
406 * marked UNK/SBZP for older architectures.
408 cpsr
&= ~PSR_IT_MASK
;
417 if (ksig
->ka
.sa
.sa_flags
& SA_RESTORER
) {
418 retcode
= (unsigned long)ksig
->ka
.sa
.sa_restorer
;
421 * We need code to load the function descriptor.
422 * That code follows the standard sigreturn code
423 * (6 words), and is made of 3 + 2 words for each
424 * variant. The 4th copied word is the actual FD
425 * address that the assembly code expects.
428 if (ksig
->ka
.sa
.sa_flags
& SA_SIGINFO
)
430 if (__put_user(sigreturn_codes
[idx
], rc
) ||
431 __put_user(sigreturn_codes
[idx
+1], rc
+1) ||
432 __put_user(sigreturn_codes
[idx
+2], rc
+2) ||
433 __put_user(retcode
, rc
+3))
439 if (ksig
->ka
.sa
.sa_flags
& SA_SIGINFO
)
443 * Put the sigreturn code on the stack no matter which return
444 * mechanism we use in order to remain ABI compliant
446 if (__put_user(sigreturn_codes
[idx
], rc
) ||
447 __put_user(sigreturn_codes
[idx
+1], rc
+1))
452 if (cpsr
& MODE32_BIT
) {
453 struct mm_struct
*mm
= current
->mm
;
456 * 32-bit code can use the signal return page
457 * except when the MPU has protected the vectors
460 retcode
= mm
->context
.sigpage
+ signal_return_offset
+
466 * Ensure that the instruction cache sees
467 * the return code written onto the stack.
469 flush_icache_range((unsigned long)rc
,
470 (unsigned long)(rc
+ 3));
472 retcode
= ((unsigned long)rc
) + thumb
;
476 regs
->ARM_r0
= ksig
->sig
;
477 regs
->ARM_sp
= (unsigned long)frame
;
478 regs
->ARM_lr
= retcode
;
479 regs
->ARM_pc
= handler
;
481 regs
->ARM_r9
= handler_fdpic_GOT
;
482 regs
->ARM_cpsr
= cpsr
;
488 setup_frame(struct ksignal
*ksig
, sigset_t
*set
, struct pt_regs
*regs
)
490 struct sigframe __user
*frame
= get_sigframe(ksig
, regs
, sizeof(*frame
));
497 * Set uc.uc_flags to a value which sc.trap_no would never have.
499 err
= __put_user(0x5ac3c35a, &frame
->uc
.uc_flags
);
501 err
|= setup_sigframe(frame
, regs
, set
);
503 err
= setup_return(regs
, ksig
, frame
->retcode
, frame
);
509 setup_rt_frame(struct ksignal
*ksig
, sigset_t
*set
, struct pt_regs
*regs
)
511 struct rt_sigframe __user
*frame
= get_sigframe(ksig
, regs
, sizeof(*frame
));
517 err
|= copy_siginfo_to_user(&frame
->info
, &ksig
->info
);
519 err
|= __put_user(0, &frame
->sig
.uc
.uc_flags
);
520 err
|= __put_user(NULL
, &frame
->sig
.uc
.uc_link
);
522 err
|= __save_altstack(&frame
->sig
.uc
.uc_stack
, regs
->ARM_sp
);
523 err
|= setup_sigframe(&frame
->sig
, regs
, set
);
525 err
= setup_return(regs
, ksig
, frame
->sig
.retcode
, frame
);
529 * For realtime signals we must also set the second and third
530 * arguments for the signal handler.
531 * -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
533 regs
->ARM_r1
= (unsigned long)&frame
->info
;
534 regs
->ARM_r2
= (unsigned long)&frame
->sig
.uc
;
541 * OK, we're invoking a handler
543 static void handle_signal(struct ksignal
*ksig
, struct pt_regs
*regs
)
545 sigset_t
*oldset
= sigmask_to_save();
549 * Perform fixup for the pre-signal frame.
551 rseq_signal_deliver(ksig
, regs
);
554 * Set up the stack frame
556 if (ksig
->ka
.sa
.sa_flags
& SA_SIGINFO
)
557 ret
= setup_rt_frame(ksig
, oldset
, regs
);
559 ret
= setup_frame(ksig
, oldset
, regs
);
562 * Check that the resulting registers are actually sane.
564 ret
|= !valid_user_regs(regs
);
566 signal_setup_done(ret
, ksig
, 0);
570 * Note that 'init' is a special process: it doesn't get signals it doesn't
571 * want to handle. Thus you cannot kill init even with a SIGKILL even by
574 * Note that we go through the signals twice: once to check the signals that
575 * the kernel can handle, and then we build all the user-level signal handling
576 * stack-frames in one go after that.
578 static int do_signal(struct pt_regs
*regs
, int syscall
)
580 unsigned int retval
= 0, continue_addr
= 0, restart_addr
= 0;
585 * If we were from a system call, check for system call restarting...
588 continue_addr
= regs
->ARM_pc
;
589 restart_addr
= continue_addr
- (thumb_mode(regs
) ? 2 : 4);
590 retval
= regs
->ARM_r0
;
593 * Prepare for system call restart. We do this here so that a
594 * debugger will see the already changed PSW.
597 case -ERESTART_RESTARTBLOCK
:
600 case -ERESTARTNOHAND
:
602 case -ERESTARTNOINTR
:
604 regs
->ARM_r0
= regs
->ARM_ORIG_r0
;
605 regs
->ARM_pc
= restart_addr
;
611 * Get the signal to deliver. When running under ptrace, at this
612 * point the debugger may change all our registers ...
615 * Depending on the signal settings we may need to revert the
616 * decision to restart the system call. But skip this if a
617 * debugger has chosen to restart at a different PC.
619 if (get_signal(&ksig
)) {
621 if (unlikely(restart
) && regs
->ARM_pc
== restart_addr
) {
622 if (retval
== -ERESTARTNOHAND
||
623 retval
== -ERESTART_RESTARTBLOCK
624 || (retval
== -ERESTARTSYS
625 && !(ksig
.ka
.sa
.sa_flags
& SA_RESTART
))) {
626 regs
->ARM_r0
= -EINTR
;
627 regs
->ARM_pc
= continue_addr
;
630 handle_signal(&ksig
, regs
);
633 restore_saved_sigmask();
634 if (unlikely(restart
) && regs
->ARM_pc
== restart_addr
) {
635 regs
->ARM_pc
= continue_addr
;
643 do_work_pending(struct pt_regs
*regs
, unsigned int thread_flags
, int syscall
)
646 * The assembly code enters us with IRQs off, but it hasn't
647 * informed the tracing code of that for efficiency reasons.
648 * Update the trace code with the current status.
650 trace_hardirqs_off();
652 if (likely(thread_flags
& _TIF_NEED_RESCHED
)) {
655 if (unlikely(!user_mode(regs
)))
658 if (thread_flags
& _TIF_SIGPENDING
) {
659 int restart
= do_signal(regs
, syscall
);
660 if (unlikely(restart
)) {
662 * Restart without handlers.
663 * Deal with it without leaving
669 } else if (thread_flags
& _TIF_UPROBE
) {
670 uprobe_notify_resume(regs
);
672 clear_thread_flag(TIF_NOTIFY_RESUME
);
673 tracehook_notify_resume(regs
);
674 rseq_handle_notify_resume(NULL
, regs
);
678 thread_flags
= current_thread_info()->flags
;
679 } while (thread_flags
& _TIF_WORK_MASK
);
683 struct page
*get_signal_page(void)
690 page
= alloc_pages(GFP_KERNEL
, 0);
695 addr
= page_address(page
);
697 /* Give the signal return code some randomness */
698 offset
= 0x200 + (get_random_int() & 0x7fc);
699 signal_return_offset
= offset
;
702 * Copy signal return handlers into the vector page, and
703 * set sigreturn to be a pointer to these.
705 memcpy(addr
+ offset
, sigreturn_codes
, sizeof(sigreturn_codes
));
707 ptr
= (unsigned long)addr
+ offset
;
708 flush_icache_range(ptr
, ptr
+ sizeof(sigreturn_codes
));
713 /* Defer to generic check */
714 asmlinkage
void addr_limit_check_failed(void)
716 addr_limit_user_check();
719 #ifdef CONFIG_DEBUG_RSEQ
720 asmlinkage
void do_rseq_syscall(struct pt_regs
*regs
)