/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>
#include <linux/uprobes.h>
#include <linux/syscalls.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/vfp.h>

#include "signal.h"

extern const unsigned long sigreturn_codes[17];

static unsigned long signal_return_offset;

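/*
 * Note on the kbuf/kframe pattern used by the preserve/restore helpers
 * below: the on-stack kbuf buffer is over-allocated by 8 bytes so that
 * a 64-bit aligned struct pointer (kframe) can always be carved out of
 * it via ((unsigned long)(kbuf + 8) & ~7), whatever alignment the stack
 * happens to give kbuf itself.  The context is staged there and then
 * copied to or from user space in a single call.
 */
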
#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = CRUNCH_MAGIC;
	kframe->size = CRUNCH_STORAGE_SIZE;
	crunch_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_crunch_context(char __user **auxp)
{
	struct crunch_sigframe __user *frame =
		(struct crunch_sigframe __user *)*auxp;
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != CRUNCH_MAGIC ||
	    kframe->size != CRUNCH_STORAGE_SIZE)
		return -1;
	*auxp += CRUNCH_STORAGE_SIZE;
	crunch_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}
#endif

#ifdef CONFIG_IWMMXT

static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;
	int err = 0;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);

	if (test_thread_flag(TIF_USING_IWMMXT)) {
		kframe->magic = IWMMXT_MAGIC;
		kframe->size = IWMMXT_STORAGE_SIZE;
		iwmmxt_task_copy(current_thread_info(), &kframe->storage);
	} else {
		/*
		 * For bug-compatibility with older kernels, some space
		 * has to be reserved for iWMMXt even if it's not used.
		 * Set the magic and size appropriately so that properly
		 * written userspace can skip it reliably:
		 */
		*kframe = (struct iwmmxt_sigframe) {
			.magic = DUMMY_MAGIC,
			.size  = IWMMXT_STORAGE_SIZE,
		};
	}

	err = __copy_to_user(frame, kframe, sizeof(*kframe));

	return err;
}

static int restore_iwmmxt_context(char __user **auxp)
{
	struct iwmmxt_sigframe __user *frame =
		(struct iwmmxt_sigframe __user *)*auxp;
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;

	/*
	 * For non-iWMMXt threads: a single iwmmxt_sigframe-sized dummy
	 * block is discarded for compatibility with setup_sigframe() if
	 * present, but we don't mandate its presence.  If some other
	 * magic is here, it's not for us:
	 */
	if (!test_thread_flag(TIF_USING_IWMMXT) &&
	    kframe->magic != DUMMY_MAGIC)
		return 0;

	if (kframe->size != IWMMXT_STORAGE_SIZE)
		return -1;

	if (test_thread_flag(TIF_USING_IWMMXT)) {
		if (kframe->magic != IWMMXT_MAGIC)
			return -1;

		iwmmxt_task_restore(current_thread_info(), &kframe->storage);
	}

	*auxp += IWMMXT_STORAGE_SIZE;
	return 0;
}

#endif

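/*
 * The VFP helpers below can skip the kbuf alignment trick: struct
 * vfp_sigframe contains 64-bit members, so a plain local instance
 * should already be suitably aligned by the compiler and can be
 * copied to/from user space wholesale.
 */
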
#ifdef CONFIG_VFP

static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
	struct vfp_sigframe kframe;
	int err = 0;

	memset(&kframe, 0, sizeof(kframe));
	kframe.magic = VFP_MAGIC;
	kframe.size = VFP_STORAGE_SIZE;

	err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
	if (err)
		return err;

	return __copy_to_user(frame, &kframe, sizeof(kframe));
}

static int restore_vfp_context(char __user **auxp)
{
	struct vfp_sigframe frame;
	int err;

	err = __copy_from_user(&frame, *auxp, sizeof(frame));
	if (err)
		return err;

	if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
		return -EINVAL;

	*auxp += sizeof(frame);
	return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
}

#endif

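/*
 * The restore path below rebuilds the whole register set from the
 * user-supplied sigcontext and then relies on valid_user_regs() to
 * sanitise the CPSR and reject any frame that would resume in a
 * privileged mode or with otherwise unacceptable PSR bits.
 */
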
/*
 * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
 */

static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct sigcontext context;
	char __user *aux;
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
	if (err == 0) {
		regs->ARM_r0 = context.arm_r0;
		regs->ARM_r1 = context.arm_r1;
		regs->ARM_r2 = context.arm_r2;
		regs->ARM_r3 = context.arm_r3;
		regs->ARM_r4 = context.arm_r4;
		regs->ARM_r5 = context.arm_r5;
		regs->ARM_r6 = context.arm_r6;
		regs->ARM_r7 = context.arm_r7;
		regs->ARM_r8 = context.arm_r8;
		regs->ARM_r9 = context.arm_r9;
		regs->ARM_r10 = context.arm_r10;
		regs->ARM_fp = context.arm_fp;
		regs->ARM_ip = context.arm_ip;
		regs->ARM_sp = context.arm_sp;
		regs->ARM_lr = context.arm_lr;
		regs->ARM_pc = context.arm_pc;
		regs->ARM_cpsr = context.arm_cpsr;
	}

	err |= !valid_user_regs(regs);

	aux = (char __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= restore_crunch_context(&aux);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0)
		err |= restore_iwmmxt_context(&aux);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= restore_vfp_context(&aux);
#endif

	return err;
}

asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, &frame->sig))
		goto badframe;

	if (restore_altstack(&frame->sig.uc.uc_stack))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

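/*
 * Layout note (informal sketch, not a uapi definition): setup_sigframe()
 * below stores the extra hardware contexts back to back in
 * uc.uc_regspace as { magic, size } tagged records (crunch, iWMMXt and
 * VFP, as configured), terminated by a zero end_magic word.  Userspace
 * looking for a particular record can walk the area roughly like this:
 *
 *	unsigned long *p = (unsigned long *)uc->uc_regspace;
 *	while (p[0]) {				// magic of this record
 *		if (p[0] == VFP_MAGIC)		// record we want
 *			break;
 *		p += p[1] / sizeof(*p);		// 'size' is in bytes
 *	}
 */
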
static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
	struct aux_sigframe __user *aux;
	struct sigcontext context;
	int err = 0;

	context = (struct sigcontext) {
		.arm_r0        = regs->ARM_r0,
		.arm_r1        = regs->ARM_r1,
		.arm_r2        = regs->ARM_r2,
		.arm_r3        = regs->ARM_r3,
		.arm_r4        = regs->ARM_r4,
		.arm_r5        = regs->ARM_r5,
		.arm_r6        = regs->ARM_r6,
		.arm_r7        = regs->ARM_r7,
		.arm_r8        = regs->ARM_r8,
		.arm_r9        = regs->ARM_r9,
		.arm_r10       = regs->ARM_r10,
		.arm_fp        = regs->ARM_fp,
		.arm_ip        = regs->ARM_ip,
		.arm_sp        = regs->ARM_sp,
		.arm_lr        = regs->ARM_lr,
		.arm_pc        = regs->ARM_pc,
		.arm_cpsr      = regs->ARM_cpsr,

		.trap_no       = current->thread.trap_no,
		.error_code    = current->thread.error_code,
		.fault_address = current->thread.address,
		.oldmask       = set->sig[0],
	};

	err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0)
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= preserve_vfp_context(&aux->vfp);
#endif
	err |= __put_user(0, &aux->end_magic);

	return err;
}

static inline void __user *
get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
{
	unsigned long sp = sigsp(regs->ARM_sp, ksig);
	void __user *frame;

	/*
	 * ATPCS B01 mandates 8-byte alignment
	 */
	frame = (void __user *)((sp - framesize) & ~7);

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(VERIFY_WRITE, frame, framesize))
		frame = NULL;

	return frame;
}

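/*
 * setup_return() picks the address the handler will return to: the
 * user-supplied SA_RESTORER if one was registered, otherwise a
 * sigreturn trampoline.  In the trampoline case the sigreturn code is
 * always written to the stack to remain ABI compliant, but 'retcode'
 * normally points at the copy living in the per-process signal page set
 * up by get_signal_page(); the on-stack copy is only executed when the
 * signal page cannot be used (no MMU, or not a 32-bit mode task), which
 * is why that path flushes the I-cache.  'idx' selects the ARM/Thumb,
 * sigreturn/rt_sigreturn and FDPIC variant within sigreturn_codes[].
 */
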
static int
setup_return(struct pt_regs *regs, struct ksignal *ksig,
	     unsigned long __user *rc, void __user *frame)
{
	unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
	unsigned long handler_fdpic_GOT = 0;
	unsigned long retcode;
	unsigned int idx, thumb = 0;
	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
	bool fdpic = IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&
		     (current->personality & FDPIC_FUNCPTRS);

	if (fdpic) {
		unsigned long __user *fdpic_func_desc =
					(unsigned long __user *)handler;
		if (__get_user(handler, &fdpic_func_desc[0]) ||
		    __get_user(handler_fdpic_GOT, &fdpic_func_desc[1]))
			return 1;
	}

	cpsr |= PSR_ENDSTATE;

	/*
	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
	 */
	if (ksig->ka.sa.sa_flags & SA_THIRTYTWO)
		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
	if (elf_hwcap & HWCAP_THUMB) {
		/*
		 * The LSB of the handler determines if we're going to
		 * be using THUMB or ARM mode for this signal handler.
		 */
		thumb = handler & 1;

		/*
		 * Clear the If-Then Thumb-2 execution state.  ARM spec
		 * requires this to be all 000s in ARM mode.  Snapdragon
		 * S4/Krait misbehaves on a Thumb=>ARM signal transition
		 * without this.
		 *
		 * We must do this whenever we are running on a Thumb-2
		 * capable CPU, which includes ARMv6T2.  However, we elect
		 * to always do this to simplify the code; this field is
		 * marked UNK/SBZP for older architectures.
		 */
		cpsr &= ~PSR_IT_MASK;

		if (thumb) {
			cpsr |= PSR_T_BIT;
		} else
			cpsr &= ~PSR_T_BIT;
	}
#endif

	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ksig->ka.sa.sa_restorer;
		if (fdpic) {
			/*
			 * We need code to load the function descriptor.
			 * That code follows the standard sigreturn code
			 * (6 words), and is made of 3 + 2 words for each
			 * variant. The 4th copied word is the actual FD
			 * address that the assembly code expects.
			 */
			idx = 6 + thumb * 3;
			if (ksig->ka.sa.sa_flags & SA_SIGINFO)
				idx += 5;
			if (__put_user(sigreturn_codes[idx],   rc  ) ||
			    __put_user(sigreturn_codes[idx+1], rc+1) ||
			    __put_user(sigreturn_codes[idx+2], rc+2) ||
			    __put_user(retcode,                rc+3))
				return 1;
			goto rc_finish;
		}
	} else {
		idx = thumb << 1;
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			idx += 3;

		/*
		 * Put the sigreturn code on the stack no matter which return
		 * mechanism we use in order to remain ABI compliant
		 */
		if (__put_user(sigreturn_codes[idx],   rc) ||
		    __put_user(sigreturn_codes[idx+1], rc+1))
			return 1;

rc_finish:
#ifdef CONFIG_MMU
		if (cpsr & MODE32_BIT) {
			struct mm_struct *mm = current->mm;

			/*
			 * 32-bit code can use the signal return page
			 * except when the MPU has protected the vectors
			 * page from PL0
			 */
			retcode = mm->context.sigpage + signal_return_offset +
				  (idx << 2) + thumb;
		} else
#endif
		{
			/*
			 * Ensure that the instruction cache sees
			 * the return code written onto the stack.
			 */
			flush_icache_range((unsigned long)rc,
					   (unsigned long)(rc + 3));

			retcode = ((unsigned long)rc) + thumb;
		}
	}

	regs->ARM_r0 = ksig->sig;
	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;
	regs->ARM_pc = handler;
	if (fdpic)
		regs->ARM_r9 = handler_fdpic_GOT;
	regs->ARM_cpsr = cpsr;

	return 0;
}

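/*
 * Two frame flavours follow: setup_frame() builds the classic
 * 'struct sigframe' (ucontext only) for plain handlers, while
 * setup_rt_frame() builds a 'struct rt_sigframe' that additionally
 * carries siginfo and the alternate stack state, and passes &info and
 * &uc in r1/r2 for three-argument SA_SIGINFO handlers.
 */
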
static int
setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	/*
	 * Set uc.uc_flags to a value which sc.trap_no would never have.
	 */
	err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);

	err |= setup_sigframe(frame, regs, set);
	if (err == 0)
		err = setup_return(regs, ksig, frame->retcode, frame);

	return err;
}

static int
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	err |= copy_siginfo_to_user(&frame->info, &ksig->info);

	err |= __put_user(0, &frame->sig.uc.uc_flags);
	err |= __put_user(NULL, &frame->sig.uc.uc_link);

	err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
	err |= setup_sigframe(&frame->sig, regs, set);
	if (err == 0)
		err = setup_return(regs, ksig, frame->sig.retcode, frame);

	if (err == 0) {
		/*
		 * For realtime signals we must also set the second and third
		 * arguments for the signal handler.
		 *    -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
		 */
		regs->ARM_r1 = (unsigned long)&frame->info;
		regs->ARM_r2 = (unsigned long)&frame->sig.uc;
	}

	return err;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;

	/*
	 * Increment event counter and perform fixup for the pre-signal
	 * frame.
	 */
	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(ksig, oldset, regs);
	else
		ret = setup_frame(ksig, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	signal_setup_done(ret, ksig, 0);
}

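/*
 * Syscall restart handling in do_signal(): 'restart' ends up -1 for
 * -ERESTART_RESTARTBLOCK and +1 for the other -ERESTART* codes, and the
 * PC is wound back by one instruction so the syscall would be re-issued.
 * If a handler is about to run, that decision may be reverted depending
 * on SA_RESTART; if no handler runs, the non-zero value is propagated
 * back through do_work_pending() so the caller can restart the system
 * call without dropping back to user space.
 */
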
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle.  Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static int do_signal(struct pt_regs *regs, int syscall)
{
	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
	struct ksignal ksig;
	int restart = 0;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->ARM_pc;
		restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
		retval = regs->ARM_r0;

		/*
		 * Prepare for system call restart.  We do this here so that a
		 * debugger will see the already changed PSW.
		 */
		switch (retval) {
		case -ERESTART_RESTARTBLOCK:
			restart -= 2;
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			restart++;
			regs->ARM_r0 = regs->ARM_ORIG_r0;
			regs->ARM_pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver.  When running under ptrace, at this
	 * point the debugger may change all our registers ...
	 */
	/*
	 * Depending on the signal settings we may need to revert the
	 * decision to restart the system call.  But skip this if a
	 * debugger has chosen to restart at a different PC.
	 */
	if (get_signal(&ksig)) {
		/* handler */
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			if (retval == -ERESTARTNOHAND ||
			    retval == -ERESTART_RESTARTBLOCK
			    || (retval == -ERESTARTSYS
				&& !(ksig.ka.sa.sa_flags & SA_RESTART))) {
				regs->ARM_r0 = -EINTR;
				regs->ARM_pc = continue_addr;
			}
		}
		handle_signal(&ksig, regs);
	} else {
		/* no handler */
		restore_saved_sigmask();
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			regs->ARM_pc = continue_addr;
			return restart;
		}
	}
	return 0;
}

asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	/*
	 * The assembly code enters us with IRQs off, but it hasn't
	 * informed the tracing code of that for efficiency reasons.
	 * Update the trace code with the current status.
	 */
	trace_hardirqs_off();
	do {
		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
			schedule();
		} else {
			if (unlikely(!user_mode(regs)))
				return 0;
			local_irq_enable();
			if (thread_flags & _TIF_SIGPENDING) {
				int restart = do_signal(regs, syscall);
				if (unlikely(restart)) {
					/*
					 * Restart without handlers.
					 * Deal with it without leaving
					 * the kernel space.
					 */
					return restart;
				}
				syscall = 0;
			} else if (thread_flags & _TIF_UPROBE) {
				uprobe_notify_resume(regs);
			} else {
				clear_thread_flag(TIF_NOTIFY_RESUME);
				tracehook_notify_resume(regs);
				rseq_handle_notify_resume(NULL, regs);
			}
		}
		local_irq_disable();
		thread_flags = current_thread_info()->flags;
	} while (thread_flags & _TIF_WORK_MASK);
	return 0;
}

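/*
 * get_signal_page() allocates the page backing the sigreturn
 * trampolines used by setup_return().  The sigreturn_codes are copied
 * in at a randomised, word-aligned offset (0x200..0x9fc) within the
 * page; the page itself is mapped into each process by the arch mmap
 * setup code, which records its address in mm->context.sigpage.
 */
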
struct page *get_signal_page(void)
{
	unsigned long ptr;
	unsigned offset;
	struct page *page;
	void *addr;

	page = alloc_pages(GFP_KERNEL, 0);

	if (!page)
		return NULL;

	addr = page_address(page);

	/* Give the signal return code some randomness */
	offset = 0x200 + (get_random_int() & 0x7fc);
	signal_return_offset = offset;

	/*
	 * Copy signal return handlers into the vector page, and
	 * set sigreturn to be a pointer to these.
	 */
	memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));

	ptr = (unsigned long)addr + offset;
	flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));

	return page;
}

/* Defer to generic check */
asmlinkage void addr_limit_check_failed(void)
{
	addr_limit_user_check();
}

#ifdef CONFIG_DEBUG_RSEQ
asmlinkage void do_rseq_syscall(struct pt_regs *regs)
{
	rseq_syscall(regs);
}
#endif