/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>
#include <linux/uprobes.h>
#include <linux/syscalls.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>

extern const unsigned long sigreturn_codes[7];

static unsigned long signal_return_offset;

#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = CRUNCH_MAGIC;
	kframe->size = CRUNCH_STORAGE_SIZE;
	crunch_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}
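
/*
 * Note on the kbuf/kframe idiom used by the context helpers in this file:
 * the on-stack buffer is sized 8 bytes larger than the frame, and the
 * working pointer is produced by rounding (kbuf + 8) down to an 8-byte
 * boundary, so kframe is always 64-bit aligned while still lying entirely
 * within kbuf.
 */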

static int restore_crunch_context(char __user **auxp)
{
	struct crunch_sigframe __user *frame =
		(struct crunch_sigframe __user *)*auxp;
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != CRUNCH_MAGIC ||
	    kframe->size != CRUNCH_STORAGE_SIZE)
		return -1;
	*auxp += CRUNCH_STORAGE_SIZE;
	crunch_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}
#endif

#ifdef CONFIG_IWMMXT

static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;
	int err = 0;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);

	if (test_thread_flag(TIF_USING_IWMMXT)) {
		kframe->magic = IWMMXT_MAGIC;
		kframe->size = IWMMXT_STORAGE_SIZE;
		iwmmxt_task_copy(current_thread_info(), &kframe->storage);

		err = __copy_to_user(frame, kframe, sizeof(*frame));
	} else {
		/*
		 * For bug-compatibility with older kernels, some space
		 * has to be reserved for iWMMXt even if it's not used.
		 * Set the magic and size appropriately so that properly
		 * written userspace can skip it reliably:
		 */
		__put_user_error(DUMMY_MAGIC, &frame->magic, err);
		__put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err);
	}

	return err;
}

static int restore_iwmmxt_context(char __user **auxp)
{
	struct iwmmxt_sigframe __user *frame =
		(struct iwmmxt_sigframe __user *)*auxp;
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;

	/*
	 * For non-iWMMXt threads: a single iwmmxt_sigframe-sized dummy
	 * block is discarded for compatibility with setup_sigframe() if
	 * present, but we don't mandate its presence. If some other
	 * magic is here, it's not for us:
	 */
	if (!test_thread_flag(TIF_USING_IWMMXT) &&
	    kframe->magic != DUMMY_MAGIC)
		return 0;

	if (kframe->size != IWMMXT_STORAGE_SIZE)
		return -1;

	if (test_thread_flag(TIF_USING_IWMMXT)) {
		if (kframe->magic != IWMMXT_MAGIC)
			return -1;

		iwmmxt_task_restore(current_thread_info(), &kframe->storage);
	}

	*auxp += IWMMXT_STORAGE_SIZE;
	return 0;
}

#endif
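
/*
 * Taken together, the two iWMMXt helpers keep old and new userspace happy:
 * setup_sigframe() always reserves an iwmmxt_sigframe-sized slot (tagged
 * IWMMXT_MAGIC or DUMMY_MAGIC), while the restore path accepts frames both
 * with and without the dummy block before moving on to the VFP data that
 * follows in uc_regspace.
 */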

#ifdef CONFIG_VFP

static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
	const unsigned long magic = VFP_MAGIC;
	const unsigned long size = VFP_STORAGE_SIZE;
	int err = 0;

	__put_user_error(magic, &frame->magic, err);
	__put_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;

	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
}

static int restore_vfp_context(char __user **auxp)
{
	struct vfp_sigframe __user *frame =
		(struct vfp_sigframe __user *)*auxp;
	unsigned long magic;
	unsigned long size;
	int err = 0;

	__get_user_error(magic, &frame->magic, err);
	__get_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;
	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
		return -EINVAL;

	*auxp += size;
	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
}

#endif
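
/*
 * Each coprocessor save area in uc_regspace follows the same pattern: a
 * {magic, size} header followed by the payload, with a zero end_magic word
 * terminating the list (see setup_sigframe() below), so userspace can walk
 * past blocks it does not recognise.
 */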

/*
 * Do a signal return; undo the signal stack. These are aligned to 64-bit.
 */
struct sigframe {
	struct ucontext uc;
	unsigned long retcode[2];
};

struct rt_sigframe {
	struct siginfo info;
	struct sigframe sig;
};
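
/*
 * Rough picture of what setup_frame()/setup_rt_frame() place on the user
 * stack at delivery time:
 *
 *	sp -> [ siginfo      ]	(rt frames only)
 *	      [ ucontext     ]	integer regs, fault info, signal mask
 *	      [  uc_regspace ]	crunch/iWMMXt/VFP blocks, zero-terminated
 *	      [ retcode[2]   ]	sigreturn trampoline copy (ABI compatibility)
 *
 * The whole frame is 8-byte aligned, which is why the sigreturn entry
 * points below reject a misaligned sp.
 */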

static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	char __user *aux;
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	err |= !valid_user_regs(regs);
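	/*
	 * valid_user_regs() refuses any restored state that does not
	 * describe ordinary user mode, so a forged sigcontext cannot be
	 * used to sigreturn into a privileged CPSR.
	 */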

	aux = (char __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= restore_crunch_context(&aux);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0)
		err |= restore_iwmmxt_context(&aux);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= restore_vfp_context(&aux);
#endif

	return err;
}

asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
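
/*
 * Userspace normally reaches sys_sigreturn()/sys_rt_sigreturn() through the
 * sigreturn trampoline installed by setup_return(): roughly
 * "mov r7, #__NR_(rt_)sigreturn; svc #0", executed either from the
 * per-process signal page or from the retcode words on the stack (see
 * sigreturn_codes.S for the exact sequences).
 */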

asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, &frame->sig))
		goto badframe;

	if (restore_altstack(&frame->sig.uc.uc_stack))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
	struct aux_sigframe __user *aux;
	int err = 0;

	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0)
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= preserve_vfp_context(&aux->vfp);
#endif
	__put_user_error(0, &aux->end_magic, err);

	return err;
}
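
/*
 * Besides the register file, the mcontext above also carries the fault
 * bookkeeping (trap_no, error_code, fault_address) and the old signal mask
 * in oldmask, mirroring what handlers expect to find in the legacy
 * sigcontext layout.
 */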

static inline void __user *
get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
{
	unsigned long sp = sigsp(regs->ARM_sp, ksig);
	void __user *frame;

	/*
	 * ATPCS B01 mandates 8-byte alignment
	 */
	frame = (void __user *)((sp - framesize) & ~7);

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(VERIFY_WRITE, frame, framesize))
		frame = NULL;

	return frame;
}
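
/*
 * sigsp() picks the alternate signal stack when the handler was registered
 * with SA_ONSTACK and an altstack is installed; otherwise it simply returns
 * the ARM_sp value passed in above.
 */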

static int
setup_return(struct pt_regs *regs, struct ksignal *ksig,
	     unsigned long __user *rc, void __user *frame)
{
	unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
	unsigned long retcode;
	int thumb = 0;
	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);

	cpsr |= PSR_ENDSTATE;

	/*
	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
	 */
	if (ksig->ka.sa.sa_flags & SA_THIRTYTWO)
		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
	if (elf_hwcap & HWCAP_THUMB) {
		/*
		 * The LSB of the handler determines if we're going to
		 * be using THUMB or ARM mode for this signal handler.
		 */
		thumb = handler & 1;

		/*
		 * Clear the If-Then Thumb-2 execution state.  ARM spec
		 * requires this to be all 000s in ARM mode.  Snapdragon
		 * S4/Krait misbehaves on a Thumb=>ARM signal transition
		 * without this.
		 *
		 * We must do this whenever we are running on a Thumb-2
		 * capable CPU, which includes ARMv6T2.  However, we elect
		 * to always do this to simplify the code; this field is
		 * marked UNK/SBZP for older architectures.
		 */
		cpsr &= ~PSR_IT_MASK;

		if (thumb) {
			cpsr |= PSR_T_BIT;
		} else
			cpsr &= ~PSR_T_BIT;
	}
#endif

	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ksig->ka.sa.sa_restorer;
	} else {
		unsigned int idx = thumb << 1;

		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			idx += 3;

		/*
		 * Put the sigreturn code on the stack no matter which return
		 * mechanism we use in order to remain ABI compliant
		 */
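		/*
		 * idx selects one of the trampolines packed into
		 * sigreturn_codes[]: ARM vs Thumb via (thumb << 1), and the
		 * rt_sigreturn variants via the +3 above; two consecutive
		 * words are copied in every case (see sigreturn_codes.S for
		 * the exact packing).
		 */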
		if (__put_user(sigreturn_codes[idx],   rc) ||
		    __put_user(sigreturn_codes[idx+1], rc+1))
			return 1;

#ifdef CONFIG_MMU
		if (cpsr & MODE32_BIT) {
			struct mm_struct *mm = current->mm;

			/*
			 * 32-bit code can use the signal return page
			 * except when the MPU has protected the vectors
			 * page from PL0
			 */
			retcode = mm->context.sigpage + signal_return_offset +
				  (idx << 2) + thumb;
		} else
#endif
		{
			/*
			 * Ensure that the instruction cache sees
			 * the return code written onto the stack.
			 */
			flush_icache_range((unsigned long)rc,
					   (unsigned long)(rc + 2));

			retcode = ((unsigned long)rc) + thumb;
		}
	}

	regs->ARM_r0 = ksig->sig;
	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;
	regs->ARM_pc = handler;
	regs->ARM_cpsr = cpsr;

	return 0;
}

static int
setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	/*
	 * Set uc.uc_flags to a value which sc.trap_no would never have.
	 */
	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);

	err |= setup_sigframe(frame, regs, set);
	if (err == 0)
		err = setup_return(regs, ksig, frame->retcode, frame);

	return err;
}

static int
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	err |= copy_siginfo_to_user(&frame->info, &ksig->info);

	__put_user_error(0, &frame->sig.uc.uc_flags, err);
	__put_user_error(NULL, &frame->sig.uc.uc_link, err);

	err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
	err |= setup_sigframe(&frame->sig, regs, set);
	if (err == 0)
		err = setup_return(regs, ksig, frame->sig.retcode, frame);

	if (err == 0) {
		/*
		 * For realtime signals we must also set the second and third
		 * arguments for the signal handler.
		 *   -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
		 */
		regs->ARM_r1 = (unsigned long)&frame->info;
		regs->ARM_r2 = (unsigned long)&frame->sig.uc;
	}

	return err;
}
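
/*
 * With SA_SIGINFO the userspace handler has the three-argument form
 *	void handler(int signo, siginfo_t *info, void *ucontext);
 * so the r0/r1/r2 values set up above land in exactly those parameters
 * under the AAPCS calling convention.
 */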

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;

	/*
	 * Set up the stack frame
	 */
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(ksig, oldset, regs);
	else
		ret = setup_frame(ksig, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	signal_setup_done(ret, ksig, 0);
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static int do_signal(struct pt_regs *regs, int syscall)
{
	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
	struct ksignal ksig;
	int restart = 0;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->ARM_pc;
		restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
		retval = regs->ARM_r0;
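
		/*
		 * restart_addr points back at the SWI/SVC instruction
		 * itself: 2 bytes earlier in Thumb mode, 4 bytes in ARM
		 * mode, so restarting the call is just a matter of
		 * rewinding the PC.
		 */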

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PSW.
		 */
		switch (retval) {
		case -ERESTART_RESTARTBLOCK:
			restart -= 2;
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			restart++;
			regs->ARM_r0 = regs->ARM_ORIG_r0;
			regs->ARM_pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this
	 * point the debugger may change all our registers ...
	 */
	/*
	 * Depending on the signal settings we may need to revert the
	 * decision to restart the system call. But skip this if a
	 * debugger has chosen to restart at a different PC.
	 */
	if (get_signal(&ksig)) {
		/* handler */
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			if (retval == -ERESTARTNOHAND ||
			    retval == -ERESTART_RESTARTBLOCK
			    || (retval == -ERESTARTSYS
				&& !(ksig.ka.sa.sa_flags & SA_RESTART))) {
				regs->ARM_r0 = -EINTR;
				regs->ARM_pc = continue_addr;
			}
		}
		handle_signal(&ksig, regs);
	} else {
		/* no handler */
		restore_saved_sigmask();
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			regs->ARM_pc = continue_addr;
			return restart;
		}
	}
	return 0;
}
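
/*
 * do_work_pending() propagates do_signal()'s restart value back to the
 * assembly glue in entry-common.S, which re-issues the interrupted system
 * call (or sys_restart_syscall for the -ERESTART_RESTARTBLOCK case) without
 * returning to userspace.
 */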

asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	/*
	 * The assembly code enters us with IRQs off, but it hasn't
	 * informed the tracing code of that for efficiency reasons.
	 * Update the trace code with the current status.
	 */
	trace_hardirqs_off();
	do {
		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
			schedule();
		} else {
			if (unlikely(!user_mode(regs)))
				return 0;
			local_irq_enable();
			if (thread_flags & _TIF_SIGPENDING) {
				int restart = do_signal(regs, syscall);
				if (unlikely(restart)) {
					/*
					 * Restart without handlers.
					 * Deal with it without leaving
					 * the kernel space.
					 */
					return restart;
				}
				syscall = 0;
			} else if (thread_flags & _TIF_UPROBE) {
				uprobe_notify_resume(regs);
			} else {
				clear_thread_flag(TIF_NOTIFY_RESUME);
				tracehook_notify_resume(regs);
			}
		}
		local_irq_disable();
		thread_flags = current_thread_info()->flags;
	} while (thread_flags & _TIF_WORK_MASK);
	return 0;
}

struct page *get_signal_page(void)
{
	unsigned long ptr;
	unsigned offset;
	struct page *page;
	void *addr;

	page = alloc_pages(GFP_KERNEL, 0);
	if (!page)
		return NULL;

	addr = page_address(page);

	/* Give the signal return code some randomness */
	offset = 0x200 + (get_random_int() & 0x7fc);
	signal_return_offset = offset;
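	/*
	 * The offset above is word aligned (the low two bits are masked off)
	 * and ranges from 0x200 to 0x9fc, so the trampoline never sits at a
	 * predictable place within the signal page.
	 */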

	/*
	 * Copy signal return handlers into the vector page, and
	 * set sigreturn to be a pointer to these.
	 */
	memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));

	ptr = (unsigned long)addr + offset;
	flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));

	return page;
}

/* Defer to generic check */
asmlinkage void addr_limit_check_failed(void)
{
	addr_limit_user_check();
}