Linux 3.11-rc3 / arch/arm/kernel/signal.c
/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>

#include "signal.h"
/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))

/*
 * With EABI, the syscall number has to be loaded into r7.
 */
#define MOV_R7_NR_SIGRETURN	(0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define MOV_R7_NR_RT_SIGRETURN	(0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
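
/*
 * sigreturn_codes[] layout: entries 0-2 are the sigreturn sequences
 * (EABI "mov r7, #nr", OABI "swi", and the packed Thumb "mov r7"/"svc 0"
 * pair), entries 3-5 the rt_sigreturn equivalents.  setup_return()
 * indexes the table with (thumb << 1), plus 3 for SA_SIGINFO handlers.
 */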
const unsigned long sigreturn_codes[7] = {
	MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};

#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = CRUNCH_MAGIC;
	kframe->size = CRUNCH_STORAGE_SIZE;
	crunch_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != CRUNCH_MAGIC ||
	    kframe->size != CRUNCH_STORAGE_SIZE)
		return -1;
	crunch_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}
#endif

#ifdef CONFIG_IWMMXT

static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = IWMMXT_MAGIC;
	kframe->size = IWMMXT_STORAGE_SIZE;
	iwmmxt_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != IWMMXT_MAGIC ||
	    kframe->size != IWMMXT_STORAGE_SIZE)
		return -1;
	iwmmxt_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}

#endif

#ifdef CONFIG_VFP

static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
	const unsigned long magic = VFP_MAGIC;
	const unsigned long size = VFP_STORAGE_SIZE;
	int err = 0;

	__put_user_error(magic, &frame->magic, err);
	__put_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;

	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
}

static int restore_vfp_context(struct vfp_sigframe __user *frame)
{
	unsigned long magic;
	unsigned long size;
	int err = 0;

	__get_user_error(magic, &frame->magic, err);
	__get_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;
	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
		return -EINVAL;

	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
}

#endif

/*
 * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
 */
struct sigframe {
	struct ucontext uc;
	unsigned long retcode[2];
};

struct rt_sigframe {
	struct siginfo info;
	struct sigframe sig;
};
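
/*
 * The coprocessor state (Crunch, iWMMXt, VFP) is stored as a
 * struct aux_sigframe inside uc.uc_regspace, terminated by the
 * end_magic word written in setup_sigframe().
 */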

static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct aux_sigframe __user *aux;
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
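
	/*
	 * valid_user_regs() refuses a restored CPSR that would drop the
	 * task into a privileged mode or mask interrupts, so a forged
	 * sigframe cannot be used to escalate.
	 */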
	err |= !valid_user_regs(regs);

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= restore_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= restore_vfp_context(&aux->vfp);
#endif

	return err;
}

asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;
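
	/*
	 * Returning the just-restored r0 keeps the interrupted context's
	 * r0 intact: the syscall return path writes this function's
	 * return value back into the saved regs->ARM_r0.
	 */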
	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, &frame->sig))
		goto badframe;

	if (restore_altstack(&frame->sig.uc.uc_stack))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
	struct aux_sigframe __user *aux;
	int err = 0;

	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= preserve_vfp_context(&aux->vfp);
#endif
	__put_user_error(0, &aux->end_magic, err);

	return err;
}
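
/*
 * Pick where the signal frame goes: sigsp() returns the alternate
 * signal stack pointer for SA_ONSTACK handlers, otherwise the
 * interrupted user sp.
 */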
static inline void __user *
get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
{
	unsigned long sp = sigsp(regs->ARM_sp, ksig);
	void __user *frame;

	/*
	 * ATPCS B01 mandates 8-byte alignment
	 */
	frame = (void __user *)((sp - framesize) & ~7);

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(VERIFY_WRITE, frame, framesize))
		frame = NULL;

	return frame;
}

/*
 * translate the signal
 */
static inline int map_sig(int sig)
{
	struct thread_info *thread = current_thread_info();
	if (sig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
		sig = thread->exec_domain->signal_invmap[sig];
	return sig;
}

static int
setup_return(struct pt_regs *regs, struct ksignal *ksig,
	     unsigned long __user *rc, void __user *frame)
{
	unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
	unsigned long retcode;
	int thumb = 0;
	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);

	cpsr |= PSR_ENDSTATE;

	/*
	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
	 */
	if (ksig->ka.sa.sa_flags & SA_THIRTYTWO)
		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
	if (elf_hwcap & HWCAP_THUMB) {
		/*
		 * The LSB of the handler determines if we're going to
		 * be using THUMB or ARM mode for this signal handler.
		 */
		thumb = handler & 1;

		if (thumb) {
			cpsr |= PSR_T_BIT;
#if __LINUX_ARM_ARCH__ >= 7
			/* clear the If-Then Thumb-2 execution state */
			cpsr &= ~PSR_IT_MASK;
#endif
		} else
			cpsr &= ~PSR_T_BIT;
	}
#endif

	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ksig->ka.sa.sa_restorer;
	} else {
		unsigned int idx = thumb << 1;

		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			idx += 3;

		/*
		 * Put the sigreturn code on the stack no matter which return
		 * mechanism we use in order to remain ABI compliant
		 */
		if (__put_user(sigreturn_codes[idx], rc) ||
		    __put_user(sigreturn_codes[idx+1], rc+1))
			return 1;

		if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) {
			/*
			 * 32-bit code can use the new high-page
			 * signal return code support except when the MPU has
			 * protected the vectors page from PL0
			 */
			retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
		} else {
			/*
			 * Ensure that the instruction cache sees
			 * the return code written onto the stack.
			 */
			flush_icache_range((unsigned long)rc,
					   (unsigned long)(rc + 2));

			retcode = ((unsigned long)rc) + thumb;
		}
	}

	regs->ARM_r0 = map_sig(ksig->sig);
	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;
	regs->ARM_pc = handler;
	regs->ARM_cpsr = cpsr;

	return 0;
}

static int
setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	/*
	 * Set uc.uc_flags to a value which sc.trap_no would never have.
	 */
	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);

	err |= setup_sigframe(frame, regs, set);
	if (err == 0)
		err = setup_return(regs, ksig, frame->retcode, frame);

	return err;
}

static int
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	err |= copy_siginfo_to_user(&frame->info, &ksig->info);

	__put_user_error(0, &frame->sig.uc.uc_flags, err);
	__put_user_error(NULL, &frame->sig.uc.uc_link, err);

	err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
	err |= setup_sigframe(&frame->sig, regs, set);
	if (err == 0)
		err = setup_return(regs, ksig, frame->sig.retcode, frame);

	if (err == 0) {
		/*
		 * For realtime signals we must also set the second and third
		 * arguments for the signal handler.
		 *  -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
		 */
		regs->ARM_r1 = (unsigned long)&frame->info;
		regs->ARM_r2 = (unsigned long)&frame->sig.uc;
	}

	return err;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;

	/*
	 * Set up the stack frame
	 */
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(ksig, oldset, regs);
	else
		ret = setup_frame(ksig, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	signal_setup_done(ret, ksig, 0);
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static int do_signal(struct pt_regs *regs, int syscall)
{
	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
	struct ksignal ksig;
	int restart = 0;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->ARM_pc;
		restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
		retval = regs->ARM_r0;

		/*
		 * Prepare for system call restart.  We do this here so that a
		 * debugger will see the already changed PC.
		 */
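		/*
		 * Note the deliberate fall-through: -ERESTART_RESTARTBLOCK
		 * leaves 'restart' at -1, the other -ERESTART* codes leave
		 * it at +1.  The sign tells the work_pending assembly glue
		 * whether to restart via restart_syscall or to re-issue the
		 * original syscall.
		 */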
		switch (retval) {
		case -ERESTART_RESTARTBLOCK:
			restart -= 2;
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			restart++;
			regs->ARM_r0 = regs->ARM_ORIG_r0;
			regs->ARM_pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver.  When running under ptrace, at this
	 * point the debugger may change all our registers ...
	 */
	/*
	 * Depending on the signal settings we may need to revert the
	 * decision to restart the system call.  But skip this if a
	 * debugger has chosen to restart at a different PC.
	 */
	if (get_signal(&ksig)) {
		/* handler */
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			if (retval == -ERESTARTNOHAND ||
			    retval == -ERESTART_RESTARTBLOCK
			    || (retval == -ERESTARTSYS
				&& !(ksig.ka.sa.sa_flags & SA_RESTART))) {
				regs->ARM_r0 = -EINTR;
				regs->ARM_pc = continue_addr;
			}
		}
		handle_signal(&ksig, regs);
	} else {
		/* no handler */
		restore_saved_sigmask();
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			regs->ARM_pc = continue_addr;
			return restart;
		}
	}
	return 0;
}
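
/*
 * Called from the exception-return assembly glue with interrupts
 * disabled.  Returns 0 once all work is done, or the non-zero restart
 * value from do_signal() when a syscall has to be restarted without a
 * handler having run; the caller then restarts it without returning
 * to user space.
 */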
asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	do {
		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
			schedule();
		} else {
			if (unlikely(!user_mode(regs)))
				return 0;
			local_irq_enable();
			if (thread_flags & _TIF_SIGPENDING) {
				int restart = do_signal(regs, syscall);
				if (unlikely(restart)) {
					/*
					 * Restart without handlers.
					 * Deal with it without leaving
					 * the kernel space.
					 */
					return restart;
				}
				syscall = 0;
			} else {
				clear_thread_flag(TIF_NOTIFY_RESUME);
				tracehook_notify_resume(regs);
			}
		}
		local_irq_disable();
		thread_flags = current_thread_info()->flags;
	} while (thread_flags & _TIF_WORK_MASK);
	return 0;
}