/*
 * Imported from linux/fpc-iii.git — arch/arm/kernel/signal.c
 * (blob 02e6b6dfffa7eda25e832e1b977b549d670ee02d; tree includes
 * "mm: fix exec activate_mm vs TLB shootdown and lazy tlb switching race")
 */
/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>
#include <linux/uprobes.h>
#include <linux/syscalls.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>
/* sigreturn trampoline instructions, defined in sigreturn_codes.S */
extern const unsigned long sigreturn_codes[7];

/* Randomized offset of the trampoline copy within the signal page */
static unsigned long signal_return_offset;
#ifdef CONFIG_CRUNCH
/*
 * Save the current thread's Crunch coprocessor state into the user
 * signal frame. Returns nonzero on fault while writing to user space.
 */
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
        char kbuf[sizeof(*frame) + 8];
        struct crunch_sigframe *kframe;

        /* the crunch context must be 64 bit aligned */
        kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
        kframe->magic = CRUNCH_MAGIC;
        kframe->size = CRUNCH_STORAGE_SIZE;
        crunch_task_copy(current_thread_info(), &kframe->storage);
        return __copy_to_user(frame, kframe, sizeof(*frame));
}

/*
 * Restore Crunch state from the user signal frame pointed at by *auxp,
 * validating the magic/size header first. Advances *auxp past the
 * consumed block on success. Returns 0 on success, -1 on a bad frame.
 */
static int restore_crunch_context(char __user **auxp)
{
        struct crunch_sigframe __user *frame =
                (struct crunch_sigframe __user *)*auxp;
        char kbuf[sizeof(*frame) + 8];
        struct crunch_sigframe *kframe;

        /* the crunch context must be 64 bit aligned */
        kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
        if (__copy_from_user(kframe, frame, sizeof(*frame)))
                return -1;
        if (kframe->magic != CRUNCH_MAGIC ||
            kframe->size != CRUNCH_STORAGE_SIZE)
                return -1;
        *auxp += CRUNCH_STORAGE_SIZE;
        crunch_task_restore(current_thread_info(), &kframe->storage);
        return 0;
}
#endif
#ifdef CONFIG_IWMMXT

/*
 * Save the thread's iWMMXt state into the user signal frame. For
 * threads not using iWMMXt a dummy block of the same size is written
 * to preserve the historical frame layout. Returns nonzero on fault.
 */
static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
{
        char kbuf[sizeof(*frame) + 8];
        struct iwmmxt_sigframe *kframe;
        int err = 0;

        /* the iWMMXt context must be 64 bit aligned */
        kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);

        if (test_thread_flag(TIF_USING_IWMMXT)) {
                kframe->magic = IWMMXT_MAGIC;
                kframe->size = IWMMXT_STORAGE_SIZE;
                iwmmxt_task_copy(current_thread_info(), &kframe->storage);
        } else {
                /*
                 * For bug-compatibility with older kernels, some space
                 * has to be reserved for iWMMXt even if it's not used.
                 * Set the magic and size appropriately so that properly
                 * written userspace can skip it reliably:
                 */
                *kframe = (struct iwmmxt_sigframe) {
                        .magic = DUMMY_MAGIC,
                        .size  = IWMMXT_STORAGE_SIZE,
                };
        }

        err = __copy_to_user(frame, kframe, sizeof(*kframe));

        return err;
}

/*
 * Restore iWMMXt state from the user signal frame at *auxp. On
 * success, *auxp is advanced past the (real or dummy) block.
 * Returns 0 on success or when the optional dummy block is absent,
 * -1 on a malformed frame.
 */
static int restore_iwmmxt_context(char __user **auxp)
{
        struct iwmmxt_sigframe __user *frame =
                (struct iwmmxt_sigframe __user *)*auxp;
        char kbuf[sizeof(*frame) + 8];
        struct iwmmxt_sigframe *kframe;

        /* the iWMMXt context must be 64 bit aligned */
        kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
        if (__copy_from_user(kframe, frame, sizeof(*frame)))
                return -1;

        /*
         * For non-iWMMXt threads: a single iwmmxt_sigframe-sized dummy
         * block is discarded for compatibility with setup_sigframe() if
         * present, but we don't mandate its presence. If some other
         * magic is here, it's not for us:
         */
        if (!test_thread_flag(TIF_USING_IWMMXT) &&
            kframe->magic != DUMMY_MAGIC)
                return 0;

        if (kframe->size != IWMMXT_STORAGE_SIZE)
                return -1;

        if (test_thread_flag(TIF_USING_IWMMXT)) {
                if (kframe->magic != IWMMXT_MAGIC)
                        return -1;

                iwmmxt_task_restore(current_thread_info(), &kframe->storage);
        }

        *auxp += IWMMXT_STORAGE_SIZE;
        return 0;
}

#endif
#ifdef CONFIG_VFP

/*
 * Save the thread's VFP state into the user signal frame, clearing the
 * live hardware state so the handler starts with a clean FPU.
 * Returns nonzero on error or user-space fault.
 */
static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
        struct vfp_sigframe kframe;
        int err = 0;

        memset(&kframe, 0, sizeof(kframe));
        kframe.magic = VFP_MAGIC;
        kframe.size = VFP_STORAGE_SIZE;

        err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
        if (err)
                return err;

        return __copy_to_user(frame, &kframe, sizeof(kframe));
}

/*
 * Restore VFP state from the user signal frame at *auxp after
 * validating its magic/size header; advances *auxp past the block.
 * Returns 0 on success or a negative errno.
 */
static int restore_vfp_context(char __user **auxp)
{
        struct vfp_sigframe frame;
        int err;

        err = __copy_from_user(&frame, *auxp, sizeof(frame));
        if (err)
                return err;

        if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
                return -EINVAL;

        *auxp += sizeof(frame);
        return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
}

#endif
172 * Do a signal return; undo the signal stack. These are aligned to 64-bit.
174 struct sigframe {
175 struct ucontext uc;
176 unsigned long retcode[2];
179 struct rt_sigframe {
180 struct siginfo info;
181 struct sigframe sig;
184 static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
186 struct sigcontext context;
187 char __user *aux;
188 sigset_t set;
189 int err;
191 err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
192 if (err == 0)
193 set_current_blocked(&set);
195 err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
196 if (err == 0) {
197 regs->ARM_r0 = context.arm_r0;
198 regs->ARM_r1 = context.arm_r1;
199 regs->ARM_r2 = context.arm_r2;
200 regs->ARM_r3 = context.arm_r3;
201 regs->ARM_r4 = context.arm_r4;
202 regs->ARM_r5 = context.arm_r5;
203 regs->ARM_r6 = context.arm_r6;
204 regs->ARM_r7 = context.arm_r7;
205 regs->ARM_r8 = context.arm_r8;
206 regs->ARM_r9 = context.arm_r9;
207 regs->ARM_r10 = context.arm_r10;
208 regs->ARM_fp = context.arm_fp;
209 regs->ARM_ip = context.arm_ip;
210 regs->ARM_sp = context.arm_sp;
211 regs->ARM_lr = context.arm_lr;
212 regs->ARM_pc = context.arm_pc;
213 regs->ARM_cpsr = context.arm_cpsr;
216 err |= !valid_user_regs(regs);
218 aux = (char __user *) sf->uc.uc_regspace;
219 #ifdef CONFIG_CRUNCH
220 if (err == 0)
221 err |= restore_crunch_context(&aux);
222 #endif
223 #ifdef CONFIG_IWMMXT
224 if (err == 0)
225 err |= restore_iwmmxt_context(&aux);
226 #endif
227 #ifdef CONFIG_VFP
228 if (err == 0)
229 err |= restore_vfp_context(&aux);
230 #endif
232 return err;
235 asmlinkage int sys_sigreturn(struct pt_regs *regs)
237 struct sigframe __user *frame;
239 /* Always make any pending restarted system calls return -EINTR */
240 current->restart_block.fn = do_no_restart_syscall;
243 * Since we stacked the signal on a 64-bit boundary,
244 * then 'sp' should be word aligned here. If it's
245 * not, then the user is trying to mess with us.
247 if (regs->ARM_sp & 7)
248 goto badframe;
250 frame = (struct sigframe __user *)regs->ARM_sp;
252 if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
253 goto badframe;
255 if (restore_sigframe(regs, frame))
256 goto badframe;
258 return regs->ARM_r0;
260 badframe:
261 force_sig(SIGSEGV, current);
262 return 0;
265 asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
267 struct rt_sigframe __user *frame;
269 /* Always make any pending restarted system calls return -EINTR */
270 current->restart_block.fn = do_no_restart_syscall;
273 * Since we stacked the signal on a 64-bit boundary,
274 * then 'sp' should be word aligned here. If it's
275 * not, then the user is trying to mess with us.
277 if (regs->ARM_sp & 7)
278 goto badframe;
280 frame = (struct rt_sigframe __user *)regs->ARM_sp;
282 if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
283 goto badframe;
285 if (restore_sigframe(regs, &frame->sig))
286 goto badframe;
288 if (restore_altstack(&frame->sig.uc.uc_stack))
289 goto badframe;
291 return regs->ARM_r0;
293 badframe:
294 force_sig(SIGSEGV, current);
295 return 0;
298 static int
299 setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
301 struct aux_sigframe __user *aux;
302 struct sigcontext context;
303 int err = 0;
305 context = (struct sigcontext) {
306 .arm_r0 = regs->ARM_r0,
307 .arm_r1 = regs->ARM_r1,
308 .arm_r2 = regs->ARM_r2,
309 .arm_r3 = regs->ARM_r3,
310 .arm_r4 = regs->ARM_r4,
311 .arm_r5 = regs->ARM_r5,
312 .arm_r6 = regs->ARM_r6,
313 .arm_r7 = regs->ARM_r7,
314 .arm_r8 = regs->ARM_r8,
315 .arm_r9 = regs->ARM_r9,
316 .arm_r10 = regs->ARM_r10,
317 .arm_fp = regs->ARM_fp,
318 .arm_ip = regs->ARM_ip,
319 .arm_sp = regs->ARM_sp,
320 .arm_lr = regs->ARM_lr,
321 .arm_pc = regs->ARM_pc,
322 .arm_cpsr = regs->ARM_cpsr,
324 .trap_no = current->thread.trap_no,
325 .error_code = current->thread.error_code,
326 .fault_address = current->thread.address,
327 .oldmask = set->sig[0],
330 err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));
332 err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
334 aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
335 #ifdef CONFIG_CRUNCH
336 if (err == 0)
337 err |= preserve_crunch_context(&aux->crunch);
338 #endif
339 #ifdef CONFIG_IWMMXT
340 if (err == 0)
341 err |= preserve_iwmmxt_context(&aux->iwmmxt);
342 #endif
343 #ifdef CONFIG_VFP
344 if (err == 0)
345 err |= preserve_vfp_context(&aux->vfp);
346 #endif
347 err |= __put_user(0, &aux->end_magic);
349 return err;
352 static inline void __user *
353 get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
355 unsigned long sp = sigsp(regs->ARM_sp, ksig);
356 void __user *frame;
359 * ATPCS B01 mandates 8-byte alignment
361 frame = (void __user *)((sp - framesize) & ~7);
364 * Check that we can actually write to the signal frame.
366 if (!access_ok(VERIFY_WRITE, frame, framesize))
367 frame = NULL;
369 return frame;
372 static int
373 setup_return(struct pt_regs *regs, struct ksignal *ksig,
374 unsigned long __user *rc, void __user *frame)
376 unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
377 unsigned long retcode;
378 int thumb = 0;
379 unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
381 cpsr |= PSR_ENDSTATE;
384 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
386 if (ksig->ka.sa.sa_flags & SA_THIRTYTWO)
387 cpsr = (cpsr & ~MODE_MASK) | USR_MODE;
389 #ifdef CONFIG_ARM_THUMB
390 if (elf_hwcap & HWCAP_THUMB) {
392 * The LSB of the handler determines if we're going to
393 * be using THUMB or ARM mode for this signal handler.
395 thumb = handler & 1;
398 * Clear the If-Then Thumb-2 execution state. ARM spec
399 * requires this to be all 000s in ARM mode. Snapdragon
400 * S4/Krait misbehaves on a Thumb=>ARM signal transition
401 * without this.
403 * We must do this whenever we are running on a Thumb-2
404 * capable CPU, which includes ARMv6T2. However, we elect
405 * to always do this to simplify the code; this field is
406 * marked UNK/SBZP for older architectures.
408 cpsr &= ~PSR_IT_MASK;
410 if (thumb) {
411 cpsr |= PSR_T_BIT;
412 } else
413 cpsr &= ~PSR_T_BIT;
415 #endif
417 if (ksig->ka.sa.sa_flags & SA_RESTORER) {
418 retcode = (unsigned long)ksig->ka.sa.sa_restorer;
419 } else {
420 unsigned int idx = thumb << 1;
422 if (ksig->ka.sa.sa_flags & SA_SIGINFO)
423 idx += 3;
426 * Put the sigreturn code on the stack no matter which return
427 * mechanism we use in order to remain ABI compliant
429 if (__put_user(sigreturn_codes[idx], rc) ||
430 __put_user(sigreturn_codes[idx+1], rc+1))
431 return 1;
433 #ifdef CONFIG_MMU
434 if (cpsr & MODE32_BIT) {
435 struct mm_struct *mm = current->mm;
438 * 32-bit code can use the signal return page
439 * except when the MPU has protected the vectors
440 * page from PL0
442 retcode = mm->context.sigpage + signal_return_offset +
443 (idx << 2) + thumb;
444 } else
445 #endif
448 * Ensure that the instruction cache sees
449 * the return code written onto the stack.
451 flush_icache_range((unsigned long)rc,
452 (unsigned long)(rc + 2));
454 retcode = ((unsigned long)rc) + thumb;
458 regs->ARM_r0 = ksig->sig;
459 regs->ARM_sp = (unsigned long)frame;
460 regs->ARM_lr = retcode;
461 regs->ARM_pc = handler;
462 regs->ARM_cpsr = cpsr;
464 return 0;
467 static int
468 setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
470 struct sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
471 int err = 0;
473 if (!frame)
474 return 1;
477 * Set uc.uc_flags to a value which sc.trap_no would never have.
479 err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
481 err |= setup_sigframe(frame, regs, set);
482 if (err == 0)
483 err = setup_return(regs, ksig, frame->retcode, frame);
485 return err;
488 static int
489 setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
491 struct rt_sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
492 int err = 0;
494 if (!frame)
495 return 1;
497 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
499 err |= __put_user(0, &frame->sig.uc.uc_flags);
500 err |= __put_user(NULL, &frame->sig.uc.uc_link);
502 err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
503 err |= setup_sigframe(&frame->sig, regs, set);
504 if (err == 0)
505 err = setup_return(regs, ksig, frame->sig.retcode, frame);
507 if (err == 0) {
509 * For realtime signals we must also set the second and third
510 * arguments for the signal handler.
511 * -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
513 regs->ARM_r1 = (unsigned long)&frame->info;
514 regs->ARM_r2 = (unsigned long)&frame->sig.uc;
517 return err;
521 * OK, we're invoking a handler
523 static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
525 sigset_t *oldset = sigmask_to_save();
526 int ret;
529 * Set up the stack frame
531 if (ksig->ka.sa.sa_flags & SA_SIGINFO)
532 ret = setup_rt_frame(ksig, oldset, regs);
533 else
534 ret = setup_frame(ksig, oldset, regs);
537 * Check that the resulting registers are actually sane.
539 ret |= !valid_user_regs(regs);
541 signal_setup_done(ret, ksig, 0);
545 * Note that 'init' is a special process: it doesn't get signals it doesn't
546 * want to handle. Thus you cannot kill init even with a SIGKILL even by
547 * mistake.
549 * Note that we go through the signals twice: once to check the signals that
550 * the kernel can handle, and then we build all the user-level signal handling
551 * stack-frames in one go after that.
553 static int do_signal(struct pt_regs *regs, int syscall)
555 unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
556 struct ksignal ksig;
557 int restart = 0;
560 * If we were from a system call, check for system call restarting...
562 if (syscall) {
563 continue_addr = regs->ARM_pc;
564 restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
565 retval = regs->ARM_r0;
568 * Prepare for system call restart. We do this here so that a
569 * debugger will see the already changed PSW.
571 switch (retval) {
572 case -ERESTART_RESTARTBLOCK:
573 restart -= 2;
574 case -ERESTARTNOHAND:
575 case -ERESTARTSYS:
576 case -ERESTARTNOINTR:
577 restart++;
578 regs->ARM_r0 = regs->ARM_ORIG_r0;
579 regs->ARM_pc = restart_addr;
580 break;
585 * Get the signal to deliver. When running under ptrace, at this
586 * point the debugger may change all our registers ...
589 * Depending on the signal settings we may need to revert the
590 * decision to restart the system call. But skip this if a
591 * debugger has chosen to restart at a different PC.
593 if (get_signal(&ksig)) {
594 /* handler */
595 if (unlikely(restart) && regs->ARM_pc == restart_addr) {
596 if (retval == -ERESTARTNOHAND ||
597 retval == -ERESTART_RESTARTBLOCK
598 || (retval == -ERESTARTSYS
599 && !(ksig.ka.sa.sa_flags & SA_RESTART))) {
600 regs->ARM_r0 = -EINTR;
601 regs->ARM_pc = continue_addr;
604 handle_signal(&ksig, regs);
605 } else {
606 /* no handler */
607 restore_saved_sigmask();
608 if (unlikely(restart) && regs->ARM_pc == restart_addr) {
609 regs->ARM_pc = continue_addr;
610 return restart;
613 return 0;
616 asmlinkage int
617 do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
620 * The assembly code enters us with IRQs off, but it hasn't
621 * informed the tracing code of that for efficiency reasons.
622 * Update the trace code with the current status.
624 trace_hardirqs_off();
625 do {
626 if (likely(thread_flags & _TIF_NEED_RESCHED)) {
627 schedule();
628 } else {
629 if (unlikely(!user_mode(regs)))
630 return 0;
631 local_irq_enable();
632 if (thread_flags & _TIF_SIGPENDING) {
633 int restart = do_signal(regs, syscall);
634 if (unlikely(restart)) {
636 * Restart without handlers.
637 * Deal with it without leaving
638 * the kernel space.
640 return restart;
642 syscall = 0;
643 } else if (thread_flags & _TIF_UPROBE) {
644 uprobe_notify_resume(regs);
645 } else {
646 clear_thread_flag(TIF_NOTIFY_RESUME);
647 tracehook_notify_resume(regs);
650 local_irq_disable();
651 thread_flags = current_thread_info()->flags;
652 } while (thread_flags & _TIF_WORK_MASK);
653 return 0;
656 struct page *get_signal_page(void)
658 unsigned long ptr;
659 unsigned offset;
660 struct page *page;
661 void *addr;
663 page = alloc_pages(GFP_KERNEL, 0);
665 if (!page)
666 return NULL;
668 addr = page_address(page);
670 /* Give the signal return code some randomness */
671 offset = 0x200 + (get_random_int() & 0x7fc);
672 signal_return_offset = offset;
675 * Copy signal return handlers into the vector page, and
676 * set sigreturn to be a pointer to these.
678 memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
680 ptr = (unsigned long)addr + offset;
681 flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
683 return page;
686 /* Defer to generic check */
687 asmlinkage void addr_limit_check_failed(void)
689 addr_limit_user_check();