/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>
/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))

/*
 * With EABI, the syscall number has to be loaded into r7.
 */
#define MOV_R7_NR_SIGRETURN	(0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define MOV_R7_NR_RT_SIGRETURN	(0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
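
/*
 * On an EABI kernel (__NR_SYSCALL_BASE == 0), and assuming the usual
 * ARM syscall numbers (119 for sigreturn, 173 for rt_sigreturn), these
 * expand to, for example:
 *
 *	MOV_R7_NR_SIGRETURN	0xe3a07077	mov	r7, #119
 *	SWI_SYS_SIGRETURN	0xef900077	swi	0x900077  (OABI encoding)
 *	SWI_THUMB_SIGRETURN	0xdf002777	movs r7, #119; svc 0
 *
 * The table below is indexed by setup_return() as
 * idx = (thumb << 1) + (SA_SIGINFO ? 3 : 0), and two consecutive words
 * are always copied out; the implicit seventh, zero, entry keeps the
 * Thumb rt_sigreturn case (idx = 5, idx + 1 = 6) in bounds.
 */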
static const unsigned long sigreturn_codes[7] = {
	MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};

static unsigned long signal_return_offset;
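
/*
 * The preserve/restore helpers below share one trick: the coprocessor
 * context must be saved 64-bit aligned, but an on-stack char buffer has
 * no such guarantee, so each buffer is oversized by 8 bytes and the
 * frame pointer is rounded up to the next 8-byte boundary inside it.
 * On restore, the magic and size fields are checked before any state is
 * loaded back into the hardware.
 */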
#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = CRUNCH_MAGIC;
	kframe->size = CRUNCH_STORAGE_SIZE;
	crunch_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != CRUNCH_MAGIC ||
	    kframe->size != CRUNCH_STORAGE_SIZE)
		return -1;
	crunch_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}
#endif
#ifdef CONFIG_IWMMXT

static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = IWMMXT_MAGIC;
	kframe->size = IWMMXT_STORAGE_SIZE;
	iwmmxt_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != IWMMXT_MAGIC ||
	    kframe->size != IWMMXT_STORAGE_SIZE)
		return -1;
	iwmmxt_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}

#endif
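
/*
 * The VFP helpers below follow the same magic/size convention but work
 * on the user-space frame directly; vfp_preserve_user_clear_hwstate()
 * and vfp_restore_user_hwstate() do the actual state transfer.
 */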
#ifdef CONFIG_VFP

static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
	const unsigned long magic = VFP_MAGIC;
	const unsigned long size = VFP_STORAGE_SIZE;
	int err = 0;

	__put_user_error(magic, &frame->magic, err);
	__put_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;

	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
}

static int restore_vfp_context(struct vfp_sigframe __user *frame)
{
	unsigned long magic;
	unsigned long size;
	int err = 0;

	__get_user_error(magic, &frame->magic, err);
	__get_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;
	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
		return -EINVAL;

	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
}

#endif
/*
 * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
 */
struct sigframe {
	struct ucontext uc;
	unsigned long retcode[2];
};

struct rt_sigframe {
	struct siginfo info;
	struct sigframe sig;
};
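
/*
 * The coprocessor state saved by the helpers above lives in the
 * uc_regspace area of the ucontext: setup_sigframe() and
 * restore_sigframe() overlay struct aux_sigframe on it.
 */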
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct aux_sigframe __user *aux;
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
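
	/*
	 * valid_user_regs() rejects a saved CPSR that would return to a
	 * privileged mode or with interrupts masked, so a forged frame
	 * cannot be used to escalate privileges.
	 */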
	err |= !valid_user_regs(regs);

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= restore_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= restore_vfp_context(&aux->vfp);
#endif

	return err;
}
asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, &frame->sig))
		goto badframe;

	if (restore_altstack(&frame->sig.uc.uc_stack))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
	struct aux_sigframe __user *aux;
	int err = 0;

	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= preserve_vfp_context(&aux->vfp);
#endif
	__put_user_error(0, &aux->end_magic, err);

	return err;
}
static inline void __user *
get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
{
	unsigned long sp = sigsp(regs->ARM_sp, ksig);
	void __user *frame;

	/*
	 * ATPCS B01 mandates 8-byte alignment
	 */
	frame = (void __user *)((sp - framesize) & ~7);

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(VERIFY_WRITE, frame, framesize))
		frame = NULL;

	return frame;
}
/*
 * translate the signal
 */
static inline int map_sig(int sig)
{
	struct thread_info *thread = current_thread_info();
	if (sig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
		sig = thread->exec_domain->signal_invmap[sig];
	return sig;
}
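
/*
 * setup_return() points the handler's return address (lr) at a
 * sigreturn trampoline: the restorer supplied with SA_RESTORER, the
 * per-process signal page when one is usable, or the code words just
 * written to the stack frame otherwise.  Bit 0 of the resulting
 * address selects Thumb state, mirroring bit 0 of the handler itself.
 */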
static int
setup_return(struct pt_regs *regs, struct ksignal *ksig,
	     unsigned long __user *rc, void __user *frame)
{
	unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
	unsigned long retcode;
	int thumb = 0;
	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);

	cpsr |= PSR_ENDSTATE;

	/*
	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
	 */
	if (ksig->ka.sa.sa_flags & SA_THIRTYTWO)
		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
	if (elf_hwcap & HWCAP_THUMB) {
		/*
		 * The LSB of the handler determines if we're going to
		 * be using THUMB or ARM mode for this signal handler.
		 */
		thumb = handler & 1;

		if (thumb) {
			cpsr |= PSR_T_BIT;
#if __LINUX_ARM_ARCH__ >= 7
			/* clear the If-Then Thumb-2 execution state */
			cpsr &= ~PSR_IT_MASK;
#endif
		} else
			cpsr &= ~PSR_T_BIT;
	}
#endif

	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ksig->ka.sa.sa_restorer;
	} else {
		unsigned int idx = thumb << 1;

		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			idx += 3;

		/*
		 * Put the sigreturn code on the stack no matter which return
		 * mechanism we use in order to remain ABI compliant
		 */
		if (__put_user(sigreturn_codes[idx], rc) ||
		    __put_user(sigreturn_codes[idx+1], rc+1))
			return 1;

#ifdef CONFIG_MMU
		if (cpsr & MODE32_BIT) {
			struct mm_struct *mm = current->mm;

			/*
			 * 32-bit code can use the signal return page
			 * except when the MPU has protected the vectors
			 * page from PL0
			 */
			retcode = mm->context.sigpage + signal_return_offset +
				  (idx << 2) + thumb;
		} else
#endif
		{
			/*
			 * Ensure that the instruction cache sees
			 * the return code written onto the stack.
			 */
			flush_icache_range((unsigned long)rc,
					   (unsigned long)(rc + 2));

			retcode = ((unsigned long)rc) + thumb;
		}
	}

	regs->ARM_r0 = map_sig(ksig->sig);
	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;
	regs->ARM_pc = handler;
	regs->ARM_cpsr = cpsr;

	return 0;
}
static int
setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	/*
	 * Set uc.uc_flags to a value which sc.trap_no would never have.
	 */
	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);

	err |= setup_sigframe(frame, regs, set);
	if (err == 0)
		err = setup_return(regs, ksig, frame->retcode, frame);

	return err;
}
static int
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	err |= copy_siginfo_to_user(&frame->info, &ksig->info);

	__put_user_error(0, &frame->sig.uc.uc_flags, err);
	__put_user_error(NULL, &frame->sig.uc.uc_link, err);

	err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
	err |= setup_sigframe(&frame->sig, regs, set);
	if (err == 0)
		err = setup_return(regs, ksig, frame->sig.retcode, frame);

	if (err == 0) {
		/*
		 * For realtime signals we must also set the second and third
		 * arguments for the signal handler.
		 *    -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
		 */
		regs->ARM_r1 = (unsigned long)&frame->info;
		regs->ARM_r2 = (unsigned long)&frame->sig.uc;
	}

	return err;
}
/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;

	/*
	 * Set up the stack frame
	 */
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(ksig, oldset, regs);
	else
		ret = setup_frame(ksig, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	signal_setup_done(ret, ksig, 0);
}
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle.  Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static int do_signal(struct pt_regs *regs, int syscall)
{
	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
	struct ksignal ksig;
	int restart = 0;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->ARM_pc;
		restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
		retval = regs->ARM_r0;

		/*
		 * Prepare for system call restart.  We do this here so that a
		 * debugger will see the already changed PSW.
		 */
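		/*
		 * 'restart' ends up 1 for the plain -ERESTART* cases and
		 * -1 for -ERESTART_RESTARTBLOCK (-2 from the first case,
		 * then the shared increment); do_work_pending() hands this
		 * value back to the assembly work-pending glue, which
		 * reissues the original syscall or goes through
		 * sys_restart_syscall accordingly.
		 */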
		switch (retval) {
		case -ERESTART_RESTARTBLOCK:
			restart -= 2;
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			restart++;
			regs->ARM_r0 = regs->ARM_ORIG_r0;
			regs->ARM_pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver.  When running under ptrace, at this
	 * point the debugger may change all our registers ...
	 */
	/*
	 * Depending on the signal settings we may need to revert the
	 * decision to restart the system call.  But skip this if a
	 * debugger has chosen to restart at a different PC.
	 */
	if (get_signal(&ksig)) {
		/* handler */
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			if (retval == -ERESTARTNOHAND ||
			    retval == -ERESTART_RESTARTBLOCK
			    || (retval == -ERESTARTSYS
				&& !(ksig.ka.sa.sa_flags & SA_RESTART))) {
				regs->ARM_r0 = -EINTR;
				regs->ARM_pc = continue_addr;
			}
		}
		handle_signal(&ksig, regs);
	} else {
		/* no handler */
		restore_saved_sigmask();
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			regs->ARM_pc = continue_addr;
			return restart;
		}
	}
	return 0;
}
asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	do {
		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
			schedule();
		} else {
			if (unlikely(!user_mode(regs)))
				return 0;
			local_irq_enable();
			if (thread_flags & _TIF_SIGPENDING) {
				int restart = do_signal(regs, syscall);
				if (unlikely(restart)) {
					/*
					 * Restart without handlers.
					 * Deal with it without leaving
					 * the kernel space.
					 */
					return restart;
				}
				syscall = 0;
			} else {
				clear_thread_flag(TIF_NOTIFY_RESUME);
				tracehook_notify_resume(regs);
			}
		}
		local_irq_disable();
		thread_flags = current_thread_info()->flags;
	} while (thread_flags & _TIF_WORK_MASK);
	return 0;
}
struct page *get_signal_page(void)
{
	unsigned long ptr;
	unsigned offset;
	struct page *page;
	void *addr;

	page = alloc_pages(GFP_KERNEL, 0);

	if (!page)
		return NULL;

	addr = page_address(page);

	/* Give the signal return code some randomness */
	offset = 0x200 + (get_random_int() & 0x7fc);
	signal_return_offset = offset;
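
	/*
	 * Masking with 0x7fc keeps the offset word-aligned, and the
	 * resulting 0x200..0x9fc range leaves room for all seven words
	 * of sigreturn_codes before the end of the page.
	 */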

	/*
	 * Copy the signal return handlers into the signal page, and
	 * set sigreturn to be a pointer to these.
	 */
	memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));

	ptr = (unsigned long)addr + offset;
	flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));

	return page;
}