/*
 *  linux/arch/m68k/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 * 1997-12-01  Modified for POSIX.1b signals by Andreas Schwab
 *
 * mathemu support by Roman Zippel
 *  (Note: fpstate in the signal context is completely ignored for the emulator
 *         and the internal floating point format is put on stack)
 */

/*
 * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
 * Atari :-) Current limitation: Only one sigstack can be active at one time.
 * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
 * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
 * signal handlers!
 */
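/*
 * For reference only (not part of this file): a minimal user-space sketch of
 * how a handler ends up on a signal stack.  The process registers an
 * alternate stack with sigaltstack() and sets SA_ONSTACK in sa_flags; the
 * names and sizes below are purely illustrative.
 *
 *	#include <signal.h>
 *
 *	static char altstack[SIGSTKSZ];
 *
 *	static void handler(int sig)
 *	{
 *		// runs on altstack
 *	}
 *
 *	int main(void)
 *	{
 *		stack_t ss = { .ss_sp = altstack, .ss_size = sizeof(altstack) };
 *		struct sigaction sa = { .sa_handler = handler,
 *					.sa_flags = SA_ONSTACK };
 *
 *		sigaltstack(&ss, NULL);
 *		sigaction(SIGUSR1, &sa, NULL);
 *		raise(SIGUSR1);
 *		return 0;
 *	}
 */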
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/highuid.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/module.h>
#include <linux/tracehook.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_MMU

/*
 * Handle the slight differences in classic 68k and ColdFire trap frames.
 */
#ifdef CONFIG_COLDFIRE
#define	FORMAT		4
#define	FMT4SIZE	0
#else
#define	FORMAT		0
#define	FMT4SIZE	sizeof(((struct frame *)0)->un.fmt4)
#endif

static const int frame_size_change[16] = {
  [1]	= -1, /* sizeof(((struct frame *)0)->un.fmt1), */
  [2]	= sizeof(((struct frame *)0)->un.fmt2),
  [3]	= sizeof(((struct frame *)0)->un.fmt3),
  [4]	= FMT4SIZE,
  [5]	= -1, /* sizeof(((struct frame *)0)->un.fmt5), */
  [6]	= -1, /* sizeof(((struct frame *)0)->un.fmt6), */
  [7]	= sizeof(((struct frame *)0)->un.fmt7),
  [8]	= -1, /* sizeof(((struct frame *)0)->un.fmt8), */
  [9]	= sizeof(((struct frame *)0)->un.fmt9),
  [10]	= sizeof(((struct frame *)0)->un.fmta),
  [11]	= sizeof(((struct frame *)0)->un.fmtb),
  [12]	= -1, /* sizeof(((struct frame *)0)->un.fmtc), */
  [13]	= -1, /* sizeof(((struct frame *)0)->un.fmtd), */
  [14]	= -1, /* sizeof(((struct frame *)0)->un.fmte), */
  [15]	= -1, /* sizeof(((struct frame *)0)->un.fmtf), */
};

static inline int frame_extra_sizes(int f)
{
	return frame_size_change[f];
}
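/*
 * handle_kernel_fault() is the exception-table fixup path: if the faulting
 * kernel PC has a fixup entry, the (possibly larger) exception frame is
 * replaced by a four word frame (format FORMAT) whose PC is the fixup
 * address, so the rte resumes at the fixup code instead of oopsing.
 */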
int handle_kernel_fault(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;
	struct pt_regs *tregs;

	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->pc);
	if (!fixup)
		return 0;

	/* Create a new four word stack frame, discarding the old one. */
	regs->stkadj = frame_extra_sizes(regs->format);
	tregs = (struct pt_regs *)((long)regs + regs->stkadj);
	tregs->vector = regs->vector;
	tregs->format = FORMAT;
	tregs->pc = fixup->fixup;
	tregs->sr = regs->sr;

	return 1;
}
void ptrace_signal_deliver(void)
{
	struct pt_regs *regs = signal_pt_regs();
	if (regs->orig_d0 < 0)
		return;
	switch (regs->d0) {
	case -ERESTARTNOHAND:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		regs->d0 = regs->orig_d0;
		regs->orig_d0 = -1;
		regs->pc -= 2;
		break;
	}
}
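/*
 * push_cache() is called on &frame->retcode once the return trampoline has
 * been written into the signal frame: the two longwords of return code must
 * be pushed out of the data cache and the matching instruction-cache lines
 * invalidated before user space can safely execute them.
 */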
static inline void push_cache (unsigned long vaddr)
{
	/*
	 * Using the old cache_push_v() was really a big waste.
	 *
	 * What we are trying to do is to flush 8 bytes to ram.
	 * Flushing 2 cache lines of 16 bytes is much cheaper than
	 * flushing 1 or 2 pages, as previously done in
	 * cache_push_v().
	 *   -jskov
	 */
	if (CPU_IS_040) {
		unsigned long temp;

		__asm__ __volatile__ (".chip 68040\n\t"
				      "nop\n\t"
				      "ptestr (%1)\n\t"
				      "movec %%mmusr,%0\n\t"
				      ".chip 68k"
				      : "=r" (temp)
				      : "a" (vaddr));

		temp &= PAGE_MASK;
		temp |= vaddr & ~PAGE_MASK;

		__asm__ __volatile__ (".chip 68040\n\t"
				      "nop\n\t"
				      "cpushl %%bc,(%0)\n\t"
				      ".chip 68k"
				      : : "a" (temp));
	}
	else if (CPU_IS_060) {
		unsigned long temp;
		__asm__ __volatile__ (".chip 68060\n\t"
				      "plpar (%0)\n\t"
				      ".chip 68k"
				      : "=a" (temp)
				      : "0" (vaddr));
		__asm__ __volatile__ (".chip 68060\n\t"
				      "cpushl %%bc,(%0)\n\t"
				      ".chip 68k"
				      : : "a" (temp));
	} else if (!CPU_IS_COLDFIRE) {
		/*
		 * 68030/68020 have no writeback cache;
		 * still need to clear icache.
		 * Note that vaddr is guaranteed to be long word aligned.
		 */
		unsigned long temp;
		asm volatile ("movec %%cacr,%0" : "=r" (temp));
		temp += 4;
		asm volatile ("movec %0,%%caar\n\t"
			      "movec %1,%%cacr"
			      : : "r" (vaddr), "r" (temp));
		asm volatile ("movec %0,%%caar\n\t"
			      "movec %1,%%cacr"
			      : : "r" (vaddr + 4), "r" (temp));
	} else {
		/* CPU_IS_COLDFIRE */
#if defined(CONFIG_CACHE_COPYBACK)
		flush_cf_dcache(0, DCACHE_MAX_ADDR);
#endif
		/* Invalidate instruction cache for the pushed bytes */
		clear_cf_icache(vaddr, vaddr + 8);
	}
}
static inline void adjustformat(struct pt_regs *regs)
{
}

static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
{
}

#else /* CONFIG_MMU */

void ret_from_user_signal(void);
void ret_from_user_rt_signal(void);

static inline int frame_extra_sizes(int f)
{
	/* No frame size adjustments required on non-MMU CPUs */
	return 0;
}

static inline void adjustformat(struct pt_regs *regs)
{
	((struct switch_stack *)regs - 1)->a5 = current->mm->start_data;
	/*
	 * set format byte to make stack appear modulo 4, which it will
	 * be when doing the rte
	 */
	regs->format = 0x4;
}

static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
{
	sc->sc_a5 = ((struct switch_stack *)regs - 1)->a5;
}

static inline void push_cache(unsigned long vaddr)
{
}

#endif /* CONFIG_MMU */
/*
 * Do a signal return; undo the signal stack.
 *
 * Keep the return code on the stack quadword aligned!
 * That makes the cache flush below easier.
 */

struct sigframe
{
	char __user *pretcode;
	int sig;
	int code;
	struct sigcontext __user *psc;
	char retcode[8];
	unsigned long extramask[_NSIG_WORDS-1];
	struct sigcontext sc;
};

struct rt_sigframe
{
	char __user *pretcode;
	int sig;
	struct siginfo __user *pinfo;
	void __user *puc;
	char retcode[8];
	struct siginfo info;
	struct ucontext uc;
};

#define FPCONTEXT_SIZE	216
#define uc_fpstate	uc_filler[0]
#define uc_formatvec	uc_filler[FPCONTEXT_SIZE/4]
#define uc_extra	uc_filler[FPCONTEXT_SIZE/4+1]
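/*
 * The m68k ucontext reserves an uc_filler area; the macros above carve it
 * up: the first FPCONTEXT_SIZE bytes hold the raw fsave state, followed by
 * the exception frame's format/vector word (uc_formatvec) and any extra
 * exception-frame words (uc_extra) that rt_restore_ucontext() later feeds
 * back to mangle_kernel_stack().
 */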
#ifdef CONFIG_FPU

static unsigned char fpu_version;	/* version number of fpu, set by setup_frame */

static inline int restore_fpu_state(struct sigcontext *sc)
{
	int err = 1;

	if (FPU_IS_EMU) {
		/* restore registers */
		memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
		memcpy(current->thread.fp, sc->sc_fpregs, 24);
		return 0;
	}

	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
		/* Verify the frame format.  */
		if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
		    (sc->sc_fpstate[0] != fpu_version))
			goto out;
		if (CPU_IS_020_OR_030) {
			if (m68k_fputype & FPU_68881 &&
			    !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
				goto out;
			if (m68k_fputype & FPU_68882 &&
			    !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
				goto out;
		} else if (CPU_IS_040) {
			if (!(sc->sc_fpstate[1] == 0x00 ||
			      sc->sc_fpstate[1] == 0x28 ||
			      sc->sc_fpstate[1] == 0x60))
				goto out;
		} else if (CPU_IS_060) {
			if (!(sc->sc_fpstate[3] == 0x00 ||
			      sc->sc_fpstate[3] == 0x60 ||
			      sc->sc_fpstate[3] == 0xe0))
				goto out;
		} else if (CPU_IS_COLDFIRE) {
			if (!(sc->sc_fpstate[0] == 0x00 ||
			      sc->sc_fpstate[0] == 0x05 ||
			      sc->sc_fpstate[0] == 0xe5))
				goto out;
		} else
			goto out;

		if (CPU_IS_COLDFIRE) {
			__asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t"
					  "fmovel %1,%%fpcr\n\t"
					  "fmovel %2,%%fpsr\n\t"
					  "fmovel %3,%%fpiar"
					  : /* no outputs */
					  : "m" (sc->sc_fpregs[0]),
					    "m" (sc->sc_fpcntl[0]),
					    "m" (sc->sc_fpcntl[1]),
					    "m" (sc->sc_fpcntl[2]));
		} else {
			__asm__ volatile (".chip 68k/68881\n\t"
					  "fmovemx %0,%%fp0-%%fp1\n\t"
					  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
					  ".chip 68k"
					  : /* no outputs */
					  : "m" (*sc->sc_fpregs),
					    "m" (*sc->sc_fpcntl));
		}
	}

	if (CPU_IS_COLDFIRE) {
		__asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate));
	} else {
		__asm__ volatile (".chip 68k/68881\n\t"
				  "frestore %0\n\t"
				  ".chip 68k"
				  : : "m" (*sc->sc_fpstate));
	}
	err = 0;

out:
	return err;
}
static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{
	unsigned char fpstate[FPCONTEXT_SIZE];
	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
	fpregset_t fpregs;
	int err = 1;

	if (FPU_IS_EMU) {
		/* restore fpu control register */
		if (__copy_from_user(current->thread.fpcntl,
				uc->uc_mcontext.fpregs.f_fpcntl, 12))
			goto out;
		/* restore all other fpu register */
		if (__copy_from_user(current->thread.fp,
				uc->uc_mcontext.fpregs.f_fpregs, 96))
			goto out;
		return 0;
	}

	if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
		goto out;
	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
			context_size = fpstate[1];
		/* Verify the frame format.  */
		if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
		    (fpstate[0] != fpu_version))
			goto out;
		if (CPU_IS_020_OR_030) {
			if (m68k_fputype & FPU_68881 &&
			    !(context_size == 0x18 || context_size == 0xb4))
				goto out;
			if (m68k_fputype & FPU_68882 &&
			    !(context_size == 0x38 || context_size == 0xd4))
				goto out;
		} else if (CPU_IS_040) {
			if (!(context_size == 0x00 ||
			      context_size == 0x28 ||
			      context_size == 0x60))
				goto out;
		} else if (CPU_IS_060) {
			if (!(fpstate[3] == 0x00 ||
			      fpstate[3] == 0x60 ||
			      fpstate[3] == 0xe0))
				goto out;
		} else if (CPU_IS_COLDFIRE) {
			if (!(fpstate[3] == 0x00 ||
			      fpstate[3] == 0x05 ||
			      fpstate[3] == 0xe5))
				goto out;
		} else
			goto out;
		if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
				     sizeof(fpregs)))
			goto out;

		if (CPU_IS_COLDFIRE) {
			__asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t"
					  "fmovel %1,%%fpcr\n\t"
					  "fmovel %2,%%fpsr\n\t"
					  "fmovel %3,%%fpiar"
					  : /* no outputs */
					  : "m" (fpregs.f_fpregs[0]),
					    "m" (fpregs.f_fpcntl[0]),
					    "m" (fpregs.f_fpcntl[1]),
					    "m" (fpregs.f_fpcntl[2]));
		} else {
			__asm__ volatile (".chip 68k/68881\n\t"
					  "fmovemx %0,%%fp0-%%fp7\n\t"
					  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
					  ".chip 68k"
					  : /* no outputs */
					  : "m" (*fpregs.f_fpregs),
					    "m" (*fpregs.f_fpcntl));
		}
	}
	if (context_size &&
	    __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
			     context_size))
		goto out;

	if (CPU_IS_COLDFIRE) {
		__asm__ volatile ("frestore %0" : : "m" (*fpstate));
	} else {
		__asm__ volatile (".chip 68k/68881\n\t"
				  "frestore %0\n\t"
				  ".chip 68k"
				  : : "m" (*fpstate));
	}
	err = 0;

out:
	return err;
}
/*
 * Set up a signal frame.
 */
static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
{
	if (FPU_IS_EMU) {
		/* save registers */
		memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
		memcpy(sc->sc_fpregs, current->thread.fp, 24);
		return;
	}

	if (CPU_IS_COLDFIRE) {
		__asm__ volatile ("fsave %0"
				  : : "m" (*sc->sc_fpstate) : "memory");
	} else {
		__asm__ volatile (".chip 68k/68881\n\t"
				  "fsave %0\n\t"
				  ".chip 68k"
				  : : "m" (*sc->sc_fpstate) : "memory");
	}

	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
		fpu_version = sc->sc_fpstate[0];
		if (CPU_IS_020_OR_030 &&
		    regs->vector >= (VEC_FPBRUC * 4) &&
		    regs->vector <= (VEC_FPNAN * 4)) {
			/* Clear pending exception in 68882 idle frame */
			if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
				sc->sc_fpstate[0x38] |= 1 << 3;
		}

		if (CPU_IS_COLDFIRE) {
			__asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t"
					  "fmovel %%fpcr,%1\n\t"
					  "fmovel %%fpsr,%2\n\t"
					  "fmovel %%fpiar,%3"
					  : "=m" (sc->sc_fpregs[0]),
					    "=m" (sc->sc_fpcntl[0]),
					    "=m" (sc->sc_fpcntl[1]),
					    "=m" (sc->sc_fpcntl[2])
					  : /* no inputs */
					  : "memory");
		} else {
			__asm__ volatile (".chip 68k/68881\n\t"
					  "fmovemx %%fp0-%%fp1,%0\n\t"
					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
					  ".chip 68k"
					  : "=m" (*sc->sc_fpregs),
					    "=m" (*sc->sc_fpcntl)
					  : /* no inputs */
					  : "memory");
		}
	}
}
static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
{
	unsigned char fpstate[FPCONTEXT_SIZE];
	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
	int err = 0;

	if (FPU_IS_EMU) {
		/* save fpu control register */
		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
				current->thread.fpcntl, 12);
		/* save all other fpu register */
		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
				current->thread.fp, 96);
		return err;
	}

	if (CPU_IS_COLDFIRE) {
		__asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory");
	} else {
		__asm__ volatile (".chip 68k/68881\n\t"
				  "fsave %0\n\t"
				  ".chip 68k"
				  : : "m" (*fpstate) : "memory");
	}

	err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
		fpregset_t fpregs;
		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
			context_size = fpstate[1];
		fpu_version = fpstate[0];
		if (CPU_IS_020_OR_030 &&
		    regs->vector >= (VEC_FPBRUC * 4) &&
		    regs->vector <= (VEC_FPNAN * 4)) {
			/* Clear pending exception in 68882 idle frame */
			if (*(unsigned short *) fpstate == 0x1f38)
				fpstate[0x38] |= 1 << 3;
		}
		if (CPU_IS_COLDFIRE) {
			__asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
					  "fmovel %%fpcr,%1\n\t"
					  "fmovel %%fpsr,%2\n\t"
					  "fmovel %%fpiar,%3"
					  : "=m" (fpregs.f_fpregs[0]),
					    "=m" (fpregs.f_fpcntl[0]),
					    "=m" (fpregs.f_fpcntl[1]),
					    "=m" (fpregs.f_fpcntl[2])
					  : /* no inputs */
					  : "memory");
		} else {
			__asm__ volatile (".chip 68k/68881\n\t"
					  "fmovemx %%fp0-%%fp7,%0\n\t"
					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
					  ".chip 68k"
					  : "=m" (*fpregs.f_fpregs),
					    "=m" (*fpregs.f_fpcntl)
					  : /* no inputs */
					  : "memory");
		}
		err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
				    sizeof(fpregs));
	}
	if (context_size)
		err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
				    context_size);
	return err;
}
#else /* CONFIG_FPU */

/*
 * For the case with no FPU configured these all do nothing.
 */
static inline int restore_fpu_state(struct sigcontext *sc)
{
	return 0;
}

static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{
	return 0;
}

static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
{
}

static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_FPU */
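/*
 * mangle_kernel_stack() rebuilds the exception frame described by
 * 'formatvec' on the kernel stack.  For frame formats that carry extra
 * words, the in-kernel copy of switch_stack/pt_regs is moved down to make
 * room, the saved extra words are copied from user space into the gap, and
 * control jumps straight to ret_from_signal (the asm never returns here).
 */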
static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
			       void __user *fp)
{
	int fsize = frame_extra_sizes(formatvec >> 12);
	if (fsize < 0) {
		/*
		 * user process trying to return with weird frame format
		 */
		printk("user process returning with weird frame format\n");
		return 1;
	}
	if (!fsize) {
		regs->format = formatvec >> 12;
		regs->vector = formatvec & 0xfff;
	} else {
		struct switch_stack *sw = (struct switch_stack *)regs - 1;
		unsigned long buf[fsize / 2]; /* yes, twice as much */

		/* that'll make sure that expansion won't crap over data */
		if (copy_from_user(buf + fsize / 4, fp, fsize))
			return 1;

		/* point of no return */
		regs->format = formatvec >> 12;
		regs->vector = formatvec & 0xfff;
#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
		__asm__ __volatile__ (
#ifdef CONFIG_COLDFIRE
			 "   movel %0,%/sp\n\t"
			 "   bra ret_from_signal\n"
#else
			 "   movel %0,%/a0\n\t"
			 "   subl %1,%/a0\n\t"     /* make room on stack */
			 "   movel %/a0,%/sp\n\t"  /* set stack pointer */
			 /* move switch_stack and pt_regs */
			 "1: movel %0@+,%/a0@+\n\t"
			 "   dbra %2,1b\n\t"
			 "   lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
			 "   lsrl  #2,%1\n\t"
			 "   subql #1,%1\n\t"
			 /* copy to the gap we'd made */
			 "2: movel %4@+,%/a0@+\n\t"
			 "   dbra %1,2b\n\t"
			 "   bral ret_from_signal\n"
#endif
			 : /* no outputs, it doesn't ever return */
			 : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
			   "n" (frame_offset), "a" (buf + fsize/4)
			 : "a0");
#undef frame_offset
	}
	return 0;
}
static inline int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp)
{
	int formatvec;
	struct sigcontext context;
	int err = 0;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/* get previous context */
	if (copy_from_user(&context, usc, sizeof(context)))
		goto badframe;

	/* restore passed registers */
	regs->d0 = context.sc_d0;
	regs->d1 = context.sc_d1;
	regs->a0 = context.sc_a0;
	regs->a1 = context.sc_a1;
	regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
	regs->pc = context.sc_pc;
	regs->orig_d0 = -1;		/* disable syscall checks */
	wrusp(context.sc_usp);
	formatvec = context.sc_formatvec;

	err = restore_fpu_state(&context);

	if (err || mangle_kernel_stack(regs, formatvec, fp))
		goto badframe;

	return 0;

badframe:
	return 1;
}
static inline int
rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
		    struct ucontext __user *uc)
{
	int temp;
	greg_t __user *gregs = uc->uc_mcontext.gregs;
	unsigned long usp;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	err = __get_user(temp, &uc->uc_mcontext.version);
	if (temp != MCONTEXT_VERSION)
		goto badframe;
	/* restore passed registers */
	err |= __get_user(regs->d0, &gregs[0]);
	err |= __get_user(regs->d1, &gregs[1]);
	err |= __get_user(regs->d2, &gregs[2]);
	err |= __get_user(regs->d3, &gregs[3]);
	err |= __get_user(regs->d4, &gregs[4]);
	err |= __get_user(regs->d5, &gregs[5]);
	err |= __get_user(sw->d6, &gregs[6]);
	err |= __get_user(sw->d7, &gregs[7]);
	err |= __get_user(regs->a0, &gregs[8]);
	err |= __get_user(regs->a1, &gregs[9]);
	err |= __get_user(regs->a2, &gregs[10]);
	err |= __get_user(sw->a3, &gregs[11]);
	err |= __get_user(sw->a4, &gregs[12]);
	err |= __get_user(sw->a5, &gregs[13]);
	err |= __get_user(sw->a6, &gregs[14]);
	err |= __get_user(usp, &gregs[15]);
	wrusp(usp);
	err |= __get_user(regs->pc, &gregs[16]);
	err |= __get_user(temp, &gregs[17]);
	regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
	regs->orig_d0 = -1;		/* disable syscall checks */
	err |= __get_user(temp, &uc->uc_formatvec);

	err |= rt_restore_fpu_state(uc);
	err |= restore_altstack(&uc->uc_stack);

	if (err)
		goto badframe;

	if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
		goto badframe;

	return 0;

badframe:
	return 1;
}
asmlinkage int do_sigreturn(unsigned long __unused)
{
	struct switch_stack *sw = (struct switch_stack *) &__unused;
	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
	unsigned long usp = rdusp();
	struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
	sigset_t set;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
	    (_NSIG_WORDS > 1 &&
	     __copy_from_user(&set.sig[1], &frame->extramask,
			      sizeof(frame->extramask))))
		goto badframe;

	set_current_blocked(&set);

	if (restore_sigcontext(regs, &frame->sc, frame + 1))
		goto badframe;
	return regs->d0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
asmlinkage int do_rt_sigreturn(unsigned long __unused)
{
	struct switch_stack *sw = (struct switch_stack *) &__unused;
	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
	unsigned long usp = rdusp();
	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
	sigset_t set;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	if (rt_restore_ucontext(regs, sw, &frame->uc))
		goto badframe;
	return regs->d0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
			     unsigned long mask)
{
	sc->sc_mask = mask;
	sc->sc_usp = rdusp();
	sc->sc_d0 = regs->d0;
	sc->sc_d1 = regs->d1;
	sc->sc_a0 = regs->a0;
	sc->sc_a1 = regs->a1;
	sc->sc_sr = regs->sr;
	sc->sc_pc = regs->pc;
	sc->sc_formatvec = regs->format << 12 | regs->vector;
	save_a5_state(sc, regs);
	save_fpu_state(sc, regs);
}
static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
{
	struct switch_stack *sw = (struct switch_stack *)regs - 1;
	greg_t __user *gregs = uc->uc_mcontext.gregs;
	int err = 0;

	err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
	err |= __put_user(regs->d0, &gregs[0]);
	err |= __put_user(regs->d1, &gregs[1]);
	err |= __put_user(regs->d2, &gregs[2]);
	err |= __put_user(regs->d3, &gregs[3]);
	err |= __put_user(regs->d4, &gregs[4]);
	err |= __put_user(regs->d5, &gregs[5]);
	err |= __put_user(sw->d6, &gregs[6]);
	err |= __put_user(sw->d7, &gregs[7]);
	err |= __put_user(regs->a0, &gregs[8]);
	err |= __put_user(regs->a1, &gregs[9]);
	err |= __put_user(regs->a2, &gregs[10]);
	err |= __put_user(sw->a3, &gregs[11]);
	err |= __put_user(sw->a4, &gregs[12]);
	err |= __put_user(sw->a5, &gregs[13]);
	err |= __put_user(sw->a6, &gregs[14]);
	err |= __put_user(rdusp(), &gregs[15]);
	err |= __put_user(regs->pc, &gregs[16]);
	err |= __put_user(regs->sr, &gregs[17]);
	err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
	err |= rt_save_fpu_state(uc, regs);
	return err;
}
static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
{
	unsigned long usp;

	/* Default to using normal stack.  */
	usp = rdusp();

	/* This is the X/Open sanctioned signal stack switching.  */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		if (!sas_ss_flags(usp))
			usp = current->sas_ss_sp + current->sas_ss_size;
	}
	return (void __user *)((usp - frame_size) & -8UL);
}
static int setup_frame (int sig, struct k_sigaction *ka,
			sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame;
	int fsize = frame_extra_sizes(regs->format);
	struct sigcontext context;
	int err = 0;

	if (fsize < 0) {
		printk ("setup_frame: Unknown frame format %#x\n",
			regs->format);
		goto give_sigsegv;
	}

	frame = get_sigframe(ka, regs, sizeof(*frame) + fsize);

	if (fsize)
		err |= copy_to_user (frame + 1, regs + 1, fsize);

	err |= __put_user((current_thread_info()->exec_domain
			   && current_thread_info()->exec_domain->signal_invmap
			   && sig < 32
			   ? current_thread_info()->exec_domain->signal_invmap[sig]
			   : sig),
			  &frame->sig);

	err |= __put_user(regs->vector, &frame->code);
	err |= __put_user(&frame->sc, &frame->psc);

	if (_NSIG_WORDS > 1)
		err |= copy_to_user(frame->extramask, &set->sig[1],
				    sizeof(frame->extramask));

	setup_sigcontext(&context, regs, set->sig[0]);
	err |= copy_to_user (&frame->sc, &context, sizeof(context));

	/* Set up to return from userspace.  */
#ifdef CONFIG_MMU
	err |= __put_user(frame->retcode, &frame->pretcode);
	/* moveq #,d0; trap #0 */
	err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
			  (long __user *)(frame->retcode));
|= __put_user((void *) ret_from_user_signal
, &frame
->pretcode
);
904 push_cache ((unsigned long) &frame
->retcode
);
907 * Set up registers for signal handler. All the state we are about
908 * to destroy is successfully copied to sigframe.
910 wrusp ((unsigned long) frame
);
911 regs
->pc
= (unsigned long) ka
->sa
.sa_handler
;
915 * This is subtle; if we build more than one sigframe, all but the
916 * first one will see frame format 0 and have fsize == 0, so we won't
920 regs
->stkadj
= fsize
;
922 /* Prepare to skip over the extra stuff in the exception frame. */
924 struct pt_regs
*tregs
=
925 (struct pt_regs
*)((ulong
)regs
+ regs
->stkadj
);
927 printk("Performing stackadjust=%04x\n", regs
->stkadj
);
929 /* This must be copied with decreasing addresses to
933 tregs
->pc
= regs
->pc
;
934 tregs
->sr
= regs
->sr
;
939 force_sigsegv(sig
, current
);
static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
			   sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	int fsize = frame_extra_sizes(regs->format);
	int err = 0;

	if (fsize < 0) {
		printk ("setup_frame: Unknown frame format %#x\n",
			regs->format);
		goto give_sigsegv;
	}

	frame = get_sigframe(ka, regs, sizeof(*frame));

	if (fsize)
		err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);

	err |= __put_user((current_thread_info()->exec_domain
			   && current_thread_info()->exec_domain->signal_invmap
			   && sig < 32
			   ? current_thread_info()->exec_domain->signal_invmap[sig]
			   : sig),
			  &frame->sig);
	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, info);

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(NULL, &frame->uc.uc_link);
	err |= __save_altstack(&frame->uc.uc_stack, rdusp());
	err |= rt_setup_ucontext(&frame->uc, regs);
	err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));

	/* Set up to return from userspace.  */
#ifdef CONFIG_MMU
	err |= __put_user(frame->retcode, &frame->pretcode);
#ifdef __mcoldfire__
	/* movel #__NR_rt_sigreturn,d0; trap #0 */
	err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0));
	err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16),
			  (long __user *)(frame->retcode + 4));
#else
	/* moveq #,d0; notb d0; trap #0 */
	err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
			  (long __user *)(frame->retcode + 0));
	err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
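	/*
	 * As above, these constants are packed opcode pairs: the ColdFire
	 * variant is "movel #__NR_rt_sigreturn,%d0; trap #0" (0x203c plus a
	 * 32-bit immediate), while the classic variant builds the (>127)
	 * syscall number with "moveq #(__NR_rt_sigreturn ^ 0xff),%d0;
	 * not.b %d0" (0x70xx / 0x4600) before "trap #0" (0x4e40).
	 */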
#endif
#else
	err |= __put_user((void *) ret_from_user_rt_signal, &frame->pretcode);
#endif /* CONFIG_MMU */

	if (err)
		goto give_sigsegv;

	push_cache ((unsigned long) &frame->retcode);

	/*
	 * Set up registers for signal handler.  All the state we are about
	 * to destroy is successfully copied to sigframe.
	 */
	wrusp ((unsigned long) frame);
	regs->pc = (unsigned long) ka->sa.sa_handler;
	adjustformat(regs);

	/*
	 * This is subtle; if we build more than one sigframe, all but the
	 * first one will see frame format 0 and have fsize == 0, so we won't
	 * screw stkadj.
	 */
	if (fsize)
		regs->stkadj = fsize;

	/* Prepare to skip over the extra stuff in the exception frame.  */
	if (regs->stkadj) {
		struct pt_regs *tregs =
			(struct pt_regs *)((ulong)regs + regs->stkadj);
		printk("Performing stackadjust=%04x\n", regs->stkadj);
		/* This must be copied with decreasing addresses to
		   handle overlaps.  */
		tregs->vector = 0;
		tregs->format = 0;
		tregs->pc = regs->pc;
		tregs->sr = regs->sr;
	}
	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return err;
}
static inline void
handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
{
	switch (regs->d0) {
	case -ERESTARTNOHAND:
		if (!has_handler)
			goto do_restart;
		regs->d0 = -EINTR;
		break;

	case -ERESTART_RESTARTBLOCK:
		if (!has_handler) {
			regs->d0 = __NR_restart_syscall;
			regs->pc -= 2;
			break;
		}
		regs->d0 = -EINTR;
		break;

	case -ERESTARTSYS:
		if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
			regs->d0 = -EINTR;
			break;
		}
	/* fallthrough */
	case -ERESTARTNOINTR:
	do_restart:
		regs->d0 = regs->orig_d0;
		regs->pc -= 2;
		break;
	}
}
/*
 * OK, we're invoking a handler
 */
static void
handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
	      struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int err;
	/* are we from a system call? */
	if (regs->orig_d0 >= 0)
		/* If so, check system call restarting.. */
		handle_restart(regs, ka, 1);

	/* set up the stack frame */
	if (ka->sa.sa_flags & SA_SIGINFO)
		err = setup_rt_frame(sig, ka, info, oldset, regs);
	else
		err = setup_frame(sig, ka, oldset, regs);

	if (err)
		return;

	signal_delivered(sig, info, ka, regs, 0);

	if (test_thread_flag(TIF_DELAYED_TRACE)) {
		regs->sr &= ~0x8000;
		send_sig(SIGTRAP, current, 1);
	}
}
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
static void do_signal(struct pt_regs *regs)
{
	siginfo_t info;
	struct k_sigaction ka;
	int signr;

	current->thread.esp0 = (unsigned long) regs;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Whee!  Actually deliver the signal.  */
		handle_signal(signr, &ka, &info, regs);
		return;
	}

	/* Did we come from a system call? */
	if (regs->orig_d0 >= 0)
		/* Restart the system call - no handlers present */
		handle_restart(regs, NULL, 0);

	/* If there's no signal to deliver, we just restore the saved mask.  */
	restore_saved_sigmask();
}
void do_notify_resume(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SIGPENDING))
		do_signal(regs);

	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
		tracehook_notify_resume(regs);
}