/*
 *  linux/arch/m68k/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

/*
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 * 1997-12-01  Modified for POSIX.1b signals by Andreas Schwab
 *
 * mathemu support by Roman Zippel
 *  (Note: fpstate in the signal context is completely ignored for the emulator
 *         and the internal floating point format is put on stack)
 */

/*
 * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
 * Atari :-) Current limitation: Only one sigstack can be active at one time.
 * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
 * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
 * signal handlers!
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/highuid.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/module.h>
#include <linux/tracehook.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
#ifdef CONFIG_MMU

/*
 * Handle the slight differences in classic 68k and ColdFire trap frames.
 */
#ifdef CONFIG_COLDFIRE
#define	FORMAT		4
#define	FMT4SIZE	0
#else
#define	FORMAT		0
#define	FMT4SIZE	sizeof(((struct frame *)0)->un.fmt4)
#endif

static const int frame_size_change[16] = {
	[1]	= -1, /* sizeof(((struct frame *)0)->un.fmt1), */
	[2]	= sizeof(((struct frame *)0)->un.fmt2),
	[3]	= sizeof(((struct frame *)0)->un.fmt3),
	[4]	= FMT4SIZE,
	[5]	= -1, /* sizeof(((struct frame *)0)->un.fmt5), */
	[6]	= -1, /* sizeof(((struct frame *)0)->un.fmt6), */
	[7]	= sizeof(((struct frame *)0)->un.fmt7),
	[8]	= -1, /* sizeof(((struct frame *)0)->un.fmt8), */
	[9]	= sizeof(((struct frame *)0)->un.fmt9),
	[10]	= sizeof(((struct frame *)0)->un.fmta),
	[11]	= sizeof(((struct frame *)0)->un.fmtb),
	[12]	= -1, /* sizeof(((struct frame *)0)->un.fmtc), */
	[13]	= -1, /* sizeof(((struct frame *)0)->un.fmtd), */
	[14]	= -1, /* sizeof(((struct frame *)0)->un.fmte), */
	[15]	= -1, /* sizeof(((struct frame *)0)->un.fmtf), */
};

static inline int frame_extra_sizes(int f)
{
	return frame_size_change[f];
}
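/*
 * Note: the index into frame_size_change[] is the 4-bit frame format code
 * taken from the top of the exception frame's format/vector word, and each
 * entry gives the number of extra bytes that frame format carries beyond
 * the basic four-word frame.  Entries of -1 mark formats a user process is
 * not expected to resume with (mangle_kernel_stack() below treats them as
 * errors).
 */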
int handle_kernel_fault(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;
	struct pt_regs *tregs;

	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->pc);
	if (!fixup)
		return 0;

	/* Create a new four word stack frame, discarding the old one. */
	regs->stkadj = frame_extra_sizes(regs->format);
	tregs = (struct pt_regs *)((long)regs + regs->stkadj);
	tregs->vector = regs->vector;
	tregs->format = FORMAT;
	tregs->pc = fixup->fixup;
	tregs->sr = regs->sr;

	return 1;
}
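/*
 * Note: the fixup path above implements the usual exception-table scheme:
 * if a kernel-mode fault hits an instruction listed in the exception table
 * (typically a user-copy access), the pc is redirected to the fixup address
 * and a plain four-word frame is left for the rte, with stkadj recording
 * how much of the original, longer frame was discarded.
 */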
void ptrace_signal_deliver(void)
{
	struct pt_regs *regs = signal_pt_regs();
	if (regs->orig_d0 < 0)
		return;
	switch (regs->d0) {
	case -ERESTARTNOHAND:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		regs->d0 = regs->orig_d0;
		regs->orig_d0 = -1;
		regs->pc -= 2;
		break;
	}
}
static inline void push_cache (unsigned long vaddr)
{
	/*
	 * Using the old cache_push_v() was really a big waste.
	 *
	 * What we are trying to do is to flush 8 bytes to ram.
	 * Flushing 2 cache lines of 16 bytes is much cheaper than
	 * flushing 1 or 2 pages, as previously done in
	 * cache_push_v().
	 */
	if (CPU_IS_040) {
		unsigned long temp;

		__asm__ __volatile__ (".chip 68040\n\t"
				      "nop\n\t"
				      "ptestr (%1)\n\t"
				      "movec %%mmusr,%0\n\t"
				      ".chip 68k"
				      : "=r" (temp)
				      : "a" (vaddr));

		temp &= PAGE_MASK;
		temp |= vaddr & ~PAGE_MASK;

		__asm__ __volatile__ (".chip 68040\n\t"
				      "nop\n\t"
				      "cpushl %%bc,(%0)\n\t"
				      ".chip 68k"
				      : : "a" (temp));
	} else if (CPU_IS_060) {
		unsigned long temp;

		__asm__ __volatile__ (".chip 68060\n\t"
				      "plpar (%0)\n\t"
				      ".chip 68k"
				      : "=a" (temp)
				      : "0" (vaddr));
		__asm__ __volatile__ (".chip 68060\n\t"
				      "cpushl %%bc,(%0)\n\t"
				      ".chip 68k"
				      : : "a" (temp));
	} else if (!CPU_IS_COLDFIRE) {
		/*
		 * 68030/68020 have no writeback cache;
		 * still need to clear icache.
		 * Note that vaddr is guaranteed to be long word aligned.
		 */
		unsigned long temp;

		asm volatile ("movec %%cacr,%0" : "=r" (temp));
		temp += 4;
		asm volatile ("movec %0,%%caar\n\t"
			      "movec %1,%%cacr"
			      : : "r" (vaddr), "r" (temp));
		asm volatile ("movec %0,%%caar\n\t"
			      "movec %1,%%cacr"
			      : : "r" (vaddr + 4), "r" (temp));
	}
}
static inline void adjustformat(struct pt_regs *regs)
{
}

static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
{
}
#else /* CONFIG_MMU */

void ret_from_user_signal(void);
void ret_from_user_rt_signal(void);

static inline int frame_extra_sizes(int f)
{
	/* No frame size adjustments required on non-MMU CPUs */
	return 0;
}

static inline void adjustformat(struct pt_regs *regs)
{
	((struct switch_stack *)regs - 1)->a5 = current->mm->start_data;
	/*
	 * set format byte to make stack appear modulo 4, which it will
	 * be when doing the rte
	 */
	regs->format = 0x4;
}

static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
{
	sc->sc_a5 = ((struct switch_stack *)regs - 1)->a5;
}

static inline void push_cache(unsigned long vaddr)
{
}

#endif /* CONFIG_MMU */
/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage int
sys_sigsuspend(int unused0, int unused1, old_sigset_t mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
	      struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
asmlinkage int
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
{
	return do_sigaltstack(uss, uoss, rdusp());
}
/*
 * Do a signal return; undo the signal stack.
 *
 * Keep the return code on the stack quadword aligned!
 * That makes the cache flush below easier.
 */

struct sigframe
{
	char __user *pretcode;
	int sig;
	int code;
	struct sigcontext __user *psc;
	char retcode[8];
	unsigned long extramask[_NSIG_WORDS-1];
	struct sigcontext sc;
};

struct rt_sigframe
{
	char __user *pretcode;
	int sig;
	struct siginfo __user *pinfo;
	void __user *puc;
	char retcode[8];
	struct siginfo info;
	struct ucontext uc;
};
#define FPCONTEXT_SIZE	216
#define uc_fpstate	uc_filler[0]
#define uc_formatvec	uc_filler[FPCONTEXT_SIZE/4]
#define uc_extra	uc_filler[FPCONTEXT_SIZE/4+1]
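/*
 * Note: the uc_* macros above overlay struct ucontext's uc_filler[] area:
 * the first FPCONTEXT_SIZE bytes hold the raw fsave frame, followed by the
 * saved format/vector word (uc_formatvec) and then any extra exception
 * frame words (uc_extra).  The rt_* save/restore helpers below write and
 * read these fields.
 */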
#ifdef CONFIG_FPU

static unsigned char fpu_version;	/* version number of fpu, set by setup_frame */
static inline int restore_fpu_state(struct sigcontext *sc)
{
	int err = 1;

	if (FPU_IS_EMU) {
		/* restore registers */
		memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
		memcpy(current->thread.fp, sc->sc_fpregs, 24);
		return 0;
	}

	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
		/* Verify the frame format.  */
		if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
		    (sc->sc_fpstate[0] != fpu_version))
			goto out;
		if (CPU_IS_020_OR_030) {
			if (m68k_fputype & FPU_68881 &&
			    !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
				goto out;
			if (m68k_fputype & FPU_68882 &&
			    !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
				goto out;
		} else if (CPU_IS_040) {
			if (!(sc->sc_fpstate[1] == 0x00 ||
			      sc->sc_fpstate[1] == 0x28 ||
			      sc->sc_fpstate[1] == 0x60))
				goto out;
		} else if (CPU_IS_060) {
			if (!(sc->sc_fpstate[3] == 0x00 ||
			      sc->sc_fpstate[3] == 0x60 ||
			      sc->sc_fpstate[3] == 0xe0))
				goto out;
		} else if (CPU_IS_COLDFIRE) {
			if (!(sc->sc_fpstate[0] == 0x00 ||
			      sc->sc_fpstate[0] == 0x05 ||
			      sc->sc_fpstate[0] == 0xe5))
				goto out;
		} else
			goto out;

		if (CPU_IS_COLDFIRE) {
			__asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t"
					  "fmovel %1,%%fpcr\n\t"
					  "fmovel %2,%%fpsr\n\t"
					  "fmovel %3,%%fpiar"
					  : /* no outputs */
					  : "m" (sc->sc_fpregs[0]),
					    "m" (sc->sc_fpcntl[0]),
					    "m" (sc->sc_fpcntl[1]),
					    "m" (sc->sc_fpcntl[2]));
		} else {
			__asm__ volatile (".chip 68k/68881\n\t"
					  "fmovemx %0,%%fp0-%%fp1\n\t"
					  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
					  ".chip 68k"
					  : /* no outputs */
					  : "m" (*sc->sc_fpregs),
					    "m" (*sc->sc_fpcntl));
		}
	}

	if (CPU_IS_COLDFIRE) {
		__asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate));
	} else {
		__asm__ volatile (".chip 68k/68881\n\t"
				  "frestore %0\n\t"
				  ".chip 68k"
				  : : "m" (*sc->sc_fpstate));
	}
	err = 0;

out:
	return err;
}
static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{
	unsigned char fpstate[FPCONTEXT_SIZE];
	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
	fpregset_t fpregs;
	int err = 1;

	if (FPU_IS_EMU) {
		/* restore fpu control register */
		if (__copy_from_user(current->thread.fpcntl,
				uc->uc_mcontext.fpregs.f_fpcntl, 12))
			goto out;
		/* restore all other fpu register */
		if (__copy_from_user(current->thread.fp,
				uc->uc_mcontext.fpregs.f_fpregs, 96))
			goto out;
		return 0;
	}

	if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
		goto out;
	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
			context_size = fpstate[1];
		/* Verify the frame format.  */
		if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
		    (fpstate[0] != fpu_version))
			goto out;
		if (CPU_IS_020_OR_030) {
			if (m68k_fputype & FPU_68881 &&
			    !(context_size == 0x18 || context_size == 0xb4))
				goto out;
			if (m68k_fputype & FPU_68882 &&
			    !(context_size == 0x38 || context_size == 0xd4))
				goto out;
		} else if (CPU_IS_040) {
			if (!(context_size == 0x00 ||
			      context_size == 0x28 ||
			      context_size == 0x60))
				goto out;
		} else if (CPU_IS_060) {
			if (!(fpstate[3] == 0x00 ||
			      fpstate[3] == 0x60 ||
			      fpstate[3] == 0xe0))
				goto out;
		} else if (CPU_IS_COLDFIRE) {
			if (!(fpstate[3] == 0x00 ||
			      fpstate[3] == 0x05 ||
			      fpstate[3] == 0xe5))
				goto out;
		} else
			goto out;
		if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
				     sizeof(fpregs)))
			goto out;

		if (CPU_IS_COLDFIRE) {
			__asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t"
					  "fmovel %1,%%fpcr\n\t"
					  "fmovel %2,%%fpsr\n\t"
					  "fmovel %3,%%fpiar"
					  : /* no outputs */
					  : "m" (fpregs.f_fpregs[0]),
					    "m" (fpregs.f_fpcntl[0]),
					    "m" (fpregs.f_fpcntl[1]),
					    "m" (fpregs.f_fpcntl[2]));
		} else {
			__asm__ volatile (".chip 68k/68881\n\t"
					  "fmovemx %0,%%fp0-%%fp7\n\t"
					  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
					  ".chip 68k"
					  : /* no outputs */
					  : "m" (*fpregs.f_fpregs),
					    "m" (*fpregs.f_fpcntl));
		}
	}
	if (context_size &&
	    __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
			     context_size))
		goto out;

	if (CPU_IS_COLDFIRE) {
		__asm__ volatile ("frestore %0" : : "m" (*fpstate));
	} else {
		__asm__ volatile (".chip 68k/68881\n\t"
				  "frestore %0\n\t"
				  ".chip 68k"
				  : : "m" (*fpstate));
	}
	err = 0;

out:
	return err;
}
/*
 * Set up a signal frame.
 */
static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
{
	if (FPU_IS_EMU) {
		/* save registers */
		memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
		memcpy(sc->sc_fpregs, current->thread.fp, 24);
		return;
	}

	if (CPU_IS_COLDFIRE) {
		__asm__ volatile ("fsave %0"
				  : : "m" (*sc->sc_fpstate) : "memory");
	} else {
		__asm__ volatile (".chip 68k/68881\n\t"
				  "fsave %0\n\t"
				  ".chip 68k"
				  : : "m" (*sc->sc_fpstate) : "memory");
	}

	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
		fpu_version = sc->sc_fpstate[0];
		if (CPU_IS_020_OR_030 &&
		    regs->vector >= (VEC_FPBRUC * 4) &&
		    regs->vector <= (VEC_FPNAN * 4)) {
			/* Clear pending exception in 68882 idle frame */
			if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
				sc->sc_fpstate[0x38] |= 1 << 3;
		}

		if (CPU_IS_COLDFIRE) {
			__asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t"
					  "fmovel %%fpcr,%1\n\t"
					  "fmovel %%fpsr,%2\n\t"
					  "fmovel %%fpiar,%3"
					  : "=m" (sc->sc_fpregs[0]),
					    "=m" (sc->sc_fpcntl[0]),
					    "=m" (sc->sc_fpcntl[1]),
					    "=m" (sc->sc_fpcntl[2])
					  : /* no inputs */
					  : "memory");
		} else {
			__asm__ volatile (".chip 68k/68881\n\t"
					  "fmovemx %%fp0-%%fp1,%0\n\t"
					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
					  ".chip 68k"
					  : "=m" (*sc->sc_fpregs),
					    "=m" (*sc->sc_fpcntl)
					  : /* no inputs */
					  : "memory");
		}
	}
}
static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
{
	unsigned char fpstate[FPCONTEXT_SIZE];
	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
	int err = 0;

	if (FPU_IS_EMU) {
		/* save fpu control register */
		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
				current->thread.fpcntl, 12);
		/* save all other fpu register */
		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
				current->thread.fp, 96);
		return err;
	}

	if (CPU_IS_COLDFIRE) {
		__asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory");
	} else {
		__asm__ volatile (".chip 68k/68881\n\t"
				  "fsave %0\n\t"
				  ".chip 68k"
				  : : "m" (*fpstate) : "memory");
	}

	err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
		fpregset_t fpregs;
		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
			context_size = fpstate[1];
		fpu_version = fpstate[0];
		if (CPU_IS_020_OR_030 &&
		    regs->vector >= (VEC_FPBRUC * 4) &&
		    regs->vector <= (VEC_FPNAN * 4)) {
			/* Clear pending exception in 68882 idle frame */
			if (*(unsigned short *) fpstate == 0x1f38)
				fpstate[0x38] |= 1 << 3;
		}
		if (CPU_IS_COLDFIRE) {
			__asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
					  "fmovel %%fpcr,%1\n\t"
					  "fmovel %%fpsr,%2\n\t"
					  "fmovel %%fpiar,%3"
					  : "=m" (fpregs.f_fpregs[0]),
					    "=m" (fpregs.f_fpcntl[0]),
					    "=m" (fpregs.f_fpcntl[1]),
					    "=m" (fpregs.f_fpcntl[2])
					  : /* no inputs */
					  : "memory");
		} else {
			__asm__ volatile (".chip 68k/68881\n\t"
					  "fmovemx %%fp0-%%fp7,%0\n\t"
					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
					  ".chip 68k"
					  : "=m" (*fpregs.f_fpregs),
					    "=m" (*fpregs.f_fpcntl)
					  : /* no inputs */
					  : "memory");
		}
		err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
				    sizeof(fpregs));
	}
	if (context_size)
		err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
				    context_size);
	return err;
}
#else /* CONFIG_FPU */

/*
 * For the case with no FPU configured these all do nothing.
 */
static inline int restore_fpu_state(struct sigcontext *sc)
{
	return 0;
}

static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{
	return 0;
}

static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
{
}

static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_FPU */
static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
			       void __user *fp)
{
	int fsize = frame_extra_sizes(formatvec >> 12);
	if (fsize < 0) {
		/*
		 * user process trying to return with weird frame format
		 */
#ifdef DEBUG
		printk("user process returning with weird frame format\n");
#endif
		return 1;
	}
	if (!fsize) {
		regs->format = formatvec >> 12;
		regs->vector = formatvec & 0xfff;
	} else {
		struct switch_stack *sw = (struct switch_stack *)regs - 1;
		unsigned long buf[fsize / 2]; /* yes, twice as much */

		/* that'll make sure that expansion won't crap over data */
		if (copy_from_user(buf + fsize / 4, fp, fsize))
			return 1;

		/* point of no return */
		regs->format = formatvec >> 12;
		regs->vector = formatvec & 0xfff;
#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
		__asm__ __volatile__ (
#ifdef CONFIG_COLDFIRE
			 "   movel %0,%/sp\n\t"
			 "   bra ret_from_signal\n"
#else
			 "   movel %0,%/a0\n\t"
			 "   subl %1,%/a0\n\t"     /* make room on stack */
			 "   movel %/a0,%/sp\n\t"  /* set stack pointer */
			 /* move switch_stack and pt_regs */
			 "1: movel %0@+,%/a0@+\n\t"
			 "   dbra %2,1b\n\t"
			 "   lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
			 "   lsrl  #2,%1\n\t"
			 "   subql #1,%1\n\t"
			 /* copy to the gap we'd made */
			 "2: movel %4@+,%/a0@+\n\t"
			 "   dbra %1,2b\n\t"
			 "   bral ret_from_signal\n"
#endif
			 : /* no outputs, it doesn't ever return */
			 : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
			   "n" (frame_offset), "a" (buf + fsize/4)
			 : "a0");
#undef frame_offset
	}
	return 0;
}
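/*
 * Note: mangle_kernel_stack() is what allows sigreturn to resume a context
 * that was interrupted with a larger-than-normal exception frame: the extra
 * frame words saved in the signal frame are copied back in and the whole
 * switch_stack/pt_regs block is slid down the kernel stack to make room,
 * after which control jumps straight to ret_from_signal instead of
 * returning here.
 */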
static inline int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp)
{
	int formatvec;
	struct sigcontext context;
	int err = 0;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/* get previous context */
	if (copy_from_user(&context, usc, sizeof(context)))
		goto badframe;

	/* restore passed registers */
	regs->d0 = context.sc_d0;
	regs->d1 = context.sc_d1;
	regs->a0 = context.sc_a0;
	regs->a1 = context.sc_a1;
	regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
	regs->pc = context.sc_pc;
	regs->orig_d0 = -1;		/* disable syscall checks */
	wrusp(context.sc_usp);
	formatvec = context.sc_formatvec;

	err = restore_fpu_state(&context);

	if (err || mangle_kernel_stack(regs, formatvec, fp))
		goto badframe;

	return 0;

badframe:
	return 1;
}
static inline int
rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
		    struct ucontext __user *uc)
{
	int temp;
	greg_t __user *gregs = uc->uc_mcontext.gregs;
	unsigned long usp;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	err = __get_user(temp, &uc->uc_mcontext.version);
	if (temp != MCONTEXT_VERSION)
		goto badframe;
	/* restore passed registers */
	err |= __get_user(regs->d0, &gregs[0]);
	err |= __get_user(regs->d1, &gregs[1]);
	err |= __get_user(regs->d2, &gregs[2]);
	err |= __get_user(regs->d3, &gregs[3]);
	err |= __get_user(regs->d4, &gregs[4]);
	err |= __get_user(regs->d5, &gregs[5]);
	err |= __get_user(sw->d6, &gregs[6]);
	err |= __get_user(sw->d7, &gregs[7]);
	err |= __get_user(regs->a0, &gregs[8]);
	err |= __get_user(regs->a1, &gregs[9]);
	err |= __get_user(regs->a2, &gregs[10]);
	err |= __get_user(sw->a3, &gregs[11]);
	err |= __get_user(sw->a4, &gregs[12]);
	err |= __get_user(sw->a5, &gregs[13]);
	err |= __get_user(sw->a6, &gregs[14]);
	err |= __get_user(usp, &gregs[15]);
	wrusp(usp);
	err |= __get_user(regs->pc, &gregs[16]);
	err |= __get_user(temp, &gregs[17]);
	regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
	regs->orig_d0 = -1;		/* disable syscall checks */
	err |= __get_user(temp, &uc->uc_formatvec);

	err |= rt_restore_fpu_state(uc);

	if (err || do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
		goto badframe;

	if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
		goto badframe;

	return 0;

badframe:
	return 1;
}
asmlinkage int do_sigreturn(unsigned long __unused)
{
	struct switch_stack *sw = (struct switch_stack *) &__unused;
	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
	unsigned long usp = rdusp();
	struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
	sigset_t set;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
	    (_NSIG_WORDS > 1 &&
	     __copy_from_user(&set.sig[1], &frame->extramask,
			      sizeof(frame->extramask))))
		goto badframe;

	set_current_blocked(&set);

	if (restore_sigcontext(regs, &frame->sc, frame + 1))
		goto badframe;
	return regs->d0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
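/*
 * Note on the "usp - 4" above: setup_frame() leaves the user stack pointer
 * at the start of the signal frame, and the handler's final rts pops the
 * 4-byte pretcode pointer before the sigreturn trampoline traps back into
 * the kernel, so by the time we get here the frame starts 4 bytes below
 * the current usp.
 */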
asmlinkage int do_rt_sigreturn(unsigned long __unused)
{
	struct switch_stack *sw = (struct switch_stack *) &__unused;
	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
	unsigned long usp = rdusp();
	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
	sigset_t set;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	if (rt_restore_ucontext(regs, sw, &frame->uc))
		goto badframe;
	return regs->d0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
			     unsigned long mask)
{
	sc->sc_mask = mask;
	sc->sc_usp = rdusp();
	sc->sc_d0 = regs->d0;
	sc->sc_d1 = regs->d1;
	sc->sc_a0 = regs->a0;
	sc->sc_a1 = regs->a1;
	sc->sc_sr = regs->sr;
	sc->sc_pc = regs->pc;
	sc->sc_formatvec = regs->format << 12 | regs->vector;
	save_a5_state(sc, regs);
	save_fpu_state(sc, regs);
}
static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
{
	struct switch_stack *sw = (struct switch_stack *)regs - 1;
	greg_t __user *gregs = uc->uc_mcontext.gregs;
	int err = 0;

	err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
	err |= __put_user(regs->d0, &gregs[0]);
	err |= __put_user(regs->d1, &gregs[1]);
	err |= __put_user(regs->d2, &gregs[2]);
	err |= __put_user(regs->d3, &gregs[3]);
	err |= __put_user(regs->d4, &gregs[4]);
	err |= __put_user(regs->d5, &gregs[5]);
	err |= __put_user(sw->d6, &gregs[6]);
	err |= __put_user(sw->d7, &gregs[7]);
	err |= __put_user(regs->a0, &gregs[8]);
	err |= __put_user(regs->a1, &gregs[9]);
	err |= __put_user(regs->a2, &gregs[10]);
	err |= __put_user(sw->a3, &gregs[11]);
	err |= __put_user(sw->a4, &gregs[12]);
	err |= __put_user(sw->a5, &gregs[13]);
	err |= __put_user(sw->a6, &gregs[14]);
	err |= __put_user(rdusp(), &gregs[15]);
	err |= __put_user(regs->pc, &gregs[16]);
	err |= __put_user(regs->sr, &gregs[17]);
	err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
	err |= rt_save_fpu_state(uc, regs);

	return err;
}
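/*
 * Layout of gregs[] as written above (and read back by rt_restore_ucontext):
 * indices 0-7 hold d0-d7, 8-15 hold a0-a7 (a7 being the user stack pointer
 * from rdusp()), 16 is the pc and 17 the status register.  d6/d7 and a3-a6
 * live in the switch_stack rather than pt_regs, which is why they are taken
 * from "sw".
 */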
static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
{
	unsigned long usp;

	/* Default to using normal stack.  */
	usp = rdusp();

	/* This is the X/Open sanctioned signal stack switching.  */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		if (!sas_ss_flags(usp))
			usp = current->sas_ss_sp + current->sas_ss_size;
	}
	return (void __user *)((usp - frame_size) & -8UL);
}
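/*
 * The "& -8UL" above rounds the frame start down to an 8-byte boundary.
 * That keeps the signal frame, and in particular the retcode trampoline,
 * quadword aligned, which is what the "Keep the return code on the stack
 * quadword aligned!" comment before the sigframe layout relies on for the
 * cheap two-cache-line flush in push_cache().
 */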
static int setup_frame (int sig, struct k_sigaction *ka,
			 sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame;
	int fsize = frame_extra_sizes(regs->format);
	struct sigcontext context;
	int err = 0;

	if (fsize < 0) {
#ifdef DEBUG
		printk ("setup_frame: Unknown frame format %#x\n",
			regs->format);
#endif
		goto give_sigsegv;
	}

	frame = get_sigframe(ka, regs, sizeof(*frame) + fsize);

	if (fsize)
		err |= copy_to_user (frame + 1, regs + 1, fsize);

	err |= __put_user((current_thread_info()->exec_domain
			   && current_thread_info()->exec_domain->signal_invmap
			   && sig < 32
			   ? current_thread_info()->exec_domain->signal_invmap[sig]
			   : sig),
			  &frame->sig);

	err |= __put_user(regs->vector, &frame->code);
	err |= __put_user(&frame->sc, &frame->psc);

	if (_NSIG_WORDS > 1)
		err |= copy_to_user(frame->extramask, &set->sig[1],
				    sizeof(frame->extramask));

	setup_sigcontext(&context, regs, set->sig[0]);
	err |= copy_to_user (&frame->sc, &context, sizeof(context));

	/* Set up to return from userspace.  */
#ifdef CONFIG_MMU
	err |= __put_user(frame->retcode, &frame->pretcode);
	/* moveq #,d0; trap #0 */
	err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
			  (long __user *)(frame->retcode));
#else
	err |= __put_user((void *) ret_from_user_signal, &frame->pretcode);
#endif

	if (err)
		goto give_sigsegv;

	push_cache ((unsigned long) &frame->retcode);

	/*
	 * Set up registers for signal handler.  All the state we are about
	 * to destroy is successfully copied to sigframe.
	 */
	wrusp ((unsigned long) frame);
	regs->pc = (unsigned long) ka->sa.sa_handler;
	adjustformat(regs);

	/*
	 * This is subtle; if we build more than one sigframe, all but the
	 * first one will see frame format 0 and have fsize == 0, so we won't
	 * screw stkadj.
	 */
	if (fsize)
		regs->stkadj = fsize;

	/* Prepare to skip over the extra stuff in the exception frame.  */
	if (regs->stkadj) {
		struct pt_regs *tregs =
			(struct pt_regs *)((ulong)regs + regs->stkadj);
#ifdef DEBUG
		printk("Performing stackadjust=%04x\n", regs->stkadj);
#endif
		/* This must be copied with decreasing addresses to
		   handle overlaps.  */
		tregs->vector = 0;
		tregs->format = 0;
		tregs->pc = regs->pc;
		tregs->sr = regs->sr;
	}
	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return err;
}
static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
			    sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	int fsize = frame_extra_sizes(regs->format);
	int err = 0;

	if (fsize < 0) {
#ifdef DEBUG
		printk ("setup_frame: Unknown frame format %#x\n",
			regs->format);
#endif
		goto give_sigsegv;
	}

	frame = get_sigframe(ka, regs, sizeof(*frame));

	if (fsize)
		err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);

	err |= __put_user((current_thread_info()->exec_domain
			   && current_thread_info()->exec_domain->signal_invmap
			   && sig < 32
			   ? current_thread_info()->exec_domain->signal_invmap[sig]
			   : sig),
			  &frame->sig);
	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, info);

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(NULL, &frame->uc.uc_link);
	err |= __put_user((void __user *)current->sas_ss_sp,
			  &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(rdusp()),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= rt_setup_ucontext(&frame->uc, regs);
	err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));

	/* Set up to return from userspace.  */
#ifdef CONFIG_MMU
	err |= __put_user(frame->retcode, &frame->pretcode);
#ifdef __mcoldfire__
	/* movel #__NR_rt_sigreturn,d0; trap #0 */
	err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0));
	err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16),
			  (long __user *)(frame->retcode + 4));
#else
	/* moveq #,d0; notb d0; trap #0 */
	err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
			  (long __user *)(frame->retcode + 0));
	err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
#endif
#else
	err |= __put_user((void *) ret_from_user_rt_signal, &frame->pretcode);
#endif /* CONFIG_MMU */

	if (err)
		goto give_sigsegv;

	push_cache ((unsigned long) &frame->retcode);

	/*
	 * Set up registers for signal handler.  All the state we are about
	 * to destroy is successfully copied to sigframe.
	 */
	wrusp ((unsigned long) frame);
	regs->pc = (unsigned long) ka->sa.sa_handler;
	adjustformat(regs);

	/*
	 * This is subtle; if we build more than one sigframe, all but the
	 * first one will see frame format 0 and have fsize == 0, so we won't
	 * screw stkadj.
	 */
	if (fsize)
		regs->stkadj = fsize;

	/* Prepare to skip over the extra stuff in the exception frame.  */
	if (regs->stkadj) {
		struct pt_regs *tregs =
			(struct pt_regs *)((ulong)regs + regs->stkadj);
#ifdef DEBUG
		printk("Performing stackadjust=%04x\n", regs->stkadj);
#endif
		/* This must be copied with decreasing addresses to
		   handle overlaps.  */
		tregs->vector = 0;
		tregs->format = 0;
		tregs->pc = regs->pc;
		tregs->sr = regs->sr;
	}
	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return err;
}
static inline void
handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
{
	switch (regs->d0) {
	case -ERESTARTNOHAND:
		if (!has_handler)
			goto do_restart;
		regs->d0 = -EINTR;
		break;

	case -ERESTART_RESTARTBLOCK:
		if (!has_handler) {
			regs->d0 = __NR_restart_syscall;
			regs->pc -= 2;
			break;
		}
		regs->d0 = -EINTR;
		break;

	case -ERESTARTSYS:
		if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
			regs->d0 = -EINTR;
			break;
		}
	/* fallthrough */
	case -ERESTARTNOINTR:
	do_restart:
		regs->d0 = regs->orig_d0;
		regs->pc -= 2;
		break;
	}
}
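/*
 * Rough summary of the restart logic above: -ERESTARTNOINTR always redoes
 * the system call; -ERESTARTSYS redoes it unless a handler without
 * SA_RESTART is being invoked, in which case that handler sees -EINTR;
 * -ERESTARTNOHAND and -ERESTART_RESTARTBLOCK restart only when no handler
 * runs, the latter via the restart_syscall mechanism.  A restart is done by
 * winding the pc back over the 2-byte trap instruction.
 */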
/*
 * OK, we're invoking a handler
 */
static void
handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
	      struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int err;

	/* are we from a system call? */
	if (regs->orig_d0 >= 0)
		/* If so, check system call restarting.. */
		handle_restart(regs, ka, 1);

	/* set up the stack frame */
	if (ka->sa.sa_flags & SA_SIGINFO)
		err = setup_rt_frame(sig, ka, info, oldset, regs);
	else
		err = setup_frame(sig, ka, oldset, regs);

	if (err)
		return;

	signal_delivered(sig, info, ka, regs, 0);

	if (test_thread_flag(TIF_DELAYED_TRACE)) {
		regs->sr &= ~0x8000;
		send_sig(SIGTRAP, current, 1);
	}
}
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
static void do_signal(struct pt_regs *regs)
{
	siginfo_t info;
	struct k_sigaction ka;
	int signr;

	current->thread.esp0 = (unsigned long) regs;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Whee!  Actually deliver the signal.  */
		handle_signal(signr, &ka, &info, regs);
		return;
	}

	/* Did we come from a system call? */
	if (regs->orig_d0 >= 0)
		/* Restart the system call - no handlers present */
		handle_restart(regs, NULL, 0);

	/* If there's no signal to deliver, we just restore the saved mask.  */
	restore_saved_sigmask();
}
void do_notify_resume(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SIGPENDING))
		do_signal(regs);

	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
		tracehook_notify_resume(regs);
}