/*
 *  linux/arch/m68k/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

/*
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 * 1997-12-01  Modified for POSIX.1b signals by Andreas Schwab
 *
 * mathemu support by Roman Zippel
 *  (Note: fpstate in the signal context is completely ignored for the emulator
 *         and the internal floating point format is put on stack)
 */

/*
 * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
 * Atari :-) Current limitation: Only one sigstack can be active at one time.
 * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
 * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
 * signal handlers!
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/highuid.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/module.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/ucontext.h>

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
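
/*
 * Extra bytes that each 680x0 exception stack-frame format carries beyond
 * the basic frame.  Entries of -1 mark formats that a user process can
 * never legitimately return to user space with.
 */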
static const int frame_extra_sizes[16] = {
        [1]     = -1, /* sizeof(((struct frame *)0)->un.fmt1), */
        [2]     = sizeof(((struct frame *)0)->un.fmt2),
        [3]     = sizeof(((struct frame *)0)->un.fmt3),
#ifdef CONFIG_COLDFIRE
        [4]     = 0,
#else
        [4]     = sizeof(((struct frame *)0)->un.fmt4),
#endif
        [5]     = -1, /* sizeof(((struct frame *)0)->un.fmt5), */
        [6]     = -1, /* sizeof(((struct frame *)0)->un.fmt6), */
        [7]     = sizeof(((struct frame *)0)->un.fmt7),
        [8]     = -1, /* sizeof(((struct frame *)0)->un.fmt8), */
        [9]     = sizeof(((struct frame *)0)->un.fmt9),
        [10]    = sizeof(((struct frame *)0)->un.fmta),
        [11]    = sizeof(((struct frame *)0)->un.fmtb),
        [12]    = -1, /* sizeof(((struct frame *)0)->un.fmtc), */
        [13]    = -1, /* sizeof(((struct frame *)0)->un.fmtd), */
        [14]    = -1, /* sizeof(((struct frame *)0)->un.fmte), */
        [15]    = -1, /* sizeof(((struct frame *)0)->un.fmtf), */
};
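
/*
 * If a fault taken in kernel mode hit an instruction covered by the
 * exception tables, rebuild the exception frame so that execution resumes
 * at the fixup address instead of killing the task.
 */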
int handle_kernel_fault(struct pt_regs *regs)
{
        const struct exception_table_entry *fixup;
        struct pt_regs *tregs;

        /* Are we prepared to handle this kernel fault? */
        fixup = search_exception_tables(regs->pc);
        if (!fixup)
                return 0;

        /* Create a new four word stack frame, discarding the old one. */
        regs->stkadj = frame_extra_sizes[regs->format];
        tregs = (struct pt_regs *)((long)regs + regs->stkadj);
        tregs->vector = regs->vector;
#ifdef CONFIG_COLDFIRE
        tregs->format = 4;
#else
        tregs->format = 0;
#endif
        tregs->pc = fixup->fixup;
        tregs->sr = regs->sr;

        return 1;
}
/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage int
sys_sigsuspend(int unused0, int unused1, old_sigset_t mask)
{
        mask &= _BLOCKABLE;
        spin_lock_irq(&current->sighand->siglock);
        current->saved_sigmask = current->blocked;
        siginitset(&current->blocked, mask);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        current->state = TASK_INTERRUPTIBLE;
        schedule();
        set_restore_sigmask();

        return -ERESTARTNOHAND;
}
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
              struct old_sigaction __user *oact)
{
        struct k_sigaction new_ka, old_ka;
        int ret;

        if (act) {
                old_sigset_t mask;
                if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
                    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
                    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
                    __get_user(mask, &act->sa_mask))
                        return -EFAULT;
                siginitset(&new_ka.sa.sa_mask, mask);
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
                    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
                    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
                    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
                        return -EFAULT;
        }

        return ret;
}
asmlinkage int
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
{
        return do_sigaltstack(uss, uoss, rdusp());
}
/*
 * Do a signal return; undo the signal stack.
 *
 * Keep the return code on the stack quadword aligned!
 * That makes the cache flush below easier.
 */

struct sigframe
{
        char __user *pretcode;
        int sig;
        int code;
        struct sigcontext __user *psc;
        char retcode[8];
        unsigned long extramask[_NSIG_WORDS-1];
        struct sigcontext sc;
};

struct rt_sigframe
{
        char __user *pretcode;
        int sig;
        struct siginfo __user *pinfo;
        void __user *puc;
        char retcode[8];
        struct siginfo info;
        struct ucontext uc;
};
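
/*
 * Both frame layouts reserve an 8-byte retcode buffer on the user stack;
 * setup_frame()/setup_rt_frame() below write a small trampoline there so
 * that returning from the handler issues the matching sigreturn syscall.
 */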
static unsigned char fpu_version;       /* version number of fpu, set by setup_frame */

static inline int restore_fpu_state(struct sigcontext *sc)
{
        int err = 1;

        if (FPU_IS_EMU) {
                /* restore registers */
                memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
                memcpy(current->thread.fp, sc->sc_fpregs, 24);
                return 0;
        }

        if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
                /* Verify the frame format. */
                if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
                    (sc->sc_fpstate[0] != fpu_version))
                        goto out;
                if (CPU_IS_020_OR_030) {
                        if (m68k_fputype & FPU_68881 &&
                            !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
                                goto out;
                        if (m68k_fputype & FPU_68882 &&
                            !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
                                goto out;
                } else if (CPU_IS_040) {
                        if (!(sc->sc_fpstate[1] == 0x00 ||
                              sc->sc_fpstate[1] == 0x28 ||
                              sc->sc_fpstate[1] == 0x60))
                                goto out;
                } else if (CPU_IS_060) {
                        if (!(sc->sc_fpstate[3] == 0x00 ||
                              sc->sc_fpstate[3] == 0x60 ||
                              sc->sc_fpstate[3] == 0xe0))
                                goto out;
                } else if (CPU_IS_COLDFIRE) {
                        if (!(sc->sc_fpstate[0] == 0x00 ||
                              sc->sc_fpstate[0] == 0x05 ||
                              sc->sc_fpstate[0] == 0xe5))
                                goto out;
                } else
                        goto out;

                if (CPU_IS_COLDFIRE) {
                        __asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t"
                                          "fmovel %1,%%fpcr\n\t"
                                          "fmovel %2,%%fpsr\n\t"
                                          "fmovel %3,%%fpiar"
                                          : /* no outputs */
                                          : "m" (sc->sc_fpregs[0]),
                                            "m" (sc->sc_fpcntl[0]),
                                            "m" (sc->sc_fpcntl[1]),
                                            "m" (sc->sc_fpcntl[2]));
                } else {
                        __asm__ volatile (".chip 68k/68881\n\t"
                                          "fmovemx %0,%%fp0-%%fp1\n\t"
                                          "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
                                          ".chip 68k"
                                          : /* no outputs */
                                          : "m" (*sc->sc_fpregs),
                                            "m" (*sc->sc_fpcntl));
                }
        }

        if (CPU_IS_COLDFIRE) {
                __asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate));
        } else {
                __asm__ volatile (".chip 68k/68881\n\t"
                                  "frestore %0\n\t"
                                  ".chip 68k"
                                  : : "m" (*sc->sc_fpstate));
        }
        err = 0;

out:
        return err;
}
#define FPCONTEXT_SIZE  216
#define uc_fpstate      uc_filler[0]
#define uc_formatvec    uc_filler[FPCONTEXT_SIZE/4]
#define uc_extra        uc_filler[FPCONTEXT_SIZE/4+1]
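
/*
 * The uc_filler area of struct ucontext is overlaid to hold the raw fsave
 * image, the saved format/vector word and any extra exception-frame data;
 * the three macros above give those slots names.
 */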
static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{
        unsigned char fpstate[FPCONTEXT_SIZE];
        int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
        fpregset_t fpregs;
        int err = 1;

        if (FPU_IS_EMU) {
                /* restore fpu control register */
                if (__copy_from_user(current->thread.fpcntl,
                                     uc->uc_mcontext.fpregs.f_fpcntl, 12))
                        goto out;
                /* restore all other fpu register */
                if (__copy_from_user(current->thread.fp,
                                     uc->uc_mcontext.fpregs.f_fpregs, 96))
                        goto out;
                return 0;
        }

        if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
                goto out;
        if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
                if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
                        context_size = fpstate[1];
                /* Verify the frame format. */
                if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
                    (fpstate[0] != fpu_version))
                        goto out;
                if (CPU_IS_020_OR_030) {
                        if (m68k_fputype & FPU_68881 &&
                            !(context_size == 0x18 || context_size == 0xb4))
                                goto out;
                        if (m68k_fputype & FPU_68882 &&
                            !(context_size == 0x38 || context_size == 0xd4))
                                goto out;
                } else if (CPU_IS_040) {
                        if (!(context_size == 0x00 ||
                              context_size == 0x28 ||
                              context_size == 0x60))
                                goto out;
                } else if (CPU_IS_060) {
                        if (!(fpstate[3] == 0x00 ||
                              fpstate[3] == 0x60 ||
                              fpstate[3] == 0xe0))
                                goto out;
                } else if (CPU_IS_COLDFIRE) {
                        if (!(fpstate[3] == 0x00 ||
                              fpstate[3] == 0x05 ||
                              fpstate[3] == 0xe5))
                                goto out;
                } else
                        goto out;
                if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
                                     sizeof(fpregs)))
                        goto out;

                if (CPU_IS_COLDFIRE) {
                        __asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t"
                                          "fmovel %1,%%fpcr\n\t"
                                          "fmovel %2,%%fpsr\n\t"
                                          "fmovel %3,%%fpiar"
                                          : /* no outputs */
                                          : "m" (fpregs.f_fpregs[0]),
                                            "m" (fpregs.f_fpcntl[0]),
                                            "m" (fpregs.f_fpcntl[1]),
                                            "m" (fpregs.f_fpcntl[2]));
                } else {
                        __asm__ volatile (".chip 68k/68881\n\t"
                                          "fmovemx %0,%%fp0-%%fp7\n\t"
                                          "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
                                          ".chip 68k"
                                          : /* no outputs */
                                          : "m" (*fpregs.f_fpregs),
                                            "m" (*fpregs.f_fpcntl));
                }
        }
        if (context_size &&
            __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
                             context_size))
                goto out;

        if (CPU_IS_COLDFIRE) {
                __asm__ volatile ("frestore %0" : : "m" (*fpstate));
        } else {
                __asm__ volatile (".chip 68k/68881\n\t"
                                  "frestore %0\n\t"
                                  ".chip 68k"
                                  : : "m" (*fpstate));
        }
        err = 0;

out:
        return err;
}
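
/*
 * Returning to an exception frame that carries extra words means the
 * kernel stack must grow in place: the inline assembly below shifts
 * pt_regs/switch_stack down, copies the extra data into the gap and jumps
 * straight to ret_from_signal, so this function never returns in that case.
 */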
static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
                               void __user *fp)
{
        int fsize = frame_extra_sizes[formatvec >> 12];
        if (fsize < 0) {
                /*
                 * user process trying to return with weird frame format
                 */
#ifdef DEBUG
                printk("user process returning with weird frame format\n");
#endif
                return 1;
        }
        if (!fsize) {
                regs->format = formatvec >> 12;
                regs->vector = formatvec & 0xfff;
        } else {
                struct switch_stack *sw = (struct switch_stack *)regs - 1;
                unsigned long buf[fsize / 2]; /* yes, twice as much */

                /* that'll make sure that expansion won't crap over data */
                if (copy_from_user(buf + fsize / 4, fp, fsize))
                        return 1;

                /* point of no return */
                regs->format = formatvec >> 12;
                regs->vector = formatvec & 0xfff;
#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
                __asm__ __volatile__ (
#ifdef CONFIG_COLDFIRE
                         "   movel %0,%/sp\n\t"
                         "   bra ret_from_signal\n"
#else
                         "   movel %0,%/a0\n\t"
                         "   subl %1,%/a0\n\t"     /* make room on stack */
                         "   movel %/a0,%/sp\n\t"  /* set stack pointer */
                         /* move switch_stack and pt_regs */
                         "1: movel %0@+,%/a0@+\n\t"
                         "   dbra %2,1b\n\t"
                         "   lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
                         "   lsrl  #2,%1\n\t"
                         "   subql #1,%1\n\t"
                         /* copy to the gap we'd made */
                         "2: movel %4@+,%/a0@+\n\t"
                         "   dbra %1,2b\n\t"
                         "   bral ret_from_signal\n"
#endif
                         : /* no outputs, it doesn't ever return */
                         : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
                           "n" (frame_offset), "a" (buf + fsize/4)
                         : "a0");
#undef frame_offset
        }
        return 0;
}
static inline int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp)
{
        int formatvec;
        struct sigcontext context;
        int err;

        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;

        /* get previous context */
        if (copy_from_user(&context, usc, sizeof(context)))
                goto badframe;

        /* restore passed registers */
        regs->d0 = context.sc_d0;
        regs->d1 = context.sc_d1;
        regs->a0 = context.sc_a0;
        regs->a1 = context.sc_a1;
        regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
        regs->pc = context.sc_pc;
        regs->orig_d0 = -1;             /* disable syscall checks */
        wrusp(context.sc_usp);
        formatvec = context.sc_formatvec;

        err = restore_fpu_state(&context);

        if (err || mangle_kernel_stack(regs, formatvec, fp))
                goto badframe;

        return 0;

badframe:
        return 1;
}
static inline int
rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
                    struct ucontext __user *uc)
{
        int temp;
        greg_t __user *gregs = uc->uc_mcontext.gregs;
        unsigned long usp;
        int err;

        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;

        err = __get_user(temp, &uc->uc_mcontext.version);
        if (temp != MCONTEXT_VERSION)
                goto badframe;
        /* restore passed registers */
        err |= __get_user(regs->d0, &gregs[0]);
        err |= __get_user(regs->d1, &gregs[1]);
        err |= __get_user(regs->d2, &gregs[2]);
        err |= __get_user(regs->d3, &gregs[3]);
        err |= __get_user(regs->d4, &gregs[4]);
        err |= __get_user(regs->d5, &gregs[5]);
        err |= __get_user(sw->d6, &gregs[6]);
        err |= __get_user(sw->d7, &gregs[7]);
        err |= __get_user(regs->a0, &gregs[8]);
        err |= __get_user(regs->a1, &gregs[9]);
        err |= __get_user(regs->a2, &gregs[10]);
        err |= __get_user(sw->a3, &gregs[11]);
        err |= __get_user(sw->a4, &gregs[12]);
        err |= __get_user(sw->a5, &gregs[13]);
        err |= __get_user(sw->a6, &gregs[14]);
        err |= __get_user(usp, &gregs[15]);
        wrusp(usp);
        err |= __get_user(regs->pc, &gregs[16]);
        err |= __get_user(temp, &gregs[17]);
        regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
        regs->orig_d0 = -1;             /* disable syscall checks */
        err |= __get_user(temp, &uc->uc_formatvec);

        err |= rt_restore_fpu_state(uc);

        if (err || do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
                goto badframe;

        if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
                goto badframe;

        return 0;

badframe:
        return 1;
}
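
/*
 * do_sigreturn()/do_rt_sigreturn() are entered via the trampoline's
 * trap #0; taking the address of the dummy argument recovers the saved
 * switch_stack and pt_regs that sit just above it on the kernel stack.
 */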
asmlinkage int do_sigreturn(unsigned long __unused)
{
        struct switch_stack *sw = (struct switch_stack *) &__unused;
        struct pt_regs *regs = (struct pt_regs *) (sw + 1);
        unsigned long usp = rdusp();
        struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
        sigset_t set;

        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
            (_NSIG_WORDS > 1 &&
             __copy_from_user(&set.sig[1], &frame->extramask,
                              sizeof(frame->extramask))))
                goto badframe;

        sigdelsetmask(&set, ~_BLOCKABLE);
        current->blocked = set;
        recalc_sigpending();

        if (restore_sigcontext(regs, &frame->sc, frame + 1))
                goto badframe;
        return regs->d0;

badframe:
        force_sig(SIGSEGV, current);
        return 0;
}
asmlinkage int do_rt_sigreturn(unsigned long __unused)
{
        struct switch_stack *sw = (struct switch_stack *) &__unused;
        struct pt_regs *regs = (struct pt_regs *) (sw + 1);
        unsigned long usp = rdusp();
        struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
        sigset_t set;

        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;

        sigdelsetmask(&set, ~_BLOCKABLE);
        current->blocked = set;
        recalc_sigpending();

        if (rt_restore_ucontext(regs, sw, &frame->uc))
                goto badframe;
        return regs->d0;

badframe:
        force_sig(SIGSEGV, current);
        return 0;
}
/*
 * Set up a signal frame.
 */

static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
{
        if (FPU_IS_EMU) {
                /* save registers */
                memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
                memcpy(sc->sc_fpregs, current->thread.fp, 24);
                return;
        }

        if (CPU_IS_COLDFIRE) {
                __asm__ volatile ("fsave %0"
                                  : : "m" (*sc->sc_fpstate) : "memory");
        } else {
                __asm__ volatile (".chip 68k/68881\n\t"
                                  "fsave %0\n\t"
                                  ".chip 68k"
                                  : : "m" (*sc->sc_fpstate) : "memory");
        }

        if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
                fpu_version = sc->sc_fpstate[0];
                if (CPU_IS_020_OR_030 &&
                    regs->vector >= (VEC_FPBRUC * 4) &&
                    regs->vector <= (VEC_FPNAN * 4)) {
                        /* Clear pending exception in 68882 idle frame */
                        if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
                                sc->sc_fpstate[0x38] |= 1 << 3;
                }
                if (CPU_IS_COLDFIRE) {
                        __asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t"
                                          "fmovel %%fpcr,%1\n\t"
                                          "fmovel %%fpsr,%2\n\t"
                                          "fmovel %%fpiar,%3"
                                          : "=m" (sc->sc_fpregs[0]),
                                            "=m" (sc->sc_fpcntl[0]),
                                            "=m" (sc->sc_fpcntl[1]),
                                            "=m" (sc->sc_fpcntl[2])
                                          : /* no inputs */
                                          : "memory");
                } else {
                        __asm__ volatile (".chip 68k/68881\n\t"
                                          "fmovemx %%fp0-%%fp1,%0\n\t"
                                          "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
                                          ".chip 68k"
                                          : "=m" (*sc->sc_fpregs),
                                            "=m" (*sc->sc_fpcntl)
                                          : /* no inputs */
                                          : "memory");
                }
        }
}
static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
{
        unsigned char fpstate[FPCONTEXT_SIZE];
        int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
        int err = 0;

        if (FPU_IS_EMU) {
                /* save fpu control register */
                err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
                                    current->thread.fpcntl, 12);
                /* save all other fpu register */
                err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
                                    current->thread.fp, 96);
                return err;
        }

        if (CPU_IS_COLDFIRE) {
                __asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory");
        } else {
                __asm__ volatile (".chip 68k/68881\n\t"
                                  "fsave %0\n\t"
                                  ".chip 68k"
                                  : : "m" (*fpstate) : "memory");
        }

        err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
        if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
                fpregset_t fpregs;
                if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
                        context_size = fpstate[1];
                fpu_version = fpstate[0];
                if (CPU_IS_020_OR_030 &&
                    regs->vector >= (VEC_FPBRUC * 4) &&
                    regs->vector <= (VEC_FPNAN * 4)) {
                        /* Clear pending exception in 68882 idle frame */
                        if (*(unsigned short *) fpstate == 0x1f38)
                                fpstate[0x38] |= 1 << 3;
                }
                if (CPU_IS_COLDFIRE) {
                        __asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
                                          "fmovel %%fpcr,%1\n\t"
                                          "fmovel %%fpsr,%2\n\t"
                                          "fmovel %%fpiar,%3"
                                          : "=m" (fpregs.f_fpregs[0]),
                                            "=m" (fpregs.f_fpcntl[0]),
                                            "=m" (fpregs.f_fpcntl[1]),
                                            "=m" (fpregs.f_fpcntl[2])
                                          : /* no inputs */
                                          : "memory");
                } else {
                        __asm__ volatile (".chip 68k/68881\n\t"
                                          "fmovemx %%fp0-%%fp7,%0\n\t"
                                          "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
                                          ".chip 68k"
                                          : "=m" (*fpregs.f_fpregs),
                                            "=m" (*fpregs.f_fpcntl)
                                          : /* no inputs */
                                          : "memory");
                }
                err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
                                    sizeof(fpregs));
        }
        if (context_size)
                err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
                                    context_size);
        return err;
}
static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
                             unsigned long mask)
{
        sc->sc_mask = mask;
        sc->sc_usp = rdusp();
        sc->sc_d0 = regs->d0;
        sc->sc_d1 = regs->d1;
        sc->sc_a0 = regs->a0;
        sc->sc_a1 = regs->a1;
        sc->sc_sr = regs->sr;
        sc->sc_pc = regs->pc;
        sc->sc_formatvec = regs->format << 12 | regs->vector;
        save_fpu_state(sc, regs);
}
static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
{
        struct switch_stack *sw = (struct switch_stack *)regs - 1;
        greg_t __user *gregs = uc->uc_mcontext.gregs;
        int err = 0;

        err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
        err |= __put_user(regs->d0, &gregs[0]);
        err |= __put_user(regs->d1, &gregs[1]);
        err |= __put_user(regs->d2, &gregs[2]);
        err |= __put_user(regs->d3, &gregs[3]);
        err |= __put_user(regs->d4, &gregs[4]);
        err |= __put_user(regs->d5, &gregs[5]);
        err |= __put_user(sw->d6, &gregs[6]);
        err |= __put_user(sw->d7, &gregs[7]);
        err |= __put_user(regs->a0, &gregs[8]);
        err |= __put_user(regs->a1, &gregs[9]);
        err |= __put_user(regs->a2, &gregs[10]);
        err |= __put_user(sw->a3, &gregs[11]);
        err |= __put_user(sw->a4, &gregs[12]);
        err |= __put_user(sw->a5, &gregs[13]);
        err |= __put_user(sw->a6, &gregs[14]);
        err |= __put_user(rdusp(), &gregs[15]);
        err |= __put_user(regs->pc, &gregs[16]);
        err |= __put_user(regs->sr, &gregs[17]);
        err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
        err |= rt_save_fpu_state(uc, regs);
        return err;
}
static inline void push_cache(unsigned long vaddr)
{
        /*
         * Using the old cache_push_v() was really a big waste.
         *
         * What we are trying to do is to flush 8 bytes to ram.
         * Flushing 2 cache lines of 16 bytes is much cheaper than
         * flushing 1 or 2 pages, as previously done in
         * cache_push_v().
         *   Jes
         */
        if (CPU_IS_040) {
                unsigned long temp;

                __asm__ __volatile__ (".chip 68040\n\t"
                                      "nop\n\t"
                                      "ptestr (%1)\n\t"
                                      "movec %%mmusr,%0\n\t"
                                      ".chip 68k"
                                      : "=r" (temp)
                                      : "a" (vaddr));

                temp &= PAGE_MASK;
                temp |= vaddr & ~PAGE_MASK;

                __asm__ __volatile__ (".chip 68040\n\t"
                                      "nop\n\t"
                                      "cpushl %%bc,(%0)\n\t"
                                      ".chip 68k"
                                      : : "a" (temp));
        } else if (CPU_IS_060) {
                unsigned long temp;
                __asm__ __volatile__ (".chip 68060\n\t"
                                      "plpar (%0)\n\t"
                                      ".chip 68k"
                                      : "=a" (temp)
                                      : "0" (vaddr));
                __asm__ __volatile__ (".chip 68060\n\t"
                                      "cpushl %%bc,(%0)\n\t"
                                      ".chip 68k"
                                      : : "a" (temp));
        } else if (!CPU_IS_COLDFIRE) {
                /*
                 * 68030/68020 have no writeback cache;
                 * still need to clear icache.
                 * Note that vaddr is guaranteed to be long word aligned.
                 */
                unsigned long temp;
                asm volatile ("movec %%cacr,%0" : "=r" (temp));
                temp += 4;
                asm volatile ("movec %0,%%caar\n\t"
                              "movec %1,%%cacr"
                              : : "r" (vaddr), "r" (temp));
                asm volatile ("movec %0,%%caar\n\t"
                              "movec %1,%%cacr"
                              : : "r" (vaddr + 4), "r" (temp));
        }
}
static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
{
        unsigned long usp;

        /* Default to using normal stack. */
        usp = rdusp();

        /* This is the X/Open sanctioned signal stack switching. */
        if (ka->sa.sa_flags & SA_ONSTACK) {
                if (!sas_ss_flags(usp))
                        usp = current->sas_ss_sp + current->sas_ss_size;
        }
        return (void __user *)((usp - frame_size) & -8UL);
}
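
/*
 * setup_frame() lays down the old-style frame.  The 0x70004e40 constant
 * written into retcode below encodes the two instructions
 * "moveq #__NR_sigreturn,d0; trap #0" once the syscall number has been
 * shifted into the immediate byte.
 */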
static int setup_frame(int sig, struct k_sigaction *ka,
                       sigset_t *set, struct pt_regs *regs)
{
        struct sigframe __user *frame;
        int fsize = frame_extra_sizes[regs->format];
        struct sigcontext context;
        int err = 0;

        if (fsize < 0) {
#ifdef DEBUG
                printk("setup_frame: Unknown frame format %#x\n",
                       regs->format);
#endif
                goto give_sigsegv;
        }

        frame = get_sigframe(ka, regs, sizeof(*frame) + fsize);

        if (fsize)
                err |= copy_to_user(frame + 1, regs + 1, fsize);

        err |= __put_user((current_thread_info()->exec_domain
                           && current_thread_info()->exec_domain->signal_invmap
                           && sig < 32
                           ? current_thread_info()->exec_domain->signal_invmap[sig]
                           : sig),
                          &frame->sig);

        err |= __put_user(regs->vector, &frame->code);
        err |= __put_user(&frame->sc, &frame->psc);

        if (_NSIG_WORDS > 1)
                err |= copy_to_user(frame->extramask, &set->sig[1],
                                    sizeof(frame->extramask));

        setup_sigcontext(&context, regs, set->sig[0]);
        err |= copy_to_user(&frame->sc, &context, sizeof(context));

        /* Set up to return from userspace. */
        err |= __put_user(frame->retcode, &frame->pretcode);
        /* moveq #,d0; trap #0 */
        err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
                          (long __user *)(frame->retcode));

        if (err)
                goto give_sigsegv;

        push_cache((unsigned long) &frame->retcode);

        /*
         * Set up registers for signal handler.  All the state we are about
         * to destroy is successfully copied to sigframe.
         */
        wrusp((unsigned long) frame);
        regs->pc = (unsigned long) ka->sa.sa_handler;

        /*
         * This is subtle; if we build more than one sigframe, all but the
         * first one will see frame format 0 and have fsize == 0, so we won't
         * screw stkadj.
         */
        if (fsize)
                regs->stkadj = fsize;

        /* Prepare to skip over the extra stuff in the exception frame. */
        if (regs->stkadj) {
                struct pt_regs *tregs =
                        (struct pt_regs *)((ulong)regs + regs->stkadj);
#ifdef DEBUG
                printk("Performing stackadjust=%04x\n", regs->stkadj);
#endif
                /* This must be copied with decreasing addresses to
                   handle overlaps. */
                tregs->vector = 0;
                tregs->format = 0;
                tregs->pc = regs->pc;
                tregs->sr = regs->sr;
        }
        return 0;

give_sigsegv:
        force_sigsegv(sig, current);
        return err;
}
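
/*
 * setup_rt_frame() is the SA_SIGINFO variant: it pushes siginfo plus a
 * full ucontext instead of the old sigcontext, and its trampoline calls
 * rt_sigreturn, with a ColdFire variant that avoids the notb instruction.
 */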
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                          sigset_t *set, struct pt_regs *regs)
{
        struct rt_sigframe __user *frame;
        int fsize = frame_extra_sizes[regs->format];
        int err = 0;

        if (fsize < 0) {
#ifdef DEBUG
                printk("setup_frame: Unknown frame format %#x\n",
                       regs->format);
#endif
                goto give_sigsegv;
        }

        frame = get_sigframe(ka, regs, sizeof(*frame));

        if (fsize)
                err |= copy_to_user(&frame->uc.uc_extra, regs + 1, fsize);

        err |= __put_user((current_thread_info()->exec_domain
                           && current_thread_info()->exec_domain->signal_invmap
                           && sig < 32
                           ? current_thread_info()->exec_domain->signal_invmap[sig]
                           : sig),
                          &frame->sig);
        err |= __put_user(&frame->info, &frame->pinfo);
        err |= __put_user(&frame->uc, &frame->puc);
        err |= copy_siginfo_to_user(&frame->info, info);

        /* Create the ucontext. */
        err |= __put_user(0, &frame->uc.uc_flags);
        err |= __put_user(NULL, &frame->uc.uc_link);
        err |= __put_user((void __user *)current->sas_ss_sp,
                          &frame->uc.uc_stack.ss_sp);
        err |= __put_user(sas_ss_flags(rdusp()),
                          &frame->uc.uc_stack.ss_flags);
        err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
        err |= rt_setup_ucontext(&frame->uc, regs);
        err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

        /* Set up to return from userspace. */
        err |= __put_user(frame->retcode, &frame->pretcode);
#ifdef __mcoldfire__
        /* movel #__NR_rt_sigreturn,d0; trap #0 */
        err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0));
        err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16),
                          (long __user *)(frame->retcode + 4));
#else
        /* moveq #,d0; notb d0; trap #0 */
        err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
                          (long __user *)(frame->retcode + 0));
        err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
#endif

        if (err)
                goto give_sigsegv;

        push_cache((unsigned long) &frame->retcode);

        /*
         * Set up registers for signal handler.  All the state we are about
         * to destroy is successfully copied to sigframe.
         */
        wrusp((unsigned long) frame);
        regs->pc = (unsigned long) ka->sa.sa_handler;

        /*
         * This is subtle; if we build more than one sigframe, all but the
         * first one will see frame format 0 and have fsize == 0, so we won't
         * screw stkadj.
         */
        if (fsize)
                regs->stkadj = fsize;

        /* Prepare to skip over the extra stuff in the exception frame. */
        if (regs->stkadj) {
                struct pt_regs *tregs =
                        (struct pt_regs *)((ulong)regs + regs->stkadj);
#ifdef DEBUG
                printk("Performing stackadjust=%04x\n", regs->stkadj);
#endif
                /* This must be copied with decreasing addresses to
                   handle overlaps. */
                tregs->vector = 0;
                tregs->format = 0;
                tregs->pc = regs->pc;
                tregs->sr = regs->sr;
        }
        return 0;

give_sigsegv:
        force_sigsegv(sig, current);
        return err;
}
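
/*
 * Fix up the register state of an interrupted system call: either back
 * the PC up by two bytes so the trap instruction is re-executed, or turn
 * the pending restart into -EINTR when a handler without SA_RESTART is
 * about to run.
 */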
static inline void
handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
{
        switch (regs->d0) {
        case -ERESTARTNOHAND:
                if (!has_handler)
                        goto do_restart;
                regs->d0 = -EINTR;
                break;

        case -ERESTART_RESTARTBLOCK:
                if (!has_handler) {
                        regs->d0 = __NR_restart_syscall;
                        regs->pc -= 2;
                        break;
                }
                regs->d0 = -EINTR;
                break;

        case -ERESTARTSYS:
                if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
                        regs->d0 = -EINTR;
                        break;
                }
                /* fallthrough */
        case -ERESTARTNOINTR:
        do_restart:
                regs->d0 = regs->orig_d0;
                regs->pc -= 2;
                break;
        }
}
void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
{
        if (regs->orig_d0 < 0)
                return;
        switch (regs->d0) {
        case -ERESTARTNOHAND:
        case -ERESTARTSYS:
        case -ERESTARTNOINTR:
                regs->d0 = regs->orig_d0;
                regs->orig_d0 = -1;
                regs->pc -= 2;
                break;
        }
}
/*
 * OK, we're invoking a handler
 */
static void
handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
              sigset_t *oldset, struct pt_regs *regs)
{
        int err;
        /* are we from a system call? */
        if (regs->orig_d0 >= 0)
                /* If so, check system call restarting.. */
                handle_restart(regs, ka, 1);

        /* set up the stack frame */
        if (ka->sa.sa_flags & SA_SIGINFO)
                err = setup_rt_frame(sig, ka, info, oldset, regs);
        else
                err = setup_frame(sig, ka, oldset, regs);

        if (err)
                return;

        sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
        if (!(ka->sa.sa_flags & SA_NODEFER))
                sigaddset(&current->blocked, sig);
        recalc_sigpending();

        if (test_thread_flag(TIF_DELAYED_TRACE)) {
                regs->sr &= ~0x8000;
                send_sig(SIGTRAP, current, 1);
        }

        clear_thread_flag(TIF_RESTORE_SIGMASK);
}
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
asmlinkage void do_signal(struct pt_regs *regs)
{
        siginfo_t info;
        struct k_sigaction ka;
        int signr;
        sigset_t *oldset;

        current->thread.esp0 = (unsigned long) regs;

        if (test_thread_flag(TIF_RESTORE_SIGMASK))
                oldset = &current->saved_sigmask;
        else
                oldset = &current->blocked;

        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee!  Actually deliver the signal.  */
                handle_signal(signr, &ka, &info, oldset, regs);
                return;
        }

        /* Did we come from a system call? */
        if (regs->orig_d0 >= 0)
                /* Restart the system call - no handlers present */
                handle_restart(regs, NULL, 0);

        /* If there's no signal to deliver, we just restore the saved mask. */
        if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
                clear_thread_flag(TIF_RESTORE_SIGMASK);
                sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
        }
}