/*
 * arch/alpha/kernel/traps.c
 *
 * (C) Copyright 1994 Linus Torvalds
 */

/*
 * This file initializes the trap entry points
 */
11 #include <linux/jiffies.h>
13 #include <linux/sched.h>
14 #include <linux/tty.h>
15 #include <linux/delay.h>
16 #include <linux/module.h>
17 #include <linux/kallsyms.h>
18 #include <linux/ratelimit.h>
20 #include <asm/gentrap.h>
21 #include <asm/uaccess.h>
22 #include <asm/unaligned.h>
23 #include <asm/sysinfo.h>
24 #include <asm/hwrpb.h>
25 #include <asm/mmu_context.h>
26 #include <asm/special_insns.h>
30 /* Work-around for some SRMs which mishandle opDEC faults. */
37 __asm__
__volatile__ (
38 /* Load the address of... */
40 /* A stub instruction fault handler. Just add 4 to the
46 /* Install the instruction fault handler. */
48 " call_pal %[wrent]\n"
49 /* With that in place, the fault from the round-to-minf fp
50 insn will arrive either at the "lda 4" insn (bad) or one
51 past that (good). This places the correct fixup in %0. */
53 " cvttq/svm $f31,$f31\n"
55 : [fix
] "=r" (opDEC_fix
)
56 : [rti
] "n" (PAL_rti
), [wrent
] "n" (PAL_wrent
)
57 : "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");
60 printk("opDEC fixup enabled.\n");
64 dik_show_regs(struct pt_regs
*regs
, unsigned long *r9_15
)
66 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n",
67 regs
->pc
, regs
->r26
, regs
->ps
, print_tainted());
68 printk("pc is at %pSR\n", (void *)regs
->pc
);
69 printk("ra is at %pSR\n", (void *)regs
->r26
);
70 printk("v0 = %016lx t0 = %016lx t1 = %016lx\n",
71 regs
->r0
, regs
->r1
, regs
->r2
);
72 printk("t2 = %016lx t3 = %016lx t4 = %016lx\n",
73 regs
->r3
, regs
->r4
, regs
->r5
);
74 printk("t5 = %016lx t6 = %016lx t7 = %016lx\n",
75 regs
->r6
, regs
->r7
, regs
->r8
);
78 printk("s0 = %016lx s1 = %016lx s2 = %016lx\n",
79 r9_15
[9], r9_15
[10], r9_15
[11]);
80 printk("s3 = %016lx s4 = %016lx s5 = %016lx\n",
81 r9_15
[12], r9_15
[13], r9_15
[14]);
82 printk("s6 = %016lx\n", r9_15
[15]);
85 printk("a0 = %016lx a1 = %016lx a2 = %016lx\n",
86 regs
->r16
, regs
->r17
, regs
->r18
);
87 printk("a3 = %016lx a4 = %016lx a5 = %016lx\n",
88 regs
->r19
, regs
->r20
, regs
->r21
);
89 printk("t8 = %016lx t9 = %016lx t10= %016lx\n",
90 regs
->r22
, regs
->r23
, regs
->r24
);
91 printk("t11= %016lx pv = %016lx at = %016lx\n",
92 regs
->r25
, regs
->r27
, regs
->r28
);
93 printk("gp = %016lx sp = %p\n", regs
->gp
, regs
+1);
/* Symbolic (software-convention) names of the 32 Alpha integer
   registers, indexed by hardware register number.  */
static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
			   "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
			   "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
			   "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
107 dik_show_code(unsigned int *pc
)
112 for (i
= -6; i
< 2; i
++) {
114 if (__get_user(insn
, (unsigned int __user
*)pc
+ i
))
116 printk("%c%08x%c", i
? ' ' : '<', insn
, i
? ' ' : '>');
/*
 * Walk the kernel stack from sp to the end of its (8KB-aligned) page,
 * printing every word that looks like a kernel text address.
 * Fix: the 40-entry cap below was dead because the counter was never
 * advanced; bump it once per printed entry.
 */
static void
dik_show_trace(unsigned long *sp)
{
	long i = 0;
	printk("Trace:\n");
	while (0x1ff8 & (unsigned long) sp) {
		extern char _stext[], _etext[];
		unsigned long tmp = *sp;
		sp++;
		/* Only report values that fall inside kernel text.  */
		if (tmp < (unsigned long) &_stext)
			continue;
		if (tmp >= (unsigned long) &_etext)
			continue;
		printk("[<%lx>] %pSR\n", tmp, (void *)tmp);
		i++;
		if (i > 40) {
			printk(" ...");
			break;
		}
	}
	printk("\n");
}
143 static int kstack_depth_to_print
= 24;
145 void show_stack(struct task_struct
*task
, unsigned long *sp
)
147 unsigned long *stack
;
151 * debugging aid: "show_stack(NULL);" prints the
152 * back trace for this cpu.
155 sp
=(unsigned long*)&sp
;
158 for(i
=0; i
< kstack_depth_to_print
; i
++) {
159 if (((long) stack
& (THREAD_SIZE
-1)) == 0)
161 if (i
&& ((i
% 4) == 0))
163 printk("%016lx ", *stack
++);
170 die_if_kernel(char * str
, struct pt_regs
*regs
, long err
, unsigned long *r9_15
)
175 printk("CPU %d ", hard_smp_processor_id());
177 printk("%s(%d): %s %ld\n", current
->comm
, task_pid_nr(current
), str
, err
);
178 dik_show_regs(regs
, r9_15
);
179 add_taint(TAINT_DIE
, LOCKDEP_NOW_UNRELIABLE
);
180 dik_show_trace((unsigned long *)(regs
+1));
181 dik_show_code((unsigned int *)regs
->pc
);
183 if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL
)) {
184 printk("die_if_kernel recursion detected.\n");
#ifndef CONFIG_MATHEMU
/* Without the math emulator these are function pointers (possibly
   filled in later by a module); the dummy just reports "not
   handled".  With CONFIG_MATHEMU they are ordinary functions.  */
static long dummy_emul(void) { return 0; }
long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
  = (void *)dummy_emul;
long (*alpha_fp_emul) (unsigned long pc)
  = (void *)dummy_emul;
#else
long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
long alpha_fp_emul (unsigned long pc);
#endif
203 do_entArith(unsigned long summary
, unsigned long write_mask
,
204 struct pt_regs
*regs
)
206 long si_code
= FPE_FLTINV
;
210 /* Software-completion summary bit is set, so try to
211 emulate the instruction. If the processor supports
212 precise exceptions, we don't have to search. */
213 if (!amask(AMASK_PRECISE_TRAP
))
214 si_code
= alpha_fp_emul(regs
->pc
- 4);
216 si_code
= alpha_fp_emul_imprecise(regs
, write_mask
);
220 die_if_kernel("Arithmetic fault", regs
, 0, NULL
);
222 info
.si_signo
= SIGFPE
;
224 info
.si_code
= si_code
;
225 info
.si_addr
= (void __user
*) regs
->pc
;
226 send_sig_info(SIGFPE
, &info
, current
);
230 do_entIF(unsigned long type
, struct pt_regs
*regs
)
235 if ((regs
->ps
& ~IPL_MAX
) == 0) {
237 const unsigned int *data
238 = (const unsigned int *) regs
->pc
;
239 printk("Kernel bug at %s:%d\n",
240 (const char *)(data
[1] | (long)data
[2] << 32),
243 #ifdef CONFIG_ALPHA_WTINT
245 /* If CALL_PAL WTINT is totally unsupported by the
246 PALcode, e.g. MILO, "emulate" it by overwriting
249 = (unsigned int *) regs
->pc
- 1;
250 if (*pinsn
== PAL_wtint
) {
251 *pinsn
= 0x47e01400; /* mov 0,$0 */
257 #endif /* ALPHA_WTINT */
258 die_if_kernel((type
== 1 ? "Kernel Bug" : "Instruction fault"),
263 case 0: /* breakpoint */
264 info
.si_signo
= SIGTRAP
;
266 info
.si_code
= TRAP_BRKPT
;
268 info
.si_addr
= (void __user
*) regs
->pc
;
270 if (ptrace_cancel_bpt(current
)) {
271 regs
->pc
-= 4; /* make pc point to former bpt */
274 send_sig_info(SIGTRAP
, &info
, current
);
277 case 1: /* bugcheck */
278 info
.si_signo
= SIGTRAP
;
280 info
.si_code
= __SI_FAULT
;
281 info
.si_addr
= (void __user
*) regs
->pc
;
283 send_sig_info(SIGTRAP
, &info
, current
);
286 case 2: /* gentrap */
287 info
.si_addr
= (void __user
*) regs
->pc
;
288 info
.si_trapno
= regs
->r16
;
289 switch ((long) regs
->r16
) {
346 info
.si_signo
= signo
;
349 info
.si_addr
= (void __user
*) regs
->pc
;
350 send_sig_info(signo
, &info
, current
);
354 if (implver() == IMPLVER_EV4
) {
357 /* The some versions of SRM do not handle
358 the opDEC properly - they return the PC of the
359 opDEC fault, not the instruction after as the
360 Alpha architecture requires. Here we fix it up.
361 We do this by intentionally causing an opDEC
362 fault during the boot sequence and testing if
363 we get the correct PC. If not, we set a flag
364 to correct it every time through. */
365 regs
->pc
+= opDEC_fix
;
367 /* EV4 does not implement anything except normal
368 rounding. Everything else will come here as
369 an illegal instruction. Emulate them. */
370 si_code
= alpha_fp_emul(regs
->pc
- 4);
374 info
.si_signo
= SIGFPE
;
376 info
.si_code
= si_code
;
377 info
.si_addr
= (void __user
*) regs
->pc
;
378 send_sig_info(SIGFPE
, &info
, current
);
384 case 3: /* FEN fault */
385 /* Irritating users can call PAL_clrfen to disable the
386 FPU for the process. The kernel will then trap in
387 do_switch_stack and undo_switch_stack when we try
388 to save and restore the FP registers.
390 Given that GCC by default generates code that uses the
391 FP registers, PAL_clrfen is not useful except for DoS
392 attacks. So turn the bleeding FPU back on and be done
394 current_thread_info()->pcb
.flags
|= 1;
395 __reload_thread(¤t_thread_info()->pcb
);
399 default: /* unexpected instruction-fault type */
403 info
.si_signo
= SIGILL
;
405 info
.si_code
= ILL_ILLOPC
;
406 info
.si_addr
= (void __user
*) regs
->pc
;
407 send_sig_info(SIGILL
, &info
, current
);
410 /* There is an ifdef in the PALcode in MILO that enables a
411 "kernel debugging entry point" as an unprivileged call_pal.
413 We don't want to have anything to do with it, but unfortunately
414 several versions of MILO included in distributions have it enabled,
415 and if we don't put something on the entry point we'll oops. */
418 do_entDbg(struct pt_regs
*regs
)
422 die_if_kernel("Instruction fault", regs
, 0, NULL
);
424 info
.si_signo
= SIGILL
;
426 info
.si_code
= ILL_ILLOPC
;
427 info
.si_addr
= (void __user
*) regs
->pc
;
428 force_sig_info(SIGILL
, &info
, current
);
/*
 * entUna has a different register layout to be reasonably simple. It
 * needs access to all the integer registers (the kernel doesn't use
 * fp-regs), and it needs to have them in order for simpler access.
 *
 * Due to the non-standard register layout (and because we don't want
 * to handle floating-point regs), user-mode unaligned accesses are
 * handled separately by do_entUnaUser below.
 *
 * Oh, btw, we don't handle the "gp" register correctly, but if we fault
 * on a gp-register unaligned load/store, something is _very_ wrong
 * in the kernel anyway..
 */
struct allregs {
	unsigned long regs[32];
	unsigned long ps, pc, gp, a0, a1, a2;
};

/* Counters/last-fault info, [0] = kernel faults, [1] = user faults.  */
struct unaligned_stat {
	unsigned long count, va, pc;
} unaligned[2];


/* Macro for exception fixup code to access integer registers.  */
#define una_reg(r)  (_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
460 do_entUna(void * va
, unsigned long opcode
, unsigned long reg
,
461 struct allregs
*regs
)
463 long error
, tmp1
, tmp2
, tmp3
, tmp4
;
464 unsigned long pc
= regs
->pc
- 4;
465 unsigned long *_regs
= regs
->regs
;
466 const struct exception_table_entry
*fixup
;
468 unaligned
[0].count
++;
469 unaligned
[0].va
= (unsigned long) va
;
470 unaligned
[0].pc
= pc
;
472 /* We don't want to use the generic get/put unaligned macros as
473 we want to trap exceptions. Only if we actually get an
474 exception will we decide whether we should have caught it. */
477 case 0x0c: /* ldwu */
478 __asm__
__volatile__(
479 "1: ldq_u %1,0(%3)\n"
480 "2: ldq_u %2,1(%3)\n"
484 ".section __ex_table,\"a\"\n"
486 " lda %1,3b-1b(%0)\n"
488 " lda %2,3b-2b(%0)\n"
490 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
494 una_reg(reg
) = tmp1
|tmp2
;
498 __asm__
__volatile__(
499 "1: ldq_u %1,0(%3)\n"
500 "2: ldq_u %2,3(%3)\n"
504 ".section __ex_table,\"a\"\n"
506 " lda %1,3b-1b(%0)\n"
508 " lda %2,3b-2b(%0)\n"
510 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
514 una_reg(reg
) = (int)(tmp1
|tmp2
);
518 __asm__
__volatile__(
519 "1: ldq_u %1,0(%3)\n"
520 "2: ldq_u %2,7(%3)\n"
524 ".section __ex_table,\"a\"\n"
526 " lda %1,3b-1b(%0)\n"
528 " lda %2,3b-2b(%0)\n"
530 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
534 una_reg(reg
) = tmp1
|tmp2
;
537 /* Note that the store sequences do not indicate that they change
538 memory because it _should_ be affecting nothing in this context.
539 (Otherwise we have other, much larger, problems.) */
541 __asm__
__volatile__(
542 "1: ldq_u %2,1(%5)\n"
543 "2: ldq_u %1,0(%5)\n"
550 "3: stq_u %2,1(%5)\n"
551 "4: stq_u %1,0(%5)\n"
553 ".section __ex_table,\"a\"\n"
555 " lda %2,5b-1b(%0)\n"
557 " lda %1,5b-2b(%0)\n"
559 " lda $31,5b-3b(%0)\n"
561 " lda $31,5b-4b(%0)\n"
563 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
564 "=&r"(tmp3
), "=&r"(tmp4
)
565 : "r"(va
), "r"(una_reg(reg
)), "0"(0));
571 __asm__
__volatile__(
572 "1: ldq_u %2,3(%5)\n"
573 "2: ldq_u %1,0(%5)\n"
580 "3: stq_u %2,3(%5)\n"
581 "4: stq_u %1,0(%5)\n"
583 ".section __ex_table,\"a\"\n"
585 " lda %2,5b-1b(%0)\n"
587 " lda %1,5b-2b(%0)\n"
589 " lda $31,5b-3b(%0)\n"
591 " lda $31,5b-4b(%0)\n"
593 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
594 "=&r"(tmp3
), "=&r"(tmp4
)
595 : "r"(va
), "r"(una_reg(reg
)), "0"(0));
601 __asm__
__volatile__(
602 "1: ldq_u %2,7(%5)\n"
603 "2: ldq_u %1,0(%5)\n"
610 "3: stq_u %2,7(%5)\n"
611 "4: stq_u %1,0(%5)\n"
613 ".section __ex_table,\"a\"\n\t"
615 " lda %2,5b-1b(%0)\n"
617 " lda %1,5b-2b(%0)\n"
619 " lda $31,5b-3b(%0)\n"
621 " lda $31,5b-4b(%0)\n"
623 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
624 "=&r"(tmp3
), "=&r"(tmp4
)
625 : "r"(va
), "r"(una_reg(reg
)), "0"(0));
631 printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
632 pc
, va
, opcode
, reg
);
636 /* Ok, we caught the exception, but we don't want it. Is there
637 someone to pass it along to? */
638 if ((fixup
= search_exception_tables(pc
)) != 0) {
640 newpc
= fixup_exception(una_reg
, fixup
, pc
);
642 printk("Forwarding unaligned exception at %lx (%lx)\n",
650 * Yikes! No one to forward the exception to.
651 * Since the registers are in a weird format, dump them ourselves.
654 printk("%s(%d): unhandled unaligned exception\n",
655 current
->comm
, task_pid_nr(current
));
657 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n",
658 pc
, una_reg(26), regs
->ps
);
659 printk("r0 = %016lx r1 = %016lx r2 = %016lx\n",
660 una_reg(0), una_reg(1), una_reg(2));
661 printk("r3 = %016lx r4 = %016lx r5 = %016lx\n",
662 una_reg(3), una_reg(4), una_reg(5));
663 printk("r6 = %016lx r7 = %016lx r8 = %016lx\n",
664 una_reg(6), una_reg(7), una_reg(8));
665 printk("r9 = %016lx r10= %016lx r11= %016lx\n",
666 una_reg(9), una_reg(10), una_reg(11));
667 printk("r12= %016lx r13= %016lx r14= %016lx\n",
668 una_reg(12), una_reg(13), una_reg(14));
669 printk("r15= %016lx\n", una_reg(15));
670 printk("r16= %016lx r17= %016lx r18= %016lx\n",
671 una_reg(16), una_reg(17), una_reg(18));
672 printk("r19= %016lx r20= %016lx r21= %016lx\n",
673 una_reg(19), una_reg(20), una_reg(21));
674 printk("r22= %016lx r23= %016lx r24= %016lx\n",
675 una_reg(22), una_reg(23), una_reg(24));
676 printk("r25= %016lx r27= %016lx r28= %016lx\n",
677 una_reg(25), una_reg(27), una_reg(28));
678 printk("gp = %016lx sp = %p\n", regs
->gp
, regs
+1);
680 dik_show_code((unsigned int *)pc
);
681 dik_show_trace((unsigned long *)(regs
+1));
683 if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL
)) {
684 printk("die_if_kernel recursion detected.\n");
/*
 * Convert an s-floating point value in memory format to the
 * corresponding value in register format.  The exponent
 * needs to be remapped to preserve non-finite values
 * (infinities, not-a-numbers, denormals).
 */
static inline unsigned long
s_mem_to_reg (unsigned long s_mem)
{
	unsigned long frac    = (s_mem >>  0) & 0x7fffff;
	unsigned long sign    = (s_mem >> 31) & 0x1;
	unsigned long exp_msb = (s_mem >> 30) & 0x1;
	unsigned long exp_low = (s_mem >> 23) & 0x7f;
	unsigned long exp;

	exp = (exp_msb << 10) | exp_low;	/* common case */
	if (exp_msb) {
		if (exp_low == 0x7f) {
			/* Inf/NaN: T-format exponent is all ones.  */
			exp = 0x7ff;
		}
	} else {
		if (exp_low == 0x00) {
			/* Zero/denormal: T-format exponent all zeros.  */
			exp = 0x000;
		} else {
			/* Normal number: widen the bias from 127 to
			   1023 by replicating the complemented msb.  */
			exp |= (0x7 << 7);
		}
	}
	return (sign << 63) | (exp << 52) | (frac << 29);
}
/*
 * Convert an s-floating point value in register format to the
 * corresponding value in memory format.
 */
static inline unsigned long
s_reg_to_mem (unsigned long s_reg)
{
	/* Sign and exponent msb (bits 63:62) move to bits 31:30; the
	   low exponent bits and fraction (bits 58:29) move to 29:0,
	   dropping the replicated middle exponent bits.  */
	return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34);
}
732 * Handle user-level unaligned fault. Handling user-level unaligned
733 * faults is *extremely* slow and produces nasty messages. A user
734 * program *should* fix unaligned faults ASAP.
736 * Notice that we have (almost) the regular kernel stack layout here,
737 * so finding the appropriate registers is a little more difficult
738 * than in the kernel case.
740 * Finally, we handle regular integer load/stores only. In
741 * particular, load-linked/store-conditionally and floating point
742 * load/stores are not supported. The former make no sense with
743 * unaligned faults (they are guaranteed to fail) and I don't think
744 * the latter will occur in any decent program.
746 * Sigh. We *do* have to handle some FP operations, because GCC will
747 * uses them as temporary storage for integer memory to memory copies.
748 * However, we need to deal with stt/ldt and sts/lds only.
751 #define OP_INT_MASK ( 1L << 0x28 | 1L << 0x2c /* ldl stl */ \
752 | 1L << 0x29 | 1L << 0x2d /* ldq stq */ \
753 | 1L << 0x0c | 1L << 0x0d /* ldwu stw */ \
754 | 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */
756 #define OP_WRITE_MASK ( 1L << 0x26 | 1L << 0x27 /* sts stt */ \
757 | 1L << 0x2c | 1L << 0x2d /* stl stq */ \
758 | 1L << 0x0d | 1L << 0x0e ) /* stw stb */
760 #define R(x) ((size_t) &((struct pt_regs *)0)->x)
762 static int unauser_reg_offsets
[32] = {
763 R(r0
), R(r1
), R(r2
), R(r3
), R(r4
), R(r5
), R(r6
), R(r7
), R(r8
),
764 /* r9 ... r15 are stored in front of regs. */
765 -56, -48, -40, -32, -24, -16, -8,
766 R(r16
), R(r17
), R(r18
),
767 R(r19
), R(r20
), R(r21
), R(r22
), R(r23
), R(r24
), R(r25
), R(r26
),
768 R(r27
), R(r28
), R(gp
),
775 do_entUnaUser(void __user
* va
, unsigned long opcode
,
776 unsigned long reg
, struct pt_regs
*regs
)
778 static DEFINE_RATELIMIT_STATE(ratelimit
, 5 * HZ
, 5);
780 unsigned long tmp1
, tmp2
, tmp3
, tmp4
;
781 unsigned long fake_reg
, *reg_addr
= &fake_reg
;
785 /* Check the UAC bits to decide what the user wants us to do
786 with the unaliged access. */
788 if (!(current_thread_info()->status
& TS_UAC_NOPRINT
)) {
789 if (__ratelimit(&ratelimit
)) {
790 printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
791 current
->comm
, task_pid_nr(current
),
792 regs
->pc
- 4, va
, opcode
, reg
);
795 if ((current_thread_info()->status
& TS_UAC_SIGBUS
))
797 /* Not sure why you'd want to use this, but... */
798 if ((current_thread_info()->status
& TS_UAC_NOFIX
))
801 /* Don't bother reading ds in the access check since we already
802 know that this came from the user. Also rely on the fact that
803 the page at TASK_SIZE is unmapped and so can't be touched anyway. */
804 if (!__access_ok((unsigned long)va
, 0, USER_DS
))
807 ++unaligned
[1].count
;
808 unaligned
[1].va
= (unsigned long)va
;
809 unaligned
[1].pc
= regs
->pc
- 4;
811 if ((1L << opcode
) & OP_INT_MASK
) {
812 /* it's an integer load/store */
814 reg_addr
= (unsigned long *)
815 ((char *)regs
+ unauser_reg_offsets
[reg
]);
816 } else if (reg
== 30) {
817 /* usp in PAL regs */
820 /* zero "register" */
825 /* We don't want to use the generic get/put unaligned macros as
826 we want to trap exceptions. Only if we actually get an
827 exception will we decide whether we should have caught it. */
830 case 0x0c: /* ldwu */
831 __asm__
__volatile__(
832 "1: ldq_u %1,0(%3)\n"
833 "2: ldq_u %2,1(%3)\n"
837 ".section __ex_table,\"a\"\n"
839 " lda %1,3b-1b(%0)\n"
841 " lda %2,3b-2b(%0)\n"
843 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
847 *reg_addr
= tmp1
|tmp2
;
851 __asm__
__volatile__(
852 "1: ldq_u %1,0(%3)\n"
853 "2: ldq_u %2,3(%3)\n"
857 ".section __ex_table,\"a\"\n"
859 " lda %1,3b-1b(%0)\n"
861 " lda %2,3b-2b(%0)\n"
863 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
867 alpha_write_fp_reg(reg
, s_mem_to_reg((int)(tmp1
|tmp2
)));
871 __asm__
__volatile__(
872 "1: ldq_u %1,0(%3)\n"
873 "2: ldq_u %2,7(%3)\n"
877 ".section __ex_table,\"a\"\n"
879 " lda %1,3b-1b(%0)\n"
881 " lda %2,3b-2b(%0)\n"
883 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
887 alpha_write_fp_reg(reg
, tmp1
|tmp2
);
891 __asm__
__volatile__(
892 "1: ldq_u %1,0(%3)\n"
893 "2: ldq_u %2,3(%3)\n"
897 ".section __ex_table,\"a\"\n"
899 " lda %1,3b-1b(%0)\n"
901 " lda %2,3b-2b(%0)\n"
903 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
907 *reg_addr
= (int)(tmp1
|tmp2
);
911 __asm__
__volatile__(
912 "1: ldq_u %1,0(%3)\n"
913 "2: ldq_u %2,7(%3)\n"
917 ".section __ex_table,\"a\"\n"
919 " lda %1,3b-1b(%0)\n"
921 " lda %2,3b-2b(%0)\n"
923 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
927 *reg_addr
= tmp1
|tmp2
;
930 /* Note that the store sequences do not indicate that they change
931 memory because it _should_ be affecting nothing in this context.
932 (Otherwise we have other, much larger, problems.) */
934 __asm__
__volatile__(
935 "1: ldq_u %2,1(%5)\n"
936 "2: ldq_u %1,0(%5)\n"
943 "3: stq_u %2,1(%5)\n"
944 "4: stq_u %1,0(%5)\n"
946 ".section __ex_table,\"a\"\n"
948 " lda %2,5b-1b(%0)\n"
950 " lda %1,5b-2b(%0)\n"
952 " lda $31,5b-3b(%0)\n"
954 " lda $31,5b-4b(%0)\n"
956 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
957 "=&r"(tmp3
), "=&r"(tmp4
)
958 : "r"(va
), "r"(*reg_addr
), "0"(0));
964 fake_reg
= s_reg_to_mem(alpha_read_fp_reg(reg
));
968 __asm__
__volatile__(
969 "1: ldq_u %2,3(%5)\n"
970 "2: ldq_u %1,0(%5)\n"
977 "3: stq_u %2,3(%5)\n"
978 "4: stq_u %1,0(%5)\n"
980 ".section __ex_table,\"a\"\n"
982 " lda %2,5b-1b(%0)\n"
984 " lda %1,5b-2b(%0)\n"
986 " lda $31,5b-3b(%0)\n"
988 " lda $31,5b-4b(%0)\n"
990 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
991 "=&r"(tmp3
), "=&r"(tmp4
)
992 : "r"(va
), "r"(*reg_addr
), "0"(0));
998 fake_reg
= alpha_read_fp_reg(reg
);
1001 case 0x2d: /* stq */
1002 __asm__
__volatile__(
1003 "1: ldq_u %2,7(%5)\n"
1004 "2: ldq_u %1,0(%5)\n"
1011 "3: stq_u %2,7(%5)\n"
1012 "4: stq_u %1,0(%5)\n"
1014 ".section __ex_table,\"a\"\n\t"
1016 " lda %2,5b-1b(%0)\n"
1018 " lda %1,5b-2b(%0)\n"
1020 " lda $31,5b-3b(%0)\n"
1022 " lda $31,5b-4b(%0)\n"
1024 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
1025 "=&r"(tmp3
), "=&r"(tmp4
)
1026 : "r"(va
), "r"(*reg_addr
), "0"(0));
1032 /* What instruction were you trying to use, exactly? */
1036 /* Only integer loads should get here; everyone else returns early. */
1042 regs
->pc
-= 4; /* make pc point to faulting insn */
1043 info
.si_signo
= SIGSEGV
;
1046 /* We need to replicate some of the logic in mm/fault.c,
1047 since we don't have access to the fault code in the
1048 exception handling return path. */
1049 if (!__access_ok((unsigned long)va
, 0, USER_DS
))
1050 info
.si_code
= SEGV_ACCERR
;
1052 struct mm_struct
*mm
= current
->mm
;
1053 down_read(&mm
->mmap_sem
);
1054 if (find_vma(mm
, (unsigned long)va
))
1055 info
.si_code
= SEGV_ACCERR
;
1057 info
.si_code
= SEGV_MAPERR
;
1058 up_read(&mm
->mmap_sem
);
1061 send_sig_info(SIGSEGV
, &info
, current
);
1066 info
.si_signo
= SIGBUS
;
1068 info
.si_code
= BUS_ADRALN
;
1070 send_sig_info(SIGBUS
, &info
, current
);
1077 /* Tell PAL-code what global pointer we want in the kernel. */
1078 register unsigned long gptr
__asm__("$29");
1081 /* Hack for Multia (UDB) and JENSEN: some of their SRMs have
1082 a bug in the handling of the opDEC fault. Fix it up if so. */
1083 if (implver() == IMPLVER_EV4
)