// SPDX-License-Identifier: GPL-2.0
/*
 * arch/alpha/kernel/traps.c
 *
 * (C) Copyright 1994 Linus Torvalds
 *
 * This file initializes the trap entry points
 */
12 #include <linux/jiffies.h>
14 #include <linux/sched/signal.h>
15 #include <linux/sched/debug.h>
16 #include <linux/tty.h>
17 #include <linux/delay.h>
18 #include <linux/extable.h>
19 #include <linux/kallsyms.h>
20 #include <linux/ratelimit.h>
22 #include <asm/gentrap.h>
23 #include <linux/uaccess.h>
24 #include <asm/unaligned.h>
25 #include <asm/sysinfo.h>
26 #include <asm/hwrpb.h>
27 #include <asm/mmu_context.h>
28 #include <asm/special_insns.h>
32 /* Work-around for some SRMs which mishandle opDEC faults. */
39 __asm__
__volatile__ (
40 /* Load the address of... */
42 /* A stub instruction fault handler. Just add 4 to the
48 /* Install the instruction fault handler. */
50 " call_pal %[wrent]\n"
51 /* With that in place, the fault from the round-to-minf fp
52 insn will arrive either at the "lda 4" insn (bad) or one
53 past that (good). This places the correct fixup in %0. */
55 " cvttq/svm $f31,$f31\n"
57 : [fix
] "=r" (opDEC_fix
)
58 : [rti
] "n" (PAL_rti
), [wrent
] "n" (PAL_wrent
)
59 : "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");
62 printk("opDEC fixup enabled.\n");
66 dik_show_regs(struct pt_regs
*regs
, unsigned long *r9_15
)
68 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n",
69 regs
->pc
, regs
->r26
, regs
->ps
, print_tainted());
70 printk("pc is at %pSR\n", (void *)regs
->pc
);
71 printk("ra is at %pSR\n", (void *)regs
->r26
);
72 printk("v0 = %016lx t0 = %016lx t1 = %016lx\n",
73 regs
->r0
, regs
->r1
, regs
->r2
);
74 printk("t2 = %016lx t3 = %016lx t4 = %016lx\n",
75 regs
->r3
, regs
->r4
, regs
->r5
);
76 printk("t5 = %016lx t6 = %016lx t7 = %016lx\n",
77 regs
->r6
, regs
->r7
, regs
->r8
);
80 printk("s0 = %016lx s1 = %016lx s2 = %016lx\n",
81 r9_15
[9], r9_15
[10], r9_15
[11]);
82 printk("s3 = %016lx s4 = %016lx s5 = %016lx\n",
83 r9_15
[12], r9_15
[13], r9_15
[14]);
84 printk("s6 = %016lx\n", r9_15
[15]);
87 printk("a0 = %016lx a1 = %016lx a2 = %016lx\n",
88 regs
->r16
, regs
->r17
, regs
->r18
);
89 printk("a3 = %016lx a4 = %016lx a5 = %016lx\n",
90 regs
->r19
, regs
->r20
, regs
->r21
);
91 printk("t8 = %016lx t9 = %016lx t10= %016lx\n",
92 regs
->r22
, regs
->r23
, regs
->r24
);
93 printk("t11= %016lx pv = %016lx at = %016lx\n",
94 regs
->r25
, regs
->r27
, regs
->r28
);
95 printk("gp = %016lx sp = %p\n", regs
->gp
, regs
+1);
102 static char * ireg_name
[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
103 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
104 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
105 "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
109 dik_show_code(unsigned int *pc
)
114 for (i
= -6; i
< 2; i
++) {
116 if (__get_user(insn
, (unsigned int __user
*)pc
+ i
))
118 printk("%c%08x%c", i
? ' ' : '<', insn
, i
? ' ' : '>');
124 dik_show_trace(unsigned long *sp
)
128 while (0x1ff8 & (unsigned long) sp
) {
129 extern char _stext
[], _etext
[];
130 unsigned long tmp
= *sp
;
132 if (tmp
< (unsigned long) &_stext
)
134 if (tmp
>= (unsigned long) &_etext
)
136 printk("[<%lx>] %pSR\n", tmp
, (void *)tmp
);
145 static int kstack_depth_to_print
= 24;
147 void show_stack(struct task_struct
*task
, unsigned long *sp
)
149 unsigned long *stack
;
153 * debugging aid: "show_stack(NULL);" prints the
154 * back trace for this cpu.
157 sp
=(unsigned long*)&sp
;
160 for(i
=0; i
< kstack_depth_to_print
; i
++) {
161 if (((long) stack
& (THREAD_SIZE
-1)) == 0)
170 pr_cont("%016lx", *stack
++);
177 die_if_kernel(char * str
, struct pt_regs
*regs
, long err
, unsigned long *r9_15
)
182 printk("CPU %d ", hard_smp_processor_id());
184 printk("%s(%d): %s %ld\n", current
->comm
, task_pid_nr(current
), str
, err
);
185 dik_show_regs(regs
, r9_15
);
186 add_taint(TAINT_DIE
, LOCKDEP_NOW_UNRELIABLE
);
187 dik_show_trace((unsigned long *)(regs
+1));
188 dik_show_code((unsigned int *)regs
->pc
);
190 if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL
)) {
191 printk("die_if_kernel recursion detected.\n");
198 #ifndef CONFIG_MATHEMU
199 static long dummy_emul(void) { return 0; }
200 long (*alpha_fp_emul_imprecise
)(struct pt_regs
*regs
, unsigned long writemask
)
201 = (void *)dummy_emul
;
202 EXPORT_SYMBOL_GPL(alpha_fp_emul_imprecise
);
203 long (*alpha_fp_emul
) (unsigned long pc
)
204 = (void *)dummy_emul
;
205 EXPORT_SYMBOL_GPL(alpha_fp_emul
);
207 long alpha_fp_emul_imprecise(struct pt_regs
*regs
, unsigned long writemask
);
208 long alpha_fp_emul (unsigned long pc
);
212 do_entArith(unsigned long summary
, unsigned long write_mask
,
213 struct pt_regs
*regs
)
215 long si_code
= FPE_FLTINV
;
218 /* Software-completion summary bit is set, so try to
219 emulate the instruction. If the processor supports
220 precise exceptions, we don't have to search. */
221 if (!amask(AMASK_PRECISE_TRAP
))
222 si_code
= alpha_fp_emul(regs
->pc
- 4);
224 si_code
= alpha_fp_emul_imprecise(regs
, write_mask
);
228 die_if_kernel("Arithmetic fault", regs
, 0, NULL
);
230 send_sig_fault(SIGFPE
, si_code
, (void __user
*) regs
->pc
, 0, current
);
234 do_entIF(unsigned long type
, struct pt_regs
*regs
)
238 if ((regs
->ps
& ~IPL_MAX
) == 0) {
240 const unsigned int *data
241 = (const unsigned int *) regs
->pc
;
242 printk("Kernel bug at %s:%d\n",
243 (const char *)(data
[1] | (long)data
[2] << 32),
246 #ifdef CONFIG_ALPHA_WTINT
248 /* If CALL_PAL WTINT is totally unsupported by the
249 PALcode, e.g. MILO, "emulate" it by overwriting
252 = (unsigned int *) regs
->pc
- 1;
253 if (*pinsn
== PAL_wtint
) {
254 *pinsn
= 0x47e01400; /* mov 0,$0 */
260 #endif /* ALPHA_WTINT */
261 die_if_kernel((type
== 1 ? "Kernel Bug" : "Instruction fault"),
266 case 0: /* breakpoint */
267 if (ptrace_cancel_bpt(current
)) {
268 regs
->pc
-= 4; /* make pc point to former bpt */
271 send_sig_fault(SIGTRAP
, TRAP_BRKPT
, (void __user
*)regs
->pc
, 0,
275 case 1: /* bugcheck */
276 send_sig_fault(SIGTRAP
, TRAP_UNK
, (void __user
*) regs
->pc
, 0,
280 case 2: /* gentrap */
281 switch ((long) regs
->r16
) {
338 send_sig_fault(signo
, code
, (void __user
*) regs
->pc
, regs
->r16
,
343 if (implver() == IMPLVER_EV4
) {
346 /* The some versions of SRM do not handle
347 the opDEC properly - they return the PC of the
348 opDEC fault, not the instruction after as the
349 Alpha architecture requires. Here we fix it up.
350 We do this by intentionally causing an opDEC
351 fault during the boot sequence and testing if
352 we get the correct PC. If not, we set a flag
353 to correct it every time through. */
354 regs
->pc
+= opDEC_fix
;
356 /* EV4 does not implement anything except normal
357 rounding. Everything else will come here as
358 an illegal instruction. Emulate them. */
359 si_code
= alpha_fp_emul(regs
->pc
- 4);
363 send_sig_fault(SIGFPE
, si_code
,
364 (void __user
*) regs
->pc
, 0,
371 case 3: /* FEN fault */
372 /* Irritating users can call PAL_clrfen to disable the
373 FPU for the process. The kernel will then trap in
374 do_switch_stack and undo_switch_stack when we try
375 to save and restore the FP registers.
377 Given that GCC by default generates code that uses the
378 FP registers, PAL_clrfen is not useful except for DoS
379 attacks. So turn the bleeding FPU back on and be done
381 current_thread_info()->pcb
.flags
|= 1;
382 __reload_thread(¤t_thread_info()->pcb
);
386 default: /* unexpected instruction-fault type */
390 send_sig_fault(SIGILL
, ILL_ILLOPC
, (void __user
*)regs
->pc
, 0, current
);
393 /* There is an ifdef in the PALcode in MILO that enables a
394 "kernel debugging entry point" as an unprivileged call_pal.
396 We don't want to have anything to do with it, but unfortunately
397 several versions of MILO included in distributions have it enabled,
398 and if we don't put something on the entry point we'll oops. */
401 do_entDbg(struct pt_regs
*regs
)
403 die_if_kernel("Instruction fault", regs
, 0, NULL
);
405 force_sig_fault(SIGILL
, ILL_ILLOPC
, (void __user
*)regs
->pc
, 0);
410 * entUna has a different register layout to be reasonably simple. It
411 * needs access to all the integer registers (the kernel doesn't use
412 * fp-regs), and it needs to have them in order for simpler access.
414 * Due to the non-standard register layout (and because we don't want
415 * to handle floating-point regs), user-mode unaligned accesses are
416 * handled separately by do_entUnaUser below.
418 * Oh, btw, we don't handle the "gp" register correctly, but if we fault
419 * on a gp-register unaligned load/store, something is _very_ wrong
420 * in the kernel anyway..
423 unsigned long regs
[32];
424 unsigned long ps
, pc
, gp
, a0
, a1
, a2
;
427 struct unaligned_stat
{
428 unsigned long count
, va
, pc
;
432 /* Macro for exception fixup code to access integer registers. */
433 #define una_reg(r) (_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
437 do_entUna(void * va
, unsigned long opcode
, unsigned long reg
,
438 struct allregs
*regs
)
440 long error
, tmp1
, tmp2
, tmp3
, tmp4
;
441 unsigned long pc
= regs
->pc
- 4;
442 unsigned long *_regs
= regs
->regs
;
443 const struct exception_table_entry
*fixup
;
445 unaligned
[0].count
++;
446 unaligned
[0].va
= (unsigned long) va
;
447 unaligned
[0].pc
= pc
;
449 /* We don't want to use the generic get/put unaligned macros as
450 we want to trap exceptions. Only if we actually get an
451 exception will we decide whether we should have caught it. */
454 case 0x0c: /* ldwu */
455 __asm__
__volatile__(
456 "1: ldq_u %1,0(%3)\n"
457 "2: ldq_u %2,1(%3)\n"
463 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
467 una_reg(reg
) = tmp1
|tmp2
;
471 __asm__
__volatile__(
472 "1: ldq_u %1,0(%3)\n"
473 "2: ldq_u %2,3(%3)\n"
479 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
483 una_reg(reg
) = (int)(tmp1
|tmp2
);
487 __asm__
__volatile__(
488 "1: ldq_u %1,0(%3)\n"
489 "2: ldq_u %2,7(%3)\n"
495 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
499 una_reg(reg
) = tmp1
|tmp2
;
502 /* Note that the store sequences do not indicate that they change
503 memory because it _should_ be affecting nothing in this context.
504 (Otherwise we have other, much larger, problems.) */
506 __asm__
__volatile__(
507 "1: ldq_u %2,1(%5)\n"
508 "2: ldq_u %1,0(%5)\n"
515 "3: stq_u %2,1(%5)\n"
516 "4: stq_u %1,0(%5)\n"
522 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
523 "=&r"(tmp3
), "=&r"(tmp4
)
524 : "r"(va
), "r"(una_reg(reg
)), "0"(0));
530 __asm__
__volatile__(
531 "1: ldq_u %2,3(%5)\n"
532 "2: ldq_u %1,0(%5)\n"
539 "3: stq_u %2,3(%5)\n"
540 "4: stq_u %1,0(%5)\n"
546 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
547 "=&r"(tmp3
), "=&r"(tmp4
)
548 : "r"(va
), "r"(una_reg(reg
)), "0"(0));
554 __asm__
__volatile__(
555 "1: ldq_u %2,7(%5)\n"
556 "2: ldq_u %1,0(%5)\n"
563 "3: stq_u %2,7(%5)\n"
564 "4: stq_u %1,0(%5)\n"
570 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
571 "=&r"(tmp3
), "=&r"(tmp4
)
572 : "r"(va
), "r"(una_reg(reg
)), "0"(0));
578 printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
579 pc
, va
, opcode
, reg
);
583 /* Ok, we caught the exception, but we don't want it. Is there
584 someone to pass it along to? */
585 if ((fixup
= search_exception_tables(pc
)) != 0) {
587 newpc
= fixup_exception(una_reg
, fixup
, pc
);
589 printk("Forwarding unaligned exception at %lx (%lx)\n",
597 * Yikes! No one to forward the exception to.
598 * Since the registers are in a weird format, dump them ourselves.
601 printk("%s(%d): unhandled unaligned exception\n",
602 current
->comm
, task_pid_nr(current
));
604 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n",
605 pc
, una_reg(26), regs
->ps
);
606 printk("r0 = %016lx r1 = %016lx r2 = %016lx\n",
607 una_reg(0), una_reg(1), una_reg(2));
608 printk("r3 = %016lx r4 = %016lx r5 = %016lx\n",
609 una_reg(3), una_reg(4), una_reg(5));
610 printk("r6 = %016lx r7 = %016lx r8 = %016lx\n",
611 una_reg(6), una_reg(7), una_reg(8));
612 printk("r9 = %016lx r10= %016lx r11= %016lx\n",
613 una_reg(9), una_reg(10), una_reg(11));
614 printk("r12= %016lx r13= %016lx r14= %016lx\n",
615 una_reg(12), una_reg(13), una_reg(14));
616 printk("r15= %016lx\n", una_reg(15));
617 printk("r16= %016lx r17= %016lx r18= %016lx\n",
618 una_reg(16), una_reg(17), una_reg(18));
619 printk("r19= %016lx r20= %016lx r21= %016lx\n",
620 una_reg(19), una_reg(20), una_reg(21));
621 printk("r22= %016lx r23= %016lx r24= %016lx\n",
622 una_reg(22), una_reg(23), una_reg(24));
623 printk("r25= %016lx r27= %016lx r28= %016lx\n",
624 una_reg(25), una_reg(27), una_reg(28));
625 printk("gp = %016lx sp = %p\n", regs
->gp
, regs
+1);
627 dik_show_code((unsigned int *)pc
);
628 dik_show_trace((unsigned long *)(regs
+1));
630 if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL
)) {
631 printk("die_if_kernel recursion detected.\n");
639 * Convert an s-floating point value in memory format to the
640 * corresponding value in register format. The exponent
641 * needs to be remapped to preserve non-finite values
642 * (infinities, not-a-numbers, denormals).
644 static inline unsigned long
645 s_mem_to_reg (unsigned long s_mem
)
647 unsigned long frac
= (s_mem
>> 0) & 0x7fffff;
648 unsigned long sign
= (s_mem
>> 31) & 0x1;
649 unsigned long exp_msb
= (s_mem
>> 30) & 0x1;
650 unsigned long exp_low
= (s_mem
>> 23) & 0x7f;
653 exp
= (exp_msb
<< 10) | exp_low
; /* common case */
655 if (exp_low
== 0x7f) {
659 if (exp_low
== 0x00) {
665 return (sign
<< 63) | (exp
<< 52) | (frac
<< 29);
669 * Convert an s-floating point value in register format to the
670 * corresponding value in memory format.
672 static inline unsigned long
673 s_reg_to_mem (unsigned long s_reg
)
675 return ((s_reg
>> 62) << 30) | ((s_reg
<< 5) >> 34);
679 * Handle user-level unaligned fault. Handling user-level unaligned
680 * faults is *extremely* slow and produces nasty messages. A user
681 * program *should* fix unaligned faults ASAP.
683 * Notice that we have (almost) the regular kernel stack layout here,
684 * so finding the appropriate registers is a little more difficult
685 * than in the kernel case.
687 * Finally, we handle regular integer load/stores only. In
688 * particular, load-linked/store-conditionally and floating point
689 * load/stores are not supported. The former make no sense with
690 * unaligned faults (they are guaranteed to fail) and I don't think
691 * the latter will occur in any decent program.
693 * Sigh. We *do* have to handle some FP operations, because GCC will
694 * uses them as temporary storage for integer memory to memory copies.
695 * However, we need to deal with stt/ldt and sts/lds only.
698 #define OP_INT_MASK ( 1L << 0x28 | 1L << 0x2c /* ldl stl */ \
699 | 1L << 0x29 | 1L << 0x2d /* ldq stq */ \
700 | 1L << 0x0c | 1L << 0x0d /* ldwu stw */ \
701 | 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */
703 #define OP_WRITE_MASK ( 1L << 0x26 | 1L << 0x27 /* sts stt */ \
704 | 1L << 0x2c | 1L << 0x2d /* stl stq */ \
705 | 1L << 0x0d | 1L << 0x0e ) /* stw stb */
707 #define R(x) ((size_t) &((struct pt_regs *)0)->x)
709 static int unauser_reg_offsets
[32] = {
710 R(r0
), R(r1
), R(r2
), R(r3
), R(r4
), R(r5
), R(r6
), R(r7
), R(r8
),
711 /* r9 ... r15 are stored in front of regs. */
712 -56, -48, -40, -32, -24, -16, -8,
713 R(r16
), R(r17
), R(r18
),
714 R(r19
), R(r20
), R(r21
), R(r22
), R(r23
), R(r24
), R(r25
), R(r26
),
715 R(r27
), R(r28
), R(gp
),
722 do_entUnaUser(void __user
* va
, unsigned long opcode
,
723 unsigned long reg
, struct pt_regs
*regs
)
725 static DEFINE_RATELIMIT_STATE(ratelimit
, 5 * HZ
, 5);
727 unsigned long tmp1
, tmp2
, tmp3
, tmp4
;
728 unsigned long fake_reg
, *reg_addr
= &fake_reg
;
732 /* Check the UAC bits to decide what the user wants us to do
733 with the unaliged access. */
735 if (!(current_thread_info()->status
& TS_UAC_NOPRINT
)) {
736 if (__ratelimit(&ratelimit
)) {
737 printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
738 current
->comm
, task_pid_nr(current
),
739 regs
->pc
- 4, va
, opcode
, reg
);
742 if ((current_thread_info()->status
& TS_UAC_SIGBUS
))
744 /* Not sure why you'd want to use this, but... */
745 if ((current_thread_info()->status
& TS_UAC_NOFIX
))
748 /* Don't bother reading ds in the access check since we already
749 know that this came from the user. Also rely on the fact that
750 the page at TASK_SIZE is unmapped and so can't be touched anyway. */
751 if ((unsigned long)va
>= TASK_SIZE
)
754 ++unaligned
[1].count
;
755 unaligned
[1].va
= (unsigned long)va
;
756 unaligned
[1].pc
= regs
->pc
- 4;
758 if ((1L << opcode
) & OP_INT_MASK
) {
759 /* it's an integer load/store */
761 reg_addr
= (unsigned long *)
762 ((char *)regs
+ unauser_reg_offsets
[reg
]);
763 } else if (reg
== 30) {
764 /* usp in PAL regs */
767 /* zero "register" */
772 /* We don't want to use the generic get/put unaligned macros as
773 we want to trap exceptions. Only if we actually get an
774 exception will we decide whether we should have caught it. */
777 case 0x0c: /* ldwu */
778 __asm__
__volatile__(
779 "1: ldq_u %1,0(%3)\n"
780 "2: ldq_u %2,1(%3)\n"
786 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
790 *reg_addr
= tmp1
|tmp2
;
794 __asm__
__volatile__(
795 "1: ldq_u %1,0(%3)\n"
796 "2: ldq_u %2,3(%3)\n"
802 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
806 alpha_write_fp_reg(reg
, s_mem_to_reg((int)(tmp1
|tmp2
)));
810 __asm__
__volatile__(
811 "1: ldq_u %1,0(%3)\n"
812 "2: ldq_u %2,7(%3)\n"
818 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
822 alpha_write_fp_reg(reg
, tmp1
|tmp2
);
826 __asm__
__volatile__(
827 "1: ldq_u %1,0(%3)\n"
828 "2: ldq_u %2,3(%3)\n"
834 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
838 *reg_addr
= (int)(tmp1
|tmp2
);
842 __asm__
__volatile__(
843 "1: ldq_u %1,0(%3)\n"
844 "2: ldq_u %2,7(%3)\n"
850 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
854 *reg_addr
= tmp1
|tmp2
;
857 /* Note that the store sequences do not indicate that they change
858 memory because it _should_ be affecting nothing in this context.
859 (Otherwise we have other, much larger, problems.) */
861 __asm__
__volatile__(
862 "1: ldq_u %2,1(%5)\n"
863 "2: ldq_u %1,0(%5)\n"
870 "3: stq_u %2,1(%5)\n"
871 "4: stq_u %1,0(%5)\n"
877 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
878 "=&r"(tmp3
), "=&r"(tmp4
)
879 : "r"(va
), "r"(*reg_addr
), "0"(0));
885 fake_reg
= s_reg_to_mem(alpha_read_fp_reg(reg
));
889 __asm__
__volatile__(
890 "1: ldq_u %2,3(%5)\n"
891 "2: ldq_u %1,0(%5)\n"
898 "3: stq_u %2,3(%5)\n"
899 "4: stq_u %1,0(%5)\n"
905 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
906 "=&r"(tmp3
), "=&r"(tmp4
)
907 : "r"(va
), "r"(*reg_addr
), "0"(0));
913 fake_reg
= alpha_read_fp_reg(reg
);
917 __asm__
__volatile__(
918 "1: ldq_u %2,7(%5)\n"
919 "2: ldq_u %1,0(%5)\n"
926 "3: stq_u %2,7(%5)\n"
927 "4: stq_u %1,0(%5)\n"
933 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
934 "=&r"(tmp3
), "=&r"(tmp4
)
935 : "r"(va
), "r"(*reg_addr
), "0"(0));
941 /* What instruction were you trying to use, exactly? */
945 /* Only integer loads should get here; everyone else returns early. */
951 regs
->pc
-= 4; /* make pc point to faulting insn */
953 /* We need to replicate some of the logic in mm/fault.c,
954 since we don't have access to the fault code in the
955 exception handling return path. */
956 if ((unsigned long)va
>= TASK_SIZE
)
957 si_code
= SEGV_ACCERR
;
959 struct mm_struct
*mm
= current
->mm
;
960 down_read(&mm
->mmap_sem
);
961 if (find_vma(mm
, (unsigned long)va
))
962 si_code
= SEGV_ACCERR
;
964 si_code
= SEGV_MAPERR
;
965 up_read(&mm
->mmap_sem
);
967 send_sig_fault(SIGSEGV
, si_code
, va
, 0, current
);
972 send_sig_fault(SIGBUS
, BUS_ADRALN
, va
, 0, current
);
979 /* Tell PAL-code what global pointer we want in the kernel. */
980 register unsigned long gptr
__asm__("$29");
983 /* Hack for Multia (UDB) and JENSEN: some of their SRMs have
984 a bug in the handling of the opDEC fault. Fix it up if so. */
985 if (implver() == IMPLVER_EV4
)