1 // SPDX-License-Identifier: GPL-2.0
/*
 * arch/alpha/kernel/traps.c
 *
 * (C) Copyright 1994 Linus Torvalds
 */

/*
 * This file initializes the trap entry points
 */
12 #include <linux/cpu.h>
13 #include <linux/jiffies.h>
15 #include <linux/sched/signal.h>
16 #include <linux/sched/debug.h>
17 #include <linux/tty.h>
18 #include <linux/delay.h>
19 #include <linux/extable.h>
20 #include <linux/kallsyms.h>
21 #include <linux/ratelimit.h>
23 #include <asm/gentrap.h>
24 #include <linux/uaccess.h>
25 #include <linux/unaligned.h>
26 #include <asm/sysinfo.h>
27 #include <asm/hwrpb.h>
28 #include <asm/mmu_context.h>
29 #include <asm/special_insns.h>
34 dik_show_regs(struct pt_regs
*regs
, unsigned long *r9_15
)
36 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n",
37 regs
->pc
, regs
->r26
, regs
->ps
, print_tainted());
38 printk("pc is at %pSR\n", (void *)regs
->pc
);
39 printk("ra is at %pSR\n", (void *)regs
->r26
);
40 printk("v0 = %016lx t0 = %016lx t1 = %016lx\n",
41 regs
->r0
, regs
->r1
, regs
->r2
);
42 printk("t2 = %016lx t3 = %016lx t4 = %016lx\n",
43 regs
->r3
, regs
->r4
, regs
->r5
);
44 printk("t5 = %016lx t6 = %016lx t7 = %016lx\n",
45 regs
->r6
, regs
->r7
, regs
->r8
);
48 printk("s0 = %016lx s1 = %016lx s2 = %016lx\n",
49 r9_15
[9], r9_15
[10], r9_15
[11]);
50 printk("s3 = %016lx s4 = %016lx s5 = %016lx\n",
51 r9_15
[12], r9_15
[13], r9_15
[14]);
52 printk("s6 = %016lx\n", r9_15
[15]);
55 printk("a0 = %016lx a1 = %016lx a2 = %016lx\n",
56 regs
->r16
, regs
->r17
, regs
->r18
);
57 printk("a3 = %016lx a4 = %016lx a5 = %016lx\n",
58 regs
->r19
, regs
->r20
, regs
->r21
);
59 printk("t8 = %016lx t9 = %016lx t10= %016lx\n",
60 regs
->r22
, regs
->r23
, regs
->r24
);
61 printk("t11= %016lx pv = %016lx at = %016lx\n",
62 regs
->r25
, regs
->r27
, regs
->r28
);
63 printk("gp = %016lx sp = %p\n", regs
->gp
, regs
+1);
/* Assembler mnemonics for the 32 integer registers, indexed by
   register number; used when formatting trap diagnostics. */
static char * ireg_name[] = {
	"v0",  "t0",  "t1", "t2", "t3", "t4", "t5", "t6",
	"t7",  "s0",  "s1", "s2", "s3", "s4", "s5", "s6",
	"a0",  "a1",  "a2", "a3", "a4", "a5", "t8", "t9",
	"t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"
};
77 dik_show_code(unsigned int *pc
)
82 for (i
= -6; i
< 2; i
++) {
84 if (__get_user(insn
, (unsigned int __user
*)pc
+ i
))
86 printk("%c%08x%c", i
? ' ' : '<', insn
, i
? ' ' : '>');
/*
 * Scan the stack upward from sp to the next 8KB boundary (mask
 * 0x1ff8) and print every word that is a kernel text address.
 *
 * Fix: the entry counter `i` was never advanced, so the "..."
 * truncation after ~40 printed entries could never trigger and a
 * stack full of text addresses would flood the log; count each
 * printed entry.
 */
static void
dik_show_trace(unsigned long *sp, const char *loglvl)
{
	long i = 0;
	printk("%sTrace:\n", loglvl);
	while (0x1ff8 & (unsigned long) sp) {
		extern char _stext[], _etext[];
		unsigned long tmp = *sp;
		sp++;
		if (!is_kernel_text(tmp))
			continue;
		printk("%s[<%lx>] %pSR\n", loglvl, tmp, (void *)tmp);
		if (i++ > 40) {
			printk("%s ...", loglvl);
			break;
		}
	}
	printk("%s\n", loglvl);
}
/* How many 64-bit words of raw stack show_stack() dumps before the
   symbolic back trace. */
static int kstack_depth_to_print = 24;
113 void show_stack(struct task_struct
*task
, unsigned long *sp
, const char *loglvl
)
115 unsigned long *stack
;
119 * debugging aid: "show_stack(NULL, NULL, KERN_EMERG);" prints the
120 * back trace for this cpu.
123 sp
=(unsigned long*)&sp
;
126 for(i
=0; i
< kstack_depth_to_print
; i
++) {
127 if (((long) stack
& (THREAD_SIZE
-1)) == 0)
132 printk("%s ", loglvl
);
136 pr_cont("%016lx", *stack
++);
139 dik_show_trace(sp
, loglvl
);
143 die_if_kernel(char * str
, struct pt_regs
*regs
, long err
, unsigned long *r9_15
)
148 printk("CPU %d ", hard_smp_processor_id());
150 printk("%s(%d): %s %ld\n", current
->comm
, task_pid_nr(current
), str
, err
);
151 dik_show_regs(regs
, r9_15
);
152 add_taint(TAINT_DIE
, LOCKDEP_NOW_UNRELIABLE
);
153 dik_show_trace((unsigned long *)(regs
+1), KERN_DEFAULT
);
154 dik_show_code((unsigned int *)regs
->pc
);
156 if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL
)) {
157 printk("die_if_kernel recursion detected.\n");
161 make_task_dead(SIGSEGV
);
#ifndef CONFIG_MATHEMU
/* Without the FP emulator, stub out the emulation hooks so callers
   (do_entArith and friends) see "emulation failed" (0).  A math-emu
   module can later repoint these function pointers. */
static long dummy_emul(void) { return 0; }
long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
  = (void *)dummy_emul;
EXPORT_SYMBOL_GPL(alpha_fp_emul_imprecise);
long (*alpha_fp_emul) (unsigned long pc)
  = (void *)dummy_emul;
EXPORT_SYMBOL_GPL(alpha_fp_emul);
#else
/* Built-in math emulation: the real implementations live elsewhere. */
long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
long alpha_fp_emul (unsigned long pc);
#endif
178 do_entArith(unsigned long summary
, unsigned long write_mask
,
179 struct pt_regs
*regs
)
181 long si_code
= FPE_FLTINV
;
184 /* Software-completion summary bit is set, so try to
185 emulate the instruction. If the processor supports
186 precise exceptions, we don't have to search. */
187 if (!amask(AMASK_PRECISE_TRAP
))
188 si_code
= alpha_fp_emul(regs
->pc
- 4);
190 si_code
= alpha_fp_emul_imprecise(regs
, write_mask
);
194 die_if_kernel("Arithmetic fault", regs
, 0, NULL
);
196 send_sig_fault_trapno(SIGFPE
, si_code
, (void __user
*) regs
->pc
, 0, current
);
/*
 * do_entIF: instruction-fault trap entry.  `type' distinguishes the
 * sub-fault: 0 breakpoint, 1 bugcheck, 2 gentrap, 3 FEN (FP disabled),
 * plus an opDEC path handled elsewhere in this function.
 *
 * NOTE(review): this fragment is damaged by extraction -- lines are
 * split mid-expression, and whole original lines (the gentrap
 * signo/code mapping, the opDEC case, several braces) are missing.
 * Kept byte-identical; do not treat the visible text as compilable.
 */
200 do_entIF(unsigned long type
, struct pt_regs
*regs
)
/* type == 3: user disabled the FPU via PAL_clrfen; re-enable it. */
204 if (type
== 3) { /* FEN fault */
205 /* Irritating users can call PAL_clrfen to disable the
206 FPU for the process. The kernel will then trap in
207 do_switch_stack and undo_switch_stack when we try
208 to save and restore the FP registers.
210 Given that GCC by default generates code that uses the
211 FP registers, PAL_clrfen is not useful except for DoS
212 attacks. So turn the bleeding FPU back on and be done
/* Set the FEN bit in the PCB and reload it into the hardware. */
214 current_thread_info()->pcb
.flags
|= 1;
/* NOTE(review): '¤t_thread_info' is a mis-encoded '&current_thread_info'
   (HTML entity corruption) -- confirm against the pristine source. */
215 __reload_thread(¤t_thread_info()->pcb
);
/* Kernel-mode instruction fault: decode a BUG() record (type 1
   embeds file/line after the trapping insn), then die. */
218 if (!user_mode(regs
)) {
220 const unsigned int *data
221 = (const unsigned int *) regs
->pc
;
222 printk("Kernel bug at %s:%d\n",
223 (const char *)(data
[1] | (long)data
[2] << 32),
226 #ifdef CONFIG_ALPHA_WTINT
228 /* If CALL_PAL WTINT is totally unsupported by the
229 PALcode, e.g. MILO, "emulate" it by overwriting
232 = (unsigned int *) regs
->pc
- 1;
233 if (*pinsn
== PAL_wtint
) {
234 *pinsn
= 0x47e01400; /* mov 0,$0 */
240 #endif /* ALPHA_WTINT */
241 die_if_kernel((type
== 1 ? "Kernel Bug" : "Instruction fault"),
/* User mode: dispatch on the fault sub-type. */
246 case 0: /* breakpoint */
/* If ptrace planted this breakpoint, rewind pc over it. */
247 if (ptrace_cancel_bpt(current
)) {
248 regs
->pc
-= 4; /* make pc point to former bpt */
251 send_sig_fault(SIGTRAP
, TRAP_BRKPT
, (void __user
*)regs
->pc
,
255 case 1: /* bugcheck */
256 send_sig_fault_trapno(SIGTRAP
, TRAP_UNK
,
257 (void __user
*) regs
->pc
, 0, current
);
/* gentrap: r16 carries a GEN_* code selecting signo/si_code.
   NOTE(review): the mapping table (original lines 262-317) is
   missing from this extraction. */
260 case 2: /* gentrap */
261 switch ((long) regs
->r16
) {
318 send_sig_fault_trapno(signo
, code
, (void __user
*) regs
->pc
,
326 default: /* unexpected instruction-fault type */
330 send_sig_fault(SIGILL
, ILL_ILLOPC
, (void __user
*)regs
->pc
, current
);
333 /* There is an ifdef in the PALcode in MILO that enables a
334 "kernel debugging entry point" as an unprivileged call_pal.
336 We don't want to have anything to do with it, but unfortunately
337 several versions of MILO included in distributions have it enabled,
338 and if we don't put something on the entry point we'll oops. */
341 do_entDbg(struct pt_regs
*regs
)
343 die_if_kernel("Instruction fault", regs
, 0, NULL
);
345 force_sig_fault(SIGILL
, ILL_ILLOPC
, (void __user
*)regs
->pc
);
350 * entUna has a different register layout to be reasonably simple. It
351 * needs access to all the integer registers (the kernel doesn't use
352 * fp-regs), and it needs to have them in order for simpler access.
354 * Due to the non-standard register layout (and because we don't want
355 * to handle floating-point regs), user-mode unaligned accesses are
356 * handled separately by do_entUnaUser below.
358 * Oh, btw, we don't handle the "gp" register correctly, but if we fault
359 * on a gp-register unaligned load/store, something is _very_ wrong
360 * in the kernel anyway..
/* Register layout used by the entUna trap entry (see the comment
   above): all 32 integer registers in order, followed by the
   PAL-saved frame members. */
struct allregs {
	unsigned long regs[32];
	unsigned long ps, pc, gp, a0, a1, a2;
};

/* Unaligned-access statistics: index 0 = kernel mode, 1 = user mode. */
struct unaligned_stat {
	unsigned long count, va, pc;
} unaligned[2];


/* Macro for exception fixup code to access integer registers.
   a0-a2 (r16-r18) are clobbered by the PAL entry path, so they are
   read from the trailing frame copy (index (r)+19) instead. */
#define una_reg(r)  (_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
/*
 * do_entUna: fix up an unaligned memory access taken in kernel mode.
 * Decodes the opcode and re-performs the access with the ldq_u/stq_u
 * byte-extract/insert sequences; on an unexpected fault it searches
 * the exception tables, and failing that dumps state and dies.
 *
 * NOTE(review): extraction-damaged fragment -- lines are split
 * mid-expression and many original lines (asm extract/insert bodies,
 * EXC fixup-table entries, error checks, braces) are missing.  Kept
 * byte-identical rather than guessed at.
 */
377 do_entUna(void * va
, unsigned long opcode
, unsigned long reg
,
378 struct allregs
*regs
)
380 long error
, tmp1
, tmp2
, tmp3
, tmp4
;
381 unsigned long pc
= regs
->pc
- 4;
382 unsigned long *_regs
= regs
->regs
;
383 const struct exception_table_entry
*fixup
;
/* Account this kernel-mode unaligned access. */
385 unaligned
[0].count
++;
386 unaligned
[0].va
= (unsigned long) va
;
387 unaligned
[0].pc
= pc
;
389 /* We don't want to use the generic get/put unaligned macros as
390 we want to trap exceptions. Only if we actually get an
391 exception will we decide whether we should have caught it. */
/* Load cases: ldq_u pair + extract, merged into the target register. */
394 case 0x0c: /* ldwu */
395 __asm__
__volatile__(
396 "1: ldq_u %1,0(%3)\n"
397 "2: ldq_u %2,1(%3)\n"
403 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
407 una_reg(reg
) = tmp1
|tmp2
;
/* ldl: 32-bit load, sign-extended via the (int) cast below. */
411 __asm__
__volatile__(
412 "1: ldq_u %1,0(%3)\n"
413 "2: ldq_u %2,3(%3)\n"
419 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
423 una_reg(reg
) = (int)(tmp1
|tmp2
);
/* ldq: full 64-bit load. */
427 __asm__
__volatile__(
428 "1: ldq_u %1,0(%3)\n"
429 "2: ldq_u %2,7(%3)\n"
435 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
439 una_reg(reg
) = tmp1
|tmp2
;
442 /* Note that the store sequences do not indicate that they change
443 memory because it _should_ be affecting nothing in this context.
444 (Otherwise we have other, much larger, problems.) */
/* Store cases: read-modify-write of the two quadwords spanned. */
446 __asm__
__volatile__(
447 "1: ldq_u %2,1(%5)\n"
448 "2: ldq_u %1,0(%5)\n"
455 "3: stq_u %2,1(%5)\n"
456 "4: stq_u %1,0(%5)\n"
462 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
463 "=&r"(tmp3
), "=&r"(tmp4
)
464 : "r"(va
), "r"(una_reg(reg
)), "0"(0));
/* stl variant (offset 3). */
470 __asm__
__volatile__(
471 "1: ldq_u %2,3(%5)\n"
472 "2: ldq_u %1,0(%5)\n"
479 "3: stq_u %2,3(%5)\n"
480 "4: stq_u %1,0(%5)\n"
486 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
487 "=&r"(tmp3
), "=&r"(tmp4
)
488 : "r"(va
), "r"(una_reg(reg
)), "0"(0));
/* stq variant (offset 7). */
494 __asm__
__volatile__(
495 "1: ldq_u %2,7(%5)\n"
496 "2: ldq_u %1,0(%5)\n"
503 "3: stq_u %2,7(%5)\n"
504 "4: stq_u %1,0(%5)\n"
510 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
511 "=&r"(tmp3
), "=&r"(tmp4
)
512 : "r"(va
), "r"(una_reg(reg
)), "0"(0));
/* Opcode we don't emulate in kernel mode: fatal. */
518 printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
519 pc
, va
, opcode
, reg
);
520 make_task_dead(SIGSEGV
);
523 /* Ok, we caught the exception, but we don't want it. Is there
524 someone to pass it along to? */
525 if ((fixup
= search_exception_tables(pc
)) != 0) {
527 newpc
= fixup_exception(una_reg
, fixup
, pc
);
529 printk("Forwarding unaligned exception at %lx (%lx)\n",
537 * Yikes! No one to forward the exception to.
538 * Since the registers are in a weird format, dump them ourselves.
541 printk("%s(%d): unhandled unaligned exception\n",
542 current
->comm
, task_pid_nr(current
));
544 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n",
545 pc
, una_reg(26), regs
->ps
);
546 printk("r0 = %016lx r1 = %016lx r2 = %016lx\n",
547 una_reg(0), una_reg(1), una_reg(2));
548 printk("r3 = %016lx r4 = %016lx r5 = %016lx\n",
549 una_reg(3), una_reg(4), una_reg(5));
550 printk("r6 = %016lx r7 = %016lx r8 = %016lx\n",
551 una_reg(6), una_reg(7), una_reg(8));
552 printk("r9 = %016lx r10= %016lx r11= %016lx\n",
553 una_reg(9), una_reg(10), una_reg(11));
554 printk("r12= %016lx r13= %016lx r14= %016lx\n",
555 una_reg(12), una_reg(13), una_reg(14));
556 printk("r15= %016lx\n", una_reg(15));
557 printk("r16= %016lx r17= %016lx r18= %016lx\n",
558 una_reg(16), una_reg(17), una_reg(18));
559 printk("r19= %016lx r20= %016lx r21= %016lx\n",
560 una_reg(19), una_reg(20), una_reg(21));
561 printk("r22= %016lx r23= %016lx r24= %016lx\n",
562 una_reg(22), una_reg(23), una_reg(24));
563 printk("r25= %016lx r27= %016lx r28= %016lx\n",
564 una_reg(25), una_reg(27), una_reg(28));
565 printk("gp = %016lx sp = %p\n", regs
->gp
, regs
+1);
567 dik_show_code((unsigned int *)pc
);
568 dik_show_trace((unsigned long *)(regs
+1), KERN_DEFAULT
);
/* Same recursion guard as die_if_kernel(). */
570 if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL
)) {
571 printk("die_if_kernel recursion detected.\n");
575 make_task_dead(SIGSEGV
);
/*
 * Convert an s-floating point value in memory format to the
 * corresponding value in register format.  The exponent
 * needs to be remapped to preserve non-finite values
 * (infinities, not-a-numbers, denormals).
 */
static inline unsigned long
s_mem_to_reg (unsigned long s_mem)
{
	unsigned long frac    = (s_mem >>  0) & 0x7fffff;
	unsigned long sign    = (s_mem >> 31) & 0x1;
	unsigned long exp_msb = (s_mem >> 30) & 0x1;
	unsigned long exp_low = (s_mem >> 23) & 0x7f;
	unsigned long exp;

	exp = (exp_msb << 10) | exp_low;	/* common case */
	if (exp_msb) {
		if (exp_low == 0x7f)
			exp = 0x7ff;		/* Inf / NaN */
	} else {
		if (exp_low == 0x00)
			exp = 0x000;		/* zero / denormal */
		else
			exp |= (0x7 << 7);	/* rebias (S bias 127 -> T bias 1023) */
	}
	return (sign << 63) | (exp << 52) | (frac << 29);
}
/*
 * Convert an s-floating point value in register format to the
 * corresponding value in memory format.
 */
static inline unsigned long
s_reg_to_mem (unsigned long s_reg)
{
	/* sign + exponent MSB come from bits 63-62; the low exponent
	   bits and fraction come from bits 58-29.  */
	return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34);
}
619 * Handle user-level unaligned fault. Handling user-level unaligned
620 * faults is *extremely* slow and produces nasty messages. A user
621 * program *should* fix unaligned faults ASAP.
623 * Notice that we have (almost) the regular kernel stack layout here,
624 * so finding the appropriate registers is a little more difficult
625 * than in the kernel case.
627 * Finally, we handle regular integer load/stores only. In
628 * particular, load-linked/store-conditionally and floating point
629 * load/stores are not supported. The former make no sense with
630 * unaligned faults (they are guaranteed to fail) and I don't think
631 * the latter will occur in any decent program.
633 * Sigh. We *do* have to handle some FP operations, because GCC will
634 * uses them as temporary storage for integer memory to memory copies.
635 * However, we need to deal with stt/ldt and sts/lds only.
/* Bitmask of the integer load/store opcodes we emulate. */
#define OP_INT_MASK	( 1L << 0x28 | 1L << 0x2c   /* ldl stl */	\
			| 1L << 0x29 | 1L << 0x2d   /* ldq stq */	\
			| 1L << 0x0c | 1L << 0x0d   /* ldwu stw */	\
			| 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */

/* Bitmask of the store opcodes (FP included) we emulate. */
#define OP_WRITE_MASK	( 1L << 0x26 | 1L << 0x27   /* sts stt */	\
			| 1L << 0x2c | 1L << 0x2d   /* stl stq */	\
			| 1L << 0x0d | 1L << 0x0e ) /* stw stb */
647 #define R(x) ((size_t) &((struct pt_regs *)0)->x)
649 static int unauser_reg_offsets
[32] = {
650 R(r0
), R(r1
), R(r2
), R(r3
), R(r4
), R(r5
), R(r6
), R(r7
), R(r8
),
651 /* r9 ... r15 are stored in front of regs. */
652 -56, -48, -40, -32, -24, -16, -8,
653 R(r16
), R(r17
), R(r18
),
654 R(r19
), R(r20
), R(r21
), R(r22
), R(r23
), R(r24
), R(r25
), R(r26
),
655 R(r27
), R(r28
), R(gp
),
/*
 * do_entUnaUser: fix up an unaligned access taken in user mode.
 * Honors the per-thread UAC (unaligned access control) bits, keeps
 * statistics, emulates the integer and sts/stt/lds/ldt FP accesses,
 * and delivers SIGSEGV/SIGBUS when emulation is refused or faults.
 *
 * NOTE(review): extraction-damaged fragment -- lines are split
 * mid-expression and many original lines (asm bodies, EXC fixup
 * entries, error checks, labels, braces) are missing.  Kept
 * byte-identical rather than guessed at.
 */
662 do_entUnaUser(void __user
* va
, unsigned long opcode
,
663 unsigned long reg
, struct pt_regs
*regs
)
/* Rate-limit the nag message to 5 per 5*HZ. */
665 static DEFINE_RATELIMIT_STATE(ratelimit
, 5 * HZ
, 5);
667 unsigned long tmp1
, tmp2
, tmp3
, tmp4
;
/* fake_reg is the scratch target for registers with no frame slot
   (r31/zero) and for FP values staged via s_reg_to_mem. */
668 unsigned long fake_reg
, *reg_addr
= &fake_reg
;
672 /* Check the UAC bits to decide what the user wants us to do
673 with the unaligned access. */
675 if (!(current_thread_info()->status
& TS_UAC_NOPRINT
)) {
676 if (__ratelimit(&ratelimit
)) {
677 printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
678 current
->comm
, task_pid_nr(current
),
679 regs
->pc
- 4, va
, opcode
, reg
);
/* UAC says raise SIGBUS instead of fixing up. */
682 if ((current_thread_info()->status
& TS_UAC_SIGBUS
))
684 /* Not sure why you'd want to use this, but... */
685 if ((current_thread_info()->status
& TS_UAC_NOFIX
))
688 /* Don't bother reading ds in the access check since we already
689 know that this came from the user. Also rely on the fact that
690 the page at TASK_SIZE is unmapped and so can't be touched anyway. */
691 if ((unsigned long)va
>= TASK_SIZE
)
/* Account this user-mode unaligned access. */
694 ++unaligned
[1].count
;
695 unaligned
[1].va
= (unsigned long)va
;
696 unaligned
[1].pc
= regs
->pc
- 4;
698 if ((1L << opcode
) & OP_INT_MASK
) {
699 /* it's an integer load/store */
/* Locate the register's save slot via unauser_reg_offsets. */
701 reg_addr
= (unsigned long *)
702 ((char *)regs
+ unauser_reg_offsets
[reg
]);
703 } else if (reg
== 30) {
704 /* usp in PAL regs */
707 /* zero "register" */
712 /* We don't want to use the generic get/put unaligned macros as
713 we want to trap exceptions. Only if we actually get an
714 exception will we decide whether we should have caught it. */
/* Load cases (integer and FP single/double). */
717 case 0x0c: /* ldwu */
718 __asm__
__volatile__(
719 "1: ldq_u %1,0(%3)\n"
720 "2: ldq_u %2,1(%3)\n"
726 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
730 *reg_addr
= tmp1
|tmp2
;
/* lds: single-precision FP load, converted to register format. */
734 __asm__
__volatile__(
735 "1: ldq_u %1,0(%3)\n"
736 "2: ldq_u %2,3(%3)\n"
742 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
746 alpha_write_fp_reg(reg
, s_mem_to_reg((int)(tmp1
|tmp2
)));
/* ldt: double-precision FP load, no conversion needed. */
750 __asm__
__volatile__(
751 "1: ldq_u %1,0(%3)\n"
752 "2: ldq_u %2,7(%3)\n"
758 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
762 alpha_write_fp_reg(reg
, tmp1
|tmp2
);
/* ldl: 32-bit integer load, sign-extended via the (int) cast. */
766 __asm__
__volatile__(
767 "1: ldq_u %1,0(%3)\n"
768 "2: ldq_u %2,3(%3)\n"
774 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
778 *reg_addr
= (int)(tmp1
|tmp2
);
/* ldq: 64-bit integer load. */
782 __asm__
__volatile__(
783 "1: ldq_u %1,0(%3)\n"
784 "2: ldq_u %2,7(%3)\n"
790 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
)
794 *reg_addr
= tmp1
|tmp2
;
797 /* Note that the store sequences do not indicate that they change
798 memory because it _should_ be affecting nothing in this context.
799 (Otherwise we have other, much larger, problems.) */
/* Store cases: read-modify-write of the spanned quadwords. */
801 __asm__
__volatile__(
802 "1: ldq_u %2,1(%5)\n"
803 "2: ldq_u %1,0(%5)\n"
810 "3: stq_u %2,1(%5)\n"
811 "4: stq_u %1,0(%5)\n"
817 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
818 "=&r"(tmp3
), "=&r"(tmp4
)
819 : "r"(va
), "r"(*reg_addr
), "0"(0));
/* sts: stage the FP register in memory format, then store. */
825 fake_reg
= s_reg_to_mem(alpha_read_fp_reg(reg
));
829 __asm__
__volatile__(
830 "1: ldq_u %2,3(%5)\n"
831 "2: ldq_u %1,0(%5)\n"
838 "3: stq_u %2,3(%5)\n"
839 "4: stq_u %1,0(%5)\n"
845 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
846 "=&r"(tmp3
), "=&r"(tmp4
)
847 : "r"(va
), "r"(*reg_addr
), "0"(0));
/* stt: double-precision FP store, value used as-is. */
853 fake_reg
= alpha_read_fp_reg(reg
);
857 __asm__
__volatile__(
858 "1: ldq_u %2,7(%5)\n"
859 "2: ldq_u %1,0(%5)\n"
866 "3: stq_u %2,7(%5)\n"
867 "4: stq_u %1,0(%5)\n"
873 : "=r"(error
), "=&r"(tmp1
), "=&r"(tmp2
),
874 "=&r"(tmp3
), "=&r"(tmp4
)
875 : "r"(va
), "r"(*reg_addr
), "0"(0));
881 /* What instruction were you trying to use, exactly? */
885 /* Only integer loads should get here; everyone else returns early. */
/* Fault during emulation: point pc back at the insn and signal. */
891 regs
->pc
-= 4; /* make pc point to faulting insn */
893 /* We need to replicate some of the logic in mm/fault.c,
894 since we don't have access to the fault code in the
895 exception handling return path. */
896 if ((unsigned long)va
>= TASK_SIZE
)
897 si_code
= SEGV_ACCERR
;
/* Address below TASK_SIZE: distinguish mapped-but-denied from
   unmapped under the mmap read lock. */
899 struct mm_struct
*mm
= current
->mm
;
901 if (find_vma(mm
, (unsigned long)va
))
902 si_code
= SEGV_ACCERR
;
904 si_code
= SEGV_MAPERR
;
905 mmap_read_unlock(mm
);
907 send_sig_fault(SIGSEGV
, si_code
, va
, current
);
/* UAC-requested SIGBUS path. */
912 send_sig_fault(SIGBUS
, BUS_ADRALN
, va
, current
);
919 /* Tell PAL-code what global pointer we want in the kernel. */
920 register unsigned long gptr
__asm__("$29");