/*
 * Copyright (C) 1999-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
 *	- Change pt_regs_off() to make it less dependent on pt_regs structure.
 *
 * This file implements call frame unwind support for the Linux
 * kernel.  Parsing and processing the unwind information is
 * time-consuming, so this implementation translates the unwind
 * descriptors into unwind scripts.  These scripts are very simple
 * (basically a sequence of assignments) and efficient to execute.
 * They are cached for later re-use.  Each script is specific for a
 * given instruction pointer address and the set of predicate values
 * that the script depends on (most unwind descriptors are
 * unconditional and scripts often do not depend on predicates at
 * all).  This code is based on the unwind conventions described in
 * the "IA-64 Software Conventions and Runtime Architecture" manual.
 *
 * Locking rules:
 *  o updates to the global unwind data (in structure "unw") are serialized
 *    by the unw.lock spinlock
 *  o each unwind script has its own read-write lock; a thread must acquire
 *    a read lock before executing a script and must acquire a write lock
 *    before modifying a script
 *  o if both the unw.lock spinlock and a script's read-write lock must be
 *    acquired, then the read-write lock must be acquired first.
 */
29 #include <linux/module.h>
30 #include <linux/bootmem.h>
31 #include <linux/elf.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
36 #include <asm/unwind.h>
38 #include <asm/delay.h>
40 #include <asm/ptrace.h>
41 #include <asm/ptrace_offsets.h>
43 #include <asm/sections.h>
44 #include <asm/uaccess.h>
49 #define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */
50 #define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE)
52 #define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1)
53 #define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE)
55 #define UNW_STATS 0 /* WARNING: this disabled interrupts for long time-spans!! */
58 static unsigned int unw_debug_level
= UNW_DEBUG
;
59 # define UNW_DEBUG_ON(n) unw_debug_level >= n
60 /* Do not code a printk level, not all debug lines end in newline */
61 # define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
64 #else /* !UNW_DEBUG */
65 # define UNW_DEBUG_ON(n) 0
66 # define UNW_DPRINT(n, ...)
67 #endif /* UNW_DEBUG */
75 #define alloc_reg_state() kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC)
76 #define free_reg_state(usr) kfree(usr)
77 #define alloc_labeled_state() kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
78 #define free_labeled_state(usr) kfree(usr)
80 typedef unsigned long unw_word
;
81 typedef unsigned char unw_hash_index_t
;
84 spinlock_t lock
; /* spinlock for unwind data */
86 /* list of unwind tables (one per load-module) */
87 struct unw_table
*tables
;
89 unsigned long r0
; /* constant 0 for r0 */
91 /* table of registers that prologues can save (and order in which they're saved): */
92 const unsigned char save_order
[8];
94 /* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
95 unsigned short sw_off
[sizeof(struct unw_frame_info
) / 8];
97 unsigned short lru_head
; /* index of lead-recently used script */
98 unsigned short lru_tail
; /* index of most-recently used script */
100 /* index into unw_frame_info for preserved register i */
101 unsigned short preg_index
[UNW_NUM_REGS
];
103 short pt_regs_offsets
[32];
105 /* unwind table for the kernel: */
106 struct unw_table kernel_table
;
108 /* unwind table describing the gate page (kernel code that is mapped into user space): */
109 size_t gate_table_size
;
110 unsigned long *gate_table
;
112 /* hash table that maps instruction pointer to script index: */
113 unsigned short hash
[UNW_HASH_SIZE
];
116 struct unw_script cache
[UNW_CACHE_SIZE
];
119 const char *preg_name
[UNW_NUM_REGS
];
127 int collision_chain_traversals
;
130 unsigned long build_time
;
131 unsigned long run_time
;
132 unsigned long parse_time
;
139 unsigned long init_time
;
140 unsigned long unwind_time
;
147 .tables
= &unw
.kernel_table
,
148 .lock
= __SPIN_LOCK_UNLOCKED(unw
.lock
),
150 UNW_REG_RP
, UNW_REG_PFS
, UNW_REG_PSP
, UNW_REG_PR
,
151 UNW_REG_UNAT
, UNW_REG_LC
, UNW_REG_FPSR
, UNW_REG_PRI_UNAT_GR
154 offsetof(struct unw_frame_info
, pri_unat_loc
)/8, /* PRI_UNAT_GR */
155 offsetof(struct unw_frame_info
, pri_unat_loc
)/8, /* PRI_UNAT_MEM */
156 offsetof(struct unw_frame_info
, bsp_loc
)/8,
157 offsetof(struct unw_frame_info
, bspstore_loc
)/8,
158 offsetof(struct unw_frame_info
, pfs_loc
)/8,
159 offsetof(struct unw_frame_info
, rnat_loc
)/8,
160 offsetof(struct unw_frame_info
, psp
)/8,
161 offsetof(struct unw_frame_info
, rp_loc
)/8,
162 offsetof(struct unw_frame_info
, r4
)/8,
163 offsetof(struct unw_frame_info
, r5
)/8,
164 offsetof(struct unw_frame_info
, r6
)/8,
165 offsetof(struct unw_frame_info
, r7
)/8,
166 offsetof(struct unw_frame_info
, unat_loc
)/8,
167 offsetof(struct unw_frame_info
, pr_loc
)/8,
168 offsetof(struct unw_frame_info
, lc_loc
)/8,
169 offsetof(struct unw_frame_info
, fpsr_loc
)/8,
170 offsetof(struct unw_frame_info
, b1_loc
)/8,
171 offsetof(struct unw_frame_info
, b2_loc
)/8,
172 offsetof(struct unw_frame_info
, b3_loc
)/8,
173 offsetof(struct unw_frame_info
, b4_loc
)/8,
174 offsetof(struct unw_frame_info
, b5_loc
)/8,
175 offsetof(struct unw_frame_info
, f2_loc
)/8,
176 offsetof(struct unw_frame_info
, f3_loc
)/8,
177 offsetof(struct unw_frame_info
, f4_loc
)/8,
178 offsetof(struct unw_frame_info
, f5_loc
)/8,
179 offsetof(struct unw_frame_info
, fr_loc
[16 - 16])/8,
180 offsetof(struct unw_frame_info
, fr_loc
[17 - 16])/8,
181 offsetof(struct unw_frame_info
, fr_loc
[18 - 16])/8,
182 offsetof(struct unw_frame_info
, fr_loc
[19 - 16])/8,
183 offsetof(struct unw_frame_info
, fr_loc
[20 - 16])/8,
184 offsetof(struct unw_frame_info
, fr_loc
[21 - 16])/8,
185 offsetof(struct unw_frame_info
, fr_loc
[22 - 16])/8,
186 offsetof(struct unw_frame_info
, fr_loc
[23 - 16])/8,
187 offsetof(struct unw_frame_info
, fr_loc
[24 - 16])/8,
188 offsetof(struct unw_frame_info
, fr_loc
[25 - 16])/8,
189 offsetof(struct unw_frame_info
, fr_loc
[26 - 16])/8,
190 offsetof(struct unw_frame_info
, fr_loc
[27 - 16])/8,
191 offsetof(struct unw_frame_info
, fr_loc
[28 - 16])/8,
192 offsetof(struct unw_frame_info
, fr_loc
[29 - 16])/8,
193 offsetof(struct unw_frame_info
, fr_loc
[30 - 16])/8,
194 offsetof(struct unw_frame_info
, fr_loc
[31 - 16])/8,
198 offsetof(struct pt_regs
, r1
),
199 offsetof(struct pt_regs
, r2
),
200 offsetof(struct pt_regs
, r3
),
201 [4] = -1, [5] = -1, [6] = -1, [7] = -1,
202 offsetof(struct pt_regs
, r8
),
203 offsetof(struct pt_regs
, r9
),
204 offsetof(struct pt_regs
, r10
),
205 offsetof(struct pt_regs
, r11
),
206 offsetof(struct pt_regs
, r12
),
207 offsetof(struct pt_regs
, r13
),
208 offsetof(struct pt_regs
, r14
),
209 offsetof(struct pt_regs
, r15
),
210 offsetof(struct pt_regs
, r16
),
211 offsetof(struct pt_regs
, r17
),
212 offsetof(struct pt_regs
, r18
),
213 offsetof(struct pt_regs
, r19
),
214 offsetof(struct pt_regs
, r20
),
215 offsetof(struct pt_regs
, r21
),
216 offsetof(struct pt_regs
, r22
),
217 offsetof(struct pt_regs
, r23
),
218 offsetof(struct pt_regs
, r24
),
219 offsetof(struct pt_regs
, r25
),
220 offsetof(struct pt_regs
, r26
),
221 offsetof(struct pt_regs
, r27
),
222 offsetof(struct pt_regs
, r28
),
223 offsetof(struct pt_regs
, r29
),
224 offsetof(struct pt_regs
, r30
),
225 offsetof(struct pt_regs
, r31
),
227 .hash
= { [0 ... UNW_HASH_SIZE
- 1] = -1 },
230 "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
231 "r4", "r5", "r6", "r7",
232 "ar.unat", "pr", "ar.lc", "ar.fpsr",
233 "b1", "b2", "b3", "b4", "b5",
234 "f2", "f3", "f4", "f5",
235 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
236 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
242 read_only (void *addr
)
244 return (unsigned long) ((char *) addr
- (char *) &unw
.r0
) < sizeof(unw
.r0
);
248 * Returns offset of rREG in struct pt_regs.
250 static inline unsigned long
251 pt_regs_off (unsigned long reg
)
255 if (reg
< ARRAY_SIZE(unw
.pt_regs_offsets
))
256 off
= unw
.pt_regs_offsets
[reg
];
259 UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __func__
, reg
);
262 return (unsigned long) off
;
265 static inline struct pt_regs
*
266 get_scratch_regs (struct unw_frame_info
*info
)
269 /* This should not happen with valid unwind info. */
270 UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __func__
);
271 if (info
->flags
& UNW_FLAG_INTERRUPT_FRAME
)
272 info
->pt
= (unsigned long) ((struct pt_regs
*) info
->psp
- 1);
274 info
->pt
= info
->sp
- 16;
276 UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __func__
, info
->sp
, info
->pt
);
277 return (struct pt_regs
*) info
->pt
;
280 /* Unwind accessors. */
283 unw_access_gr (struct unw_frame_info
*info
, int regnum
, unsigned long *val
, char *nat
, int write
)
285 unsigned long *addr
, *nat_addr
, nat_mask
= 0, dummy_nat
;
286 struct unw_ireg
*ireg
;
289 if ((unsigned) regnum
- 1 >= 127) {
290 if (regnum
== 0 && !write
) {
291 *val
= 0; /* read r0 always returns 0 */
295 UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
301 if (regnum
>= 4 && regnum
<= 7) {
302 /* access a preserved register */
303 ireg
= &info
->r4
+ (regnum
- 4);
306 nat_addr
= addr
+ ireg
->nat
.off
;
307 switch (ireg
->nat
.type
) {
309 /* simulate getf.sig/setf.sig */
312 /* write NaTVal and be done with it */
319 if (addr
[0] == 0 && addr
[1] == 0x1ffe) {
320 /* return NaT and be done with it */
329 nat_addr
= &dummy_nat
;
333 nat_mask
= (1UL << ((long) addr
& 0x1f8)/8);
337 nat_addr
= ia64_rse_rnat_addr(addr
);
338 if ((unsigned long) addr
< info
->regstk
.limit
339 || (unsigned long) addr
>= info
->regstk
.top
)
341 UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
343 __func__
, (void *) addr
,
348 if ((unsigned long) nat_addr
>= info
->regstk
.top
)
349 nat_addr
= &info
->sw
->ar_rnat
;
350 nat_mask
= (1UL << ia64_rse_slot_num(addr
));
354 addr
= &info
->sw
->r4
+ (regnum
- 4);
355 nat_addr
= &info
->sw
->ar_unat
;
356 nat_mask
= (1UL << ((long) addr
& 0x1f8)/8);
359 /* access a scratch register */
360 pt
= get_scratch_regs(info
);
361 addr
= (unsigned long *) ((unsigned long)pt
+ pt_regs_off(regnum
));
362 if (info
->pri_unat_loc
)
363 nat_addr
= info
->pri_unat_loc
;
365 nat_addr
= &info
->sw
->caller_unat
;
366 nat_mask
= (1UL << ((long) addr
& 0x1f8)/8);
369 /* access a stacked register */
370 addr
= ia64_rse_skip_regs((unsigned long *) info
->bsp
, regnum
- 32);
371 nat_addr
= ia64_rse_rnat_addr(addr
);
372 if ((unsigned long) addr
< info
->regstk
.limit
373 || (unsigned long) addr
>= info
->regstk
.top
)
375 UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
376 "of rbs\n", __func__
);
379 if ((unsigned long) nat_addr
>= info
->regstk
.top
)
380 nat_addr
= &info
->sw
->ar_rnat
;
381 nat_mask
= (1UL << ia64_rse_slot_num(addr
));
385 if (read_only(addr
)) {
386 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
391 *nat_addr
|= nat_mask
;
393 *nat_addr
&= ~nat_mask
;
396 if ((*nat_addr
& nat_mask
) == 0) {
400 *val
= 0; /* if register is a NaT, *addr may contain kernel data! */
406 EXPORT_SYMBOL(unw_access_gr
);
409 unw_access_br (struct unw_frame_info
*info
, int regnum
, unsigned long *val
, int write
)
416 case 0: pt
= get_scratch_regs(info
); addr
= &pt
->b0
; break;
417 case 6: pt
= get_scratch_regs(info
); addr
= &pt
->b6
; break;
418 case 7: pt
= get_scratch_regs(info
); addr
= &pt
->b7
; break;
421 case 1: case 2: case 3: case 4: case 5:
422 addr
= *(&info
->b1_loc
+ (regnum
- 1));
424 addr
= &info
->sw
->b1
+ (regnum
- 1);
428 UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
433 if (read_only(addr
)) {
434 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
442 EXPORT_SYMBOL(unw_access_br
);
445 unw_access_fr (struct unw_frame_info
*info
, int regnum
, struct ia64_fpreg
*val
, int write
)
447 struct ia64_fpreg
*addr
= NULL
;
450 if ((unsigned) (regnum
- 2) >= 126) {
451 UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
457 addr
= *(&info
->f2_loc
+ (regnum
- 2));
459 addr
= &info
->sw
->f2
+ (regnum
- 2);
460 } else if (regnum
<= 15) {
462 pt
= get_scratch_regs(info
);
463 addr
= &pt
->f6
+ (regnum
- 6);
466 addr
= &info
->sw
->f12
+ (regnum
- 12);
467 } else if (regnum
<= 31) {
468 addr
= info
->fr_loc
[regnum
- 16];
470 addr
= &info
->sw
->f16
+ (regnum
- 16);
472 struct task_struct
*t
= info
->task
;
478 addr
= t
->thread
.fph
+ (regnum
- 32);
482 if (read_only(addr
)) {
483 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
491 EXPORT_SYMBOL(unw_access_fr
);
494 unw_access_ar (struct unw_frame_info
*info
, int regnum
, unsigned long *val
, int write
)
501 addr
= info
->bsp_loc
;
503 addr
= &info
->sw
->ar_bspstore
;
506 case UNW_AR_BSPSTORE
:
507 addr
= info
->bspstore_loc
;
509 addr
= &info
->sw
->ar_bspstore
;
513 addr
= info
->pfs_loc
;
515 addr
= &info
->sw
->ar_pfs
;
519 addr
= info
->rnat_loc
;
521 addr
= &info
->sw
->ar_rnat
;
525 addr
= info
->unat_loc
;
527 addr
= &info
->sw
->caller_unat
;
533 addr
= &info
->sw
->ar_lc
;
541 (*info
->cfm_loc
& ~(0x3fUL
<< 52)) | ((*val
& 0x3f) << 52);
543 *val
= (*info
->cfm_loc
>> 52) & 0x3f;
547 addr
= info
->fpsr_loc
;
549 addr
= &info
->sw
->ar_fpsr
;
553 pt
= get_scratch_regs(info
);
558 pt
= get_scratch_regs(info
);
563 pt
= get_scratch_regs(info
);
568 pt
= get_scratch_regs(info
);
573 UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
579 if (read_only(addr
)) {
580 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
588 EXPORT_SYMBOL(unw_access_ar
);
591 unw_access_pr (struct unw_frame_info
*info
, unsigned long *val
, int write
)
597 addr
= &info
->sw
->pr
;
600 if (read_only(addr
)) {
601 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
609 EXPORT_SYMBOL(unw_access_pr
);
612 /* Routines to manipulate the state stack. */
615 push (struct unw_state_record
*sr
)
617 struct unw_reg_state
*rs
;
619 rs
= alloc_reg_state();
621 printk(KERN_ERR
"unwind: cannot stack reg state!\n");
624 memcpy(rs
, &sr
->curr
, sizeof(*rs
));
629 pop (struct unw_state_record
*sr
)
631 struct unw_reg_state
*rs
= sr
->curr
.next
;
634 printk(KERN_ERR
"unwind: stack underflow!\n");
637 memcpy(&sr
->curr
, rs
, sizeof(*rs
));
641 /* Make a copy of the state stack. Non-recursive to avoid stack overflows. */
642 static struct unw_reg_state
*
643 dup_state_stack (struct unw_reg_state
*rs
)
645 struct unw_reg_state
*copy
, *prev
= NULL
, *first
= NULL
;
648 copy
= alloc_reg_state();
650 printk(KERN_ERR
"unwind.dup_state_stack: out of memory\n");
653 memcpy(copy
, rs
, sizeof(*copy
));
664 /* Free all stacked register states (but not RS itself). */
666 free_state_stack (struct unw_reg_state
*rs
)
668 struct unw_reg_state
*p
, *next
;
670 for (p
= rs
->next
; p
!= NULL
; p
= next
) {
677 /* Unwind decoder routines */
679 static enum unw_register_index __attribute_const__
680 decode_abreg (unsigned char abreg
, int memory
)
683 case 0x04 ... 0x07: return UNW_REG_R4
+ (abreg
- 0x04);
684 case 0x22 ... 0x25: return UNW_REG_F2
+ (abreg
- 0x22);
685 case 0x30 ... 0x3f: return UNW_REG_F16
+ (abreg
- 0x30);
686 case 0x41 ... 0x45: return UNW_REG_B1
+ (abreg
- 0x41);
687 case 0x60: return UNW_REG_PR
;
688 case 0x61: return UNW_REG_PSP
;
689 case 0x62: return memory
? UNW_REG_PRI_UNAT_MEM
: UNW_REG_PRI_UNAT_GR
;
690 case 0x63: return UNW_REG_RP
;
691 case 0x64: return UNW_REG_BSP
;
692 case 0x65: return UNW_REG_BSPSTORE
;
693 case 0x66: return UNW_REG_RNAT
;
694 case 0x67: return UNW_REG_UNAT
;
695 case 0x68: return UNW_REG_FPSR
;
696 case 0x69: return UNW_REG_PFS
;
697 case 0x6a: return UNW_REG_LC
;
701 UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __func__
, abreg
);
706 set_reg (struct unw_reg_info
*reg
, enum unw_where where
, int when
, unsigned long val
)
710 if (reg
->when
== UNW_WHEN_NEVER
)
715 alloc_spill_area (unsigned long *offp
, unsigned long regsize
,
716 struct unw_reg_info
*lo
, struct unw_reg_info
*hi
)
718 struct unw_reg_info
*reg
;
720 for (reg
= hi
; reg
>= lo
; --reg
) {
721 if (reg
->where
== UNW_WHERE_SPILL_HOME
) {
722 reg
->where
= UNW_WHERE_PSPREL
;
730 spill_next_when (struct unw_reg_info
**regp
, struct unw_reg_info
*lim
, unw_word t
)
732 struct unw_reg_info
*reg
;
734 for (reg
= *regp
; reg
<= lim
; ++reg
) {
735 if (reg
->where
== UNW_WHERE_SPILL_HOME
) {
741 UNW_DPRINT(0, "unwind.%s: excess spill!\n", __func__
);
745 finish_prologue (struct unw_state_record
*sr
)
747 struct unw_reg_info
*reg
;
752 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
753 * for Using Unwind Descriptors", rule 3):
755 for (i
= 0; i
< (int) ARRAY_SIZE(unw
.save_order
); ++i
) {
756 reg
= sr
->curr
.reg
+ unw
.save_order
[i
];
757 if (reg
->where
== UNW_WHERE_GR_SAVE
) {
758 reg
->where
= UNW_WHERE_GR
;
759 reg
->val
= sr
->gr_save_loc
++;
764 * Next, compute when the fp, general, and branch registers get
765 * saved. This must come before alloc_spill_area() because
766 * we need to know which registers are spilled to their home
770 unsigned char kind
, mask
= 0, *cp
= sr
->imask
;
772 static const unsigned char limit
[3] = {
773 UNW_REG_F31
, UNW_REG_R7
, UNW_REG_B5
775 struct unw_reg_info
*(regs
[3]);
777 regs
[0] = sr
->curr
.reg
+ UNW_REG_F2
;
778 regs
[1] = sr
->curr
.reg
+ UNW_REG_R4
;
779 regs
[2] = sr
->curr
.reg
+ UNW_REG_B1
;
781 for (t
= 0; t
< sr
->region_len
; ++t
) {
784 kind
= (mask
>> 2*(3-(t
& 3))) & 3;
786 spill_next_when(®s
[kind
- 1], sr
->curr
.reg
+ limit
[kind
- 1],
787 sr
->region_start
+ t
);
791 * Next, lay out the memory stack spill area:
793 if (sr
->any_spills
) {
794 off
= sr
->spill_offset
;
795 alloc_spill_area(&off
, 16, sr
->curr
.reg
+ UNW_REG_F2
, sr
->curr
.reg
+ UNW_REG_F31
);
796 alloc_spill_area(&off
, 8, sr
->curr
.reg
+ UNW_REG_B1
, sr
->curr
.reg
+ UNW_REG_B5
);
797 alloc_spill_area(&off
, 8, sr
->curr
.reg
+ UNW_REG_R4
, sr
->curr
.reg
+ UNW_REG_R7
);
802 * Region header descriptors.
806 desc_prologue (int body
, unw_word rlen
, unsigned char mask
, unsigned char grsave
,
807 struct unw_state_record
*sr
)
811 if (!(sr
->in_body
|| sr
->first_region
))
813 sr
->first_region
= 0;
815 /* check if we're done: */
816 if (sr
->when_target
< sr
->region_start
+ sr
->region_len
) {
821 region_start
= sr
->region_start
+ sr
->region_len
;
823 for (i
= 0; i
< sr
->epilogue_count
; ++i
)
825 sr
->epilogue_count
= 0;
826 sr
->epilogue_start
= UNW_WHEN_NEVER
;
828 sr
->region_start
= region_start
;
829 sr
->region_len
= rlen
;
835 for (i
= 0; i
< 4; ++i
) {
837 set_reg(sr
->curr
.reg
+ unw
.save_order
[i
], UNW_WHERE_GR
,
838 sr
->region_start
+ sr
->region_len
- 1, grsave
++);
841 sr
->gr_save_loc
= grsave
;
844 sr
->spill_offset
= 0x10; /* default to psp+16 */
849 * Prologue descriptors.
853 desc_abi (unsigned char abi
, unsigned char context
, struct unw_state_record
*sr
)
855 if (abi
== 3 && context
== 'i') {
856 sr
->flags
|= UNW_FLAG_INTERRUPT_FRAME
;
857 UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __func__
);
860 UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
861 __func__
, abi
, context
);
865 desc_br_gr (unsigned char brmask
, unsigned char gr
, struct unw_state_record
*sr
)
869 for (i
= 0; i
< 5; ++i
) {
871 set_reg(sr
->curr
.reg
+ UNW_REG_B1
+ i
, UNW_WHERE_GR
,
872 sr
->region_start
+ sr
->region_len
- 1, gr
++);
878 desc_br_mem (unsigned char brmask
, struct unw_state_record
*sr
)
882 for (i
= 0; i
< 5; ++i
) {
884 set_reg(sr
->curr
.reg
+ UNW_REG_B1
+ i
, UNW_WHERE_SPILL_HOME
,
885 sr
->region_start
+ sr
->region_len
- 1, 0);
893 desc_frgr_mem (unsigned char grmask
, unw_word frmask
, struct unw_state_record
*sr
)
897 for (i
= 0; i
< 4; ++i
) {
898 if ((grmask
& 1) != 0) {
899 set_reg(sr
->curr
.reg
+ UNW_REG_R4
+ i
, UNW_WHERE_SPILL_HOME
,
900 sr
->region_start
+ sr
->region_len
- 1, 0);
905 for (i
= 0; i
< 20; ++i
) {
906 if ((frmask
& 1) != 0) {
907 int base
= (i
< 4) ? UNW_REG_F2
: UNW_REG_F16
- 4;
908 set_reg(sr
->curr
.reg
+ base
+ i
, UNW_WHERE_SPILL_HOME
,
909 sr
->region_start
+ sr
->region_len
- 1, 0);
917 desc_fr_mem (unsigned char frmask
, struct unw_state_record
*sr
)
921 for (i
= 0; i
< 4; ++i
) {
922 if ((frmask
& 1) != 0) {
923 set_reg(sr
->curr
.reg
+ UNW_REG_F2
+ i
, UNW_WHERE_SPILL_HOME
,
924 sr
->region_start
+ sr
->region_len
- 1, 0);
932 desc_gr_gr (unsigned char grmask
, unsigned char gr
, struct unw_state_record
*sr
)
936 for (i
= 0; i
< 4; ++i
) {
937 if ((grmask
& 1) != 0)
938 set_reg(sr
->curr
.reg
+ UNW_REG_R4
+ i
, UNW_WHERE_GR
,
939 sr
->region_start
+ sr
->region_len
- 1, gr
++);
945 desc_gr_mem (unsigned char grmask
, struct unw_state_record
*sr
)
949 for (i
= 0; i
< 4; ++i
) {
950 if ((grmask
& 1) != 0) {
951 set_reg(sr
->curr
.reg
+ UNW_REG_R4
+ i
, UNW_WHERE_SPILL_HOME
,
952 sr
->region_start
+ sr
->region_len
- 1, 0);
960 desc_mem_stack_f (unw_word t
, unw_word size
, struct unw_state_record
*sr
)
962 set_reg(sr
->curr
.reg
+ UNW_REG_PSP
, UNW_WHERE_NONE
,
963 sr
->region_start
+ min_t(int, t
, sr
->region_len
- 1), 16*size
);
967 desc_mem_stack_v (unw_word t
, struct unw_state_record
*sr
)
969 sr
->curr
.reg
[UNW_REG_PSP
].when
= sr
->region_start
+ min_t(int, t
, sr
->region_len
- 1);
973 desc_reg_gr (unsigned char reg
, unsigned char dst
, struct unw_state_record
*sr
)
975 set_reg(sr
->curr
.reg
+ reg
, UNW_WHERE_GR
, sr
->region_start
+ sr
->region_len
- 1, dst
);
979 desc_reg_psprel (unsigned char reg
, unw_word pspoff
, struct unw_state_record
*sr
)
981 set_reg(sr
->curr
.reg
+ reg
, UNW_WHERE_PSPREL
, sr
->region_start
+ sr
->region_len
- 1,
986 desc_reg_sprel (unsigned char reg
, unw_word spoff
, struct unw_state_record
*sr
)
988 set_reg(sr
->curr
.reg
+ reg
, UNW_WHERE_SPREL
, sr
->region_start
+ sr
->region_len
- 1,
993 desc_rp_br (unsigned char dst
, struct unw_state_record
*sr
)
995 sr
->return_link_reg
= dst
;
999 desc_reg_when (unsigned char regnum
, unw_word t
, struct unw_state_record
*sr
)
1001 struct unw_reg_info
*reg
= sr
->curr
.reg
+ regnum
;
1003 if (reg
->where
== UNW_WHERE_NONE
)
1004 reg
->where
= UNW_WHERE_GR_SAVE
;
1005 reg
->when
= sr
->region_start
+ min_t(int, t
, sr
->region_len
- 1);
1009 desc_spill_base (unw_word pspoff
, struct unw_state_record
*sr
)
1011 sr
->spill_offset
= 0x10 - 4*pspoff
;
1014 static inline unsigned char *
1015 desc_spill_mask (unsigned char *imaskp
, struct unw_state_record
*sr
)
1018 return imaskp
+ (2*sr
->region_len
+ 7)/8;
1025 desc_epilogue (unw_word t
, unw_word ecount
, struct unw_state_record
*sr
)
1027 sr
->epilogue_start
= sr
->region_start
+ sr
->region_len
- 1 - t
;
1028 sr
->epilogue_count
= ecount
+ 1;
1032 desc_copy_state (unw_word label
, struct unw_state_record
*sr
)
1034 struct unw_labeled_state
*ls
;
1036 for (ls
= sr
->labeled_states
; ls
; ls
= ls
->next
) {
1037 if (ls
->label
== label
) {
1038 free_state_stack(&sr
->curr
);
1039 memcpy(&sr
->curr
, &ls
->saved_state
, sizeof(sr
->curr
));
1040 sr
->curr
.next
= dup_state_stack(ls
->saved_state
.next
);
1044 printk(KERN_ERR
"unwind: failed to find state labeled 0x%lx\n", label
);
1048 desc_label_state (unw_word label
, struct unw_state_record
*sr
)
1050 struct unw_labeled_state
*ls
;
1052 ls
= alloc_labeled_state();
1054 printk(KERN_ERR
"unwind.desc_label_state(): out of memory\n");
1058 memcpy(&ls
->saved_state
, &sr
->curr
, sizeof(ls
->saved_state
));
1059 ls
->saved_state
.next
= dup_state_stack(sr
->curr
.next
);
1061 /* insert into list of labeled states: */
1062 ls
->next
= sr
->labeled_states
;
1063 sr
->labeled_states
= ls
;
1067 * General descriptors.
1071 desc_is_active (unsigned char qp
, unw_word t
, struct unw_state_record
*sr
)
1073 if (sr
->when_target
<= sr
->region_start
+ min_t(int, t
, sr
->region_len
- 1))
1076 if ((sr
->pr_val
& (1UL << qp
)) == 0)
1078 sr
->pr_mask
|= (1UL << qp
);
1084 desc_restore_p (unsigned char qp
, unw_word t
, unsigned char abreg
, struct unw_state_record
*sr
)
1086 struct unw_reg_info
*r
;
1088 if (!desc_is_active(qp
, t
, sr
))
1091 r
= sr
->curr
.reg
+ decode_abreg(abreg
, 0);
1092 r
->where
= UNW_WHERE_NONE
;
1093 r
->when
= UNW_WHEN_NEVER
;
1098 desc_spill_reg_p (unsigned char qp
, unw_word t
, unsigned char abreg
, unsigned char x
,
1099 unsigned char ytreg
, struct unw_state_record
*sr
)
1101 enum unw_where where
= UNW_WHERE_GR
;
1102 struct unw_reg_info
*r
;
1104 if (!desc_is_active(qp
, t
, sr
))
1108 where
= UNW_WHERE_BR
;
1109 else if (ytreg
& 0x80)
1110 where
= UNW_WHERE_FR
;
1112 r
= sr
->curr
.reg
+ decode_abreg(abreg
, 0);
1114 r
->when
= sr
->region_start
+ min_t(int, t
, sr
->region_len
- 1);
1115 r
->val
= (ytreg
& 0x7f);
1119 desc_spill_psprel_p (unsigned char qp
, unw_word t
, unsigned char abreg
, unw_word pspoff
,
1120 struct unw_state_record
*sr
)
1122 struct unw_reg_info
*r
;
1124 if (!desc_is_active(qp
, t
, sr
))
1127 r
= sr
->curr
.reg
+ decode_abreg(abreg
, 1);
1128 r
->where
= UNW_WHERE_PSPREL
;
1129 r
->when
= sr
->region_start
+ min_t(int, t
, sr
->region_len
- 1);
1130 r
->val
= 0x10 - 4*pspoff
;
1134 desc_spill_sprel_p (unsigned char qp
, unw_word t
, unsigned char abreg
, unw_word spoff
,
1135 struct unw_state_record
*sr
)
1137 struct unw_reg_info
*r
;
1139 if (!desc_is_active(qp
, t
, sr
))
1142 r
= sr
->curr
.reg
+ decode_abreg(abreg
, 1);
1143 r
->where
= UNW_WHERE_SPREL
;
1144 r
->when
= sr
->region_start
+ min_t(int, t
, sr
->region_len
- 1);
1148 #define UNW_DEC_BAD_CODE(code) printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
1154 #define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg)
1155 #define UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg)
1157 * prologue descriptors:
1159 #define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg)
1160 #define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg)
1161 #define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg)
1162 #define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg)
1163 #define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg)
1164 #define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg)
1165 #define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg)
1166 #define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg)
1167 #define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg)
1168 #define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg)
1169 #define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg)
1170 #define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg)
1171 #define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg)
1172 #define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
1173 #define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
1174 #define UNW_DEC_PRIUNAT_GR(fmt,r,arg) desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
1175 #define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1176 #define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1177 #define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg)
1178 #define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg)
1179 #define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg))
1183 #define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg)
1184 #define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg)
1185 #define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg)
1187 * general unwind descriptors:
1189 #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg)
1190 #define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg)
1191 #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) desc_spill_psprel_p(p,t,a,o,arg)
1192 #define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) desc_spill_psprel_p(0,t,a,o,arg)
1193 #define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg)
1194 #define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg)
1195 #define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg)
1196 #define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg)
1198 #include "unwind_decoder.c"
1201 /* Unwind scripts. */
1203 static inline unw_hash_index_t
1204 hash (unsigned long ip
)
1206 /* magic number = ((sqrt(5)-1)/2)*2^64 */
1207 static const unsigned long hashmagic
= 0x9e3779b97f4a7c16UL
;
1209 return (ip
>> 4) * hashmagic
>> (64 - UNW_LOG_HASH_SIZE
);
1213 cache_match (struct unw_script
*script
, unsigned long ip
, unsigned long pr
)
1215 read_lock(&script
->lock
);
1216 if (ip
== script
->ip
&& ((pr
^ script
->pr_val
) & script
->pr_mask
) == 0)
1217 /* keep the read lock... */
1219 read_unlock(&script
->lock
);
1223 static inline struct unw_script
*
1224 script_lookup (struct unw_frame_info
*info
)
1226 struct unw_script
*script
= unw
.cache
+ info
->hint
;
1227 unsigned short index
;
1228 unsigned long ip
, pr
;
1230 if (UNW_DEBUG_ON(0))
1231 return NULL
; /* Always regenerate scripts in debug mode */
1233 STAT(++unw
.stat
.cache
.lookups
);
1238 if (cache_match(script
, ip
, pr
)) {
1239 STAT(++unw
.stat
.cache
.hinted_hits
);
1243 index
= unw
.hash
[hash(ip
)];
1244 if (index
>= UNW_CACHE_SIZE
)
1247 script
= unw
.cache
+ index
;
1249 if (cache_match(script
, ip
, pr
)) {
1250 /* update hint; no locking required as single-word writes are atomic */
1251 STAT(++unw
.stat
.cache
.normal_hits
);
1252 unw
.cache
[info
->prev_script
].hint
= script
- unw
.cache
;
1255 if (script
->coll_chain
>= UNW_HASH_SIZE
)
1257 script
= unw
.cache
+ script
->coll_chain
;
1258 STAT(++unw
.stat
.cache
.collision_chain_traversals
);
1263 * On returning, a write lock for the SCRIPT is still being held.
1265 static inline struct unw_script
*
1266 script_new (unsigned long ip
)
1268 struct unw_script
*script
, *prev
, *tmp
;
1269 unw_hash_index_t index
;
1270 unsigned short head
;
1272 STAT(++unw
.stat
.script
.news
);
1275 * Can't (easily) use cmpxchg() here because of ABA problem
1276 * that is intrinsic in cmpxchg()...
1278 head
= unw
.lru_head
;
1279 script
= unw
.cache
+ head
;
1280 unw
.lru_head
= script
->lru_chain
;
1283 * We'd deadlock here if we interrupted a thread that is holding a read lock on
1284 * script->lock. Thus, if the write_trylock() fails, we simply bail out. The
1285 * alternative would be to disable interrupts whenever we hold a read-lock, but
1288 if (!write_trylock(&script
->lock
))
1291 /* re-insert script at the tail of the LRU chain: */
1292 unw
.cache
[unw
.lru_tail
].lru_chain
= head
;
1293 unw
.lru_tail
= head
;
1295 /* remove the old script from the hash table (if it's there): */
1297 index
= hash(script
->ip
);
1298 tmp
= unw
.cache
+ unw
.hash
[index
];
1301 if (tmp
== script
) {
1303 prev
->coll_chain
= tmp
->coll_chain
;
1305 unw
.hash
[index
] = tmp
->coll_chain
;
1309 if (tmp
->coll_chain
>= UNW_CACHE_SIZE
)
1310 /* old script wasn't in the hash-table */
1312 tmp
= unw
.cache
+ tmp
->coll_chain
;
1316 /* enter new script in the hash table */
1318 script
->coll_chain
= unw
.hash
[index
];
1319 unw
.hash
[index
] = script
- unw
.cache
;
1321 script
->ip
= ip
; /* set new IP while we're holding the locks */
1323 STAT(if (script
->coll_chain
< UNW_CACHE_SIZE
) ++unw
.stat
.script
.collisions
);
1332 script_finalize (struct unw_script
*script
, struct unw_state_record
*sr
)
1334 script
->pr_mask
= sr
->pr_mask
;
1335 script
->pr_val
= sr
->pr_val
;
1337 * We could down-grade our write-lock on script->lock here but
1338 * the rwlock API doesn't offer atomic lock downgrading, so
1339 * we'll just keep the write-lock and release it later when
1340 * we're done using the script.
1345 script_emit (struct unw_script
*script
, struct unw_insn insn
)
1347 if (script
->count
>= UNW_MAX_SCRIPT_LEN
) {
1348 UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
1349 __func__
, UNW_MAX_SCRIPT_LEN
);
1352 script
->insn
[script
->count
++] = insn
;
1356 emit_nat_info (struct unw_state_record
*sr
, int i
, struct unw_script
*script
)
1358 struct unw_reg_info
*r
= sr
->curr
.reg
+ i
;
1359 enum unw_insn_opcode opc
;
1360 struct unw_insn insn
;
1361 unsigned long val
= 0;
1366 /* register got spilled to a stacked register */
1367 opc
= UNW_INSN_SETNAT_TYPE
;
1368 val
= UNW_NAT_REGSTK
;
1370 /* register got spilled to a scratch register */
1371 opc
= UNW_INSN_SETNAT_MEMSTK
;
1375 opc
= UNW_INSN_SETNAT_TYPE
;
1380 opc
= UNW_INSN_SETNAT_TYPE
;
1384 case UNW_WHERE_PSPREL
:
1385 case UNW_WHERE_SPREL
:
1386 opc
= UNW_INSN_SETNAT_MEMSTK
;
1390 UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
1391 __func__
, r
->where
);
1395 insn
.dst
= unw
.preg_index
[i
];
1397 script_emit(script
, insn
);
1401 compile_reg (struct unw_state_record
*sr
, int i
, struct unw_script
*script
)
1403 struct unw_reg_info
*r
= sr
->curr
.reg
+ i
;
1404 enum unw_insn_opcode opc
;
1405 unsigned long val
, rval
;
1406 struct unw_insn insn
;
1409 if (r
->where
== UNW_WHERE_NONE
|| r
->when
>= sr
->when_target
)
1412 opc
= UNW_INSN_MOVE
;
1413 val
= rval
= r
->val
;
1414 need_nat_info
= (i
>= UNW_REG_R4
&& i
<= UNW_REG_R7
);
1419 opc
= UNW_INSN_MOVE_STACKED
;
1421 } else if (rval
>= 4 && rval
<= 7) {
1422 if (need_nat_info
) {
1423 opc
= UNW_INSN_MOVE2
;
1426 val
= unw
.preg_index
[UNW_REG_R4
+ (rval
- 4)];
1427 } else if (rval
== 0) {
1428 opc
= UNW_INSN_MOVE_CONST
;
1431 /* register got spilled to a scratch register */
1432 opc
= UNW_INSN_MOVE_SCRATCH
;
1433 val
= pt_regs_off(rval
);
1439 val
= unw
.preg_index
[UNW_REG_F2
+ (rval
- 2)];
1440 else if (rval
>= 16 && rval
<= 31)
1441 val
= unw
.preg_index
[UNW_REG_F16
+ (rval
- 16)];
1443 opc
= UNW_INSN_MOVE_SCRATCH
;
1445 val
= offsetof(struct pt_regs
, f6
) + 16*(rval
- 6);
1447 UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
1453 if (rval
>= 1 && rval
<= 5)
1454 val
= unw
.preg_index
[UNW_REG_B1
+ (rval
- 1)];
1456 opc
= UNW_INSN_MOVE_SCRATCH
;
1458 val
= offsetof(struct pt_regs
, b0
);
1460 val
= offsetof(struct pt_regs
, b6
);
1462 val
= offsetof(struct pt_regs
, b7
);
1466 case UNW_WHERE_SPREL
:
1467 opc
= UNW_INSN_ADD_SP
;
1470 case UNW_WHERE_PSPREL
:
1471 opc
= UNW_INSN_ADD_PSP
;
1475 UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n",
1476 __func__
, i
, r
->where
);
1480 insn
.dst
= unw
.preg_index
[i
];
1482 script_emit(script
, insn
);
1484 emit_nat_info(sr
, i
, script
);
1486 if (i
== UNW_REG_PSP
) {
1488 * info->psp must contain the _value_ of the previous
1489 * sp, not it's save location. We get this by
1490 * dereferencing the value we just stored in
1493 insn
.opc
= UNW_INSN_LOAD
;
1494 insn
.dst
= insn
.val
= unw
.preg_index
[UNW_REG_PSP
];
1495 script_emit(script
, insn
);
1499 static inline const struct unw_table_entry
*
1500 lookup (struct unw_table
*table
, unsigned long rel_ip
)
1502 const struct unw_table_entry
*e
= NULL
;
1503 unsigned long lo
, hi
, mid
;
1505 /* do a binary search for right entry: */
1506 for (lo
= 0, hi
= table
->length
; lo
< hi
; ) {
1507 mid
= (lo
+ hi
) / 2;
1508 e
= &table
->array
[mid
];
1509 if (rel_ip
< e
->start_offset
)
1511 else if (rel_ip
>= e
->end_offset
)
1516 if (rel_ip
< e
->start_offset
|| rel_ip
>= e
->end_offset
)
1522 * Build an unwind script that unwinds from state OLD_STATE to the
1523 * entrypoint of the function that called OLD_STATE.
1525 static inline struct unw_script
*
1526 build_script (struct unw_frame_info
*info
)
1528 const struct unw_table_entry
*e
= NULL
;
1529 struct unw_script
*script
= NULL
;
1530 struct unw_labeled_state
*ls
, *next
;
1531 unsigned long ip
= info
->ip
;
1532 struct unw_state_record sr
;
1533 struct unw_table
*table
, *prev
;
1534 struct unw_reg_info
*r
;
1535 struct unw_insn insn
;
1539 STAT(unsigned long start
, parse_start
;)
1541 STAT(++unw
.stat
.script
.builds
; start
= ia64_get_itc());
1543 /* build state record */
1544 memset(&sr
, 0, sizeof(sr
));
1545 for (r
= sr
.curr
.reg
; r
< sr
.curr
.reg
+ UNW_NUM_REGS
; ++r
)
1546 r
->when
= UNW_WHEN_NEVER
;
1547 sr
.pr_val
= info
->pr
;
1549 UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __func__
, ip
);
1550 script
= script_new(ip
);
1552 UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __func__
);
1553 STAT(unw
.stat
.script
.build_time
+= ia64_get_itc() - start
);
1556 unw
.cache
[info
->prev_script
].hint
= script
- unw
.cache
;
1558 /* search the kernels and the modules' unwind tables for IP: */
1560 STAT(parse_start
= ia64_get_itc());
1563 for (table
= unw
.tables
; table
; table
= table
->next
) {
1564 if (ip
>= table
->start
&& ip
< table
->end
) {
1566 * Leave the kernel unwind table at the very front,
1567 * lest moving it breaks some assumption elsewhere.
1568 * Otherwise, move the matching table to the second
1569 * position in the list so that traversals can benefit
1570 * from commonality in backtrace paths.
1572 if (prev
&& prev
!= unw
.tables
) {
1573 /* unw is safe - we're already spinlocked */
1574 prev
->next
= table
->next
;
1575 table
->next
= unw
.tables
->next
;
1576 unw
.tables
->next
= table
;
1578 e
= lookup(table
, ip
- table
->segment_base
);
1584 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
1585 UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
1586 __func__
, ip
, unw
.cache
[info
->prev_script
].ip
);
1587 sr
.curr
.reg
[UNW_REG_RP
].where
= UNW_WHERE_BR
;
1588 sr
.curr
.reg
[UNW_REG_RP
].when
= -1;
1589 sr
.curr
.reg
[UNW_REG_RP
].val
= 0;
1590 compile_reg(&sr
, UNW_REG_RP
, script
);
1591 script_finalize(script
, &sr
);
1592 STAT(unw
.stat
.script
.parse_time
+= ia64_get_itc() - parse_start
);
1593 STAT(unw
.stat
.script
.build_time
+= ia64_get_itc() - start
);
1597 sr
.when_target
= (3*((ip
& ~0xfUL
) - (table
->segment_base
+ e
->start_offset
))/16
1599 hdr
= *(u64
*) (table
->segment_base
+ e
->info_offset
);
1600 dp
= (u8
*) (table
->segment_base
+ e
->info_offset
+ 8);
1601 desc_end
= dp
+ 8*UNW_LENGTH(hdr
);
1603 while (!sr
.done
&& dp
< desc_end
)
1604 dp
= unw_decode(dp
, sr
.in_body
, &sr
);
1606 if (sr
.when_target
> sr
.epilogue_start
) {
1608 * sp has been restored and all values on the memory stack below
1609 * psp also have been restored.
1611 sr
.curr
.reg
[UNW_REG_PSP
].val
= 0;
1612 sr
.curr
.reg
[UNW_REG_PSP
].where
= UNW_WHERE_NONE
;
1613 sr
.curr
.reg
[UNW_REG_PSP
].when
= UNW_WHEN_NEVER
;
1614 for (r
= sr
.curr
.reg
; r
< sr
.curr
.reg
+ UNW_NUM_REGS
; ++r
)
1615 if ((r
->where
== UNW_WHERE_PSPREL
&& r
->val
<= 0x10)
1616 || r
->where
== UNW_WHERE_SPREL
)
1619 r
->where
= UNW_WHERE_NONE
;
1620 r
->when
= UNW_WHEN_NEVER
;
1624 script
->flags
= sr
.flags
;
1627 * If RP didn't get saved, generate entry for the return link
1630 if (sr
.curr
.reg
[UNW_REG_RP
].when
>= sr
.when_target
) {
1631 sr
.curr
.reg
[UNW_REG_RP
].where
= UNW_WHERE_BR
;
1632 sr
.curr
.reg
[UNW_REG_RP
].when
= -1;
1633 sr
.curr
.reg
[UNW_REG_RP
].val
= sr
.return_link_reg
;
1634 UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
1635 __func__
, ip
, sr
.curr
.reg
[UNW_REG_RP
].where
,
1636 sr
.curr
.reg
[UNW_REG_RP
].val
);
1640 UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
1641 __func__
, table
->segment_base
+ e
->start_offset
, sr
.when_target
);
1642 for (r
= sr
.curr
.reg
; r
< sr
.curr
.reg
+ UNW_NUM_REGS
; ++r
) {
1643 if (r
->where
!= UNW_WHERE_NONE
|| r
->when
!= UNW_WHEN_NEVER
) {
1644 UNW_DPRINT(1, " %s <- ", unw
.preg_name
[r
- sr
.curr
.reg
]);
1646 case UNW_WHERE_GR
: UNW_DPRINT(1, "r%lu", r
->val
); break;
1647 case UNW_WHERE_FR
: UNW_DPRINT(1, "f%lu", r
->val
); break;
1648 case UNW_WHERE_BR
: UNW_DPRINT(1, "b%lu", r
->val
); break;
1649 case UNW_WHERE_SPREL
: UNW_DPRINT(1, "[sp+0x%lx]", r
->val
); break;
1650 case UNW_WHERE_PSPREL
: UNW_DPRINT(1, "[psp+0x%lx]", r
->val
); break;
1651 case UNW_WHERE_NONE
:
1652 UNW_DPRINT(1, "%s+0x%lx", unw
.preg_name
[r
- sr
.curr
.reg
], r
->val
);
1656 UNW_DPRINT(1, "BADWHERE(%d)", r
->where
);
1659 UNW_DPRINT(1, "\t\t%d\n", r
->when
);
1664 STAT(unw
.stat
.script
.parse_time
+= ia64_get_itc() - parse_start
);
1666 /* translate state record into unwinder instructions: */
1669 * First, set psp if we're dealing with a fixed-size frame;
1670 * subsequent instructions may depend on this value.
1672 if (sr
.when_target
> sr
.curr
.reg
[UNW_REG_PSP
].when
1673 && (sr
.curr
.reg
[UNW_REG_PSP
].where
== UNW_WHERE_NONE
)
1674 && sr
.curr
.reg
[UNW_REG_PSP
].val
!= 0) {
1675 /* new psp is sp plus frame size */
1676 insn
.opc
= UNW_INSN_ADD
;
1677 insn
.dst
= offsetof(struct unw_frame_info
, psp
)/8;
1678 insn
.val
= sr
.curr
.reg
[UNW_REG_PSP
].val
; /* frame size */
1679 script_emit(script
, insn
);
1682 /* determine where the primary UNaT is: */
1683 if (sr
.when_target
< sr
.curr
.reg
[UNW_REG_PRI_UNAT_GR
].when
)
1684 i
= UNW_REG_PRI_UNAT_MEM
;
1685 else if (sr
.when_target
< sr
.curr
.reg
[UNW_REG_PRI_UNAT_MEM
].when
)
1686 i
= UNW_REG_PRI_UNAT_GR
;
1687 else if (sr
.curr
.reg
[UNW_REG_PRI_UNAT_MEM
].when
> sr
.curr
.reg
[UNW_REG_PRI_UNAT_GR
].when
)
1688 i
= UNW_REG_PRI_UNAT_MEM
;
1690 i
= UNW_REG_PRI_UNAT_GR
;
1692 compile_reg(&sr
, i
, script
);
1694 for (i
= UNW_REG_BSP
; i
< UNW_NUM_REGS
; ++i
)
1695 compile_reg(&sr
, i
, script
);
1697 /* free labeled register states & stack: */
1699 STAT(parse_start
= ia64_get_itc());
1700 for (ls
= sr
.labeled_states
; ls
; ls
= next
) {
1702 free_state_stack(&ls
->saved_state
);
1703 free_labeled_state(ls
);
1705 free_state_stack(&sr
.curr
);
1706 STAT(unw
.stat
.script
.parse_time
+= ia64_get_itc() - parse_start
);
1708 script_finalize(script
, &sr
);
1709 STAT(unw
.stat
.script
.build_time
+= ia64_get_itc() - start
);
1714 * Apply the unwinding actions represented by OPS and update SR to
1715 * reflect the state that existed upon entry to the function that this
1716 * unwinder represents.
1719 run_script (struct unw_script
*script
, struct unw_frame_info
*state
)
1721 struct unw_insn
*ip
, *limit
, next_insn
;
1722 unsigned long opc
, dst
, val
, off
;
1723 unsigned long *s
= (unsigned long *) state
;
1724 STAT(unsigned long start
;)
1726 STAT(++unw
.stat
.script
.runs
; start
= ia64_get_itc());
1727 state
->flags
= script
->flags
;
1729 limit
= script
->insn
+ script
->count
;
1732 while (ip
++ < limit
) {
1733 opc
= next_insn
.opc
;
1734 dst
= next_insn
.dst
;
1735 val
= next_insn
.val
;
1744 case UNW_INSN_MOVE2
:
1747 s
[dst
+1] = s
[val
+1];
1757 case UNW_INSN_MOVE_SCRATCH
:
1759 s
[dst
] = (unsigned long) get_scratch_regs(state
) + val
;
1762 UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
1763 __func__
, dst
, val
);
1767 case UNW_INSN_MOVE_CONST
:
1769 s
[dst
] = (unsigned long) &unw
.r0
;
1772 UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
1778 case UNW_INSN_MOVE_STACKED
:
1779 s
[dst
] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state
->bsp
,
1783 case UNW_INSN_ADD_PSP
:
1784 s
[dst
] = state
->psp
+ val
;
1787 case UNW_INSN_ADD_SP
:
1788 s
[dst
] = state
->sp
+ val
;
1791 case UNW_INSN_SETNAT_MEMSTK
:
1792 if (!state
->pri_unat_loc
)
1793 state
->pri_unat_loc
= &state
->sw
->caller_unat
;
1794 /* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
1795 s
[dst
+1] = ((unsigned long) state
->pri_unat_loc
- s
[dst
]) | UNW_NAT_MEMSTK
;
1798 case UNW_INSN_SETNAT_TYPE
:
1804 if ((s
[val
] & (local_cpu_data
->unimpl_va_mask
| 0x7)) != 0
1805 || s
[val
] < TASK_SIZE
)
1807 UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
1812 s
[dst
] = *(unsigned long *) s
[val
];
1816 STAT(unw
.stat
.script
.run_time
+= ia64_get_itc() - start
);
1820 off
= unw
.sw_off
[val
];
1821 s
[val
] = (unsigned long) state
->sw
+ off
;
1822 if (off
>= offsetof(struct switch_stack
, r4
) && off
<= offsetof(struct switch_stack
, r7
))
1824 * We're initializing a general register: init NaT info, too. Note that
1825 * the offset is a multiple of 8 which gives us the 3 bits needed for
1828 s
[val
+1] = (offsetof(struct switch_stack
, ar_unat
) - off
) | UNW_NAT_MEMSTK
;
1833 find_save_locs (struct unw_frame_info
*info
)
1835 int have_write_lock
= 0;
1836 struct unw_script
*scr
;
1837 unsigned long flags
= 0;
1839 if ((info
->ip
& (local_cpu_data
->unimpl_va_mask
| 0xf)) || info
->ip
< TASK_SIZE
) {
1840 /* don't let obviously bad addresses pollute the cache */
1841 /* FIXME: should really be level 0 but it occurs too often. KAO */
1842 UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __func__
, info
->ip
);
1843 info
->rp_loc
= NULL
;
1847 scr
= script_lookup(info
);
1849 spin_lock_irqsave(&unw
.lock
, flags
);
1850 scr
= build_script(info
);
1852 spin_unlock_irqrestore(&unw
.lock
, flags
);
1854 "unwind.%s: failed to locate/build unwind script for ip %lx\n",
1855 __func__
, info
->ip
);
1858 have_write_lock
= 1;
1860 info
->hint
= scr
->hint
;
1861 info
->prev_script
= scr
- unw
.cache
;
1863 run_script(scr
, info
);
1865 if (have_write_lock
) {
1866 write_unlock(&scr
->lock
);
1867 spin_unlock_irqrestore(&unw
.lock
, flags
);
1869 read_unlock(&scr
->lock
);
1874 unw_valid(const struct unw_frame_info
*info
, unsigned long* p
)
1876 unsigned long loc
= (unsigned long)p
;
1877 return (loc
>= info
->regstk
.limit
&& loc
< info
->regstk
.top
) ||
1878 (loc
>= info
->memstk
.top
&& loc
< info
->memstk
.limit
);
1882 unw_unwind (struct unw_frame_info
*info
)
1884 unsigned long prev_ip
, prev_sp
, prev_bsp
;
1885 unsigned long ip
, pr
, num_regs
;
1886 STAT(unsigned long start
, flags
;)
1889 STAT(local_irq_save(flags
); ++unw
.stat
.api
.unwinds
; start
= ia64_get_itc());
1893 prev_bsp
= info
->bsp
;
1895 /* validate the return IP pointer */
1896 if (!unw_valid(info
, info
->rp_loc
)) {
1897 /* FIXME: should really be level 0 but it occurs too often. KAO */
1898 UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
1899 __func__
, info
->ip
);
1900 STAT(unw
.stat
.api
.unwind_time
+= ia64_get_itc() - start
; local_irq_restore(flags
));
1903 /* restore the ip */
1904 ip
= info
->ip
= *info
->rp_loc
;
1905 if (ip
< GATE_ADDR
) {
1906 UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __func__
, ip
);
1907 STAT(unw
.stat
.api
.unwind_time
+= ia64_get_itc() - start
; local_irq_restore(flags
));
1911 /* validate the previous stack frame pointer */
1912 if (!unw_valid(info
, info
->pfs_loc
)) {
1913 UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __func__
);
1914 STAT(unw
.stat
.api
.unwind_time
+= ia64_get_itc() - start
; local_irq_restore(flags
));
1917 /* restore the cfm: */
1918 info
->cfm_loc
= info
->pfs_loc
;
1920 /* restore the bsp: */
1923 if ((info
->flags
& UNW_FLAG_INTERRUPT_FRAME
)) {
1924 info
->pt
= info
->sp
+ 16;
1925 if ((pr
& (1UL << PRED_NON_SYSCALL
)) != 0)
1926 num_regs
= *info
->cfm_loc
& 0x7f; /* size of frame */
1928 (unsigned long *) (info
->pt
+ offsetof(struct pt_regs
, ar_pfs
));
1929 UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __func__
, info
->pt
);
1931 num_regs
= (*info
->cfm_loc
>> 7) & 0x7f; /* size of locals */
1932 info
->bsp
= (unsigned long) ia64_rse_skip_regs((unsigned long *) info
->bsp
, -num_regs
);
1933 if (info
->bsp
< info
->regstk
.limit
|| info
->bsp
> info
->regstk
.top
) {
1934 UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
1935 __func__
, info
->bsp
, info
->regstk
.limit
, info
->regstk
.top
);
1936 STAT(unw
.stat
.api
.unwind_time
+= ia64_get_itc() - start
; local_irq_restore(flags
));
1940 /* restore the sp: */
1941 info
->sp
= info
->psp
;
1942 if (info
->sp
< info
->memstk
.top
|| info
->sp
> info
->memstk
.limit
) {
1943 UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
1944 __func__
, info
->sp
, info
->memstk
.top
, info
->memstk
.limit
);
1945 STAT(unw
.stat
.api
.unwind_time
+= ia64_get_itc() - start
; local_irq_restore(flags
));
1949 if (info
->ip
== prev_ip
&& info
->sp
== prev_sp
&& info
->bsp
== prev_bsp
) {
1950 UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
1952 STAT(unw
.stat
.api
.unwind_time
+= ia64_get_itc() - start
; local_irq_restore(flags
));
1956 /* as we unwind, the saved ar.unat becomes the primary unat: */
1957 info
->pri_unat_loc
= info
->unat_loc
;
1959 /* finally, restore the predicates: */
1960 unw_get_pr(info
, &info
->pr
);
1962 retval
= find_save_locs(info
);
1963 STAT(unw
.stat
.api
.unwind_time
+= ia64_get_itc() - start
; local_irq_restore(flags
));
1966 EXPORT_SYMBOL(unw_unwind
);
1969 unw_unwind_to_user (struct unw_frame_info
*info
)
1971 unsigned long ip
, sp
, pr
= info
->pr
;
1974 unw_get_sp(info
, &sp
);
1975 if ((long)((unsigned long)info
->task
+ IA64_STK_OFFSET
- sp
)
1976 < IA64_PT_REGS_SIZE
) {
1977 UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
1981 if (unw_is_intr_frame(info
) &&
1982 (pr
& (1UL << PRED_USER_STACK
)))
1984 if (unw_get_pr (info
, &pr
) < 0) {
1985 unw_get_rp(info
, &ip
);
1986 UNW_DPRINT(0, "unwind.%s: failed to read "
1987 "predicate register (ip=0x%lx)\n",
1991 } while (unw_unwind(info
) >= 0);
1992 unw_get_ip(info
, &ip
);
1993 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
1997 EXPORT_SYMBOL(unw_unwind_to_user
);
2000 init_frame_info (struct unw_frame_info
*info
, struct task_struct
*t
,
2001 struct switch_stack
*sw
, unsigned long stktop
)
2003 unsigned long rbslimit
, rbstop
, stklimit
;
2004 STAT(unsigned long start
, flags
;)
2006 STAT(local_irq_save(flags
); ++unw
.stat
.api
.inits
; start
= ia64_get_itc());
2009 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
2010 * don't want to do that because it would be slow as each preserved register would
2011 * have to be processed. Instead, what we do here is zero out the frame info and
2012 * start the unwind process at the function that created the switch_stack frame.
2013 * When a preserved value in switch_stack needs to be accessed, run_script() will
2014 * initialize the appropriate pointer on demand.
2016 memset(info
, 0, sizeof(*info
));
2018 rbslimit
= (unsigned long) t
+ IA64_RBS_OFFSET
;
2019 stklimit
= (unsigned long) t
+ IA64_STK_OFFSET
;
2021 rbstop
= sw
->ar_bspstore
;
2022 if (rbstop
> stklimit
|| rbstop
< rbslimit
)
2025 if (stktop
<= rbstop
)
2027 if (stktop
> stklimit
)
2030 info
->regstk
.limit
= rbslimit
;
2031 info
->regstk
.top
= rbstop
;
2032 info
->memstk
.limit
= stklimit
;
2033 info
->memstk
.top
= stktop
;
2036 info
->sp
= info
->psp
= stktop
;
2038 UNW_DPRINT(3, "unwind.%s:\n"
2040 " rbs = [0x%lx-0x%lx)\n"
2041 " stk = [0x%lx-0x%lx)\n"
2045 __func__
, (unsigned long) t
, rbslimit
, rbstop
, stktop
, stklimit
,
2046 info
->pr
, (unsigned long) info
->sw
, info
->sp
);
2047 STAT(unw
.stat
.api
.init_time
+= ia64_get_itc() - start
; local_irq_restore(flags
));
2051 unw_init_frame_info (struct unw_frame_info
*info
, struct task_struct
*t
, struct switch_stack
*sw
)
2055 init_frame_info(info
, t
, sw
, (unsigned long) (sw
+ 1) - 16);
2056 info
->cfm_loc
= &sw
->ar_pfs
;
2057 sol
= (*info
->cfm_loc
>> 7) & 0x7f;
2058 info
->bsp
= (unsigned long) ia64_rse_skip_regs((unsigned long *) info
->regstk
.top
, -sol
);
2060 UNW_DPRINT(3, "unwind.%s:\n"
2064 __func__
, info
->bsp
, sol
, info
->ip
);
2065 find_save_locs(info
);
2068 EXPORT_SYMBOL(unw_init_frame_info
);
2071 unw_init_from_blocked_task (struct unw_frame_info
*info
, struct task_struct
*t
)
2073 struct switch_stack
*sw
= (struct switch_stack
*) (t
->thread
.ksp
+ 16);
2075 UNW_DPRINT(1, "unwind.%s\n", __func__
);
2076 unw_init_frame_info(info
, t
, sw
);
2078 EXPORT_SYMBOL(unw_init_from_blocked_task
);
2081 init_unwind_table (struct unw_table
*table
, const char *name
, unsigned long segment_base
,
2082 unsigned long gp
, const void *table_start
, const void *table_end
)
2084 const struct unw_table_entry
*start
= table_start
, *end
= table_end
;
2087 table
->segment_base
= segment_base
;
2089 table
->start
= segment_base
+ start
[0].start_offset
;
2090 table
->end
= segment_base
+ end
[-1].end_offset
;
2091 table
->array
= start
;
2092 table
->length
= end
- start
;
2096 unw_add_unwind_table (const char *name
, unsigned long segment_base
, unsigned long gp
,
2097 const void *table_start
, const void *table_end
)
2099 const struct unw_table_entry
*start
= table_start
, *end
= table_end
;
2100 struct unw_table
*table
;
2101 unsigned long flags
;
2103 if (end
- start
<= 0) {
2104 UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
2109 table
= kmalloc(sizeof(*table
), GFP_USER
);
2113 init_unwind_table(table
, name
, segment_base
, gp
, table_start
, table_end
);
2115 spin_lock_irqsave(&unw
.lock
, flags
);
2117 /* keep kernel unwind table at the front (it's searched most commonly): */
2118 table
->next
= unw
.tables
->next
;
2119 unw
.tables
->next
= table
;
2121 spin_unlock_irqrestore(&unw
.lock
, flags
);
2127 unw_remove_unwind_table (void *handle
)
2129 struct unw_table
*table
, *prev
;
2130 struct unw_script
*tmp
;
2131 unsigned long flags
;
2135 UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
2141 if (table
== &unw
.kernel_table
) {
2142 UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
2143 "no-can-do!\n", __func__
);
2147 spin_lock_irqsave(&unw
.lock
, flags
);
2149 /* first, delete the table: */
2151 for (prev
= (struct unw_table
*) &unw
.tables
; prev
; prev
= prev
->next
)
2152 if (prev
->next
== table
)
2155 UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
2156 __func__
, (void *) table
);
2157 spin_unlock_irqrestore(&unw
.lock
, flags
);
2160 prev
->next
= table
->next
;
2162 spin_unlock_irqrestore(&unw
.lock
, flags
);
2164 /* next, remove hash table entries for this table */
2166 for (index
= 0; index
< UNW_HASH_SIZE
; ++index
) {
2167 tmp
= unw
.cache
+ unw
.hash
[index
];
2168 if (unw
.hash
[index
] >= UNW_CACHE_SIZE
2169 || tmp
->ip
< table
->start
|| tmp
->ip
>= table
->end
)
2172 write_lock(&tmp
->lock
);
2174 if (tmp
->ip
>= table
->start
&& tmp
->ip
< table
->end
) {
2175 unw
.hash
[index
] = tmp
->coll_chain
;
2179 write_unlock(&tmp
->lock
);
2186 create_gate_table (void)
2188 const struct unw_table_entry
*entry
, *start
, *end
;
2189 unsigned long *lp
, segbase
= GATE_ADDR
;
2190 size_t info_size
, size
;
2192 Elf64_Phdr
*punw
= NULL
, *phdr
= (Elf64_Phdr
*) (GATE_ADDR
+ GATE_EHDR
->e_phoff
);
2195 for (i
= 0; i
< GATE_EHDR
->e_phnum
; ++i
, ++phdr
)
2196 if (phdr
->p_type
== PT_IA_64_UNWIND
) {
2202 printk("%s: failed to find gate DSO's unwind table!\n", __func__
);
2206 start
= (const struct unw_table_entry
*) punw
->p_vaddr
;
2207 end
= (struct unw_table_entry
*) ((char *) start
+ punw
->p_memsz
);
2210 unw_add_unwind_table("linux-gate.so", segbase
, 0, start
, end
);
2212 for (entry
= start
; entry
< end
; ++entry
)
2213 size
+= 3*8 + 8 + 8*UNW_LENGTH(*(u64
*) (segbase
+ entry
->info_offset
));
2214 size
+= 8; /* reserve space for "end of table" marker */
2216 unw
.gate_table
= kmalloc(size
, GFP_KERNEL
);
2217 if (!unw
.gate_table
) {
2218 unw
.gate_table_size
= 0;
2219 printk(KERN_ERR
"%s: unable to create unwind data for gate page!\n", __func__
);
2222 unw
.gate_table_size
= size
;
2224 lp
= unw
.gate_table
;
2225 info
= (char *) unw
.gate_table
+ size
;
2227 for (entry
= start
; entry
< end
; ++entry
, lp
+= 3) {
2228 info_size
= 8 + 8*UNW_LENGTH(*(u64
*) (segbase
+ entry
->info_offset
));
2230 memcpy(info
, (char *) segbase
+ entry
->info_offset
, info_size
);
2232 lp
[0] = segbase
+ entry
->start_offset
; /* start */
2233 lp
[1] = segbase
+ entry
->end_offset
; /* end */
2234 lp
[2] = info
- (char *) unw
.gate_table
; /* info */
2236 *lp
= 0; /* end-of-table marker */
2240 __initcall(create_gate_table
);
2246 extern void unw_hash_index_t_is_too_narrow (void);
2249 if (8*sizeof(unw_hash_index_t
) < UNW_LOG_HASH_SIZE
)
2250 unw_hash_index_t_is_too_narrow();
2252 unw
.sw_off
[unw
.preg_index
[UNW_REG_PRI_UNAT_GR
]] = SW(CALLER_UNAT
);
2253 unw
.sw_off
[unw
.preg_index
[UNW_REG_BSPSTORE
]] = SW(AR_BSPSTORE
);
2254 unw
.sw_off
[unw
.preg_index
[UNW_REG_PFS
]] = SW(AR_PFS
);
2255 unw
.sw_off
[unw
.preg_index
[UNW_REG_RP
]] = SW(B0
);
2256 unw
.sw_off
[unw
.preg_index
[UNW_REG_UNAT
]] = SW(CALLER_UNAT
);
2257 unw
.sw_off
[unw
.preg_index
[UNW_REG_PR
]] = SW(PR
);
2258 unw
.sw_off
[unw
.preg_index
[UNW_REG_LC
]] = SW(AR_LC
);
2259 unw
.sw_off
[unw
.preg_index
[UNW_REG_FPSR
]] = SW(AR_FPSR
);
2260 for (i
= UNW_REG_R4
, off
= SW(R4
); i
<= UNW_REG_R7
; ++i
, off
+= 8)
2261 unw
.sw_off
[unw
.preg_index
[i
]] = off
;
2262 for (i
= UNW_REG_B1
, off
= SW(B1
); i
<= UNW_REG_B5
; ++i
, off
+= 8)
2263 unw
.sw_off
[unw
.preg_index
[i
]] = off
;
2264 for (i
= UNW_REG_F2
, off
= SW(F2
); i
<= UNW_REG_F5
; ++i
, off
+= 16)
2265 unw
.sw_off
[unw
.preg_index
[i
]] = off
;
2266 for (i
= UNW_REG_F16
, off
= SW(F16
); i
<= UNW_REG_F31
; ++i
, off
+= 16)
2267 unw
.sw_off
[unw
.preg_index
[i
]] = off
;
2269 for (i
= 0; i
< UNW_CACHE_SIZE
; ++i
) {
2271 unw
.cache
[i
].lru_chain
= (i
- 1);
2272 unw
.cache
[i
].coll_chain
= -1;
2273 rwlock_init(&unw
.cache
[i
].lock
);
2275 unw
.lru_head
= UNW_CACHE_SIZE
- 1;
2278 init_unwind_table(&unw
.kernel_table
, "kernel", KERNEL_START
, (unsigned long) __gp
,
2279 __start_unwind
, __end_unwind
);
2283 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2285 * This system call has been deprecated. The new and improved way to get
2286 * at the kernel's unwind info is via the gate DSO. The address of the
2287 * ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR.
2289 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2291 * This system call copies the unwind data into the buffer pointed to by BUF and returns
2292 * the size of the unwind data. If BUF_SIZE is smaller than the size of the unwind data
2293 * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
2296 * The first portion of the unwind data contains an unwind table and rest contains the
2297 * associated unwind info (in no particular order). The unwind table consists of a table
2298 * of entries of the form:
2300 * u64 start; (64-bit address of start of function)
2301 * u64 end; (64-bit address of end of function)
2302 * u64 info; (BUF-relative offset to unwind info)
2304 * The end of the unwind table is indicated by an entry with a START address of zero.
2306 * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
2307 * on the format of the unwind info.
2310 * EFAULT BUF points outside your accessible address space.
2313 sys_getunwind (void __user
*buf
, size_t buf_size
)
2315 if (buf
&& buf_size
>= unw
.gate_table_size
)
2316 if (copy_to_user(buf
, unw
.gate_table
, unw
.gate_table_size
) != 0)
2318 return unw
.gate_table_size
;