/* libunwind - a platform-independent unwind library
   Copyright (C) 2001-2005 Hewlett-Packard Co
        Contributed by David Mosberger-Tang <davidm@hpl.hp.com>

This file is part of libunwind.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.  */
#include "offsets.h"
#include "regs.h"
#include "unwind_i.h"

enum ia64_script_insn_opcode
  {
    IA64_INSN_INC_PSP,            /* psp += val */
    IA64_INSN_LOAD_PSP,           /* psp = *psp_loc */
    IA64_INSN_ADD_PSP,            /* s[dst] = (s.psp + val) */
    IA64_INSN_ADD_PSP_NAT,        /* like above, but with NaT info */
    IA64_INSN_ADD_SP,             /* s[dst] = (s.sp + val) */
    IA64_INSN_ADD_SP_NAT,         /* like above, but with NaT info */
    IA64_INSN_MOVE,               /* s[dst] = s[val] */
    IA64_INSN_MOVE_NAT,           /* like above, but with NaT info */
    IA64_INSN_MOVE_NO_NAT,        /* like above, but clear NaT info */
    IA64_INSN_MOVE_STACKED,       /* s[dst] = rse_skip(*s.bsp_loc, val) */
    IA64_INSN_MOVE_STACKED_NAT,   /* like above, but with NaT info */
    IA64_INSN_MOVE_SCRATCH,       /* s[dst] = scratch reg "val" */
    IA64_INSN_MOVE_SCRATCH_NAT,   /* like above, but with NaT info */
    IA64_INSN_MOVE_SCRATCH_NO_NAT /* like above, but clear NaT info */
  };
#ifdef HAVE___THREAD
static __thread struct ia64_script_cache ia64_per_thread_cache =
  {
#ifdef HAVE_ATOMIC_OPS_H
    .busy = AO_TS_INITIALIZER
#else
    .lock = PTHREAD_MUTEX_INITIALIZER
#endif
  };
#endif
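/* Hash an instruction-pointer into an index of the script cache's hash
   table.  IPs are addresses of 16-byte aligned bundles, hence the shift
   by 4 before multiplying with the mixing constant below.  */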
static inline unw_hash_index_t CONST_ATTR
hash (unw_word_t ip)
{
  /* based on (sqrt(5)/2-1)*2^64 */
# define magic  ((unw_word_t) 0x9e3779b97f4a7c16ULL)

  return (ip >> 4) * magic >> (64 - IA64_LOG_UNW_HASH_SIZE);
}
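/* Check whether SCRIPT was built for IP with a predicate-register value
   compatible with PR (only the predicates covered by pr_mask are
   compared).  */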
static inline long
cache_match (struct ia64_script *script, unw_word_t ip, unw_word_t pr)
{
  if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
    return 1;
  return 0;
}
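/* Reset CACHE to its empty state: rebuild the LRU chain across all
   buckets, clear the collision chains, and mark every hash slot as
   unused.  */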
static inline void
flush_script_cache (struct ia64_script_cache *cache)
{
  int i;

  cache->lru_head = IA64_UNW_CACHE_SIZE - 1;
  cache->lru_tail = 0;

  for (i = 0; i < IA64_UNW_CACHE_SIZE; ++i)
    {
      if (i > 0)
        cache->buckets[i].lru_chain = (i - 1);
      cache->buckets[i].coll_chain = -1;
      cache->buckets[i].ip = 0;
    }
  for (i = 0; i < IA64_UNW_HASH_SIZE; ++i)
    cache->hash[i] = -1;
}
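/* Acquire the script cache to be used for address space AS, honoring
   its caching policy.  Returns NULL if caching is disabled or the cache
   is busy; otherwise the cache is returned locked, after being flushed
   if its generation is out of date.  */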
static inline struct ia64_script_cache *
get_script_cache (unw_addr_space_t as, intrmask_t *saved_maskp)
{
  struct ia64_script_cache *cache = &as->global_cache;
  unw_caching_policy_t caching = as->caching_policy;

  if (caching == UNW_CACHE_NONE)
    return NULL;

#ifdef HAVE_ATOMIC_H
  if (!spin_trylock_irqsave (&cache->busy, *saved_maskp))
    return NULL;
#else
# ifdef HAVE___THREAD
  if (as->caching_policy == UNW_CACHE_PER_THREAD)
    cache = &ia64_per_thread_cache;
# endif
# ifdef HAVE_ATOMIC_OPS_H
  if (AO_test_and_set (&cache->busy) == AO_TS_SET)
    return NULL;
# else
  if (likely (caching == UNW_CACHE_GLOBAL))
    {
      Debug (16, "acquiring lock\n");
      lock_acquire (&cache->lock, *saved_maskp);
    }
# endif
#endif

  if (atomic_read (&as->cache_generation) != atomic_read (&cache->generation))
    {
      flush_script_cache (cache);
      cache->generation = as->cache_generation;
    }
  return cache;
}
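/* Release a cache previously obtained via get_script_cache(), restoring
   the signal/interrupt state saved at acquisition time.  */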
static inline void
put_script_cache (unw_addr_space_t as, struct ia64_script_cache *cache,
                  intrmask_t *saved_maskp)
{
  assert (as->caching_policy != UNW_CACHE_NONE);

  Debug (16, "unmasking signals/interrupts and releasing lock\n");
#ifdef HAVE_ATOMIC_H
  spin_unlock_irqrestore (&cache->busy, *saved_maskp);
#else
# ifdef HAVE_ATOMIC_OPS_H
  AO_CLEAR (&cache->busy);
# else
  if (likely (as->caching_policy == UNW_CACHE_GLOBAL))
    lock_release (&cache->lock, *saved_maskp);
# endif
#endif
}
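/* Look up the script for the cursor's current IP and predicate values.
   The per-cursor hint is tried first, then the hash chain; returns NULL
   on a cache miss.  */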
static struct ia64_script *
script_lookup (struct ia64_script_cache *cache, struct cursor *c)
{
  struct ia64_script *script = cache->buckets + c->hint;
  unsigned short index;
  unw_word_t ip, pr;

  ip = c->ip;
  pr = c->pr;

  if (cache_match (script, ip, pr))
    return script;

  index = cache->hash[hash (ip)];
  if (index >= IA64_UNW_CACHE_SIZE)
    return NULL;

  script = cache->buckets + index;
  while (1)
    {
      if (cache_match (script, ip, pr))
        {
          /* update hint; no locking needed: single-word writes are atomic */
          c->hint = cache->buckets[c->prev_script].hint =
            (script - cache->buckets);
          return script;
        }
      if (script->coll_chain >= IA64_UNW_HASH_SIZE)
        return NULL;
      script = cache->buckets + script->coll_chain;
    }
}
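/* Initialize SCRIPT as an empty script for instruction pointer IP.  */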
static inline void
script_init (struct ia64_script *script, unw_word_t ip)
{
  script->ip = ip;
  script->hint = 0;
  script->count = 0;
  script->abi_marker = 0;
}
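/* Recycle the least-recently-used cache entry: unhook it from its old
   hash chain, move it to the tail of the LRU chain, and return it
   re-initialized for IP.  */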
static inline struct ia64_script *
script_new (struct ia64_script_cache *cache, unw_word_t ip)
{
  struct ia64_script *script, *prev, *tmp;
  unw_hash_index_t index;
  unsigned short head;

  head = cache->lru_head;
  script = cache->buckets + head;
  cache->lru_head = script->lru_chain;

  /* re-insert script at the tail of the LRU chain: */
  cache->buckets[cache->lru_tail].lru_chain = head;
  cache->lru_tail = head;

  /* remove the old script from the hash table (if it's there): */
  if (script->ip)
    {
      index = hash (script->ip);
      tmp = cache->buckets + cache->hash[index];
      prev = NULL;
      while (1)
        {
          if (tmp == script)
            {
              if (prev)
                prev->coll_chain = tmp->coll_chain;
              else
                cache->hash[index] = tmp->coll_chain;
              break;
            }
          else
            prev = tmp;
          if (tmp->coll_chain >= IA64_UNW_CACHE_SIZE)
            /* old script wasn't in the hash-table */
            break;
          tmp = cache->buckets + tmp->coll_chain;
        }
    }

  /* enter new script in the hash table */
  index = hash (ip);
  script->coll_chain = cache->hash[index];
  cache->hash[index] = script - cache->buckets;

  script_init (script, ip);
  return script;
}
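/* Record the predicate mask/values and the proc-info that the finished
   script depends on.  */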
static inline void
script_finalize (struct ia64_script *script, struct cursor *c,
                 struct ia64_state_record *sr)
{
  script->pr_mask = sr->pr_mask;
  script->pr_val = sr->pr_val;
  script->pi = c->pi;
}
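/* Append INSN to SCRIPT, dropping the instruction (with a diagnostic)
   if the script is already IA64_MAX_SCRIPT_LEN instructions long.  */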
static inline void
script_emit (struct ia64_script *script, struct ia64_script_insn insn)
{
  if (script->count >= IA64_MAX_SCRIPT_LEN)
    {
      Dprintf ("%s: script exceeds maximum size of %u instructions!\n",
               __FUNCTION__, IA64_MAX_SCRIPT_LEN);
      return;
    }
  script->insn[script->count++] = insn;
}
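/* Emit the script instruction(s) needed to restore register I, whose
   save location is described by R, when the script is run.  */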
static void
compile_reg (struct ia64_state_record *sr, int i, struct ia64_reg_info *r,
             struct ia64_script *script)
{
  enum ia64_script_insn_opcode opc;
  unsigned long val, rval;
  struct ia64_script_insn insn;
  long is_preserved_gr;

  if (r->where == IA64_WHERE_NONE || r->when >= sr->when_target)
    return;

  opc = IA64_INSN_MOVE;
  val = rval = r->val;
  is_preserved_gr = (i >= IA64_REG_R4 && i <= IA64_REG_R7);

  if (r->where == IA64_WHERE_GR)
    {
      /* Handle most common case first... */
      if (rval >= 32)
        {
          /* register got spilled to a stacked register */
          if (is_preserved_gr)
            opc = IA64_INSN_MOVE_STACKED_NAT;
          else
            opc = IA64_INSN_MOVE_STACKED;
        }
      else if (rval >= 4 && rval <= 7)
        {
          /* register got spilled to a preserved register */
          val = IA64_REG_R4 + (rval - 4);
          if (is_preserved_gr)
            opc = IA64_INSN_MOVE_NAT;
        }
      else
        {
          /* register got spilled to a scratch register */
          if (is_preserved_gr)
            opc = IA64_INSN_MOVE_SCRATCH_NAT;
          else
            opc = IA64_INSN_MOVE_SCRATCH;
          val = UNW_IA64_GR + rval;
        }
    }
  else
    {
      switch (r->where)
        {
        case IA64_WHERE_FR:
          /* Note: There is no need to handle NaT-bit info here
             (independent of is_preserved_gr), because for floating-point
             registers NaTs are represented as NaTVal, so the NaT-info
             never needs to be consulted.  */
          if (rval >= 2 && rval <= 5)
            val = IA64_REG_F2 + (rval - 2);
          else if (rval >= 16 && rval <= 31)
            val = IA64_REG_F16 + (rval - 16);
          else
            {
              opc = IA64_INSN_MOVE_SCRATCH;
              val = UNW_IA64_FR + rval;
            }
          break;

        case IA64_WHERE_BR:
          if (rval >= 1 && rval <= 5)
            {
              val = IA64_REG_B1 + (rval - 1);
              if (is_preserved_gr)
                opc = IA64_INSN_MOVE_NO_NAT;
            }
          else
            {
              opc = IA64_INSN_MOVE_SCRATCH;
              if (is_preserved_gr)
                opc = IA64_INSN_MOVE_SCRATCH_NO_NAT;
              val = UNW_IA64_BR + rval;
            }
          break;

        case IA64_WHERE_SPREL:
          if (is_preserved_gr)
            opc = IA64_INSN_ADD_SP_NAT;
          else
            {
              opc = IA64_INSN_ADD_SP;
              if (i >= IA64_REG_F2 && i <= IA64_REG_F31)
                val |= IA64_LOC_TYPE_FP;
            }
          break;

        case IA64_WHERE_PSPREL:
          if (is_preserved_gr)
            opc = IA64_INSN_ADD_PSP_NAT;
          else
            {
              opc = IA64_INSN_ADD_PSP;
              if (i >= IA64_REG_F2 && i <= IA64_REG_F31)
                val |= IA64_LOC_TYPE_FP;
            }
          break;

        default:
          Dprintf ("%s: register %u has unexpected `where' value of %u\n",
                   __FUNCTION__, i, r->where);
          break;
        }
    }

  insn.opc = opc;
  insn.dst = i;
  insn.val = val;
  script_emit (script, insn);

  if (i == IA64_REG_PSP)
    {
      /* c->psp must contain the _value_ of the previous sp, not its
         save-location.  We get this by dereferencing the value we
         just stored in loc[IA64_REG_PSP]: */
      insn.opc = IA64_INSN_LOAD_PSP;
      script_emit (script, insn);
    }
}
/* Sort the registers which got saved in decreasing order of WHEN
   value.  This is needed to ensure that the save-locations are
   updated in the proper order.  For example, suppose r4 gets spilled
   to memory and then r5 gets saved in r4.  In this case, we need to
   update the save location of r5 before the one of r4.  */
static inline int
sort_regs (struct ia64_state_record *sr, int regorder[])
{
  int r, i, j, max, max_reg, max_when, num_regs = 0;

  assert (IA64_REG_BSP == 3);

  for (r = IA64_REG_BSP; r < IA64_NUM_PREGS; ++r)
    {
      if (sr->curr.reg[r].where == IA64_WHERE_NONE
          || sr->curr.reg[r].when >= sr->when_target)
        continue;

      regorder[num_regs++] = r;
    }

  /* Simple insertion-sort.  Involves about N^2/2 comparisons and N
     exchanges.  N is often small (say, 2-5) so a fancier sorting
     algorithm may not be worthwhile.  */

  for (i = max = 0; i < num_regs - 1; ++i)
    {
      max_reg = regorder[max];
      max_when = sr->curr.reg[max_reg].when;

      for (j = i + 1; j < num_regs; ++j)
        if (sr->curr.reg[regorder[j]].when > max_when)
          {
            max = j;
            max_reg = regorder[j];
            max_when = sr->curr.reg[max_reg].when;
          }
      if (i != max)
        {
          regorder[max] = regorder[i];
          regorder[i] = max_reg;
        }
    }
  return num_regs;
}
/* Build an unwind script that unwinds from state OLD_STATE to the
   entrypoint of the function that called OLD_STATE.  */
static int
build_script (struct cursor *c, struct ia64_script *script)
{
  int num_regs, i, ret, regorder[IA64_NUM_PREGS - 3];
  struct ia64_reg_info *pri_unat;
  struct ia64_state_record sr;
  struct ia64_script_insn insn;

  ret = ia64_create_state_record (c, &sr);
  if (ret < 0)
    return ret;

  /* First, compile the update for IA64_REG_PSP.  This is important
     because later save-locations may depend on its correct (updated)
     value.  Fixed-size frames are handled specially and variable-size
     frames get handled via the normal compile_reg().  */

  if (sr.when_target > sr.curr.reg[IA64_REG_PSP].when
      && (sr.curr.reg[IA64_REG_PSP].where == IA64_WHERE_NONE)
      && sr.curr.reg[IA64_REG_PSP].val != 0)
    {
      /* new psp is psp plus frame size */
      insn.opc = IA64_INSN_INC_PSP;
      insn.val = sr.curr.reg[IA64_REG_PSP].val; /* frame size */
      script_emit (script, insn);
    }
  else
    compile_reg (&sr, IA64_REG_PSP, sr.curr.reg + IA64_REG_PSP, script);

  /* Second, compile the update for the primary UNaT, if any: */

  if (sr.when_target >= sr.curr.reg[IA64_REG_PRI_UNAT_GR].when
      || sr.when_target >= sr.curr.reg[IA64_REG_PRI_UNAT_MEM].when)
    {
      if (sr.when_target < sr.curr.reg[IA64_REG_PRI_UNAT_GR].when)
        /* (primary) NaT bits were saved to memory only */
        pri_unat = sr.curr.reg + IA64_REG_PRI_UNAT_MEM;
      else if (sr.when_target < sr.curr.reg[IA64_REG_PRI_UNAT_MEM].when)
        /* (primary) NaT bits were saved to a register only */
        pri_unat = sr.curr.reg + IA64_REG_PRI_UNAT_GR;
      else if (sr.curr.reg[IA64_REG_PRI_UNAT_MEM].when >
               sr.curr.reg[IA64_REG_PRI_UNAT_GR].when)
        /* (primary) NaT bits were last saved to memory */
        pri_unat = sr.curr.reg + IA64_REG_PRI_UNAT_MEM;
      else
        /* (primary) NaT bits were last saved to a register */
        pri_unat = sr.curr.reg + IA64_REG_PRI_UNAT_GR;

      /* Note: we always store the final primary-UNaT location in UNAT_MEM.  */
      compile_reg (&sr, IA64_REG_PRI_UNAT_MEM, pri_unat, script);
    }

  /* Third, compile the other registers in decreasing order of WHEN values.  */

  num_regs = sort_regs (&sr, regorder);
  for (i = 0; i < num_regs; ++i)
    compile_reg (&sr, regorder[i], sr.curr.reg + regorder[i], script);

  script->abi_marker = sr.abi_marker;
  script_finalize (script, c, &sr);

  ia64_free_state_record (&sr);
  return 0;
}
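/* Remember where the NaT bit of preserved register DST lives: NAT_LOC
   names the word holding the bit and BITNR its position within that
   word.  */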
static inline void
set_nat_info (struct cursor *c, unsigned long dst,
              ia64_loc_t nat_loc, uint8_t bitnr)
{
  assert (dst >= IA64_REG_R4 && dst <= IA64_REG_R7);

  c->loc[dst - IA64_REG_R4 + IA64_REG_NAT4] = nat_loc;
  c->nat_bitnr[dst - IA64_REG_R4] = bitnr;
}
/* Apply the unwinding actions encoded in SCRIPT and update the cursor C
   to reflect the state that existed upon entry to the function that this
   unwinder represents.  */
static inline int
run_script (struct ia64_script *script, struct cursor *c)
{
  struct ia64_script_insn *ip, *limit, next_insn;
  ia64_loc_t loc, nat_loc;
  unsigned long opc, dst;
  uint8_t nat_bitnr;
  unw_word_t val;
  int ret;

  c->pi = script->pi;
  ip = script->insn;
  limit = script->insn + script->count;
  next_insn = *ip;
  c->abi_marker = script->abi_marker;

  while (ip++ < limit)
    {
      opc = next_insn.opc;
      dst = next_insn.dst;
      val = next_insn.val;
      next_insn = *ip;

      /* This is by far the most common operation: */
      if (likely (opc == IA64_INSN_MOVE_STACKED))
        {
          if ((ret = ia64_get_stacked (c, val, &loc, NULL)) < 0)
            return ret;
        }
      else
        switch (opc)
          {
          case IA64_INSN_INC_PSP:
            c->psp += val;
            continue;

          case IA64_INSN_LOAD_PSP:
            if ((ret = ia64_get (c, c->loc[IA64_REG_PSP], &c->psp)) < 0)
              return ret;
            continue;

          case IA64_INSN_ADD_PSP:
            loc = IA64_LOC_ADDR (c->psp + val, (val & IA64_LOC_TYPE_FP));
            break;

          case IA64_INSN_ADD_SP:
            loc = IA64_LOC_ADDR (c->sp + val, (val & IA64_LOC_TYPE_FP));
            break;

          case IA64_INSN_MOVE_NO_NAT:
            set_nat_info (c, dst, IA64_NULL_LOC, 0);
          case IA64_INSN_MOVE:
            loc = c->loc[val];
            break;

          case IA64_INSN_MOVE_SCRATCH_NO_NAT:
            set_nat_info (c, dst, IA64_NULL_LOC, 0);
          case IA64_INSN_MOVE_SCRATCH:
            loc = ia64_scratch_loc (c, val, NULL);
            break;

          case IA64_INSN_ADD_PSP_NAT:
            loc = IA64_LOC_ADDR (c->psp + val, 0);
            assert (!IA64_IS_REG_LOC (loc));
            set_nat_info (c, dst,
                          c->loc[IA64_REG_PRI_UNAT_MEM],
                          ia64_unat_slot_num (IA64_GET_ADDR (loc)));
            break;

          case IA64_INSN_ADD_SP_NAT:
            loc = IA64_LOC_ADDR (c->sp + val, 0);
            assert (!IA64_IS_REG_LOC (loc));
            set_nat_info (c, dst,
                          c->loc[IA64_REG_PRI_UNAT_MEM],
                          ia64_unat_slot_num (IA64_GET_ADDR (loc)));
            break;

          case IA64_INSN_MOVE_NAT:
            loc = c->loc[val];
            set_nat_info (c, dst,
                          c->loc[val - IA64_REG_R4 + IA64_REG_NAT4],
                          c->nat_bitnr[val - IA64_REG_R4]);
            break;

          case IA64_INSN_MOVE_STACKED_NAT:
            if ((ret = ia64_get_stacked (c, val, &loc, &nat_loc)) < 0)
              return ret;
            assert (!IA64_IS_REG_LOC (loc));
            set_nat_info (c, dst, nat_loc, rse_slot_num (IA64_GET_ADDR (loc)));
            break;

          case IA64_INSN_MOVE_SCRATCH_NAT:
            loc = ia64_scratch_loc (c, val, NULL);
            nat_loc = ia64_scratch_loc (c, val + (UNW_IA64_NAT - UNW_IA64_GR),
                                        &nat_bitnr);
            set_nat_info (c, dst, nat_loc, nat_bitnr);
            break;
          }
      c->loc[dst] = loc;
    }
  return 0;
}
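/* Slow path used when caching is disabled or the script cache is busy:
   build a temporary script on the stack and run it right away.  */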
static int
uncached_find_save_locs (struct cursor *c)
{
  struct ia64_script script;
  int ret = 0;

  if ((ret = ia64_fetch_proc_info (c, c->ip, 1)) < 0)
    return ret;

  script_init (&script, c->ip);
  if ((ret = build_script (c, &script)) < 0)
    {
      if (ret != -UNW_ESTOPUNWIND)
        Dprintf ("%s: failed to build unwind script for ip %lx\n",
                 __FUNCTION__, (long) c->ip);
      return ret;
    }
  return run_script (&script, c);
}
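/* Find (or build and cache) the unwind script for the cursor's current
   frame and run it to update the cursor's save locations.  */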
HIDDEN int
ia64_find_save_locs (struct cursor *c)
{
  struct ia64_script_cache *cache = NULL;
  struct ia64_script *script = NULL;
  intrmask_t saved_mask;
  int ret = 0;

  if (c->as->caching_policy == UNW_CACHE_NONE)
    return uncached_find_save_locs (c);

  cache = get_script_cache (c->as, &saved_mask);
  if (!cache)
    {
      Debug (1, "contention on script-cache; doing uncached lookup\n");
      return uncached_find_save_locs (c);
    }

  script = script_lookup (cache, c);
  Debug (8, "ip %lx %s in script cache\n", (long) c->ip,
         script ? "hit" : "missed");

  if (!script || (script->count == 0 && !script->pi.unwind_info))
    if ((ret = ia64_fetch_proc_info (c, c->ip, 1)) < 0)
      goto out;

  if (!script)
    {
      script = script_new (cache, c->ip);
      if (!script)
        {
          Dprintf ("%s: failed to create unwind script\n", __FUNCTION__);
          ret = -UNW_EUNSPEC;
          goto out;
        }
    }
  cache->buckets[c->prev_script].hint = script - cache->buckets;

  if (script->count == 0)
    ret = build_script (c, script);

  assert (script->count > 0);

  c->hint = script->hint;
  c->prev_script = script - cache->buckets;

  if (ret < 0)
    {
      if (ret != -UNW_ESTOPUNWIND)
        Dprintf ("%s: failed to locate/build unwind script for ip %lx\n",
                 __FUNCTION__, (long) c->ip);
      goto out;
    }

  ret = run_script (script, c);

 out:
  put_script_cache (c->as, cache, &saved_mask);
  return ret;
}
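/* Validate cached unwind info for address space AS, checking both the
   local address space and any registered dynamic unwind info.  */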
HIDDEN int
ia64_validate_cache (unw_addr_space_t as, void *arg)
{
#ifndef UNW_REMOTE_ONLY
  if (as == unw_local_addr_space && ia64_local_validate_cache (as, arg) == 1)
    return 1;
#endif

#ifndef UNW_LOCAL_ONLY
  /* local info is up-to-date, check dynamic info.  */
  return unwi_dyn_validate_cache (as, arg);
#else
  return 0;
#endif
}
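/* Enter the cursor's current proc-info into the script cache (creating
   an entry for c->ip if necessary) without building the unwind script
   itself.  */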
HIDDEN int
ia64_cache_proc_info (struct cursor *c)
{
  struct ia64_script_cache *cache;
  struct ia64_script *script;
  intrmask_t saved_mask;
  int ret = 0;

  cache = get_script_cache (c->as, &saved_mask);
  if (!cache)
    return ret;                 /* cache is busy */

  /* Re-check to see if a cache entry has been added in the meantime: */
  script = script_lookup (cache, c);
  if (script)
    goto out;

  script = script_new (cache, c->ip);
  if (!script)
    {
      Dprintf ("%s: failed to create unwind script\n", __FUNCTION__);
      ret = -UNW_EUNSPEC;
      goto out;
    }

  script->pi = c->pi;

 out:
  put_script_cache (c->as, cache, &saved_mask);
  return ret;
}
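/* Retrieve the proc-info cached for the cursor's IP, if any.  */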
HIDDEN int
ia64_get_cached_proc_info (struct cursor *c)
{
  struct ia64_script_cache *cache;
  struct ia64_script *script;
  intrmask_t saved_mask;

  cache = get_script_cache (c->as, &saved_mask);
  if (!cache)
    return -UNW_ENOINFO;        /* cache is busy */

  script = script_lookup (cache, c);
  if (script)
    c->pi = script->pi;

  put_script_cache (c->as, cache, &saved_mask);
  return script ? 0 : -UNW_ENOINFO;
}