/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2006
   Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
# include <shlib-compat.h>

#if !defined _LIBC || SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_2_5)

#include <bits/libc-lock.h>

#define NO_BASE_OF_ENCODED_VALUE
#include <unwind-pe.h>
#include <unwind-dw2-fde.h>

#ifndef _Unwind_Find_FDE
#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;
__libc_lock_define_initialized (static, object_mutex)
#define init_object_mutex_once()
#define __gthread_mutex_lock(m) __libc_lock_lock (*(m))
#define __gthread_mutex_unlock(m) __libc_lock_unlock (*(m))
void __register_frame_info_bases_internal (void *begin, struct object *ob,
                                           void *tbase, void *dbase);
void __register_frame_info_table_bases_internal (void *begin,
                                                 struct object *ob,
                                                 void *tbase, void *dbase);
void *__deregister_frame_info_bases_internal (void *begin);
#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#else
static __gthread_mutex_t object_mutex;
#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
init_object_mutex (void)
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
init_object_mutex_once (void)
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
#define init_object_mutex_once()

/* Called from crtbegin.o to register the unwind info for an object.  */
__register_frame_info_bases (void *begin, struct object *ob,
                             void *tbase, void *dbase)
  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)

  ob->pc_begin = (void *)-1;
  ob->u.single = begin;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;

  __gthread_mutex_unlock (&object_mutex);
INTDEF(__register_frame_info_bases)
__register_frame_info (void *begin, struct object *ob)
  INTUSE(__register_frame_info_bases) (begin, ob, 0, 0);
__register_frame (void *begin)
  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)

  ob = (struct object *) malloc (sizeof (struct object));
  INTUSE(__register_frame_info_bases) (begin, ob, 0, 0);
/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */
__register_frame_info_table_bases (void *begin, struct object *ob,
                                   void *tbase, void *dbase)
  ob->pc_begin = (void *)-1;

  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;
  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;

  __gthread_mutex_unlock (&object_mutex);
INTDEF(__register_frame_info_table_bases)
__register_frame_info_table (void *begin, struct object *ob)
  INTUSE(__register_frame_info_table_bases) (begin, ob, 0, 0);
__register_frame_table (void *begin)
  struct object *ob = (struct object *) malloc (sizeof (struct object));
  INTUSE(__register_frame_info_table_bases) (begin, ob, 0, 0);
/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll abort.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */
__deregister_frame_info_bases (void *begin)
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin == 0)
  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p; p = &(*p)->next)
    if ((*p)->u.single == begin)
  for (p = &seen_objects; *p; p = &(*p)->next)
    if ((*p)->s.b.sorted)

        if ((*p)->u.sort->orig_data == begin)

        if ((*p)->u.single == begin)
  __gthread_mutex_unlock (&object_mutex);

  __gthread_mutex_unlock (&object_mutex);
INTDEF(__deregister_frame_info_bases)
__deregister_frame_info (void *begin)
  return INTUSE(__deregister_frame_info_bases) (begin);
__deregister_frame (void *begin)
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (INTUSE(__deregister_frame_info_bases) (begin));
/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */
base_from_object (unsigned char encoding, struct object *ob)
  if (encoding == DW_EH_PE_omit)

  switch (encoding & 0x70)
    case DW_EH_PE_absptr:
    case DW_EH_PE_aligned:

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
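/* Illustrative sketch, not part of the original file: how a pc_begin
   stored as DW_EH_PE_datarel | DW_EH_PE_sdata4 would be rebuilt into an
   absolute address.  Only the upper nibble selects the base; the 4-byte
   signed offset itself is read by read_encoded_value_with_base in the
   real code.  The helper name is hypothetical.  */
#if 0
static _Unwind_Ptr
decode_datarel_example (struct object *ob, _Unwind_Sword offset)
{
  /* base_from_object masks with 0x70, so this yields ob->dbase.  */
  _Unwind_Ptr base = base_from_object (DW_EH_PE_datarel | DW_EH_PE_sdata4, ob);
  return base + offset;
}
#endif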
/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */
get_cie_encoding (struct dwarf_cie *cie)
  const unsigned char *aug, *p;
  aug = cie->augmentation;

    return DW_EH_PE_absptr;

  p = aug + strlen (aug) + 1;       /* Skip the augmentation string.  */
  p = read_uleb128 (p, &utmp);      /* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);      /* Skip data alignment.  */
  p++;                              /* Skip return address column.  */

  aug++;                            /* Skip 'z' */
  p = read_uleb128 (p, &utmp);      /* Skip augmentation length.  */
      /* This is what we're looking for.  */

      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
          /* ??? Avoid dereferencing indirect pointers, since we're
             faking the base address.  Gotta keep DW_EH_PE_aligned
             intact, however.  */
          p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);

      else if (*aug == 'L')

  /* Otherwise end of string, or unknown augmentation.  */
  return DW_EH_PE_absptr;
get_fde_encoding (struct dwarf_fde *f)
  return get_cie_encoding (get_cie (f));
/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time.  But the linkers are not yet prepared for this.)  */

/* Comparison routines.  Three variants of increasing complexity.  */
fde_unencoded_compare (struct object *ob __attribute__((unused)),
                       fde *x, fde *y)
  _Unwind_Ptr x_ptr = *(_Unwind_Ptr *) x->pc_begin;
  _Unwind_Ptr y_ptr = *(_Unwind_Ptr *) y->pc_begin;
fde_single_encoding_compare (struct object *ob, fde *x, fde *y)
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);
fde_mixed_encoding_compare (struct object *ob, fde *x, fde *y)
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
                                x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
                                y->pc_begin, &y_ptr);
typedef int (*fde_compare_t) (struct object *, fde *, fde *);
/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur.  They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */
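/* Worked example, not part of the original file: for the sample input
   above, fde_split below leaves the ascending run
     101 102 103 105 108 110 111 115 119 125 126 129 130
   in LINEAR and moves the out-of-place high values
     127 128 190 160
   into ERRATIC; the erratic entries are then heap-sorted and merged back
   into the linear sequence.  */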
struct fde_accumulator
  struct fde_vector *linear;
  struct fde_vector *erratic;
start_fde_sort (struct fde_accumulator *accu, size_t count)

  size = sizeof (struct fde_vector) + sizeof (fde *) * count;
  if ((accu->linear = (struct fde_vector *) malloc (size)))
      accu->linear->count = 0;
      if ((accu->erratic = (struct fde_vector *) malloc (size)))
        accu->erratic->count = 0;
fde_insert (struct fde_accumulator *accu, fde *this_fde)
  accu->linear->array[accu->linear->count++] = this_fde;
/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   The longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, so we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */
fde_split (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *linear, struct fde_vector *erratic)
  size_t count = linear->count;
  fde **chain_end = &marker;
  /* This should optimize out, but it is wise to make sure this assumption
     is correct.  Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  if (sizeof (fde *) != sizeof (fde **))
  for (i = 0; i < count; i++)

      for (probe = chain_end;
           probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;

          chain_end = (fde **) erratic->array[probe - linear->array];
          erratic->array[probe - linear->array] = NULL;

      erratic->array[i] = (fde *) chain_end;
      chain_end = &linear->array[i];
  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
                struct fde_vector *erratic)
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
#define SWAP(x,y) do { fde * tmp = x; x = y; y = tmp; } while (0)
  size_t n = erratic->count;
      /* Invariant: a[m..n-1] is a heap.  */
      for (i = m; 2*i+1 < n; )
          if (2*i+2 < n
              && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
              && fde_compare (ob, a[2*i+2], a[i]) > 0)
            SWAP (a[i], a[2*i+2]);
          else if (fde_compare (ob, a[2*i+1], a[i]) > 0)
            SWAP (a[i], a[2*i+1]);
      /* Invariant: a[0..n-1] is a heap.  */
      for (i = 0; 2*i+1 < n; )
          if (2*i+2 < n
              && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
              && fde_compare (ob, a[2*i+2], a[i]) > 0)
            SWAP (a[i], a[2*i+2]);
          else if (fde_compare (ob, a[2*i+1], a[i]) > 0)
            SWAP (a[i], a[2*i+1]);
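/* Illustrative sketch, not part of the original file: the sift-down step
   performed by the two loops above, written out for plain integers.  After
   the call a[i..n-1] satisfies the heap property, provided the entries
   below position i already did.  The function name is hypothetical.  */
#if 0
static void
sift_down_example (int *a, size_t i, size_t n)
{
  size_t child;
  int tmp;

  while (2 * i + 1 < n)
    {
      child = 2 * i + 1;
      if (child + 1 < n && a[child + 1] > a[child])
        child++;                        /* Pick the larger child.  */
      if (a[child] <= a[i])
        break;                          /* Heap property already holds.  */
      tmp = a[i]; a[i] = a[child]; a[child] = tmp;
      i = child;
    }
}
#endif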
/* Merge V1 and V2, both sorted, and put the result into V1.  */
fde_merge (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *v1, struct fde_vector *v2)

          fde2 = v2->array[i2];
          while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
              v1->array[i1+i2] = v1->array[i1-1];

          v1->array[i1+i2] = fde2;

      v1->count += v2->count;
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
  fde_compare_t fde_compare;

  if (accu->linear->count != count)
  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      if (accu->linear->count + accu->erratic->count != count)

      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);

      /* We've not managed to malloc an erratic array,
         so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */
classify_object_over_fdes (struct object *ob, fde *this_fde)
  struct dwarf_cie *last_cie = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
      struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;
      if (this_fde->CIE_delta == 0)

      /* Determine the encoding for this FDE.  Note mixed encoded
         objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
          encoding = get_cie_encoding (this_cie);
          base = base_from_object (encoding, ob);
          if (ob->s.b.encoding == DW_EH_PE_omit)
            ob->s.b.encoding = encoding;
          else if (ob->s.b.encoding != encoding)
            ob->s.b.mixed_encoding = 1;
      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
         In these cases, the function address will be NULL, but if
         the encoding is smaller than a pointer a true NULL may not
         be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
        mask = (1L << (mask << 3)) - 1;
      if ((pc_begin & mask) == 0)

      if ((void *) pc_begin < ob->pc_begin)
        ob->pc_begin = (void *) pc_begin;
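/* Worked example, not part of the original file: for a DW_EH_PE_udata2
   encoding size_of_encoded_value returns 2, so the mask above becomes
   (1L << 16) - 1 == 0xffff; a pc_begin whose low 16 bits are all zero is
   then taken to be the NULL left behind by a discarded link-once function
   and the FDE is not counted.  */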
add_fdes (struct object *ob, struct fde_accumulator *accu, fde *this_fde)
  struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
      struct dwarf_cie *this_cie;
      if (this_fde->CIE_delta == 0)

      if (ob->s.b.mixed_encoding)
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
      if (encoding == DW_EH_PE_absptr)
          if (*(_Unwind_Ptr *) this_fde->pc_begin == 0)

          _Unwind_Ptr pc_begin, mask;

          read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                        &pc_begin);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;

          if ((pc_begin & mask) == 0)

      fde_insert (accu, this_fde);
/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */
init_object (struct object* ob)
  struct fde_accumulator accu;

  count = ob->s.b.count;
  if (ob->s.b.from_array)
      fde **p = ob->u.array;
      for (count = 0; *p; ++p)
        count += classify_object_over_fdes (ob, *p);
  else
    count = classify_object_over_fdes (ob, ob->u.single);
  /* The count field we have in the main struct object is somewhat
     limited, but should suffice for virtually all cases.  If the
     counted value doesn't fit, re-write a zero.  The worst that
     happens is that we re-count next time -- admittedly non-trivial
     in that this implies some 2M fdes, but at least we function.  */
  ob->s.b.count = count;
  if (ob->s.b.count != count)
  if (!start_fde_sort (&accu, count))
  if (ob->s.b.from_array)
      for (p = ob->u.array; *p; ++p)
        add_fdes (ob, &accu, *p);
  else
    add_fdes (ob, &accu, ob->u.single);
  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;
/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */
linear_search_fdes (struct object *ob, fde *this_fde, void *pc)
  struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);
  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
      struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      if (this_fde->CIE_delta == 0)

      if (ob->s.b.mixed_encoding)
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
      if (encoding == DW_EH_PE_absptr)
          pc_begin = ((_Unwind_Ptr *) this_fde->pc_begin)[0];
          pc_range = ((_Unwind_Ptr *) this_fde->pc_begin)[1];

          p = read_encoded_value_with_base (encoding, base,
                                            this_fde->pc_begin, &pc_begin);
          read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
      /* Take care to ignore link-once functions that were removed.
         In these cases, the function address will be NULL, but if
         the encoding is smaller than a pointer a true NULL may not
         be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
        mask = (1L << (mask << 3)) - 1;

      if ((pc_begin & mask) == 0)

      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */
binary_search_unencoded_fdes (struct object *ob, void *pc)
  struct fde_vector *vec = ob->u.sort;
  for (lo = 0, hi = vec->count; lo < hi; )
      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];

      pc_begin = ((void **) f->pc_begin)[0];
      pc_range = ((uaddr *) f->pc_begin)[1];

      else if (pc >= pc_begin + pc_range)
binary_search_single_encoding_fdes (struct object *ob, void *pc)
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  for (lo = 0, hi = vec->count; lo < hi; )
      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
                                        &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
  struct fde_vector *vec = ob->u.sort;
  for (lo = 0, hi = vec->count; lo < hi; )
      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
                                        base_from_object (encoding, ob),
                                        f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
search_object (struct object* ob, void *pc)
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)

      /* Despite the above comment, the normal reason to get here is
         that we've not processed this object before.  A quick range
         check is in order.  */
      if (pc < ob->pc_begin)
  if (ob->s.b.mixed_encoding)
    return binary_search_mixed_encoding_fdes (ob, pc);
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    return binary_search_unencoded_fdes (ob, pc);
  else
    return binary_search_single_encoding_fdes (ob, pc);
  /* Long slow laborious linear search, since we've no memory.  */
  if (ob->s.b.from_array)
      for (p = ob->u.array; *p; p++)
          fde *f = linear_search_fdes (ob, *p, pc);
  else
    return linear_search_fdes (ob, ob->u.single, pc);
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);
  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
        f = search_object (ob, pc);
  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
      unseen_objects = ob->next;
      f = search_object (ob, pc);
      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p; p = &(*p)->next)
        if ((*p)->pc_begin < ob->pc_begin)
  __gthread_mutex_unlock (&object_mutex);
  bases->tbase = ob->tbase;
  bases->dbase = ob->dbase;

  encoding = ob->s.b.encoding;
  if (ob->s.b.mixed_encoding)
    encoding = get_fde_encoding (f);
  read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
                                f->pc_begin, (_Unwind_Ptr *) &bases->func);