]> git.ipfire.org Git - thirdparty/gcc.git/blame - libgcc/unwind-dw2-fde.c
reload1: Change return type of predicate function from int to bool
[thirdparty/gcc.git] / libgcc / unwind-dw2-fde.c
CommitLineData
0021b564 1/* Subroutines needed for unwinding stack frames for exception handling. */
83ffe9cd 2/* Copyright (C) 1997-2023 Free Software Foundation, Inc.
0021b564
JM
3 Contributed by Jason Merrill <jason@cygnus.com>.
4
1322177d 5This file is part of GCC.
0021b564 6
1322177d
LB
7GCC is free software; you can redistribute it and/or modify it under
8the terms of the GNU General Public License as published by the Free
748086b7 9Software Foundation; either version 3, or (at your option) any later
1322177d 10version.
0021b564 11
1322177d
LB
12GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15for more details.
0021b564 16
748086b7
JJ
17Under Section 7 of GPL version 3, you are granted additional
18permissions described in the GCC Runtime Library Exception, version
193.1, as published by the Free Software Foundation.
20
21You should have received a copy of the GNU General Public License and
22a copy of the GCC Runtime Library Exception along with this program;
23see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24<http://www.gnu.org/licenses/>. */
0021b564 25
275b60d6 26#ifndef _Unwind_Find_FDE
52a11cbf
RH
27#include "tconfig.h"
28#include "tsystem.h"
4977bab6
ZW
29#include "coretypes.h"
30#include "tm.h"
852b75ed 31#include "libgcc_tm.h"
a80b0574 32#include "dwarf2.h"
e1f9550a 33#include "unwind.h"
bda33a6e 34#define NO_BASE_OF_ENCODED_VALUE
e1f9550a 35#include "unwind-pe.h"
52a11cbf
RH
36#include "unwind-dw2-fde.h"
37#include "gthr.h"
29991048
JJ
38#else
39#if (defined(__GTHREAD_MUTEX_INIT) || defined(__GTHREAD_MUTEX_INIT_FUNCTION)) \
40 && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
41#define ATOMIC_FDE_FAST_PATH 1
42#endif
275b60d6 43#endif
52a11cbf 44
d458f806
TN
45typedef __UINTPTR_TYPE__ uintptr_type;
46
6e80a1d1
TN
47#ifdef ATOMIC_FDE_FAST_PATH
48#include "unwind-dw2-btree.h"
49
50static struct btree registered_frames;
94ccaf62 51static bool in_shutdown;
6e80a1d1
TN
52
static void
release_registered_frames (void) __attribute__ ((destructor));

/* Destructor run at process/library teardown when the atomic fast path
   is in use.  Frees the lookup b-tree and flags that shutdown has
   begun so late deregistrations are tolerated.  */
static void
release_registered_frames (void)
{
  /* Release the b-tree and all frames.  Frame releases that happen
     later are silently ignored; __deregister_frame_info_bases checks
     in_shutdown before asserting that a lookup succeeded.  */
  btree_destroy (&registered_frames);
  in_shutdown = true;
}
63
64static void
d458f806 65get_pc_range (const struct object *ob, uintptr_type *range);
6e80a1d1
TN
66
67#else
386ebf75
TN
68/* Without fast path frame deregistration must always succeed. */
69static const int in_shutdown = 0;
6e80a1d1 70
e1f9550a
RH
71/* The unseen_objects list contains objects that have been registered
72 but not yet categorized in any way. The seen_objects list has had
0ab2a34f 73 its pc_begin and count fields initialized at minimum, and is sorted
e1f9550a
RH
74 by decreasing value of pc_begin. */
75static struct object *unseen_objects;
76static struct object *seen_objects;
6e56633d 77#endif
52a11cbf
RH
78
79#ifdef __GTHREAD_MUTEX_INIT
80static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
33e3e24d 81#define init_object_mutex_once()
52a11cbf 82#else
9c4271f3 83#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
52a11cbf 84static __gthread_mutex_t object_mutex;
52a11cbf 85
/* One-shot initializer for object_mutex on targets that provide
   __GTHREAD_MUTEX_INIT_FUNCTION but no static initializer.  */
static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

/* Ensure object_mutex is initialized exactly once, even when multiple
   threads race to register the first frame.  */
static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
9c4271f3
RG
98#else
99/* ??? Several targets include this file with stubbing parts of gthr.h
100 and expect no locking to be done. */
101#define init_object_mutex_once()
102static __gthread_mutex_t object_mutex;
103#endif
52a11cbf
RH
104#endif
105
106/* Called from crtbegin.o to register the unwind info for an object. */
107
/* Register the .eh_frame data starting at BEGIN, using caller-provided
   storage OB.  TBASE and DBASE are the base addresses used to decode
   DW_EH_PE_textrel / DW_EH_PE_datarel encoded pointers.  */
void
__register_frame_info_bases (const void *begin, struct object *ob,
			     void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return;

  /* pc_begin starts at the maximum address; classify_object_over_fdes
     lowers it as FDEs are scanned.  */
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

#ifdef ATOMIC_FDE_FAST_PATH
  /* Register the frame in the b-tree, keyed by its PC range.  */
  uintptr_type range[2];
  get_pc_range (ob, range);
  btree_insert (&registered_frames, range[0], range[1] - range[0], ob);
#else
  /* Legacy path: prepend to the unseen_objects list under the mutex.  */
  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
#endif
}
141
/* Register BEGIN using storage OB with no text/data base addresses
   (suitable when no textrel/datarel pointer encodings are used).  */
void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}
147
52a11cbf
RH
/* Register the .eh_frame data at BEGIN, allocating the bookkeeping
   object on the heap.  The matching __deregister_frame frees it.  */
void
__register_frame (void *begin)
{
  struct object *ob;

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  /* NOTE(review): the malloc result is passed on unchecked; on OOM the
     registration would dereference a null pointer.  Presumably deemed
     acceptable in libgcc startup paths — confirm before changing.  */
  ob = malloc (sizeof (struct object));
  __register_frame_info (begin, ob);
}
160
161/* Similar, but BEGIN is actually a pointer to a table of unwind entries
162 for different translation units. Called from the file generated by
163 collect2. */
164
/* Like __register_frame_info_bases, but BEGIN is a null-terminated
   array of pointers to separate .eh_frame blocks (one per translation
   unit), as emitted by collect2.  */
void
__register_frame_info_table_bases (void *begin, struct object *ob,
				   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  /* Mark the object as holding an array rather than a single block.  */
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

#ifdef ATOMIC_FDE_FAST_PATH
  /* Register the frame in the b-tree, keyed by its PC range.  */
  uintptr_type range[2];
  get_pc_range (ob, range);
  btree_insert (&registered_frames, range[0], range[1] - range[0], ob);
#else
  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
#endif
}
192
e1f9550a
RH
/* Table-of-tables registration with no text/data base addresses.  */
void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}
198
52a11cbf
RH
/* Register a table of unwind-entry pointers, allocating the
   bookkeeping object on the heap.  */
void
__register_frame_table (void *begin)
{
  /* NOTE(review): malloc result unchecked, as in __register_frame —
     a null OB would fault inside the registration call.  */
  struct object *ob = malloc (sizeof (struct object));
  __register_frame_info_table (begin, ob);
}
205
206/* Called from crtbegin.o to deregister the unwind info for an object. */
101fa48c
RH
207/* ??? Glibc has for a while now exported __register_frame_info and
208 __deregister_frame_info. If we call __register_frame_info_bases
209 from crtbegin (wherein it is declared weak), and this object does
210 not get pulled from libgcc.a for other reasons, then the
211 invocation of __deregister_frame_info will be resolved from glibc.
79d0dfa3 212 Since the registration did not happen there, we'll die.
101fa48c
RH
213
214 Therefore, declare a new deregistration entry point that does the
41077ce4 215 exact same thing, but will resolve to the same library as
101fa48c 216 implements __register_frame_info_bases. */
52a11cbf
RH
217
/* Remove the registration for the .eh_frame data at BEGIN and return
   the caller-owned object pointer that was registered with it (the
   caller may free it).  Returns null if nothing was registered.  */
void *
__deregister_frame_info_bases (const void *begin)
{
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return ob;

#ifdef ATOMIC_FDE_FAST_PATH
  /* Build a throwaway object describing BEGIN so the PC range it
     covers can be recomputed for the b-tree lookup.  */
  struct object lookupob;
  lookupob.tbase = 0;
  lookupob.dbase = 0;
  lookupob.u.single = begin;
  lookupob.s.i = 0;
  lookupob.s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  lookupob.fde_end = NULL;
#endif
  uintptr_type range[2];
  get_pc_range (&lookupob, range);

  /* And remove.  */
  ob = btree_remove (&registered_frames, range[0]);
  bool empty_table = (range[1] - range[0]) == 0;

  /* Deallocate the sort array if any.  */
  if (ob && ob->s.b.sorted)
    {
      free (ob->u.sort);
    }
#else
  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* First check the not-yet-categorized list; entries there still hold
     the raw BEGIN pointer in u.single.  */
  struct object **p;
  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
	ob = *p;
	*p = ob->next;
	goto out;
      }

  /* Then the seen list: sorted entries keep the original pointer in
     u.sort->orig_data, unsorted ones still in u.single.  */
  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
	if ((*p)->u.sort->orig_data == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    free (ob->u.sort);
	    goto out;
	  }
      }
    else
      {
	if ((*p)->u.single == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    goto out;
	  }
      }

 out:
  __gthread_mutex_unlock (&object_mutex);
  const int empty_table = 0; // The non-atomic path stores all tables.
#endif

  // If we didn't find anything in the lookup data structures then they
  // were either already destroyed or we tried to remove an empty range.
  gcc_assert (in_shutdown || (empty_table || ob));
  return (void *) ob;
}
294
/* Alternate deregistration entry point; exists so registration and
   deregistration resolve to the same library (see comment above).  */
void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}
101fa48c 300
52a11cbf
RH
/* Deregister BEGIN and free the object __register_frame allocated.  */
void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info (begin));
}
308
e1f9550a
RH
309\f
310/* Like base_of_encoded_value, but take the base from a struct object
311 instead of an _Unwind_Context. */
312
313static _Unwind_Ptr
6e80a1d1 314base_from_object (unsigned char encoding, const struct object *ob)
e1f9550a
RH
315{
316 if (encoding == DW_EH_PE_omit)
317 return 0;
318
319 switch (encoding & 0x70)
320 {
321 case DW_EH_PE_absptr:
322 case DW_EH_PE_pcrel:
099c8b17 323 case DW_EH_PE_aligned:
e1f9550a
RH
324 return 0;
325
326 case DW_EH_PE_textrel:
327 return (_Unwind_Ptr) ob->tbase;
328 case DW_EH_PE_datarel:
329 return (_Unwind_Ptr) ob->dbase;
79d0dfa3
NS
330 default:
331 gcc_unreachable ();
e1f9550a 332 }
e1f9550a
RH
333}
334
/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

static int
get_cie_encoding (const struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _uleb128_t utmp;
  _sleb128_t stmp;

  aug = cie->augmentation;
  p = aug + strlen ((const char *)aug) + 1; /* Skip the augmentation string.  */
  if (__builtin_expect (cie->version >= 4, 0))
    {
      /* DWARF 4+ CIEs carry address size and segment selector size
	 after the augmentation string.  */
      if (p[0] != sizeof (void *) || p[1] != 0)
	return DW_EH_PE_omit;	/* We are not prepared to handle unexpected
				   address sizes or segment selectors.  */
      p += 2;			/* Skip address size and segment size.  */
    }

  /* Without a 'z' augmentation there is no encoding field at all;
     pointers are plain absolute addresses.  */
  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = read_uleb128 (p, &utmp);	/* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);	/* Skip data alignment.  */
  if (cie->version == 1)	/* Skip return address column.  */
    p++;
  else
    p = read_uleb128 (p, &utmp);

  aug++;			/* Skip 'z' */
  p = read_uleb128 (p, &utmp);	/* Skip augmentation length.  */

  /* Walk the augmentation characters in step with their data bytes
     until 'R' (the FDE encoding) is found.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
	return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
	{
	  /* ??? Avoid dereferencing indirect pointers, since we're
	     faking the base address.  Gotta keep DW_EH_PE_aligned
	     intact, however.  */
	  p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
	}
      /* LSDA encoding.  */
      else if (*aug == 'L')
	p++;
      /* aarch64 b-key pointer authentication.  */
      else if (*aug == 'B')
	p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
	return DW_EH_PE_absptr;
      aug++;
    }
}
393
/* Return the pointer encoding used by FDE F, as recorded in the CIE
   it references.  */
static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  const struct dwarf_cie *cie = get_cie (f);
  return get_cie_encoding (cie);
}
399
52a11cbf 400\f
72dd050a
BH
401/* Sorting an array of FDEs by address.
402 (Ideally we would have the linker sort the FDEs so we don't have to do
403 it at run time. But the linkers are not yet prepared for this.) */
404
e1f9550a
RH
405/* Comparison routines. Three variants of increasing complexity. */
406
bde257ff 407static int
e1f9550a 408fde_unencoded_compare (struct object *ob __attribute__((unused)),
a30794da 409 const fde *x, const fde *y)
e1f9550a 410{
ce883f54
BE
411 _Unwind_Ptr x_ptr, y_ptr;
412 memcpy (&x_ptr, x->pc_begin, sizeof (_Unwind_Ptr));
413 memcpy (&y_ptr, y->pc_begin, sizeof (_Unwind_Ptr));
2f9ec5e5
HPN
414
415 if (x_ptr > y_ptr)
bde257ff 416 return 1;
2f9ec5e5 417 if (x_ptr < y_ptr)
bde257ff
RH
418 return -1;
419 return 0;
e1f9550a
RH
420}
421
bde257ff 422static int
a30794da 423fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
e1f9550a
RH
424{
425 _Unwind_Ptr base, x_ptr, y_ptr;
426
427 base = base_from_object (ob->s.b.encoding, ob);
428 read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
429 read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);
430
bde257ff
RH
431 if (x_ptr > y_ptr)
432 return 1;
433 if (x_ptr < y_ptr)
434 return -1;
435 return 0;
e1f9550a
RH
436}
437
bde257ff 438static int
a30794da 439fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
e1f9550a
RH
440{
441 int x_encoding, y_encoding;
442 _Unwind_Ptr x_ptr, y_ptr;
443
444 x_encoding = get_fde_encoding (x);
445 read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
446 x->pc_begin, &x_ptr);
447
448 y_encoding = get_fde_encoding (y);
449 read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
450 y->pc_begin, &y_ptr);
451
bde257ff
RH
452 if (x_ptr > y_ptr)
453 return 1;
454 if (x_ptr < y_ptr)
455 return -1;
456 return 0;
e1f9550a
RH
457}
458
a30794da 459typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);
e1f9550a 460
1c118c99
TN
461// The extractor functions compute the pointer values for a block of
462// fdes. The block processing hides the call overhead.
e1f9550a 463
1c118c99
TN
464static void
465fde_unencoded_extract (struct object *ob __attribute__ ((unused)),
466 _Unwind_Ptr *target, const fde **x, int count)
467{
468 for (int index = 0; index < count; ++index)
469 memcpy (target + index, x[index]->pc_begin, sizeof (_Unwind_Ptr));
470}
471
472static void
473fde_single_encoding_extract (struct object *ob, _Unwind_Ptr *target,
474 const fde **x, int count)
475{
476 _Unwind_Ptr base;
477
478 base = base_from_object (ob->s.b.encoding, ob);
479 for (int index = 0; index < count; ++index)
480 read_encoded_value_with_base (ob->s.b.encoding, base, x[index]->pc_begin,
481 target + index);
482}
483
484static void
485fde_mixed_encoding_extract (struct object *ob, _Unwind_Ptr *target,
486 const fde **x, int count)
487{
488 for (int index = 0; index < count; ++index)
489 {
490 int encoding = get_fde_encoding (x[index]);
491 read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
492 x[index]->pc_begin, target + index);
493 }
494}
495
496typedef void (*fde_extractor_t) (struct object *, _Unwind_Ptr *, const fde **,
497 int);
498
499// Data is is sorted using radix sort if possible, using an temporary
500// auxiliary data structure of the same size as the input. When running
501// out of memory do in-place heap sort.
72dd050a 502
e1f9550a 503struct fde_accumulator
72dd050a 504{
e1f9550a 505 struct fde_vector *linear;
1c118c99 506 struct fde_vector *aux;
e1f9550a 507};
52a11cbf 508
d593d1d2 509static inline int
e1f9550a 510start_fde_sort (struct fde_accumulator *accu, size_t count)
72dd050a 511{
e1f9550a
RH
512 size_t size;
513 if (! count)
514 return 0;
515
a30794da 516 size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
703ad42b 517 if ((accu->linear = malloc (size)))
e1f9550a
RH
518 {
519 accu->linear->count = 0;
1c118c99
TN
520 if ((accu->aux = malloc (size)))
521 accu->aux->count = 0;
e1f9550a
RH
522 return 1;
523 }
524 else
41077ce4 525 return 0;
72dd050a 526}
0021b564 527
72dd050a 528static inline void
a30794da 529fde_insert (struct fde_accumulator *accu, const fde *this_fde)
72dd050a 530{
e1f9550a
RH
531 if (accu->linear)
532 accu->linear->array[accu->linear->count++] = this_fde;
72dd050a
BH
533}
534
a30794da 535#define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)
80d83b16
JM
536
537/* Convert a semi-heap to a heap. A semi-heap is a heap except possibly
538 for the first (root) node; push it down to its rightful place. */
539
540static void
a30794da 541frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
80d83b16
JM
542 int lo, int hi)
543{
544 int i, j;
545
546 for (i = lo, j = 2*i+1;
547 j < hi;
548 j = 2*i+1)
549 {
550 if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
551 ++j;
552
553 if (fde_compare (ob, a[i], a[j]) < 0)
554 {
555 SWAP (a[i], a[j]);
556 i = j;
557 }
558 else
559 break;
560 }
561}
562
99b13ed3
JW
563/* This is O(n log(n)). BSD/OS defines heapsort in stdlib.h, so we must
564 use a name that does not conflict. */
e1f9550a
RH
565
/* Sort ERRATIC->array in place with heap sort, using FDE_COMPARE as
   the ordering.  Used as the fallback when no aux array could be
   allocated for radix sort.  */
static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
		struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  const fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
  size_t n = erratic->count;
  int m;

  /* Expand our heap incrementally from the end of the array, heapifying
     each resulting semi-heap as we go.  After each step, a[m] is the top
     of a heap.  */
  for (m = n/2-1; m >= 0; --m)
    frame_downheap (ob, fde_compare, a, m, n);

  /* Shrink our heap incrementally from the end of the array, first
     swapping out the largest element a[0] and then re-heapifying the
     resulting semi-heap.  After each step, a[0..m) is a heap.  */
  for (m = n-1; m >= 1; --m)
    {
      SWAP (a[0], a[m]);
      frame_downheap (ob, fde_compare, a, 0, m);
    }
#undef SWAP
}
// Radix sort data in V1 using V2 as aux memory. Runtime O(n).
// Least-significant-digit radix sort over the decoded pc_begin values,
// FANOUTBITS bits per round, with an early exit once a counting pass
// observes the data already sorted.
static inline void
fde_radixsort (struct object *ob, fde_extractor_t fde_extractor,
	       struct fde_vector *v1, struct fde_vector *v2)
{
#define FANOUTBITS 8
#define FANOUT (1 << FANOUTBITS)
#define BLOCKSIZE 128
  const unsigned rounds
    = (__CHAR_BIT__ * sizeof (_Unwind_Ptr) + FANOUTBITS - 1) / FANOUTBITS;
  const fde **a1 = v1->array, **a2 = v2->array;
  // ptrs[0] holds the previous block's last value so sortedness can be
  // checked across block boundaries; decoded values go in ptrs[1..].
  _Unwind_Ptr ptrs[BLOCKSIZE + 1];
  unsigned n = v1->count;
  for (unsigned round = 0; round != rounds; ++round)
    {
      unsigned counts[FANOUT] = {0};
      unsigned violations = 0;

      // Count the number of elements per bucket and check if we are already
      // sorted.
      _Unwind_Ptr last = 0;
      for (unsigned i = 0; i < n;)
	{
	  unsigned chunk = ((n - i) <= BLOCKSIZE) ? (n - i) : BLOCKSIZE;
	  fde_extractor (ob, ptrs + 1, a1 + i, chunk);
	  ptrs[0] = last;
	  for (unsigned j = 0; j < chunk; ++j)
	    {
	      unsigned b = (ptrs[j + 1] >> (round * FANOUTBITS)) & (FANOUT - 1);
	      counts[b]++;
	      // Use summation instead of an if to eliminate branches.
	      violations += ptrs[j + 1] < ptrs[j];
	    }
	  i += chunk;
	  last = ptrs[chunk];
	}

      // Stop if we are already sorted.
      if (!violations)
	{
	  break;
	}

      // Compute the prefix sum.
      unsigned sum = 0;
      for (unsigned i = 0; i != FANOUT; ++i)
	{
	  unsigned s = sum;
	  sum += counts[i];
	  counts[i] = s;
	}

      // Place all elements.
      for (unsigned i = 0; i < n;)
	{
	  unsigned chunk = ((n - i) <= BLOCKSIZE) ? (n - i) : BLOCKSIZE;
	  fde_extractor (ob, ptrs, a1 + i, chunk);
	  for (unsigned j = 0; j < chunk; ++j)
	    {
	      unsigned b = (ptrs[j] >> (round * FANOUTBITS)) & (FANOUT - 1);
	      a2[counts[b]++] = a1[i + j];
	    }
	  i += chunk;
	}

      // Swap a1 and a2.
      const fde **tmp = a1;
      a1 = a2;
      a2 = tmp;
    }
#undef BLOCKSIZE
#undef FANOUT
#undef FANOUTBITS

  // The data is in a1 now, move in place if needed.
  if (a1 != v1->array)
    memcpy (v1->array, a1, sizeof (const fde *) * n);
}
675
e1f9550a
RH
/* Finish sorting the accumulated FDE pointers: radix sort when the aux
   array was allocated, otherwise in-place heap sort.  COUNT is the
   expected number of accumulated entries (sanity-checked).  */
static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  gcc_assert (!accu->linear || accu->linear->count == count);

  if (accu->aux)
    {
      /* Pick the pc_begin extractor matching the object's encoding.  */
      fde_extractor_t fde_extractor;
      if (ob->s.b.mixed_encoding)
	fde_extractor = fde_mixed_encoding_extract;
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
	fde_extractor = fde_unencoded_extract;
      else
	fde_extractor = fde_single_encoding_extract;

      fde_radixsort (ob, fde_extractor, accu->linear, accu->aux);
      free (accu->aux);
    }
  else
    {
      /* Pick the comparator matching the object's encoding.  */
      fde_compare_t fde_compare;
      if (ob->s.b.mixed_encoding)
	fde_compare = fde_mixed_encoding_compare;
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
	fde_compare = fde_unencoded_compare;
      else
	fde_compare = fde_single_encoding_compare;

      /* We've not managed to malloc an aux array,
	 so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}
709
6e80a1d1
TN
/* Inspect the fde array beginning at this_fde.  This
   function can be used either in query mode (RANGE is
   not null, OB is const), or in update mode (RANGE is
   null, OB is modified).  In query mode the function computes
   the range of PC values and stores it in RANGE.  In
   update mode it updates encoding, mixed_encoding, and pc_begin
   for OB.  Return the number of fdes encountered along the way,
   or (size_t)-1 for an unhandled CIE (omitted encoding).  */

static size_t
classify_object_over_fdes (struct object *ob, const fde *this_fde,
			   uintptr_type *range)
{
  const struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
	 objects for later.  The CIE lookup is cached across
	 consecutive FDEs sharing a CIE.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
	{
	  last_cie = this_cie;
	  encoding = get_cie_encoding (this_cie);
	  if (encoding == DW_EH_PE_omit)
	    return -1;
	  base = base_from_object (encoding, ob);
	  if (!range)
	    {
	      if (ob->s.b.encoding == DW_EH_PE_omit)
		ob->s.b.encoding = encoding;
	      else if (ob->s.b.encoding != encoding)
		ob->s.b.mixed_encoding = 1;
	    }
	}

      const unsigned char *p;
      p = read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
					&pc_begin);

      /* Take care to ignore link-once functions that were removed.
	 In these cases, the function address will be NULL, but if
	 the encoding is smaller than a pointer a true NULL may not
	 be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
	mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
      else
	mask = -1;

      if ((pc_begin & mask) == 0)
	continue;

      count += 1;
      if (range)
	{
	  /* Query mode: widen RANGE to cover this FDE's PC span.  */
	  _Unwind_Ptr pc_range, pc_end;
	  read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
	  pc_end = pc_begin + pc_range;
	  if ((!range[0]) && (!range[1]))
	    {
	      range[0] = pc_begin;
	      range[1] = pc_end;
	    }
	  else
	    {
	      if (pc_begin < range[0])
		range[0] = pc_begin;
	      if (pc_end > range[1])
		range[1] = pc_end;
	    }
	}
      else
	{
	  /* Update mode: track the minimum pc_begin on OB.  */
	  if ((void *) pc_begin < ob->pc_begin)
	    ob->pc_begin = (void *) pc_begin;
	}
    }

  return count;
}
800
/* Walk the FDE block at THIS_FDE and insert every live FDE (skipping
   CIEs and deleted link-once entries) into the accumulator.  Assumes
   classify_object_over_fdes already ran so OB's encoding bits are
   valid.  */
static void
add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      if (ob->s.b.mixed_encoding)
	{
	  /* Determine the encoding for this FDE.  Note mixed encoded
	     objects for later.  */
	  this_cie = get_cie (this_fde);
	  if (this_cie != last_cie)
	    {
	      last_cie = this_cie;
	      encoding = get_cie_encoding (this_cie);
	      base = base_from_object (encoding, ob);
	    }
	}

      if (encoding == DW_EH_PE_absptr)
	{
	  /* Unencoded pointer; may be unaligned, so copy byte-wise.  */
	  _Unwind_Ptr ptr;
	  memcpy (&ptr, this_fde->pc_begin, sizeof (_Unwind_Ptr));
	  if (ptr == 0)
	    continue;
	}
      else
	{
	  _Unwind_Ptr pc_begin, mask;

	  read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
					&pc_begin);

	  /* Take care to ignore link-once functions that were removed.
	     In these cases, the function address will be NULL, but if
	     the encoding is smaller than a pointer a true NULL may not
	     be representable.  Assume 0 in the representable bits is NULL.  */
	  mask = size_of_encoded_value (encoding);
	  if (mask < sizeof (void *))
	    mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
	  else
	    mask = -1;

	  if ((pc_begin & mask) == 0)
	    continue;
	}

      fde_insert (accu, this_fde);
    }
}
860
52a11cbf
RH
861/* Set up a sorted array of pointers to FDEs for a loaded object. We
862 count up the entries before allocating the array because it's likely to
863 be faster. We can be called multiple times, should we have failed to
864 allocate a sorted fde array on a previous occasion. */
0021b564 865
e1f9550a
RH
/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */
static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
	{
	  /* Sum the FDE counts over every table in the array.  */
	  fde **p = ob->u.array;
	  for (count = 0; *p; ++p)
	    {
	      size_t cur_count = classify_object_over_fdes (ob, *p, NULL);
	      if (cur_count == (size_t) -1)
		goto unhandled_fdes;
	      count += cur_count;
	    }
	}
      else
	{
	  count = classify_object_over_fdes (ob, ob->u.single, NULL);
	  if (count == (size_t) -1)
	    {
	      /* Unhandled CIE: neuter the object so lookups find an
		 empty, terminated FDE list instead of bad data.  */
	      static const fde terminator;
	    unhandled_fdes:
	      ob->s.i = 0;
	      ob->s.b.encoding = DW_EH_PE_omit;
	      ob->u.single = &terminator;
	      return;
	    }
	}

      /* The count field we have in the main struct object is somewhat
	 limited, but should suffice for virtually all cases.  If the
	 counted value doesn't fit, re-write a zero.  The worst that
	 happens is that we re-count next time -- admittedly non-trivial
	 in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
	ob->s.b.count = 0;
    }

  /* Allocation failure: stay unsorted; a later lookup retries.  */
  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
	add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

#ifdef ATOMIC_FDE_FAST_PATH
  // We must update the sorted bit with an atomic operation
  struct object tmp;
  tmp.s.b = ob->s.b;
  tmp.s.b.sorted = 1;
  __atomic_store (&(ob->s.b), &(tmp.s.b), __ATOMIC_RELEASE);
#else
  ob->s.b.sorted = 1;
#endif
}
939
6e80a1d1
TN
#ifdef ATOMIC_FDE_FAST_PATH
/* Get the PC range for lookup: store [begin, end) of the PC values
   covered by OB's FDEs into RANGE[0]/RANGE[1] (both 0 if none).  */
static void
get_pc_range (const struct object *ob, uintptr_type *range)
{
  // It is safe to cast to non-const object* here as
  // classify_object_over_fdes does not modify ob in query mode.
  struct object *ncob = (struct object *) (uintptr_type) ob;
  range[0] = range[1] = 0;
  if (ob->s.b.sorted)
    {
      /* Sorted objects keep the original FDE pointer in the sort
	 vector, not in u.single.  */
      classify_object_over_fdes (ncob, ob->u.sort->orig_data, range);
    }
  else if (ob->s.b.from_array)
    {
      fde **p = ob->u.array;
      for (; *p; ++p)
	classify_object_over_fdes (ncob, *p, range);
    }
  else
    {
      classify_object_over_fdes (ncob, ob->u.single, range);
    }
}
#endif
965
e1f9550a
RH
966/* A linear search through a set of FDEs for the given PC. This is
967 used when there was insufficient memory to allocate and sort an
968 array. */
969
a30794da
AJ
/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  Returns the matching FDE or null.  */
static const fde *
linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      if (ob->s.b.mixed_encoding)
	{
	  /* Determine the encoding for this FDE.  Note mixed encoded
	     objects for later.  */
	  this_cie = get_cie (this_fde);
	  if (this_cie != last_cie)
	    {
	      last_cie = this_cie;
	      encoding = get_cie_encoding (this_cie);
	      base = base_from_object (encoding, ob);
	    }
	}

      if (encoding == DW_EH_PE_absptr)
	{
	  /* Raw pointers: pc_begin and pc_range sit side by side.  */
	  const _Unwind_Ptr *pc_array = (const _Unwind_Ptr *) this_fde->pc_begin;
	  pc_begin = pc_array[0];
	  pc_range = pc_array[1];
	  if (pc_begin == 0)
	    continue;
	}
      else
	{
	  _Unwind_Ptr mask;
	  const unsigned char *p;

	  p = read_encoded_value_with_base (encoding, base,
					    this_fde->pc_begin, &pc_begin);
	  read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

	  /* Take care to ignore link-once functions that were removed.
	     In these cases, the function address will be NULL, but if
	     the encoding is smaller than a pointer a true NULL may not
	     be representable.  Assume 0 in the representable bits is NULL.  */
	  mask = size_of_encoded_value (encoding);
	  if (mask < sizeof (void *))
	    mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
	  else
	    mask = -1;

	  if ((pc_begin & mask) == 0)
	    continue;
	}

      /* Unsigned wraparound makes this a single range check.  */
      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
	return this_fde;
    }

  return NULL;
}
1036
1037/* Binary search for an FDE containing the given PC. Here are three
1038 implementations of increasing complexity. */
1039
a30794da 1040static inline const fde *
e1f9550a
RH
1041binary_search_unencoded_fdes (struct object *ob, void *pc)
1042{
1043 struct fde_vector *vec = ob->u.sort;
52a11cbf 1044 size_t lo, hi;
41077ce4 1045
e1f9550a
RH
1046 for (lo = 0, hi = vec->count; lo < hi; )
1047 {
1048 size_t i = (lo + hi) / 2;
5f754896 1049 const fde *const f = vec->array[i];
ce883f54
BE
1050 void *pc_begin;
1051 uaddr pc_range;
1052 memcpy (&pc_begin, (const void * const *) f->pc_begin, sizeof (void *));
1053 memcpy (&pc_range, (const uaddr *) f->pc_begin + 1, sizeof (uaddr));
e1f9550a
RH
1054
1055 if (pc < pc_begin)
1056 hi = i;
1057 else if (pc >= pc_begin + pc_range)
1058 lo = i + 1;
1059 else
1060 return f;
1061 }
0021b564 1062
e1f9550a
RH
1063 return NULL;
1064}
154bba13 1065
a30794da 1066static inline const fde *
e1f9550a
RH
1067binary_search_single_encoding_fdes (struct object *ob, void *pc)
1068{
1069 struct fde_vector *vec = ob->u.sort;
1070 int encoding = ob->s.b.encoding;
1071 _Unwind_Ptr base = base_from_object (encoding, ob);
1072 size_t lo, hi;
41077ce4 1073
e1f9550a 1074 for (lo = 0, hi = vec->count; lo < hi; )
0021b564 1075 {
e1f9550a 1076 size_t i = (lo + hi) / 2;
a30794da 1077 const fde *f = vec->array[i];
e1f9550a 1078 _Unwind_Ptr pc_begin, pc_range;
ca29916b 1079 const unsigned char *p;
e1f9550a
RH
1080
1081 p = read_encoded_value_with_base (encoding, base, f->pc_begin,
1082 &pc_begin);
1083 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
1084
e9d1b155 1085 if ((_Unwind_Ptr) pc < pc_begin)
e1f9550a 1086 hi = i;
e9d1b155 1087 else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
e1f9550a
RH
1088 lo = i + 1;
1089 else
1090 return f;
52a11cbf 1091 }
0021b564 1092
e1f9550a
RH
1093 return NULL;
1094}
1095
a30794da 1096static inline const fde *
e1f9550a
RH
1097binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
1098{
1099 struct fde_vector *vec = ob->u.sort;
1100 size_t lo, hi;
41077ce4 1101
e1f9550a 1102 for (lo = 0, hi = vec->count; lo < hi; )
52a11cbf 1103 {
e1f9550a 1104 size_t i = (lo + hi) / 2;
a30794da 1105 const fde *f = vec->array[i];
e1f9550a 1106 _Unwind_Ptr pc_begin, pc_range;
ca29916b 1107 const unsigned char *p;
e1f9550a
RH
1108 int encoding;
1109
1110 encoding = get_fde_encoding (f);
1111 p = read_encoded_value_with_base (encoding,
1112 base_from_object (encoding, ob),
1113 f->pc_begin, &pc_begin);
1114 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
1115
e9d1b155 1116 if ((_Unwind_Ptr) pc < pc_begin)
e1f9550a 1117 hi = i;
e9d1b155 1118 else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
e1f9550a
RH
1119 lo = i + 1;
1120 else
1121 return f;
52a11cbf 1122 }
0021b564 1123
e1f9550a
RH
1124 return NULL;
1125}
52a11cbf 1126
a30794da 1127static const fde *
e1f9550a
RH
1128search_object (struct object* ob, void *pc)
1129{
6e80a1d1
TN
1130 /* The fast path initializes objects eagerly to avoid locking.
1131 * On the slow path we initialize them now */
1132#ifndef ATOMIC_FDE_FAST_PATH
e1f9550a
RH
1133 /* If the data hasn't been sorted, try to do this now. We may have
1134 more memory available than last time we tried. */
1135 if (! ob->s.b.sorted)
52a11cbf 1136 {
e1f9550a 1137 init_object (ob);
52a11cbf 1138
e1f9550a
RH
1139 /* Despite the above comment, the normal reason to get here is
1140 that we've not processed this object before. A quick range
1141 check is in order. */
1142 if (pc < ob->pc_begin)
1143 return NULL;
1144 }
6e80a1d1 1145#endif
e1f9550a
RH
1146
1147 if (ob->s.b.sorted)
1148 {
1149 if (ob->s.b.mixed_encoding)
1150 return binary_search_mixed_encoding_fdes (ob, pc);
1151 else if (ob->s.b.encoding == DW_EH_PE_absptr)
1152 return binary_search_unencoded_fdes (ob, pc);
1153 else
1154 return binary_search_single_encoding_fdes (ob, pc);
0021b564 1155 }
52a11cbf
RH
1156 else
1157 {
fa10beec 1158 /* Long slow laborious linear search, cos we've no memory. */
e1f9550a 1159 if (ob->s.b.from_array)
41077ce4
KH
1160 {
1161 fde **p;
e1f9550a
RH
1162 for (p = ob->u.array; *p ; p++)
1163 {
a30794da 1164 const fde *f = linear_search_fdes (ob, *p, pc);
41077ce4 1165 if (f)
e1f9550a 1166 return f;
41077ce4 1167 }
e1f9550a
RH
1168 return NULL;
1169 }
52a11cbf 1170 else
e1f9550a
RH
1171 return linear_search_fdes (ob, ob->u.single, pc);
1172 }
1173}
1174
6e56633d
TN
#ifdef ATOMIC_FDE_FAST_PATH

/* Return true if OB has already been initialized (its FDEs sorted).  */
static inline bool
is_object_initialized (struct object *ob)
{
  /* The 'sorted' flag lives in a bitfield, so we cannot load it
     atomically on its own; load the whole containing struct with
     acquire semantics and inspect the snapshot.  */
  struct object snapshot;
  __atomic_load (&(ob->s.b), &(snapshot.s.b), __ATOMIC_ACQUIRE);
  return snapshot.s.b.sorted;
}

#endif
1189
a30794da 1190const fde *
e1f9550a
RH
1191_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
1192{
1193 struct object *ob;
a30794da 1194 const fde *f = NULL;
e1f9550a 1195
29991048 1196#ifdef ATOMIC_FDE_FAST_PATH
d458f806 1197 ob = btree_lookup (&registered_frames, (uintptr_type) pc);
6e80a1d1 1198 if (!ob)
29991048 1199 return NULL;
6e80a1d1 1200
6e56633d
TN
1201 // Initialize the object lazily
1202 if (!is_object_initialized (ob))
1203 {
1204 // Check again under mutex
1205 init_object_mutex_once ();
1206 __gthread_mutex_lock (&object_mutex);
1207
1208 if (!ob->s.b.sorted)
1209 {
1210 init_object (ob);
1211 }
1212
1213 __gthread_mutex_unlock (&object_mutex);
1214 }
1215
6e80a1d1
TN
1216 f = search_object (ob, pc);
1217#else
29991048 1218
e1f9550a
RH
1219 init_object_mutex_once ();
1220 __gthread_mutex_lock (&object_mutex);
1221
1222 /* Linear search through the classified objects, to find the one
e3aafbad 1223 containing the pc. Note that pc_begin is sorted descending, and
e1f9550a
RH
1224 we expect objects to be non-overlapping. */
1225 for (ob = seen_objects; ob; ob = ob->next)
1226 if (pc >= ob->pc_begin)
1227 {
1228 f = search_object (ob, pc);
1229 if (f)
1230 goto fini;
1231 break;
1232 }
1233
1234 /* Classify and search the objects we've not yet processed. */
1235 while ((ob = unseen_objects))
1236 {
1237 struct object **p;
1238
1239 unseen_objects = ob->next;
1240 f = search_object (ob, pc);
1241
1242 /* Insert the object into the classified list. */
1243 for (p = &seen_objects; *p ; p = &(*p)->next)
1244 if ((*p)->pc_begin < ob->pc_begin)
1245 break;
1246 ob->next = *p;
1247 *p = ob;
1248
1249 if (f)
1250 goto fini;
1251 }
1252
1253 fini:
1254 __gthread_mutex_unlock (&object_mutex);
6e80a1d1 1255#endif
e1f9550a
RH
1256
1257 if (f)
1258 {
1259 int encoding;
950ccbc4 1260 _Unwind_Ptr func;
e1f9550a
RH
1261
1262 bases->tbase = ob->tbase;
1263 bases->dbase = ob->dbase;
154bba13 1264
e1f9550a
RH
1265 encoding = ob->s.b.encoding;
1266 if (ob->s.b.mixed_encoding)
1267 encoding = get_fde_encoding (f);
1268 read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
950ccbc4
NS
1269 f->pc_begin, &func);
1270 bases->func = (void *) func;
52a11cbf 1271 }
0021b564 1272
e1f9550a 1273 return f;
a3fd4e75 1274}