Fix multilib build of libgcc on Linux/sparc.
[gcc.git] / libgcc / unwind-dw2-fde.c
1 /* Subroutines needed for unwinding stack frames for exception handling. */
2 /* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008,
3 2009, 2010, 2011 Free Software Foundation, Inc.
4 Contributed by Jason Merrill <jason@cygnus.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 Under Section 7 of GPL version 3, you are granted additional
19 permissions described in the GCC Runtime Library Exception, version
20 3.1, as published by the Free Software Foundation.
21
22 You should have received a copy of the GNU General Public License and
23 a copy of the GCC Runtime Library Exception along with this program;
24 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
25 <http://www.gnu.org/licenses/>. */
26
27 #ifndef _Unwind_Find_FDE
28 #include "tconfig.h"
29 #include "tsystem.h"
30 #include "coretypes.h"
31 #include "tm.h"
32 #include "libgcc_tm.h"
33 #include "dwarf2.h"
34 #include "unwind.h"
35 #define NO_BASE_OF_ENCODED_VALUE
36 #include "unwind-pe.h"
37 #include "unwind-dw2-fde.h"
38 #include "gthr.h"
39 #endif
40
/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;

/* Mutex guarding both lists above.  Statically initialized when the
   gthreads port supplies an initializer macro; otherwise initialized
   lazily through init_object_mutex_once below.  */
#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#else
static __gthread_mutex_t object_mutex;
#endif
53
#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
/* Callback for __gthread_once: run the port's mutex init function.  */
static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

/* Ensure object_mutex is initialized exactly once before first use,
   in a thread-safe manner via __gthread_once.  */
static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
/* Static initialization (or a single-threaded target) suffices.  */
#define init_object_mutex_once()
#endif
70
/* Called from crtbegin.o to register the unwind info for an object.
   BEGIN points at the object's .eh_frame data; OB is caller-provided
   storage (typically static in crtbegin) that this module will link
   into the unseen_objects list.  TBASE/DBASE are the text and data
   base addresses used for DW_EH_PE_textrel/datarel encodings.  */

void
__register_frame_info_bases (const void *begin, struct object *ob,
			     void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return;

  /* Initialize the descriptor; pc_begin starts at the maximum address
     so that classification can minimize over the FDEs' start PCs.  */
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Push onto the unclassified list; sorting is deferred until the
     first unwind through this object (see init_object).  */
  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}
99
/* Register the unwind info at BEGIN for OB with no text/data bases.  */

void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, NULL, NULL);
}
105
106 void
107 __register_frame (void *begin)
108 {
109 struct object *ob;
110
111 /* If .eh_frame is empty, don't register at all. */
112 if (*(uword *) begin == 0)
113 return;
114
115 ob = malloc (sizeof (struct object));
116 __register_frame_info (begin, ob);
117 }
118
/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  TBASE/DBASE are the text and data base addresses for
   DW_EH_PE_textrel/datarel encodings.  */

void
__register_frame_info_table_bases (void *begin, struct object *ob,
				   void *tbase, void *dbase)
{
  /* Same initialization as __register_frame_info_bases, plus the
     from_array flag so searches iterate over the table of pointers.  */
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}
143
/* Register the unwind-info table at BEGIN for OB, with no bases.  */

void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, NULL, NULL);
}
149
150 void
151 __register_frame_table (void *begin)
152 {
153 struct object *ob = malloc (sizeof (struct object));
154 __register_frame_info_table (begin, ob);
155 }
156
/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll die.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */

void *
__deregister_frame_info_bases (const void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* The object may still be unclassified; match on the raw pointer.  */
  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
	ob = *p;
	*p = ob->next;
	goto out;
      }

  /* Otherwise search the classified list.  A sorted object's u field
     has been repurposed for the sort vector, so match on the saved
     orig_data key instead of u.single.  */
  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
	if ((*p)->u.sort->orig_data == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    /* Free the sort vector we allocated in init_object; the
	       object descriptor itself belongs to the caller.  */
	    free (ob->u.sort);
	    goto out;
	  }
      }
    else
      {
	if ((*p)->u.single == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    goto out;
	  }
      }

 out:
  __gthread_mutex_unlock (&object_mutex);
  /* Deregistering something never registered is a caller bug.  */
  gcc_assert (ob);
  return (void *) ob;
}
216
/* Deregister the unwind info at BEGIN; thin alias for the _bases
   entry point so both resolve within the same library.  */

void *
__deregister_frame_info (const void *begin)
{
  void *ob = __deregister_frame_info_bases (begin);
  return ob;
}
222
223 void
224 __deregister_frame (void *begin)
225 {
226 /* If .eh_frame is empty, we haven't registered. */
227 if (*(uword *) begin != 0)
228 free (__deregister_frame_info (begin));
229 }
230
231 \f
232 /* Like base_of_encoded_value, but take the base from a struct object
233 instead of an _Unwind_Context. */
234
235 static _Unwind_Ptr
236 base_from_object (unsigned char encoding, struct object *ob)
237 {
238 if (encoding == DW_EH_PE_omit)
239 return 0;
240
241 switch (encoding & 0x70)
242 {
243 case DW_EH_PE_absptr:
244 case DW_EH_PE_pcrel:
245 case DW_EH_PE_aligned:
246 return 0;
247
248 case DW_EH_PE_textrel:
249 return (_Unwind_Ptr) ob->tbase;
250 case DW_EH_PE_datarel:
251 return (_Unwind_Ptr) ob->dbase;
252 default:
253 gcc_unreachable ();
254 }
255 }
256
/* Return the FDE pointer encoding from the CIE, or DW_EH_PE_omit if
   the CIE uses features we cannot handle.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

static int
get_cie_encoding (const struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _uleb128_t utmp;
  _sleb128_t stmp;

  aug = cie->augmentation;
  p = aug + strlen ((const char *)aug) + 1; /* Skip the augmentation string.  */
  if (__builtin_expect (cie->version >= 4, 0))
    {
      /* DWARF 4+ CIEs carry explicit address_size and segment_size
	 octets after the augmentation string.  */
      if (p[0] != sizeof (void *) || p[1] != 0)
	return DW_EH_PE_omit;		/* We are not prepared to handle unexpected
					   address sizes or segment selectors.  */
      p += 2;				/* Skip address size and segment size.  */
    }

  /* Without a 'z' augmentation there is no 'R' entry, so pointers are
     plain absolute addresses.  */
  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = read_uleb128 (p, &utmp);		/* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);		/* Skip data alignment.  */
  if (cie->version == 1)		/* Skip return address column.  */
    p++;
  else
    p = read_uleb128 (p, &utmp);

  aug++;				/* Skip 'z' */
  p = read_uleb128 (p, &utmp);		/* Skip augmentation length.  */

  /* Walk the augmentation characters in lockstep with their operands
     until we reach 'R', which holds the FDE pointer encoding byte.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
	return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
	{
	  /* ??? Avoid dereferencing indirect pointers, since we're
	     faking the base address.  Gotta keep DW_EH_PE_aligned
	     intact, however.  */
	  p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
	}
      /* LSDA encoding.  */
      else if (*aug == 'L')
	p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
	return DW_EH_PE_absptr;
      aug++;
    }
}
312
/* Return the pointer encoding for FDE F, read from its parent CIE.  */

static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  const struct dwarf_cie *cie = get_cie (f);
  return get_cie_encoding (cie);
}
318
319 \f
320 /* Sorting an array of FDEs by address.
321 (Ideally we would have the linker sort the FDEs so we don't have to do
322 it at run time. But the linkers are not yet prepared for this.) */
323
324 /* Comparison routines. Three variants of increasing complexity. */
325
326 static int
327 fde_unencoded_compare (struct object *ob __attribute__((unused)),
328 const fde *x, const fde *y)
329 {
330 _Unwind_Ptr x_ptr, y_ptr;
331 memcpy (&x_ptr, x->pc_begin, sizeof (_Unwind_Ptr));
332 memcpy (&y_ptr, y->pc_begin, sizeof (_Unwind_Ptr));
333
334 if (x_ptr > y_ptr)
335 return 1;
336 if (x_ptr < y_ptr)
337 return -1;
338 return 0;
339 }
340
341 static int
342 fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
343 {
344 _Unwind_Ptr base, x_ptr, y_ptr;
345
346 base = base_from_object (ob->s.b.encoding, ob);
347 read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
348 read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);
349
350 if (x_ptr > y_ptr)
351 return 1;
352 if (x_ptr < y_ptr)
353 return -1;
354 return 0;
355 }
356
357 static int
358 fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
359 {
360 int x_encoding, y_encoding;
361 _Unwind_Ptr x_ptr, y_ptr;
362
363 x_encoding = get_fde_encoding (x);
364 read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
365 x->pc_begin, &x_ptr);
366
367 y_encoding = get_fde_encoding (y);
368 read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
369 y->pc_begin, &y_ptr);
370
371 if (x_ptr > y_ptr)
372 return 1;
373 if (x_ptr < y_ptr)
374 return -1;
375 return 0;
376 }
377
/* Signature shared by the three comparison routines above.  */
typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);


/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur. They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */

/* Working storage for the sort.  LINEAR first collects every FDE, then
   fde_split leaves the in-order run in LINEAR and the rest in ERRATIC.  */
struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};
397
/* Allocate the sort vectors for COUNT fdes.  Return nonzero iff at
   least the linear vector was obtained.  Note that a failed erratic
   allocation still returns success: accu->erratic is left null and
   end_fde_sort falls back to heap-sorting the linear vector alone.  */

static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;
  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
  if ((accu->linear = malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = malloc (size)))
	accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}
416
417 static inline void
418 fde_insert (struct fde_accumulator *accu, const fde *this_fde)
419 {
420 if (accu->linear)
421 accu->linear->array[accu->linear->count++] = this_fde;
422 }
423
/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC. This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries.  We construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */

static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
	   struct fde_vector *linear, struct fde_vector *erratic)
{
  /* MARKER's address serves as the end-of-chain sentinel; its value is
     never read.  */
  static const fde *marker;
  size_t count = linear->count;
  const fde *const *chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct. Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  gcc_assert (sizeof (const fde *) == sizeof (const fde **));

  for (i = 0; i < count; i++)
    {
      const fde *const *probe;

      /* Pop chain entries that compare greater than linear->array[i];
	 those positions are thereby marked (NULL) as erratic.  */
      for (probe = chain_end;
	   probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
	   probe = chain_end)
	{
	  chain_end = (const fde *const*) erratic->array[probe - linear->array];
	  erratic->array[probe - linear->array] = NULL;
	}
      /* Link this element into the chain of in-order candidates.  */
      erratic->array[i] = (const fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}
476
/* Exchange two fde pointers; used by the heapsort routines below.  */
#define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)

/* Convert a semi-heap to a heap.  A semi-heap is a heap except possibly
   for the first (root) node; push it down to its rightful place.
   A is the backing array; the heap occupies indices [LO, HI).  */

static void
frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
		int lo, int hi)
{
  int i, j;

  /* J tracks the left child of I; stop once I is a leaf.  */
  for (i = lo, j = 2*i+1;
       j < hi;
       j = 2*i+1)
    {
      /* Pick the larger of the two children.  */
      if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
	++j;

      /* Sink the root one level if it is smaller than that child.  */
      if (fde_compare (ob, a[i], a[j]) < 0)
	{
	  SWAP (a[i], a[j]);
	  i = j;
	}
      else
	break;
    }
}
504
/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  Sorts ERRATIC in place in
   ascending order according to FDE_COMPARE.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
		struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  const fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
  size_t n = erratic->count;
  int m;

  /* Expand our heap incrementally from the end of the array, heapifying
     each resulting semi-heap as we go.  After each step, a[m] is the top
     of a heap.  */
  for (m = n/2-1; m >= 0; --m)
    frame_downheap (ob, fde_compare, a, m, n);

  /* Shrink our heap incrementally from the end of the array, first
     swapping out the largest element a[0] and then re-heapifying the
     resulting semi-heap.  After each step, a[0..m) is a heap.  */
  for (m = n-1; m >= 1; --m)
    {
      SWAP (a[0], a[m]);
      frame_downheap (ob, fde_compare, a, 0, m);
    }
#undef SWAP
}
538
/* Merge V1 and V2, both sorted, and put the result into V1.
   V1 must have room for v1->count + v2->count entries.  The merge runs
   from the high end so elements of V1 are shifted up in place.  */
static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
	   struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  const fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do
	{
	  /* Shift V1 entries greater than v2->array[i2] up past the
	     slots reserved for the remaining V2 entries, then drop
	     the V2 element into the gap.  */
	  i2--;
	  fde2 = v2->array[i2];
	  while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
	    {
	      v1->array[i1+i2] = v1->array[i1-1];
	      i1--;
	    }
	  v1->array[i1+i2] = fde2;
	}
      while (i2 > 0);
      v1->count += v2->count;
    }
}
566
/* Finish the sort started by start_fde_sort: choose the comparison
   routine matching OB's encoding situation, split the collected FDEs
   into a linear run plus erratic leftovers, heap-sort the leftovers,
   and merge them back.  COUNT is the expected total for sanity checks.  */

static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  gcc_assert (!accu->linear || accu->linear->count == count);

  /* Cheapest comparator that is still correct for this object.  */
  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      gcc_assert (accu->linear->count + accu->erratic->count == count);
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
	 so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}
596
597 \f
/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way, or (size_t)-1 if a CIE with an
   unsupported encoding is found.  */

static size_t
classify_object_over_fdes (struct object *ob, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
	 objects for later.  CIE lookups are cached since consecutive
	 FDEs usually share a CIE.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
	{
	  last_cie = this_cie;
	  encoding = get_cie_encoding (this_cie);
	  if (encoding == DW_EH_PE_omit)
	    return -1;
	  base = base_from_object (encoding, ob);
	  if (ob->s.b.encoding == DW_EH_PE_omit)
	    ob->s.b.encoding = encoding;
	  else if (ob->s.b.encoding != encoding)
	    ob->s.b.mixed_encoding = 1;
	}

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
				    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
	 In these cases, the function address will be NULL, but if
	 the encoding is smaller than a pointer a true NULL may not
	 be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
	mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
      else
	mask = -1;

      if ((pc_begin & mask) == 0)
	continue;

      /* Track the lowest live start PC for the object's range check.  */
      count += 1;
      if ((void *) pc_begin < ob->pc_begin)
	ob->pc_begin = (void *) pc_begin;
    }

  return count;
}
658
/* Walk the fde array beginning at THIS_FDE and insert every live FDE
   into ACCU's linear vector, skipping CIEs and deleted link-once
   entries.  Assumes classify_object_over_fdes has already set OB's
   encoding bits.  */

static void
add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      if (ob->s.b.mixed_encoding)
	{
	  /* Determine the encoding for this FDE.  Note mixed encoded
	     objects for later.  */
	  this_cie = get_cie (this_fde);
	  if (this_cie != last_cie)
	    {
	      last_cie = this_cie;
	      encoding = get_cie_encoding (this_cie);
	      base = base_from_object (encoding, ob);
	    }
	}

      if (encoding == DW_EH_PE_absptr)
	{
	  /* Unencoded: a zero pc_begin marks a removed function.
	     Copy via memcpy since the data may be unaligned.  */
	  _Unwind_Ptr ptr;
	  memcpy (&ptr, this_fde->pc_begin, sizeof (_Unwind_Ptr));
	  if (ptr == 0)
	    continue;
	}
      else
	{
	  _Unwind_Ptr pc_begin, mask;

	  read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
					&pc_begin);

	  /* Take care to ignore link-once functions that were removed.
	     In these cases, the function address will be NULL, but if
	     the encoding is smaller than a pointer a true NULL may not
	     be representable.  Assume 0 in the representable bits is NULL.  */
	  mask = size_of_encoded_value (encoding);
	  if (mask < sizeof (void *))
	    mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
	  else
	    mask = -1;

	  if ((pc_begin & mask) == 0)
	    continue;
	}

      fde_insert (accu, this_fde);
    }
}
718
/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  /* A cached nonzero count from an earlier attempt skips re-counting.  */
  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
	{
	  /* Sum the counts over every table in the array.  */
	  fde **p = ob->u.array;
	  for (count = 0; *p; ++p)
	    {
	      size_t cur_count = classify_object_over_fdes (ob, *p);
	      if (cur_count == (size_t) -1)
		goto unhandled_fdes;
	      count += cur_count;
	    }
	}
      else
	{
	  count = classify_object_over_fdes (ob, ob->u.single);
	  if (count == (size_t) -1)
	    {
	      /* Unsupported encoding: neuter the object so searches
		 see an empty, permanently "omitted" fde list.  */
	      static const fde terminator;
	    unhandled_fdes:
	      ob->s.i = 0;
	      ob->s.b.encoding = DW_EH_PE_omit;
	      ob->u.single = &terminator;
	      return;
	    }
	}

      /* The count field we have in the main struct object is somewhat
	 limited, but should suffice for virtually all cases.  If the
	 counted value doesn't fit, re-write a zero.  The worst that
	 happens is that we re-count next time -- admittedly non-trivial
	 in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
	ob->s.b.count = 0;
    }

  /* If allocation fails we simply stay unsorted; the caller falls back
     to linear searching and we may retry on a later call.  */
  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
	add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}
789
/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  Returns the matching FDE or NULL.  */

static const fde *
linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      if (ob->s.b.mixed_encoding)
	{
	  /* Determine the encoding for this FDE.  Note mixed encoded
	     objects for later.  */
	  this_cie = get_cie (this_fde);
	  if (this_cie != last_cie)
	    {
	      last_cie = this_cie;
	      encoding = get_cie_encoding (this_cie);
	      base = base_from_object (encoding, ob);
	    }
	}

      if (encoding == DW_EH_PE_absptr)
	{
	  /* Unencoded: pc_begin and pc_range are stored back to back
	     as native words.  */
	  const _Unwind_Ptr *pc_array = (const _Unwind_Ptr *) this_fde->pc_begin;
	  pc_begin = pc_array[0];
	  pc_range = pc_array[1];
	  if (pc_begin == 0)
	    continue;
	}
      else
	{
	  _Unwind_Ptr mask;
	  const unsigned char *p;

	  /* pc_range is always encoded without a base (low 4 bits).  */
	  p = read_encoded_value_with_base (encoding, base,
					    this_fde->pc_begin, &pc_begin);
	  read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

	  /* Take care to ignore link-once functions that were removed.
	     In these cases, the function address will be NULL, but if
	     the encoding is smaller than a pointer a true NULL may not
	     be representable.  Assume 0 in the representable bits is NULL.  */
	  mask = size_of_encoded_value (encoding);
	  if (mask < sizeof (void *))
	    mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
	  else
	    mask = -1;

	  if ((pc_begin & mask) == 0)
	    continue;
	}

      /* Unsigned wrap makes this a single range check for
	 pc_begin <= pc < pc_begin + pc_range.  */
      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
	return this_fde;
    }

  return NULL;
}
860
/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

/* Variant for objects whose FDEs store raw, unencoded pointers.  */

static inline const fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  /* Classic half-open binary search over the sorted vector.  */
  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *const f = vec->array[i];
      void *pc_begin;
      uaddr pc_range;
      /* memcpy because the in-place FDE fields may be unaligned.  */
      memcpy (&pc_begin, (const void * const *) f->pc_begin, sizeof (void *));
      memcpy (&pc_range, (const uaddr *) f->pc_begin + 1, sizeof (uaddr));

      if (pc < pc_begin)
	hi = i;
      else if (pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}
889
/* Binary-search variant for objects where every FDE shares one
   encoding; the base can be computed once outside the loop.  */

static inline const fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      /* pc_range is always encoded without a base (low 4 bits).  */
      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
					&pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
	hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}
919
/* Binary-search variant for objects with per-CIE encodings; the
   encoding and base must be recomputed for every probed FDE.  */

static inline const fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
					base_from_object (encoding, ob),
					f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
	hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}
950
/* Search OB for the FDE containing PC.  Sorts the object's FDEs on
   first use when memory permits; otherwise degrades to linear search.
   Caller must hold object_mutex.  Returns NULL if no FDE matches.  */

static const fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
	 that we've not processed this object before.  A quick range
	 check is in order.  */
      if (pc < ob->pc_begin)
	return NULL;
    }

  if (ob->s.b.sorted)
    {
      /* Dispatch to the cheapest binary search that is correct for
	 this object's encoding situation.  */
      if (ob->s.b.mixed_encoding)
	return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
	return binary_search_unencoded_fdes (ob, pc);
      else
	return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, cos we've no memory.  */
      if (ob->s.b.from_array)
	{
	  fde **p;
	  for (p = ob->u.array; *p ; p++)
	    {
	      const fde *f = linear_search_fdes (ob, *p, pc);
	      if (f)
		return f;
	    }
	  return NULL;
	}
      else
	return linear_search_fdes (ob, ob->u.single, pc);
    }
}
994
/* Public entry point: find the FDE covering PC, filling in BASES with
   the owning object's text/data bases and the function start address.
   Returns NULL if PC belongs to no registered object.  */

const fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  const fde *f = NULL;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
	f = search_object (ob, pc);
	if (f)
	  goto fini;
	/* Descending sort: no later object can contain PC either.  */
	break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list, keeping it sorted
	 by decreasing pc_begin.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
	if ((*p)->pc_begin < ob->pc_begin)
	  break;
      ob->next = *p;
      *p = ob;

      if (f)
	goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  /* OB still points at the object whose search returned F; decode the
     function start address for the caller outside the lock.  */
  if (f)
    {
      int encoding;
      _Unwind_Ptr func;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
	encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
				    f->pc_begin, &func);
      bases->func = (void *) func;
    }

  return f;
}