gcc/unwind-dw2-fde.c
1 /* Subroutines needed for unwinding stack frames for exception handling. */
2 /* Copyright (C) 1997, 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
3 Contributed by Jason Merrill <jason@cygnus.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 In addition to the permissions in the GNU General Public License, the
13 Free Software Foundation gives you unlimited permission to link the
14 compiled version of this file into combinations with other programs,
15 and to distribute those combinations without any restriction coming
16 from the use of this file. (The General Public License restrictions
17 do apply in other respects; for example, they cover modification of
18 the file, and distribution when not linked into a combine
19 executable.)
20
21 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
22 WARRANTY; without even the implied warranty of MERCHANTABILITY or
23 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
24 for more details.
25
26 You should have received a copy of the GNU General Public License
27 along with GCC; see the file COPYING. If not, write to the Free
28 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
29 02111-1307, USA. */
30
31 #include "tconfig.h"
32 #include "tsystem.h"
33 #include "dwarf2.h"
34 #include "unwind.h"
35 #define NO_BASE_OF_ENCODED_VALUE
36 #include "unwind-pe.h"
37 #include "unwind-dw2-fde.h"
38 #include "gthr.h"
39
40 /* The unseen_objects list contains objects that have been registered
41 but not yet categorized in any way. The seen_objects list has had
42 its pc_begin and count fields initialized at minimum, and is sorted
43 by decreasing value of pc_begin. */
44 static struct object *unseen_objects;
45 static struct object *seen_objects;
46
47 #ifdef __GTHREAD_MUTEX_INIT
48 static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
49 #else
50 static __gthread_mutex_t object_mutex;
51 #endif
52
53 #ifdef __GTHREAD_MUTEX_INIT_FUNCTION
54 static void
55 init_object_mutex (void)
56 {
57 __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
58 }
59
60 static void
61 init_object_mutex_once (void)
62 {
63 static __gthread_once_t once = __GTHREAD_ONCE_INIT;
64 __gthread_once (&once, init_object_mutex);
65 }
66 #else
67 #define init_object_mutex_once()
68 #endif
69
70 /* Called from crtbegin.o to register the unwind info for an object. */
71
72 void
73 __register_frame_info_bases (void *begin, struct object *ob,
74 void *tbase, void *dbase)
75 {
76 ob->pc_begin = (void *)-1;
77 ob->tbase = tbase;
78 ob->dbase = dbase;
79 ob->u.single = begin;
80 ob->s.i = 0;
81 ob->s.b.encoding = DW_EH_PE_omit;
82
83 init_object_mutex_once ();
84 __gthread_mutex_lock (&object_mutex);
85
86 ob->next = unseen_objects;
87 unseen_objects = ob;
88
89 __gthread_mutex_unlock (&object_mutex);
90 }
91
92 void
93 __register_frame_info (void *begin, struct object *ob)
94 {
95 __register_frame_info_bases (begin, ob, 0, 0);
96 }
97
98 void
99 __register_frame (void *begin)
100 {
101 struct object *ob = (struct object *) malloc (sizeof (struct object));
102 __register_frame_info (begin, ob);
103 }
104
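/* A usage sketch (not part of this file): a client that emits its own
   unwind data at run time -- a JIT, say -- registers the .eh_frame
   bytes it produced and deregisters them before freeing that memory.
   The buffer name is hypothetical.  */
#if 0
extern void __register_frame (void *);
extern void __deregister_frame (void *);

static void
example_jit_frames (void *eh_frame_data)
{
  /* EH_FRAME_DATA points at CFI in .eh_frame format, terminated by a
     zero-length entry; __register_frame allocates the struct object
     bookkeeping internally.  */
  __register_frame (eh_frame_data);

  /* ... run the generated code; unwinding through it now finds FDEs ... */

  /* Remove the entry before the memory holding the CFI goes away.  */
  __deregister_frame (eh_frame_data);
}
#endif
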
105 /* Similar, but BEGIN is actually a pointer to a table of unwind entries
106 for different translation units. Called from the file generated by
107 collect2. */
108
109 void
110 __register_frame_info_table_bases (void *begin, struct object *ob,
111 void *tbase, void *dbase)
112 {
113 ob->pc_begin = (void *)-1;
114 ob->tbase = tbase;
115 ob->dbase = dbase;
116 ob->u.array = begin;
117 ob->s.i = 0;
118 ob->s.b.from_array = 1;
119 ob->s.b.encoding = DW_EH_PE_omit;
120
121 init_object_mutex_once ();
122 __gthread_mutex_lock (&object_mutex);
123
124 ob->next = unseen_objects;
125 unseen_objects = ob;
126
127 __gthread_mutex_unlock (&object_mutex);
128 }
129
130 void
131 __register_frame_info_table (void *begin, struct object *ob)
132 {
133 __register_frame_info_table_bases (begin, ob, 0, 0);
134 }
135
136 void
137 __register_frame_table (void *begin)
138 {
139 struct object *ob = (struct object *) malloc (sizeof (struct object));
140 __register_frame_info_table (begin, ob);
141 }
142
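/* A sketch of the table form registered above (the symbol names are
   made up): BEGIN is a NULL-terminated array of pointers, each pointing
   at the .eh_frame data of one translation unit.  */
#if 0
extern const char tu1_eh_frame[], tu2_eh_frame[];

static void *example_fde_table[] =
{
  (void *) tu1_eh_frame,
  (void *) tu2_eh_frame,
  0                          /* Terminator.  */
};

/* ... __register_frame_table (example_fde_table); ... */
#endif
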
143 /* Called from crtbegin.o to deregister the unwind info for an object. */
144 /* ??? Glibc has for a while now exported __register_frame_info and
145 __deregister_frame_info. If we call __register_frame_info_bases
146 from crtbegin (wherein it is declared weak), and this object does
147 not get pulled from libgcc.a for other reasons, then the
148 invocation of __deregister_frame_info will be resolved from glibc.
149 Since the registration did not happen there, we'll abort.
150
151 Therefore, declare a new deregistration entry point that does the
152 exact same thing, but will resolve to the same library as
153 implements __register_frame_info_bases. */
154
155 void *
156 __deregister_frame_info_bases (void *begin)
157 {
158 struct object **p;
159 struct object *ob = 0;
160
161 init_object_mutex_once ();
162 __gthread_mutex_lock (&object_mutex);
163
164 for (p = &unseen_objects; *p ; p = &(*p)->next)
165 if ((*p)->u.single == begin)
166 {
167 ob = *p;
168 *p = ob->next;
169 goto out;
170 }
171
172 for (p = &seen_objects; *p ; p = &(*p)->next)
173 if ((*p)->s.b.sorted)
174 {
175 if ((*p)->u.sort->orig_data == begin)
176 {
177 ob = *p;
178 *p = ob->next;
179 free (ob->u.sort);
180 goto out;
181 }
182 }
183 else
184 {
185 if ((*p)->u.single == begin)
186 {
187 ob = *p;
188 *p = ob->next;
189 goto out;
190 }
191 }
192
193 __gthread_mutex_unlock (&object_mutex);
194 abort ();
195
196 out:
197 __gthread_mutex_unlock (&object_mutex);
198 return (void *) ob;
199 }
200
201 void *
202 __deregister_frame_info (void *begin)
203 {
204 return __deregister_frame_info_bases (begin);
205 }
206
207 void
208 __deregister_frame (void *begin)
209 {
210 free (__deregister_frame_info (begin));
211 }
212
213 \f
214 /* Like base_of_encoded_value, but take the base from a struct object
215 instead of an _Unwind_Context. */
216
217 static _Unwind_Ptr
218 base_from_object (unsigned char encoding, struct object *ob)
219 {
220 if (encoding == DW_EH_PE_omit)
221 return 0;
222
223 switch (encoding & 0x70)
224 {
225 case DW_EH_PE_absptr:
226 case DW_EH_PE_pcrel:
227 case DW_EH_PE_aligned:
228 return 0;
229
230 case DW_EH_PE_textrel:
231 return (_Unwind_Ptr) ob->tbase;
232 case DW_EH_PE_datarel:
233 return (_Unwind_Ptr) ob->dbase;
234 }
235 abort ();
236 }
237
238 /* Return the FDE pointer encoding from the CIE. */
239 /* ??? This is a subset of extract_cie_info from unwind-dw2.c. */
240
241 static int
242 get_cie_encoding (struct dwarf_cie *cie)
243 {
244 const unsigned char *aug, *p;
245 _Unwind_Ptr dummy;
246
247 aug = cie->augmentation;
248 if (aug[0] != 'z')
249 return DW_EH_PE_absptr;
250
251 p = aug + strlen (aug) + 1; /* Skip the augmentation string. */
252 p = read_uleb128 (p, &dummy); /* Skip code alignment. */
253 p = read_sleb128 (p, &dummy); /* Skip data alignment. */
254 p++; /* Skip return address column. */
255
256 aug++; /* Skip 'z' */
257 p = read_uleb128 (p, &dummy); /* Skip augmentation length. */
258 while (1)
259 {
260 /* This is what we're looking for. */
261 if (*aug == 'R')
262 return *p;
263 /* Personality encoding and pointer. */
264 else if (*aug == 'P')
265 {
266 /* ??? Avoid dereferencing indirect pointers, since we're
267 faking the base address. Gotta keep DW_EH_PE_aligned
268 intact, however. */
269 p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
270 }
271 /* LSDA encoding. */
272 else if (*aug == 'L')
273 p++;
274 /* Otherwise end of string, or unknown augmentation. */
275 else
276 return DW_EH_PE_absptr;
277 aug++;
278 }
279 }
280
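/* An illustrative sketch (not compiled) of the CIE fields walked above
   for a typical GCC augmentation string "zPLR"; the concrete byte
   values are hypothetical.  */
#if 0
static const unsigned char example_cie_tail[] =
{
  'z', 'P', 'L', 'R', 0,     /* augmentation string                  */
  0x01,                      /* code alignment factor (uleb128)      */
  0x7c,                      /* data alignment factor (sleb128, -4)  */
  0x08,                      /* return address column                */
  0x07,                      /* 'z': augmentation data length        */
  0x9b,                      /* 'P': personality encoding
                                (indirect | pcrel | sdata4)          */
  0x00, 0x00, 0x00, 0x00,    /* 'P': encoded personality pointer     */
  0x1b,                      /* 'L': LSDA encoding                   */
  0x1b                       /* 'R': the FDE pointer encoding that
                                get_cie_encoding returns             */
};
#endif
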
281 static inline int
282 get_fde_encoding (struct dwarf_fde *f)
283 {
284 return get_cie_encoding (get_cie (f));
285 }
286
287 \f
288 /* Sorting an array of FDEs by address.
289 (Ideally we would have the linker sort the FDEs so we don't have to do
290 it at run time. But the linkers are not yet prepared for this.) */
291
292 /* Comparison routines. Three variants of increasing complexity. */
293
294 static saddr
295 fde_unencoded_compare (struct object *ob __attribute__((unused)),
296 fde *x, fde *y)
297 {
298 return *(saddr *)x->pc_begin - *(saddr *)y->pc_begin;
299 }
300
301 static saddr
302 fde_single_encoding_compare (struct object *ob, fde *x, fde *y)
303 {
304 _Unwind_Ptr base, x_ptr, y_ptr;
305
306 base = base_from_object (ob->s.b.encoding, ob);
307 read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
308 read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);
309
310 return x_ptr - y_ptr;
311 }
312
313 static saddr
314 fde_mixed_encoding_compare (struct object *ob, fde *x, fde *y)
315 {
316 int x_encoding, y_encoding;
317 _Unwind_Ptr x_ptr, y_ptr;
318
319 x_encoding = get_fde_encoding (x);
320 read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
321 x->pc_begin, &x_ptr);
322
323 y_encoding = get_fde_encoding (y);
324 read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
325 y->pc_begin, &y_ptr);
326
327 return x_ptr - y_ptr;
328 }
329
330 typedef saddr (*fde_compare_t) (struct object *, fde *, fde *);
331
332
333 /* This is a special mix of insertion sort and heap sort, optimized for
334 the data sets that actually occur. They look like
335 101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
336 I.e. a linearly increasing sequence (coming from functions in the text
337 section), with additionally a few unordered elements (coming from functions
338 in gnu_linkonce sections) whose values are higher than the values in the
339 surrounding linear sequence (but not necessarily higher than the values
340 at the end of the linear sequence!).
341 The worst-case total run time is O(N) + O(n log (n)), where N is the
342 total number of FDEs and n is the number of erratic ones. */
343
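/* An illustrative sketch (not part of the build): the same split
   strategy applied to plain integers.  Elements that would break the
   increasing run are diverted to ERRATIC; everything else stays in
   LINEAR, which remains sorted.  On the sample sequence above this
   keeps 101 102 103 105 108 110 111 115 119 125 126 129 130 in LINEAR
   and diverts 127, 128, 190 and 160 (in some order) to ERRATIC, which
   is then heap-sorted and merged back.  */
#if 0
#include <stddef.h>

static size_t
int_split (const int *in, size_t count, int *linear, int *erratic)
{
  size_t n_linear = 0, n_erratic = 0, i;

  for (i = 0; i < count; i++)
    {
      /* Entries larger than the incoming value cannot be part of an
         increasing run that continues with it; move them to ERRATIC.  */
      while (n_linear > 0 && linear[n_linear - 1] > in[i])
        erratic[n_erratic++] = linear[--n_linear];
      linear[n_linear++] = in[i];
    }
  return n_erratic;          /* LINEAR holds count - n_erratic entries.  */
}
#endif
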
344 struct fde_accumulator
345 {
346 struct fde_vector *linear;
347 struct fde_vector *erratic;
348 };
349
350 static inline int
351 start_fde_sort (struct fde_accumulator *accu, size_t count)
352 {
353 size_t size;
354 if (! count)
355 return 0;
356
357 size = sizeof (struct fde_vector) + sizeof (fde *) * count;
358 if ((accu->linear = (struct fde_vector *) malloc (size)))
359 {
360 accu->linear->count = 0;
361 if ((accu->erratic = (struct fde_vector *) malloc (size)))
362 accu->erratic->count = 0;
363 return 1;
364 }
365 else
366 return 0;
367 }
368
369 static inline void
370 fde_insert (struct fde_accumulator *accu, fde *this_fde)
371 {
372 if (accu->linear)
373 accu->linear->array[accu->linear->count++] = this_fde;
374 }
375
376 /* Split LINEAR into a linear sequence with low values and an erratic
377 sequence with high values, put the linear one (of longest possible
378 length) into LINEAR and the erratic one into ERRATIC. This is O(N).
379
380 Because the longest linear sequence we are trying to locate within the
381 incoming LINEAR array can be interspersed with (high valued) erratic
382 entries, we construct a chain indicating the sequenced entries.
383 To avoid having to allocate this chain, we overlay it onto the space of
384 the ERRATIC array during construction. A final pass iterates over the
385 chain to determine what should be placed in the ERRATIC array, and
386 what is the linear sequence. This overlay is safe from aliasing. */
387
388 static inline void
389 fde_split (struct object *ob, fde_compare_t fde_compare,
390 struct fde_vector *linear, struct fde_vector *erratic)
391 {
392 static fde *marker;
393 size_t count = linear->count;
394 fde **chain_end = &marker;
395 size_t i, j, k;
396
397 /* This should optimize out, but it is wise to make sure this assumption
398 is correct. Should these have different sizes, we cannot cast between
399 them and the overlaying onto ERRATIC will not work. */
400 if (sizeof (fde *) != sizeof (fde **))
401 abort ();
402
403 for (i = 0; i < count; i++)
404 {
405 fde **probe;
406
407 for (probe = chain_end;
408 probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
409 probe = chain_end)
410 {
411 chain_end = (fde **)erratic->array[probe - linear->array];
412 erratic->array[probe - linear->array] = NULL;
413 }
414 erratic->array[i] = (fde *)chain_end;
415 chain_end = &linear->array[i];
416 }
417
418 /* Each entry in LINEAR which is part of the linear sequence we have
419 discovered will correspond to a non-NULL entry in the chain we built in
420 the ERRATIC array. */
421 for (i = j = k = 0; i < count; i++)
422 if (erratic->array[i])
423 linear->array[j++] = linear->array[i];
424 else
425 erratic->array[k++] = linear->array[i];
426 linear->count = j;
427 erratic->count = k;
428 }
429
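/* A worked sketch of the chain overlay used by fde_split above, for
   three FDEs whose pc_begin values compare as 10, 30, 20:

     i = 0: erratic[0] = &marker,     chain is [0]
     i = 1: erratic[1] = &linear[0],  chain is [0, 1]
     i = 2: 20 < 30, so entry 1 is popped (erratic[1] = NULL);
            erratic[2] = &linear[0],  chain is [0, 2]

   The final pass keeps entries 0 and 2 (non-NULL links) in LINEAR and
   moves entry 1 into ERRATIC.  */
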
430 /* This is O(n log(n)). BSD/OS defines heapsort in stdlib.h, so we must
431 use a name that does not conflict. */
432
433 static void
434 frame_heapsort (struct object *ob, fde_compare_t fde_compare,
435 struct fde_vector *erratic)
436 {
437 /* For a description of this algorithm, see:
438 Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
439 p. 60-61. */
440 fde ** a = erratic->array;
441 /* A portion of the array is called a "heap" if for all i>=0:
442 If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
443 If i and 2i+2 are valid indices, then a[i] >= a[2i+2]. */
444 #define SWAP(x,y) do { fde * tmp = x; x = y; y = tmp; } while (0)
445 size_t n = erratic->count;
446 size_t m = n;
447 size_t i;
448
449 while (m > 0)
450 {
451 /* Invariant: a[m..n-1] is a heap. */
452 m--;
453 for (i = m; 2*i+1 < n; )
454 {
455 if (2*i+2 < n
456 && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
457 && fde_compare (ob, a[2*i+2], a[i]) > 0)
458 {
459 SWAP (a[i], a[2*i+2]);
460 i = 2*i+2;
461 }
462 else if (fde_compare (ob, a[2*i+1], a[i]) > 0)
463 {
464 SWAP (a[i], a[2*i+1]);
465 i = 2*i+1;
466 }
467 else
468 break;
469 }
470 }
471 while (n > 1)
472 {
473 /* Invariant: a[0..n-1] is a heap. */
474 n--;
475 SWAP (a[0], a[n]);
476 for (i = 0; 2*i+1 < n; )
477 {
478 if (2*i+2 < n
479 && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
480 && fde_compare (ob, a[2*i+2], a[i]) > 0)
481 {
482 SWAP (a[i], a[2*i+2]);
483 i = 2*i+2;
484 }
485 else if (fde_compare (ob, a[2*i+1], a[i]) > 0)
486 {
487 SWAP (a[i], a[2*i+1]);
488 i = 2*i+1;
489 }
490 else
491 break;
492 }
493 }
494 #undef SWAP
495 }
496
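/* An illustrative sketch (not compiled): the same two-phase heapsort on
   plain integers.  int_sift restores the max-heap property below index
   I; the first loop builds the heap bottom-up, the second repeatedly
   moves the current maximum to the end of the shrinking prefix.  */
#if 0
#include <stddef.h>

static void
int_sift (int *a, size_t i, size_t n)
{
  while (2 * i + 1 < n)
    {
      size_t c = 2 * i + 1;                    /* Left child.  */
      if (c + 1 < n && a[c + 1] > a[c])
        c++;                                   /* Right child is larger.  */
      if (a[c] <= a[i])
        break;                                 /* Heap property holds.  */
      { int t = a[i]; a[i] = a[c]; a[c] = t; }
      i = c;
    }
}

static void
int_heapsort (int *a, size_t n)
{
  size_t m;

  for (m = n; m-- > 0; )
    int_sift (a, m, n);                        /* Build the heap.  */
  while (n > 1)
    {
      int t = a[0]; a[0] = a[n - 1]; a[n - 1] = t;
      n--;                                     /* Largest element placed.  */
      int_sift (a, 0, n);
    }
}
#endif
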
497 /* Merge V1 and V2, both sorted, and put the result into V1. */
498 static inline void
499 fde_merge (struct object *ob, fde_compare_t fde_compare,
500 struct fde_vector *v1, struct fde_vector *v2)
501 {
502 size_t i1, i2;
503 fde * fde2;
504
505 i2 = v2->count;
506 if (i2 > 0)
507 {
508 i1 = v1->count;
509 do {
510 i2--;
511 fde2 = v2->array[i2];
512 while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
513 {
514 v1->array[i1+i2] = v1->array[i1-1];
515 i1--;
516 }
517 v1->array[i1+i2] = fde2;
518 } while (i2 > 0);
519 v1->count += v2->count;
520 }
521 }
522
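/* A worked sketch of the in-place merge above: with V1 = {1, 4, 9} and
   V2 = {3, 7}, the elements of V2 are inserted from the back, shifting
   the tail of V1 right as needed:

     i2 = 1 (fde2 = 7): 9 moves to slot 4, 7 lands in slot 3;
     i2 = 0 (fde2 = 3): 4 moves to slot 2, 3 lands in slot 1;

   yielding {1, 3, 4, 7, 9} without any scratch storage.  */
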
523 static inline void
524 end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
525 {
526 fde_compare_t fde_compare;
527
528 if (accu->linear && accu->linear->count != count)
529 abort ();
530
531 if (ob->s.b.mixed_encoding)
532 fde_compare = fde_mixed_encoding_compare;
533 else if (ob->s.b.encoding == DW_EH_PE_absptr)
534 fde_compare = fde_unencoded_compare;
535 else
536 fde_compare = fde_single_encoding_compare;
537
538 if (accu->erratic)
539 {
540 fde_split (ob, fde_compare, accu->linear, accu->erratic);
541 if (accu->linear->count + accu->erratic->count != count)
542 abort ();
543 frame_heapsort (ob, fde_compare, accu->erratic);
544 fde_merge (ob, fde_compare, accu->linear, accu->erratic);
545 free (accu->erratic);
546 }
547 else
548 {
549 /* We've not managed to malloc an erratic array,
550 so heap sort in the linear one. */
551 frame_heapsort (ob, fde_compare, accu->linear);
552 }
553 }
554
555 \f
556 /* Update encoding, mixed_encoding, and pc_begin for OB for the
557 fde array beginning at THIS_FDE. Return the number of fdes
558 encountered along the way. */
559
560 static size_t
561 classify_object_over_fdes (struct object *ob, fde *this_fde)
562 {
563 struct dwarf_cie *last_cie = 0;
564 size_t count = 0;
565 int encoding = DW_EH_PE_absptr;
566 _Unwind_Ptr base = 0;
567
568 for (; this_fde->length != 0; this_fde = next_fde (this_fde))
569 {
570 struct dwarf_cie *this_cie;
571 _Unwind_Ptr mask, pc_begin;
572
573 /* Skip CIEs. */
574 if (this_fde->CIE_delta == 0)
575 continue;
576
577 /* Determine the encoding for this FDE. Note mixed encoded
578 objects for later. */
579 this_cie = get_cie (this_fde);
580 if (this_cie != last_cie)
581 {
582 last_cie = this_cie;
583 encoding = get_cie_encoding (this_cie);
584 base = base_from_object (encoding, ob);
585 if (ob->s.b.encoding == DW_EH_PE_omit)
586 ob->s.b.encoding = encoding;
587 else if (ob->s.b.encoding != encoding)
588 ob->s.b.mixed_encoding = 1;
589 }
590
591 read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
592 &pc_begin);
593
594 /* Take care to ignore link-once functions that were removed.
595 In these cases, the function address will be NULL, but if
596 the encoding is smaller than a pointer a true NULL may not
597 be representable. Assume 0 in the representable bits is NULL. */
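/* For example, a 4-byte encoding (DW_EH_PE_sdata4) on an LP64 host
   gives mask = (1L << 32) - 1, so only the low 32 bits of the decoded
   address are tested against zero below.  */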
598 mask = size_of_encoded_value (encoding);
599 if (mask < sizeof (void *))
600 mask = (1L << (mask << 3)) - 1;
601 else
602 mask = -1;
603
604 if ((pc_begin & mask) == 0)
605 continue;
606
607 count += 1;
608 if ((void *)pc_begin < ob->pc_begin)
609 ob->pc_begin = (void *)pc_begin;
610 }
611
612 return count;
613 }
614
615 static void
616 add_fdes (struct object *ob, struct fde_accumulator *accu, fde *this_fde)
617 {
618 struct dwarf_cie *last_cie = 0;
619 int encoding = ob->s.b.encoding;
620 _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);
621
622 for (; this_fde->length != 0; this_fde = next_fde (this_fde))
623 {
624 struct dwarf_cie *this_cie;
625
626 /* Skip CIEs. */
627 if (this_fde->CIE_delta == 0)
628 continue;
629
630 if (ob->s.b.mixed_encoding)
631 {
632 /* Determine the encoding for this FDE. Mixed-encoding objects
633 were already noted when the object was classified. */
634 this_cie = get_cie (this_fde);
635 if (this_cie != last_cie)
636 {
637 last_cie = this_cie;
638 encoding = get_cie_encoding (this_cie);
639 base = base_from_object (encoding, ob);
640 }
641 }
642
643 if (encoding == DW_EH_PE_absptr)
644 {
645 if (*(_Unwind_Ptr *)this_fde->pc_begin == 0)
646 continue;
647 }
648 else
649 {
650 _Unwind_Ptr pc_begin, mask;
651
652 read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
653 &pc_begin);
654
655 /* Take care to ignore link-once functions that were removed.
656 In these cases, the function address will be NULL, but if
657 the encoding is smaller than a pointer a true NULL may not
658 be representable. Assume 0 in the representable bits is NULL. */
659 mask = size_of_encoded_value (encoding);
660 if (mask < sizeof (void *))
661 mask = (1L << (mask << 3)) - 1;
662 else
663 mask = -1;
664
665 if ((pc_begin & mask) == 0)
666 continue;
667 }
668
669 fde_insert (accu, this_fde);
670 }
671 }
672
673 /* Set up a sorted array of pointers to FDEs for a loaded object. We
674 count up the entries before allocating the array because it's likely to
675 be faster. We can be called multiple times, should we have failed to
676 allocate a sorted fde array on a previous occasion. */
677
678 static inline void
679 init_object (struct object* ob)
680 {
681 struct fde_accumulator accu;
682 size_t count;
683
684 count = ob->s.b.count;
685 if (count == 0)
686 {
687 if (ob->s.b.from_array)
688 {
689 fde **p = ob->u.array;
690 for (count = 0; *p; ++p)
691 count += classify_object_over_fdes (ob, *p);
692 }
693 else
694 count = classify_object_over_fdes (ob, ob->u.single);
695
696 /* The count field we have in the main struct object is somewhat
697 limited, but should suffice for virtually all cases. If the
698 counted value doesn't fit, re-write a zero. The worst that
699 happens is that we re-count next time -- admittedly non-trivial
700 in that this implies some 2M fdes, but at least we function. */
701 ob->s.b.count = count;
702 if (ob->s.b.count != count)
703 ob->s.b.count = 0;
704 }
705
706 if (!start_fde_sort (&accu, count))
707 return;
708
709 if (ob->s.b.from_array)
710 {
711 fde **p;
712 for (p = ob->u.array; *p; ++p)
713 add_fdes (ob, &accu, *p);
714 }
715 else
716 add_fdes (ob, &accu, ob->u.single);
717
718 end_fde_sort (ob, &accu, count);
719
720 /* Save the original fde pointer, since this is the key by which the
721 DSO will deregister the object. */
722 accu.linear->orig_data = ob->u.single;
723 ob->u.sort = accu.linear;
724
725 ob->s.b.sorted = 1;
726 }
727
728 /* A linear search through a set of FDEs for the given PC. This is
729 used when there was insufficient memory to allocate and sort an
730 array. */
731
732 static fde *
733 linear_search_fdes (struct object *ob, fde *this_fde, void *pc)
734 {
735 struct dwarf_cie *last_cie = 0;
736 int encoding = ob->s.b.encoding;
737 _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);
738
739 for (; this_fde->length != 0; this_fde = next_fde (this_fde))
740 {
741 struct dwarf_cie *this_cie;
742 _Unwind_Ptr pc_begin, pc_range;
743
744 /* Skip CIEs. */
745 if (this_fde->CIE_delta == 0)
746 continue;
747
748 if (ob->s.b.mixed_encoding)
749 {
750 /* Determine the encoding for this FDE. Mixed-encoding objects
751 were already noted when the object was classified. */
752 this_cie = get_cie (this_fde);
753 if (this_cie != last_cie)
754 {
755 last_cie = this_cie;
756 encoding = get_cie_encoding (this_cie);
757 base = base_from_object (encoding, ob);
758 }
759 }
760
761 if (encoding == DW_EH_PE_absptr)
762 {
763 pc_begin = ((_Unwind_Ptr *)this_fde->pc_begin)[0];
764 pc_range = ((_Unwind_Ptr *)this_fde->pc_begin)[1];
765 if (pc_begin == 0)
766 continue;
767 }
768 else
769 {
770 _Unwind_Ptr mask;
771 const char *p;
772
773 p = read_encoded_value_with_base (encoding, base,
774 this_fde->pc_begin, &pc_begin);
775 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
776
777 /* Take care to ignore link-once functions that were removed.
778 In these cases, the function address will be NULL, but if
779 the encoding is smaller than a pointer a true NULL may not
780 be representable. Assume 0 in the representable bits is NULL. */
781 mask = size_of_encoded_value (encoding);
782 if (mask < sizeof (void *))
783 mask = (1L << (mask << 3)) - 1;
784 else
785 mask = -1;
786
787 if ((pc_begin & mask) == 0)
788 continue;
789 }
790
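/* One unsigned comparison covers both bounds: if PC is below PC_BEGIN
   the subtraction wraps around to a huge value and the test fails.  */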
791 if ((_Unwind_Ptr)pc - pc_begin < pc_range)
792 return this_fde;
793 }
794
795 return NULL;
796 }
797
798 /* Binary search for an FDE containing the given PC. Here are three
799 implementations of increasing complexity. */
800
801 static inline fde *
802 binary_search_unencoded_fdes (struct object *ob, void *pc)
803 {
804 struct fde_vector *vec = ob->u.sort;
805 size_t lo, hi;
806
807 for (lo = 0, hi = vec->count; lo < hi; )
808 {
809 size_t i = (lo + hi) / 2;
810 fde *f = vec->array[i];
811 void *pc_begin;
812 uaddr pc_range;
813
814 pc_begin = ((void **)f->pc_begin)[0];
815 pc_range = ((uaddr *)f->pc_begin)[1];
816
817 if (pc < pc_begin)
818 hi = i;
819 else if (pc >= pc_begin + pc_range)
820 lo = i + 1;
821 else
822 return f;
823 }
824
825 return NULL;
826 }
827
828 static inline fde *
829 binary_search_single_encoding_fdes (struct object *ob, void *pc)
830 {
831 struct fde_vector *vec = ob->u.sort;
832 int encoding = ob->s.b.encoding;
833 _Unwind_Ptr base = base_from_object (encoding, ob);
834 size_t lo, hi;
835
836 for (lo = 0, hi = vec->count; lo < hi; )
837 {
838 size_t i = (lo + hi) / 2;
839 fde *f = vec->array[i];
840 _Unwind_Ptr pc_begin, pc_range;
841 const char *p;
842
843 p = read_encoded_value_with_base (encoding, base, f->pc_begin,
844 &pc_begin);
845 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
846
847 if ((_Unwind_Ptr)pc < pc_begin)
848 hi = i;
849 else if ((_Unwind_Ptr)pc >= pc_begin + pc_range)
850 lo = i + 1;
851 else
852 return f;
853 }
854
855 return NULL;
856 }
857
858 static inline fde *
859 binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
860 {
861 struct fde_vector *vec = ob->u.sort;
862 size_t lo, hi;
863
864 for (lo = 0, hi = vec->count; lo < hi; )
865 {
866 size_t i = (lo + hi) / 2;
867 fde *f = vec->array[i];
868 _Unwind_Ptr pc_begin, pc_range;
869 const char *p;
870 int encoding;
871
872 encoding = get_fde_encoding (f);
873 p = read_encoded_value_with_base (encoding,
874 base_from_object (encoding, ob),
875 f->pc_begin, &pc_begin);
876 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
877
878 if ((_Unwind_Ptr)pc < pc_begin)
879 hi = i;
880 else if ((_Unwind_Ptr)pc >= pc_begin + pc_range)
881 lo = i + 1;
882 else
883 return f;
884 }
885
886 return NULL;
887 }
888
889 static fde *
890 search_object (struct object* ob, void *pc)
891 {
892 /* If the data hasn't been sorted, try to do this now. We may have
893 more memory available than last time we tried. */
894 if (! ob->s.b.sorted)
895 {
896 init_object (ob);
897
898 /* Despite the above comment, the normal reason to get here is
899 that we've not processed this object before. A quick range
900 check is in order. */
901 if (pc < ob->pc_begin)
902 return NULL;
903 }
904
905 if (ob->s.b.sorted)
906 {
907 if (ob->s.b.mixed_encoding)
908 return binary_search_mixed_encoding_fdes (ob, pc);
909 else if (ob->s.b.encoding == DW_EH_PE_absptr)
910 return binary_search_unencoded_fdes (ob, pc);
911 else
912 return binary_search_single_encoding_fdes (ob, pc);
913 }
914 else
915 {
916 /* Long slow laborious linear search, because we have no memory. */
917 if (ob->s.b.from_array)
918 {
919 fde **p;
920 for (p = ob->u.array; *p ; p++)
921 {
922 fde *f = linear_search_fdes (ob, *p, pc);
923 if (f)
924 return f;
925 }
926 return NULL;
927 }
928 else
929 return linear_search_fdes (ob, ob->u.single, pc);
930 }
931 }
932
933 fde *
934 _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
935 {
936 struct object *ob;
937 fde *f = NULL;
938
939 init_object_mutex_once ();
940 __gthread_mutex_lock (&object_mutex);
941
942 /* Linear search through the classified objects, to find the one
943 containing the pc. Note that pc_begin is sorted descending, and
944 we expect objects to be non-overlapping. */
945 for (ob = seen_objects; ob; ob = ob->next)
946 if (pc >= ob->pc_begin)
947 {
948 f = search_object (ob, pc);
949 if (f)
950 goto fini;
951 break;
952 }
953
954 /* Classify and search the objects we've not yet processed. */
955 while ((ob = unseen_objects))
956 {
957 struct object **p;
958
959 unseen_objects = ob->next;
960 f = search_object (ob, pc);
961
962 /* Insert the object into the classified list. */
963 for (p = &seen_objects; *p ; p = &(*p)->next)
964 if ((*p)->pc_begin < ob->pc_begin)
965 break;
966 ob->next = *p;
967 *p = ob;
968
969 if (f)
970 goto fini;
971 }
972
973 fini:
974 __gthread_mutex_unlock (&object_mutex);
975
976 if (f)
977 {
978 int encoding;
979
980 bases->tbase = ob->tbase;
981 bases->dbase = ob->dbase;
982
983 encoding = ob->s.b.encoding;
984 if (ob->s.b.mixed_encoding)
985 encoding = get_fde_encoding (f);
986 read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
987 f->pc_begin, (_Unwind_Ptr *)&bases->func);
988 }
989
990 return f;
991 }
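
/* A usage sketch (not compiled): how the frame unwinder typically uses
   this entry point -- find the FDE covering a code address and pick up
   the base addresses needed to decode its encoded pointers.  */
#if 0
static void
example_find_fde (void *pc)
{
  struct dwarf_eh_bases bases;
  fde *f = _Unwind_Find_FDE (pc, &bases);

  if (f)
    {
      /* F covers PC; bases.tbase, bases.dbase and bases.func seed the
         text-, data- and function-relative pointer encodings when the
         FDE and its CIE are later decoded.  */
    }
  else
    {
      /* No registered object knows about PC.  */
    }
}
#endif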