1 /* More subroutines needed by GCC output code on some machines. */
2 /* Compile this one with gcc. */
3 /* Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000
4 2001 Free Software Foundation, Inc.
5
6 This file is part of GNU CC.
7
8 GNU CC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19 the file, and distribution when not linked into a combine
20 executable.)
21
22 GNU CC is distributed in the hope that it will be useful,
23 but WITHOUT ANY WARRANTY; without even the implied warranty of
24 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 GNU General Public License for more details.
26
27 You should have received a copy of the GNU General Public License
28 along with GNU CC; see the file COPYING. If not, write to
29 the Free Software Foundation, 59 Temple Place - Suite 330,
30 Boston, MA 02111-1307, USA. */
31
32 /* It is incorrect to include config.h here, because this file is being
33 compiled for the target, and hence definitions concerning only the host
34 do not apply. */
35
36 #include "tconfig.h"
37 #include "tsystem.h"
38
39 #include "machmode.h"
40 #include "defaults.h"
41
42 /* Don't use `fancy_abort' here even if config.h says to use it. */
43 #ifdef abort
44 #undef abort
45 #endif
46
47 #include "libgcc2.h"
48 \f
49 #if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
50 #if defined (L_divdi3) || defined (L_moddi3)
51 static inline
52 #endif
53 DWtype
54 __negdi2 (DWtype u)
55 {
56 DWunion w;
57 DWunion uu;
58
59 uu.ll = u;
60
61 w.s.low = -uu.s.low;
62 w.s.high = -uu.s.high - ((UWtype) w.s.low > 0);
63
64 return w.ll;
65 }
66 #endif
67
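/* The __addv*, __subv*, __mulv*, __negv* and __absv* routines that
   follow implement trapping arithmetic: each performs the ordinary
   wrapping operation and then calls abort () if signed overflow
   occurred.  They are the library fallbacks used when code is
   compiled with -ftrapv; for example, on a target with 32-bit Wtype,
   __addvsi3 (0x7fffffff, 1) aborts instead of wrapping to a negative
   value.  */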
68 #ifdef L_addvsi3
69 Wtype
70 __addvsi3 (Wtype a, Wtype b)
71 {
72 Wtype w;
73
74 w = a + b;
75
76 if (b >= 0 ? w < a : w > a)
77 abort ();
78
79 return w;
80 }
81 #endif
82 \f
83 #ifdef L_addvdi3
84 DWtype
85 __addvdi3 (DWtype a, DWtype b)
86 {
87 DWtype w;
88
89 w = a + b;
90
91 if (b >= 0 ? w < a : w > a)
92 abort ();
93
94 return w;
95 }
96 #endif
97 \f
98 #ifdef L_subvsi3
99 Wtype
100 __subvsi3 (Wtype a, Wtype b)
101 {
102 #ifdef L_addvsi3
103 return __addvsi3 (a, (-b));
104 #else
105 DWtype w;
106
107 w = a - b;
108
109 if (b >= 0 ? w > a : w < a)
110 abort ();
111
112 return w;
113 #endif
114 }
115 #endif
116 \f
117 #ifdef L_subvdi3
118 DWtype
119 __subvdi3 (DWtype a, DWtype b)
120 {
121 #ifdef L_addvdi3
122 return __addvdi3 (a, (-b));
123 #else
124 DWtype w;
125
126 w = a - b;
127
128 if (b >= 0 ? w > a : w < a)
129 abort ();
130
131 return w;
132 #endif
133 }
134 #endif
135 \f
136 #ifdef L_mulvsi3
137 Wtype
138 __mulvsi3 (Wtype a, Wtype b)
139 {
140 DWtype w;
141
142 w = a * b;
143
144 if (((a >= 0) == (b >= 0)) ? w < 0 : w > 0)
145 abort ();
146
147 return w;
148 }
149 #endif
150 \f
151 #ifdef L_negvsi2
152 Wtype
153 __negvsi2 (Wtype a)
154 {
155 Wtype w;
156
157 w = -a;
158
159 if (a >= 0 ? w > 0 : w < 0)
160 abort ();
161
162 return w;
163 }
164 #endif
165 \f
166 #ifdef L_negvdi2
167 DWtype
168 __negvdi2 (DWtype a)
169 {
170 DWtype w;
171
172 w = -a;
173
174 if (a >= 0 ? w > 0 : w < 0)
175 abort ();
176
177 return w;
178 }
179 #endif
180 \f
181 #ifdef L_absvsi2
182 Wtype
183 __absvsi2 (Wtype a)
184 {
185 Wtype w = a;
186
187 if (a < 0)
188 #ifdef L_negvsi2
189 w = __negvsi2 (a);
190 #else
191 w = -a;
192
193 if (w < 0)
194 abort ();
195 #endif
196
197 return w;
198 }
199 #endif
200 \f
201 #ifdef L_absvdi2
202 DWtype
203 __absvdi2 (DWtype a)
204 {
205 DWtype w = a;
206
207 if (a < 0)
208 #ifdef L_negvdi2
209 w = __negvdi2 (a);
210 #else
211 w = -a;
212
213 if (w < 0)
214 abort ();
215 #endif
216
217 return w;
218 }
219 #endif
220 \f
221 #ifdef L_mulvdi3
222 DWtype
223 __mulvdi3 (DWtype u, DWtype v)
224 {
225 DWtype w;
226
227 w = u * v;
228
229 if (((u >= 0) == (v >= 0)) ? w < 0 : w > 0)
230 abort ();
231
232 return w;
233 }
234 #endif
235 \f
236
237 /* Unless shift functions are defined with full ANSI prototypes,
238 parameter b will be promoted to int if word_type is smaller than an int. */
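/* Each of the double-word shifts below works on the two Wtype halves
   of the operand.  Writing W for the width of Wtype and bm for W - b,
   a logical right shift by 0 < b < W is

       low'  = (low >> b) | (high << bm)
       high' = high >> b

   while a shift by b >= W just moves (part of) the high word into the
   low word.  __ashldi3 and __ashrdi3 are the mirror images of the
   same scheme, the arithmetic shift filling the vacated high word
   with copies of the sign bit.  */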
239 #ifdef L_lshrdi3
240 DWtype
241 __lshrdi3 (DWtype u, word_type b)
242 {
243 DWunion w;
244 word_type bm;
245 DWunion uu;
246
247 if (b == 0)
248 return u;
249
250 uu.ll = u;
251
252 bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
253 if (bm <= 0)
254 {
255 w.s.high = 0;
256 w.s.low = (UWtype) uu.s.high >> -bm;
257 }
258 else
259 {
260 UWtype carries = (UWtype) uu.s.high << bm;
261
262 w.s.high = (UWtype) uu.s.high >> b;
263 w.s.low = ((UWtype) uu.s.low >> b) | carries;
264 }
265
266 return w.ll;
267 }
268 #endif
269
270 #ifdef L_ashldi3
271 DWtype
272 __ashldi3 (DWtype u, word_type b)
273 {
274 DWunion w;
275 word_type bm;
276 DWunion uu;
277
278 if (b == 0)
279 return u;
280
281 uu.ll = u;
282
283 bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
284 if (bm <= 0)
285 {
286 w.s.low = 0;
287 w.s.high = (UWtype) uu.s.low << -bm;
288 }
289 else
290 {
291 UWtype carries = (UWtype) uu.s.low >> bm;
292
293 w.s.low = (UWtype) uu.s.low << b;
294 w.s.high = ((UWtype) uu.s.high << b) | carries;
295 }
296
297 return w.ll;
298 }
299 #endif
300
301 #ifdef L_ashrdi3
302 DWtype
303 __ashrdi3 (DWtype u, word_type b)
304 {
305 DWunion w;
306 word_type bm;
307 DWunion uu;
308
309 if (b == 0)
310 return u;
311
312 uu.ll = u;
313
314 bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
315 if (bm <= 0)
316 {
317 /* w.s.high = 1..1 or 0..0 */
318 w.s.high = uu.s.high >> (sizeof (Wtype) * BITS_PER_UNIT - 1);
319 w.s.low = uu.s.high >> -bm;
320 }
321 else
322 {
323 UWtype carries = (UWtype) uu.s.high << bm;
324
325 w.s.high = uu.s.high >> b;
326 w.s.low = ((UWtype) uu.s.low >> b) | carries;
327 }
328
329 return w.ll;
330 }
331 #endif
332 \f
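/* __ffsdi2 returns one plus the index of the least significant set
   bit of U, or zero if U is zero -- the double-word analogue of
   ffs(3).  */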
333 #ifdef L_ffsdi2
334 DWtype
335 __ffsdi2 (DWtype u)
336 {
337 DWunion uu;
338 UWtype word, count, add;
339
340 uu.ll = u;
341 if (uu.s.low != 0)
342 word = uu.s.low, add = 0;
343 else if (uu.s.high != 0)
344 word = uu.s.high, add = BITS_PER_UNIT * sizeof (Wtype);
345 else
346 return 0;
347
348 count_trailing_zeros (count, word);
349 return count + add + 1;
350 }
351 #endif
352 \f
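/* __muldi3 relies on the identity

     (u1*2^W + u0) * (v1*2^W + v0)
       == u0*v0 + (u0*v1 + u1*v0)*2^W   (mod 2^(2*W))

   where W is the width of Wtype: the full u0*v0 product comes from
   __umulsidi3, and only the low halves of the two cross products are
   needed because everything at or above 2^(2*W) is discarded.  */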
353 #ifdef L_muldi3
354 DWtype
355 __muldi3 (DWtype u, DWtype v)
356 {
357 DWunion w;
358 DWunion uu, vv;
359
360 uu.ll = u,
361 vv.ll = v;
362
363 w.ll = __umulsidi3 (uu.s.low, vv.s.low);
364 w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high
365 + (UWtype) uu.s.high * (UWtype) vv.s.low);
366
367 return w.ll;
368 }
369 #endif
370 \f
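/* __udiv_w_sdiv divides the two-word value a1*2^W + a0 by d, storing
   the remainder through RP, while using only the signed division
   primitive sdiv_qrnnd.  The case analysis keeps every intermediate
   dividend and divisor inside the signed range, which is why divisors
   with the top bit clear and with the top bit set are handled
   separately.  It is typically reached via longlong.h on targets that
   provide a signed but no unsigned hardware divide.  */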
371 #ifdef L_udiv_w_sdiv
372 #if defined (sdiv_qrnnd)
373 UWtype
374 __udiv_w_sdiv (UWtype *rp, UWtype a1, UWtype a0, UWtype d)
375 {
376 UWtype q, r;
377 UWtype c0, c1, b1;
378
379 if ((Wtype) d >= 0)
380 {
381 if (a1 < d - a1 - (a0 >> (W_TYPE_SIZE - 1)))
382 {
383 /* dividend, divisor, and quotient are nonnegative */
384 sdiv_qrnnd (q, r, a1, a0, d);
385 }
386 else
387 {
388 /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
389 sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (W_TYPE_SIZE - 1));
390 /* Divide (c1*2^32 + c0) by d */
391 sdiv_qrnnd (q, r, c1, c0, d);
392 /* Add 2^31 to quotient */
393 q += (UWtype) 1 << (W_TYPE_SIZE - 1);
394 }
395 }
396 else
397 {
398 b1 = d >> 1; /* d/2, between 2^30 and 2^31 - 1 */
399 c1 = a1 >> 1; /* A/2 */
400 c0 = (a1 << (W_TYPE_SIZE - 1)) + (a0 >> 1);
401
402 if (a1 < b1) /* A < 2^32*b1, so A/2 < 2^31*b1 */
403 {
404 sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
405
406 r = 2*r + (a0 & 1); /* Remainder from A/(2*b1) */
407 if ((d & 1) != 0)
408 {
409 if (r >= q)
410 r = r - q;
411 else if (q - r <= d)
412 {
413 r = r - q + d;
414 q--;
415 }
416 else
417 {
418 r = r - q + 2*d;
419 q -= 2;
420 }
421 }
422 }
423 else if (c1 < b1) /* So 2^31 <= (A/2)/b1 < 2^32 */
424 {
425 c1 = (b1 - 1) - c1;
426 c0 = ~c0; /* logical NOT */
427
428 sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
429
430 q = ~q; /* (A/2)/b1 */
431 r = (b1 - 1) - r;
432
433 r = 2*r + (a0 & 1); /* A/(2*b1) */
434
435 if ((d & 1) != 0)
436 {
437 if (r >= q)
438 r = r - q;
439 else if (q - r <= d)
440 {
441 r = r - q + d;
442 q--;
443 }
444 else
445 {
446 r = r - q + 2*d;
447 q -= 2;
448 }
449 }
450 }
451 else /* Implies c1 = b1 */
452 { /* Hence a1 = d - 1 = 2*b1 - 1 */
453 if (a0 >= -d)
454 {
455 q = -1;
456 r = a0 + d;
457 }
458 else
459 {
460 q = -2;
461 r = a0 + 2*d;
462 }
463 }
464 }
465
466 *rp = r;
467 return q;
468 }
469 #else
470 /* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */
471 UWtype
472 __udiv_w_sdiv (UWtype *rp __attribute__ ((__unused__)),
473 UWtype a1 __attribute__ ((__unused__)),
474 UWtype a0 __attribute__ ((__unused__)),
475 UWtype d __attribute__ ((__unused__)))
476 {
477 return 0;
478 }
479 #endif
480 #endif
481 \f
482 #if (defined (L_udivdi3) || defined (L_divdi3) || \
483 defined (L_umoddi3) || defined (L_moddi3))
484 #define L_udivmoddi4
485 #endif
486
487 #ifdef L_clz
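/* __clz_tab[i] is the number of significant bits in i, i.e.
   floor(log2(i)) + 1 for i > 0 and 0 for i == 0.  The
   count_leading_zeros fallback in longlong.h consults it one byte of
   the operand at a time.  */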
488 const UQItype __clz_tab[] =
489 {
490 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
491 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
492 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
493 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
494 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
495 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
496 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
497 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
498 };
499 #endif
500
501 #ifdef L_udivmoddi4
502
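/* __udivmoddi4 computes the quotient of the double-word values N and
   D and, when RP is non-null, also stores the remainder through it.
   The short comments such as "qq = NN / 0d" sketch the case being
   handled: each letter stands for one word of the quotient, dividend
   and divisor respectively, and "0" marks a word known to be zero, so
   "qq = NN / 0d" is a two-word quotient from a two-word dividend and
   a single-word divisor.  */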
503 #if (defined (L_udivdi3) || defined (L_divdi3) || \
504 defined (L_umoddi3) || defined (L_moddi3))
505 static inline
506 #endif
507 UDWtype
508 __udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
509 {
510 DWunion ww;
511 DWunion nn, dd;
512 DWunion rr;
513 UWtype d0, d1, n0, n1, n2;
514 UWtype q0, q1;
515 UWtype b, bm;
516
517 nn.ll = n;
518 dd.ll = d;
519
520 d0 = dd.s.low;
521 d1 = dd.s.high;
522 n0 = nn.s.low;
523 n1 = nn.s.high;
524
525 #if !UDIV_NEEDS_NORMALIZATION
526 if (d1 == 0)
527 {
528 if (d0 > n1)
529 {
530 /* 0q = nn / 0D */
531
532 udiv_qrnnd (q0, n0, n1, n0, d0);
533 q1 = 0;
534
535 /* Remainder in n0. */
536 }
537 else
538 {
539 /* qq = NN / 0d */
540
541 if (d0 == 0)
542 d0 = 1 / d0; /* Divide intentionally by zero. */
543
544 udiv_qrnnd (q1, n1, 0, n1, d0);
545 udiv_qrnnd (q0, n0, n1, n0, d0);
546
547 /* Remainder in n0. */
548 }
549
550 if (rp != 0)
551 {
552 rr.s.low = n0;
553 rr.s.high = 0;
554 *rp = rr.ll;
555 }
556 }
557
558 #else /* UDIV_NEEDS_NORMALIZATION */
559
560 if (d1 == 0)
561 {
562 if (d0 > n1)
563 {
564 /* 0q = nn / 0D */
565
566 count_leading_zeros (bm, d0);
567
568 if (bm != 0)
569 {
570 /* Normalize, i.e. make the most significant bit of the
571 denominator set. */
572
573 d0 = d0 << bm;
574 n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
575 n0 = n0 << bm;
576 }
577
578 udiv_qrnnd (q0, n0, n1, n0, d0);
579 q1 = 0;
580
581 /* Remainder in n0 >> bm. */
582 }
583 else
584 {
585 /* qq = NN / 0d */
586
587 if (d0 == 0)
588 d0 = 1 / d0; /* Divide intentionally by zero. */
589
590 count_leading_zeros (bm, d0);
591
592 if (bm == 0)
593 {
594 /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
595 conclude (the most significant bit of n1 is set) /\ (the
596 leading quotient digit q1 = 1).
597
598 This special case is necessary, not an optimization.
599 (Shift counts of W_TYPE_SIZE are undefined.) */
600
601 n1 -= d0;
602 q1 = 1;
603 }
604 else
605 {
606 /* Normalize. */
607
608 b = W_TYPE_SIZE - bm;
609
610 d0 = d0 << bm;
611 n2 = n1 >> b;
612 n1 = (n1 << bm) | (n0 >> b);
613 n0 = n0 << bm;
614
615 udiv_qrnnd (q1, n1, n2, n1, d0);
616 }
617
618 /* n1 != d0... */
619
620 udiv_qrnnd (q0, n0, n1, n0, d0);
621
622 /* Remainder in n0 >> bm. */
623 }
624
625 if (rp != 0)
626 {
627 rr.s.low = n0 >> bm;
628 rr.s.high = 0;
629 *rp = rr.ll;
630 }
631 }
632 #endif /* UDIV_NEEDS_NORMALIZATION */
633
634 else
635 {
636 if (d1 > n1)
637 {
638 /* 00 = nn / DD */
639
640 q0 = 0;
641 q1 = 0;
642
643 /* Remainder in n1n0. */
644 if (rp != 0)
645 {
646 rr.s.low = n0;
647 rr.s.high = n1;
648 *rp = rr.ll;
649 }
650 }
651 else
652 {
653 /* 0q = NN / dd */
654
655 count_leading_zeros (bm, d1);
656 if (bm == 0)
657 {
658 /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
659 conclude (the most significant bit of n1 is set) /\ (the
660 quotient digit q0 = 0 or 1).
661
662 This special case is necessary, not an optimization. */
663
664 /* The condition on the next line takes advantage of the fact that
665 n1 >= d1 (true due to program flow). */
666 if (n1 > d1 || n0 >= d0)
667 {
668 q0 = 1;
669 sub_ddmmss (n1, n0, n1, n0, d1, d0);
670 }
671 else
672 q0 = 0;
673
674 q1 = 0;
675
676 if (rp != 0)
677 {
678 rr.s.low = n0;
679 rr.s.high = n1;
680 *rp = rr.ll;
681 }
682 }
683 else
684 {
685 UWtype m1, m0;
686 /* Normalize. */
687
688 b = W_TYPE_SIZE - bm;
689
690 d1 = (d1 << bm) | (d0 >> b);
691 d0 = d0 << bm;
692 n2 = n1 >> b;
693 n1 = (n1 << bm) | (n0 >> b);
694 n0 = n0 << bm;
695
696 udiv_qrnnd (q0, n1, n2, n1, d1);
697 umul_ppmm (m1, m0, q0, d0);
698
699 if (m1 > n1 || (m1 == n1 && m0 > n0))
700 {
701 q0--;
702 sub_ddmmss (m1, m0, m1, m0, d1, d0);
703 }
704
705 q1 = 0;
706
707 /* Remainder in (n1n0 - m1m0) >> bm. */
708 if (rp != 0)
709 {
710 sub_ddmmss (n1, n0, n1, n0, m1, m0);
711 rr.s.low = (n1 << b) | (n0 >> bm);
712 rr.s.high = n1 >> bm;
713 *rp = rr.ll;
714 }
715 }
716 }
717 }
718
719 ww.s.low = q0;
720 ww.s.high = q1;
721 return ww.ll;
722 }
723 #endif
724
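/* The signed double-word divide and modulo routines below reduce the
   problem to __udivmoddi4 on the absolute values and then fix up the
   sign: the quotient is negated when exactly one operand was
   negative, and the remainder always takes the sign of the dividend,
   matching C's truncating division.  */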
725 #ifdef L_divdi3
726 DWtype
727 __divdi3 (DWtype u, DWtype v)
728 {
729 word_type c = 0;
730 DWunion uu, vv;
731 DWtype w;
732
733 uu.ll = u;
734 vv.ll = v;
735
736 if (uu.s.high < 0)
737 c = ~c,
738 uu.ll = __negdi2 (uu.ll);
739 if (vv.s.high < 0)
740 c = ~c,
741 vv.ll = __negdi2 (vv.ll);
742
743 w = __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) 0);
744 if (c)
745 w = __negdi2 (w);
746
747 return w;
748 }
749 #endif
750
751 #ifdef L_moddi3
752 DWtype
753 __moddi3 (DWtype u, DWtype v)
754 {
755 word_type c = 0;
756 DWunion uu, vv;
757 DWtype w;
758
759 uu.ll = u;
760 vv.ll = v;
761
762 if (uu.s.high < 0)
763 c = ~c,
764 uu.ll = __negdi2 (uu.ll);
765 if (vv.s.high < 0)
766 vv.ll = __negdi2 (vv.ll);
767
768 (void) __udivmoddi4 (uu.ll, vv.ll, &w);
769 if (c)
770 w = __negdi2 (w);
771
772 return w;
773 }
774 #endif
775
776 #ifdef L_umoddi3
777 UDWtype
778 __umoddi3 (UDWtype u, UDWtype v)
779 {
780 UDWtype w;
781
782 (void) __udivmoddi4 (u, v, &w);
783
784 return w;
785 }
786 #endif
787
788 #ifdef L_udivdi3
789 UDWtype
790 __udivdi3 (UDWtype n, UDWtype d)
791 {
792 return __udivmoddi4 (n, d, (UDWtype *) 0);
793 }
794 #endif
795 \f
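/* The double-word comparison helpers return 0, 1 or 2 for "less
   than", "equal" and "greater than": the usual three-way result
   biased by one so that it is never negative.  Callers compare the
   returned value against 1.  */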
796 #ifdef L_cmpdi2
797 word_type
798 __cmpdi2 (DWtype a, DWtype b)
799 {
800 DWunion au, bu;
801
802 au.ll = a, bu.ll = b;
803
804 if (au.s.high < bu.s.high)
805 return 0;
806 else if (au.s.high > bu.s.high)
807 return 2;
808 if ((UWtype) au.s.low < (UWtype) bu.s.low)
809 return 0;
810 else if ((UWtype) au.s.low > (UWtype) bu.s.low)
811 return 2;
812 return 1;
813 }
814 #endif
815
816 #ifdef L_ucmpdi2
817 word_type
818 __ucmpdi2 (DWtype a, DWtype b)
819 {
820 DWunion au, bu;
821
822 au.ll = a, bu.ll = b;
823
824 if ((UWtype) au.s.high < (UWtype) bu.s.high)
825 return 0;
826 else if ((UWtype) au.s.high > (UWtype) bu.s.high)
827 return 2;
828 if ((UWtype) au.s.low < (UWtype) bu.s.low)
829 return 0;
830 else if ((UWtype) au.s.low > (UWtype) bu.s.low)
831 return 2;
832 return 1;
833 }
834 #endif
835 \f
836 #if defined(L_fixunstfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
837 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
838 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
839
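/* The __fixuns*DI conversions below all use the same scheme: divide
   by 2^WORD_SIZE to get the high result word as a flonum, convert
   that to an integer and shift it into place, subtract it back out,
   and convert what remains to form the low word.  The final fix-up
   handles the case where the remaining low part comes out slightly
   negative.  */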
840 DWtype
841 __fixunstfDI (TFtype a)
842 {
843 TFtype b;
844 UDWtype v;
845
846 if (a < 0)
847 return 0;
848
849 /* Compute high word of result, as a flonum. */
850 b = (a / HIGH_WORD_COEFF);
851 /* Convert that to fixed (but not to DWtype!),
852 and shift it into the high word. */
853 v = (UWtype) b;
854 v <<= WORD_SIZE;
855 /* Remove high part from the TFtype, leaving the low part as flonum. */
856 a -= (TFtype)v;
857 /* Convert that to fixed (but not to DWtype!) and add it in.
858 Sometimes A comes out negative. This is significant, since
859 A has more bits than a long int does. */
860 if (a < 0)
861 v -= (UWtype) (- a);
862 else
863 v += (UWtype) a;
864 return v;
865 }
866 #endif
867
868 #if defined(L_fixtfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
869 DWtype
870 __fixtfdi (TFtype a)
871 {
872 if (a < 0)
873 return - __fixunstfDI (-a);
874 return __fixunstfDI (a);
875 }
876 #endif
877
878 #if defined(L_fixunsxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
879 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
880 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
881
882 DWtype
883 __fixunsxfDI (XFtype a)
884 {
885 XFtype b;
886 UDWtype v;
887
888 if (a < 0)
889 return 0;
890
891 /* Compute high word of result, as a flonum. */
892 b = (a / HIGH_WORD_COEFF);
893 /* Convert that to fixed (but not to DWtype!),
894 and shift it into the high word. */
895 v = (UWtype) b;
896 v <<= WORD_SIZE;
897 /* Remove high part from the XFtype, leaving the low part as flonum. */
898 a -= (XFtype)v;
899 /* Convert that to fixed (but not to DWtype!) and add it in.
900 Sometimes A comes out negative. This is significant, since
901 A has more bits than a long int does. */
902 if (a < 0)
903 v -= (UWtype) (- a);
904 else
905 v += (UWtype) a;
906 return v;
907 }
908 #endif
909
910 #if defined(L_fixxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
911 DWtype
912 __fixxfdi (XFtype a)
913 {
914 if (a < 0)
915 return - __fixunsxfDI (-a);
916 return __fixunsxfDI (a);
917 }
918 #endif
919
920 #ifdef L_fixunsdfdi
921 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
922 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
923
924 DWtype
925 __fixunsdfDI (DFtype a)
926 {
927 DFtype b;
928 UDWtype v;
929
930 if (a < 0)
931 return 0;
932
933 /* Compute high word of result, as a flonum. */
934 b = (a / HIGH_WORD_COEFF);
935 /* Convert that to fixed (but not to DWtype!),
936 and shift it into the high word. */
937 v = (UWtype) b;
938 v <<= WORD_SIZE;
939 /* Remove high part from the DFtype, leaving the low part as flonum. */
940 a -= (DFtype)v;
941 /* Convert that to fixed (but not to DWtype!) and add it in.
942 Sometimes A comes out negative. This is significant, since
943 A has more bits than a long int does. */
944 if (a < 0)
945 v -= (UWtype) (- a);
946 else
947 v += (UWtype) a;
948 return v;
949 }
950 #endif
951
952 #ifdef L_fixdfdi
953 DWtype
954 __fixdfdi (DFtype a)
955 {
956 if (a < 0)
957 return - __fixunsdfDI (-a);
958 return __fixunsdfDI (a);
959 }
960 #endif
961
962 #ifdef L_fixunssfdi
963 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
964 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
965
966 DWtype
967 __fixunssfDI (SFtype original_a)
968 {
969 /* Convert the SFtype to a DFtype, because that is surely not going
970 to lose any bits. Some day someone else can write a faster version
971 that avoids converting to DFtype, and verify it really works right. */
972 DFtype a = original_a;
973 DFtype b;
974 UDWtype v;
975
976 if (a < 0)
977 return 0;
978
979 /* Compute high word of result, as a flonum. */
980 b = (a / HIGH_WORD_COEFF);
981 /* Convert that to fixed (but not to DWtype!),
982 and shift it into the high word. */
983 v = (UWtype) b;
984 v <<= WORD_SIZE;
985 /* Remove high part from the DFtype, leaving the low part as flonum. */
986 a -= (DFtype) v;
987 /* Convert that to fixed (but not to DWtype!) and add it in.
988 Sometimes A comes out negative. This is significant, since
989 A has more bits than a long int does. */
990 if (a < 0)
991 v -= (UWtype) (- a);
992 else
993 v += (UWtype) a;
994 return v;
995 }
996 #endif
997
998 #ifdef L_fixsfdi
999 DWtype
1000 __fixsfdi (SFtype a)
1001 {
1002 if (a < 0)
1003 return - __fixunssfDI (-a);
1004 return __fixunssfDI (a);
1005 }
1006 #endif
1007
1008 #if defined(L_floatdixf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
1009 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1010 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1011 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1012
1013 XFtype
1014 __floatdixf (DWtype u)
1015 {
1016 XFtype d;
1017
1018 d = (Wtype) (u >> WORD_SIZE);
1019 d *= HIGH_HALFWORD_COEFF;
1020 d *= HIGH_HALFWORD_COEFF;
1021 d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
1022
1023 return d;
1024 }
1025 #endif
1026
1027 #if defined(L_floatditf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
1028 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1029 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1030 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1031
1032 TFtype
1033 __floatditf (DWtype u)
1034 {
1035 TFtype d;
1036
1037 d = (Wtype) (u >> WORD_SIZE);
1038 d *= HIGH_HALFWORD_COEFF;
1039 d *= HIGH_HALFWORD_COEFF;
1040 d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
1041
1042 return d;
1043 }
1044 #endif
1045
1046 #ifdef L_floatdidf
1047 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1048 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1049 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1050
1051 DFtype
1052 __floatdidf (DWtype u)
1053 {
1054 DFtype d;
1055
1056 d = (Wtype) (u >> WORD_SIZE);
1057 d *= HIGH_HALFWORD_COEFF;
1058 d *= HIGH_HALFWORD_COEFF;
1059 d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
1060
1061 return d;
1062 }
1063 #endif
1064
1065 #ifdef L_floatdisf
1066 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1067 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1068 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1069 #define DI_SIZE (sizeof (DWtype) * BITS_PER_UNIT)
1070
1071 /* Define codes for all the float formats that we know of. Note
1072 that this is copied from real.h. */
1073
1074 #define UNKNOWN_FLOAT_FORMAT 0
1075 #define IEEE_FLOAT_FORMAT 1
1076 #define VAX_FLOAT_FORMAT 2
1077 #define IBM_FLOAT_FORMAT 3
1078
1079 /* Default to IEEE float if not specified. Nearly all machines use it. */
1080 #ifndef HOST_FLOAT_FORMAT
1081 #define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
1082 #endif
1083
1084 #if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1085 #define DF_SIZE 53
1086 #define SF_SIZE 24
1087 #endif
1088
1089 #if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
1090 #define DF_SIZE 56
1091 #define SF_SIZE 24
1092 #endif
1093
1094 #if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
1095 #define DF_SIZE 56
1096 #define SF_SIZE 24
1097 #endif
1098
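/* DF_SIZE and SF_SIZE are the significand widths in bits (including
   the hidden leading bit for IEEE) of the host double and float
   formats.  They determine how many low-order bits of a DWtype value
   can be lost when it is first converted to DFtype, which is what the
   double-rounding guard in __floatdisf below needs to know.  */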
1099 SFtype
1100 __floatdisf (DWtype u)
1101 {
1102 /* Do the calculation in DFmode
1103 so that we don't lose any of the precision of the high word
1104 while multiplying it. */
1105 DFtype f;
1106
1107 /* Protect against double-rounding error.
1108 Represent any low-order bits, that might be truncated in DFmode,
1109 by a bit that won't be lost. The bit can go in anywhere below the
1110 rounding position of the SFmode. A fixed mask and bit position
1111 handles all usual configurations. It doesn't handle the case
1112 of 128-bit DImode, however. */
1113 if (DF_SIZE < DI_SIZE
1114 && DF_SIZE > (DI_SIZE - DF_SIZE + SF_SIZE))
1115 {
1116 #define REP_BIT ((UDWtype) 1 << (DI_SIZE - DF_SIZE))
1117 if (! (- ((DWtype) 1 << DF_SIZE) < u
1118 && u < ((DWtype) 1 << DF_SIZE)))
1119 {
1120 if ((UDWtype) u & (REP_BIT - 1))
1121 u |= REP_BIT;
1122 }
1123 }
1124 f = (Wtype) (u >> WORD_SIZE);
1125 f *= HIGH_HALFWORD_COEFF;
1126 f *= HIGH_HALFWORD_COEFF;
1127 f += (UWtype) (u & (HIGH_WORD_COEFF - 1));
1128
1129 return (SFtype) f;
1130 }
1131 #endif
1132
1133 #if defined(L_fixunsxfsi) && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
1134 /* Reenable the normal types, in case limits.h needs them. */
1135 #undef char
1136 #undef short
1137 #undef int
1138 #undef long
1139 #undef unsigned
1140 #undef float
1141 #undef double
1142 #undef MIN
1143 #undef MAX
1144 #include <limits.h>
1145
1146 UWtype
1147 __fixunsxfSI (XFtype a)
1148 {
1149 if (a >= - (DFtype) LONG_MIN)
1150 return (Wtype) (a + LONG_MIN) - LONG_MIN;
1151 return (Wtype) a;
1152 }
1153 #endif
1154
1155 #ifdef L_fixunsdfsi
1156 /* Reenable the normal types, in case limits.h needs them. */
1157 #undef char
1158 #undef short
1159 #undef int
1160 #undef long
1161 #undef unsigned
1162 #undef float
1163 #undef double
1164 #undef MIN
1165 #undef MAX
1166 #include <limits.h>
1167
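/* Values in [2^(W-1), 2^W) are above the largest signed Wtype, so a
   direct signed conversion would misbehave.  Biasing such a value
   down by 2^(W-1) (adding LONG_MIN), converting, and then undoing the
   bias keeps every intermediate result in signed range.  Like the
   rest of this routine, this assumes `long' has the same width as
   Wtype.  */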
1168 UWtype
1169 __fixunsdfSI (DFtype a)
1170 {
1171 if (a >= - (DFtype) LONG_MIN)
1172 return (Wtype) (a + LONG_MIN) - LONG_MIN;
1173 return (Wtype) a;
1174 }
1175 #endif
1176
1177 #ifdef L_fixunssfsi
1178 /* Reenable the normal types, in case limits.h needs them. */
1179 #undef char
1180 #undef short
1181 #undef int
1182 #undef long
1183 #undef unsigned
1184 #undef float
1185 #undef double
1186 #undef MIN
1187 #undef MAX
1188 #include <limits.h>
1189
1190 UWtype
1191 __fixunssfSI (SFtype a)
1192 {
1193 if (a >= - (SFtype) LONG_MIN)
1194 return (Wtype) (a + LONG_MIN) - LONG_MIN;
1195 return (Wtype) a;
1196 }
1197 #endif
1198 \f
1199 /* From here on down, the routines use normal data types. */
1200
1201 #define SItype bogus_type
1202 #define USItype bogus_type
1203 #define DItype bogus_type
1204 #define UDItype bogus_type
1205 #define SFtype bogus_type
1206 #define DFtype bogus_type
1207 #undef Wtype
1208 #undef UWtype
1209 #undef HWtype
1210 #undef UHWtype
1211 #undef DWtype
1212 #undef UDWtype
1213
1214 #undef char
1215 #undef short
1216 #undef int
1217 #undef long
1218 #undef unsigned
1219 #undef float
1220 #undef double
1221 \f
1222 #ifdef L__gcc_bcmp
1223
1224 /* Like bcmp except the sign is meaningful.
1225 Result is negative if S1 is less than S2,
1226 positive if S1 is greater, 0 if S1 and S2 are equal. */
1227
1228 int
1229 __gcc_bcmp (const unsigned char *s1, const unsigned char *s2, size_t size)
1230 {
1231 while (size > 0)
1232 {
1233 unsigned char c1 = *s1++, c2 = *s2++;
1234 if (c1 != c2)
1235 return c1 - c2;
1236 size--;
1237 }
1238 return 0;
1239 }
1240
1241 #endif
1242 \f\f
1243 #ifdef L__dummy
1244 void
1245 __dummy (void) {}
1246 #endif
1247
1248 #ifdef L_varargs
1249 #ifdef __i860__
1250 #if defined(__svr4__) || defined(__alliant__)
1251 asm (" .text");
1252 asm (" .align 4");
1253
1254 /* The Alliant needs the added underscore. */
1255 asm (".globl __builtin_saveregs");
1256 asm ("__builtin_saveregs:");
1257 asm (".globl ___builtin_saveregs");
1258 asm ("___builtin_saveregs:");
1259
1260 asm (" andnot 0x0f,%sp,%sp"); /* round down to 16-byte boundary */
1261 asm (" adds -96,%sp,%sp"); /* allocate stack space for reg save
1262 area and also for a new va_list
1263 structure */
1264 /* Save all argument registers in the arg reg save area. The
1265 arg reg save area must have the following layout (according
1266 to the svr4 ABI):
1267
1268 struct {
1269 union {
1270 float freg[8];
1271 double dreg[4];
1272 } float_regs;
1273 long ireg[12];
1274 };
1275 */
1276
1277 asm (" fst.q %f8, 0(%sp)"); /* save floating regs (f8-f15) */
1278 asm (" fst.q %f12,16(%sp)");
1279
1280 asm (" st.l %r16,32(%sp)"); /* save integer regs (r16-r27) */
1281 asm (" st.l %r17,36(%sp)");
1282 asm (" st.l %r18,40(%sp)");
1283 asm (" st.l %r19,44(%sp)");
1284 asm (" st.l %r20,48(%sp)");
1285 asm (" st.l %r21,52(%sp)");
1286 asm (" st.l %r22,56(%sp)");
1287 asm (" st.l %r23,60(%sp)");
1288 asm (" st.l %r24,64(%sp)");
1289 asm (" st.l %r25,68(%sp)");
1290 asm (" st.l %r26,72(%sp)");
1291 asm (" st.l %r27,76(%sp)");
1292
1293 asm (" adds 80,%sp,%r16"); /* compute the address of the new
1294 va_list structure. Put it into
1295 r16 so that it will be returned
1296 to the caller. */
1297
1298 /* Initialize all fields of the new va_list structure. This
1299 structure looks like:
1300
1301 typedef struct {
1302 unsigned long ireg_used;
1303 unsigned long freg_used;
1304 long *reg_base;
1305 long *mem_ptr;
1306 } va_list;
1307 */
1308
1309 asm (" st.l %r0, 0(%r16)"); /* nfixed */
1310 asm (" st.l %r0, 4(%r16)"); /* nfloating */
1311 asm (" st.l %sp, 8(%r16)"); /* __va_ctl points to __va_struct. */
1312 asm (" bri %r1"); /* delayed return */
1313 asm (" st.l %r28,12(%r16)"); /* pointer to overflow args */
1314
1315 #else /* not __svr4__ */
1316 #if defined(__PARAGON__)
1317 /*
1318 * we'll use SVR4-ish varargs but need SVR3.2 assembler syntax,
1319 * and we stand a better chance of hooking into libraries
1320 * compiled by PGI. [andyp@ssd.intel.com]
1321 */
1322 asm (" .text");
1323 asm (" .align 4");
1324 asm (".globl __builtin_saveregs");
1325 asm ("__builtin_saveregs:");
1326 asm (".globl ___builtin_saveregs");
1327 asm ("___builtin_saveregs:");
1328
1329 asm (" andnot 0x0f,sp,sp"); /* round down to 16-byte boundary */
1330 asm (" adds -96,sp,sp"); /* allocate stack space for reg save
1331 area and also for a new va_list
1332 structure */
1333 /* Save all argument registers in the arg reg save area. The
1334 arg reg save area must have the following layout (according
1335 to the svr4 ABI):
1336
1337 struct {
1338 union {
1339 float freg[8];
1340 double dreg[4];
1341 } float_regs;
1342 long ireg[12];
1343 };
1344 */
1345
1346 asm (" fst.q f8, 0(sp)");
1347 asm (" fst.q f12,16(sp)");
1348 asm (" st.l r16,32(sp)");
1349 asm (" st.l r17,36(sp)");
1350 asm (" st.l r18,40(sp)");
1351 asm (" st.l r19,44(sp)");
1352 asm (" st.l r20,48(sp)");
1353 asm (" st.l r21,52(sp)");
1354 asm (" st.l r22,56(sp)");
1355 asm (" st.l r23,60(sp)");
1356 asm (" st.l r24,64(sp)");
1357 asm (" st.l r25,68(sp)");
1358 asm (" st.l r26,72(sp)");
1359 asm (" st.l r27,76(sp)");
1360
1361 asm (" adds 80,sp,r16"); /* compute the address of the new
1362 va_list structure. Put it into
1363 r16 so that it will be returned
1364 to the caller. */
1365
1366 /* Initialize all fields of the new va_list structure. This
1367 structure looks like:
1368
1369 typedef struct {
1370 unsigned long ireg_used;
1371 unsigned long freg_used;
1372 long *reg_base;
1373 long *mem_ptr;
1374 } va_list;
1375 */
1376
1377 asm (" st.l r0, 0(r16)"); /* nfixed */
1378 asm (" st.l r0, 4(r16)"); /* nfloating */
1379 asm (" st.l sp, 8(r16)"); /* __va_ctl points to __va_struct. */
1380 asm (" bri r1"); /* delayed return */
1381 asm (" st.l r28,12(r16)"); /* pointer to overflow args */
1382 #else /* not __PARAGON__ */
1383 asm (" .text");
1384 asm (" .align 4");
1385
1386 asm (".globl ___builtin_saveregs");
1387 asm ("___builtin_saveregs:");
1388 asm (" mov sp,r30");
1389 asm (" andnot 0x0f,sp,sp");
1390 asm (" adds -96,sp,sp"); /* allocate sufficient space on the stack */
1391
1392 /* Fill in the __va_struct. */
1393 asm (" st.l r16, 0(sp)"); /* save integer regs (r16-r27) */
1394 asm (" st.l r17, 4(sp)"); /* int fixed[12] */
1395 asm (" st.l r18, 8(sp)");
1396 asm (" st.l r19,12(sp)");
1397 asm (" st.l r20,16(sp)");
1398 asm (" st.l r21,20(sp)");
1399 asm (" st.l r22,24(sp)");
1400 asm (" st.l r23,28(sp)");
1401 asm (" st.l r24,32(sp)");
1402 asm (" st.l r25,36(sp)");
1403 asm (" st.l r26,40(sp)");
1404 asm (" st.l r27,44(sp)");
1405
1406 asm (" fst.q f8, 48(sp)"); /* save floating regs (f8-f15) */
1407 asm (" fst.q f12,64(sp)"); /* int floating[8] */
1408
1409 /* Fill in the __va_ctl. */
1410 asm (" st.l sp, 80(sp)"); /* __va_ctl points to __va_struct. */
1411 asm (" st.l r28,84(sp)"); /* pointer to more args */
1412 asm (" st.l r0, 88(sp)"); /* nfixed */
1413 asm (" st.l r0, 92(sp)"); /* nfloating */
1414
1415 asm (" adds 80,sp,r16"); /* return address of the __va_ctl. */
1416 asm (" bri r1");
1417 asm (" mov r30,sp");
1418 /* recover stack and pass address to start
1419 of data. */
1420 #endif /* not __PARAGON__ */
1421 #endif /* not __svr4__ */
1422 #else /* not __i860__ */
1423 #ifdef __sparc__
1424 asm (".global __builtin_saveregs");
1425 asm ("__builtin_saveregs:");
1426 asm (".global ___builtin_saveregs");
1427 asm ("___builtin_saveregs:");
1428 #ifdef NEED_PROC_COMMAND
1429 asm (".proc 020");
1430 #endif
1431 asm ("st %i0,[%fp+68]");
1432 asm ("st %i1,[%fp+72]");
1433 asm ("st %i2,[%fp+76]");
1434 asm ("st %i3,[%fp+80]");
1435 asm ("st %i4,[%fp+84]");
1436 asm ("retl");
1437 asm ("st %i5,[%fp+88]");
1438 #ifdef NEED_TYPE_COMMAND
1439 asm (".type __builtin_saveregs,#function");
1440 asm (".size __builtin_saveregs,.-__builtin_saveregs");
1441 #endif
1442 #else /* not __sparc__ */
1443 #if defined(__MIPSEL__) | defined(__R3000__) | defined(__R2000__) | defined(__mips__)
1444
1445 asm (" .text");
1446 #ifdef __mips16
1447 asm (" .set nomips16");
1448 #endif
1449 asm (" .ent __builtin_saveregs");
1450 asm (" .globl __builtin_saveregs");
1451 asm ("__builtin_saveregs:");
1452 asm (" sw $4,0($30)");
1453 asm (" sw $5,4($30)");
1454 asm (" sw $6,8($30)");
1455 asm (" sw $7,12($30)");
1456 asm (" j $31");
1457 asm (" .end __builtin_saveregs");
1458 #else /* not __mips__, etc. */
1459
1460 void * ATTRIBUTE_NORETURN
1461 __builtin_saveregs ()
1462 {
1463 abort ();
1464 }
1465
1466 #endif /* not __mips__ */
1467 #endif /* not __sparc__ */
1468 #endif /* not __i860__ */
1469 #endif
1470 \f
1471 #ifdef L_eprintf
1472 #ifndef inhibit_libc
1473
1474 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1475 #include <stdio.h>
1476 /* This is used by the `assert' macro. */
1477 void
1478 __eprintf (const char *string, const char *expression,
1479 unsigned int line, const char *filename)
1480 {
1481 fprintf (stderr, string, expression, line, filename);
1482 fflush (stderr);
1483 abort ();
1484 }
1485
1486 #endif
1487 #endif
1488
1489 #ifdef L_bb
1490
1491 /* Structure emitted by -a */
1492 struct bb
1493 {
1494 long zero_word;
1495 const char *filename;
1496 long *counts;
1497 long ncounts;
1498 struct bb *next;
1499 const unsigned long *addresses;
1500
1501 /* Older GCC's did not emit these fields. */
1502 long nwords;
1503 const char **functions;
1504 const long *line_nums;
1505 const char **filenames;
1506 char *flags;
1507 };
1508
1509 #ifdef BLOCK_PROFILER_CODE
1510 BLOCK_PROFILER_CODE
1511 #else
1512 #ifndef inhibit_libc
1513
1514 /* Simple minded basic block profiling output dumper for
1515 systems that don't provide tcov support. At present,
1516 it requires atexit and stdio. */
1517
1518 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1519 #include <stdio.h>
1520 char *ctime PARAMS ((const time_t *));
1521
1522 #include "gbl-ctors.h"
1523 #include "gcov-io.h"
1524 #include <string.h>
1525 #ifdef TARGET_HAS_F_SETLKW
1526 #include <fcntl.h>
1527 #include <errno.h>
1528 #endif
1529
1530 static struct bb *bb_head;
1531
1532 static int num_digits (long value, int base) __attribute__ ((const));
1533
1534 /* Return the number of digits needed to print a value */
1535 /* __inline__ */ static int num_digits (long value, int base)
1536 {
1537 int minus = (value < 0 && base != 16);
1538 unsigned long v = (minus) ? -value : value;
1539 int ret = minus;
1540
1541 do
1542 {
1543 v /= base;
1544 ret++;
1545 }
1546 while (v);
1547
1548 return ret;
1549 }
1550
1551 void
1552 __bb_exit_func (void)
1553 {
1554 FILE *da_file, *file;
1555 long time_value;
1556 int i;
1557
1558 if (bb_head == 0)
1559 return;
1560
1561 i = strlen (bb_head->filename) - 3;
1562
1563 if (!strcmp (bb_head->filename+i, ".da"))
1564 {
1565 /* Must be -fprofile-arcs not -a.
1566 Dump data in a form that gcov expects. */
1567
1568 struct bb *ptr;
1569
1570 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1571 {
1572 int firstchar;
1573
1574 /* Make sure the output file exists -
1575 but don't clobber existing data. */
1576 if ((da_file = fopen (ptr->filename, "a")) != 0)
1577 fclose (da_file);
1578
1579 /* Need to re-open in order to be able to write from the start. */
1580 da_file = fopen (ptr->filename, "r+b");
1581 /* Some old systems might not allow the 'b' mode modifier.
1582 Therefore, try to open without it. This can lead to a race
1583 condition so that when you delete and re-create the file, the
1584 file might be opened in text mode, but then, you shouldn't
1585 delete the file in the first place. */
1586 if (da_file == 0)
1587 da_file = fopen (ptr->filename, "r+");
1588 if (da_file == 0)
1589 {
1590 fprintf (stderr, "arc profiling: Can't open output file %s.\n",
1591 ptr->filename);
1592 continue;
1593 }
1594
1595 /* After a fork, another process might try to read and/or write
1596 the same file simultaneously. So if we can, lock the file to
1597 avoid race conditions. */
1598 #if defined (TARGET_HAS_F_SETLKW)
1599 {
1600 struct flock s_flock;
1601
1602 s_flock.l_type = F_WRLCK;
1603 s_flock.l_whence = SEEK_SET;
1604 s_flock.l_start = 0;
1605 s_flock.l_len = 1;
1606 s_flock.l_pid = getpid ();
1607
1608 while (fcntl (fileno (da_file), F_SETLKW, &s_flock)
1609 && errno == EINTR);
1610 }
1611 #endif
1612
1613 /* If the file is not empty, and the number of counts in it is the
1614 same, then merge them in. */
1615 firstchar = fgetc (da_file);
1616 if (firstchar == EOF)
1617 {
1618 if (ferror (da_file))
1619 {
1620 fprintf (stderr, "arc profiling: Can't read output file ");
1621 perror (ptr->filename);
1622 }
1623 }
1624 else
1625 {
1626 long n_counts = 0;
1627
1628 if (ungetc (firstchar, da_file) == EOF)
1629 rewind (da_file);
1630 if (__read_long (&n_counts, da_file, 8) != 0)
1631 {
1632 fprintf (stderr, "arc profiling: Can't read output file %s.\n",
1633 ptr->filename);
1634 continue;
1635 }
1636
1637 if (n_counts == ptr->ncounts)
1638 {
1639 int i;
1640
1641 for (i = 0; i < n_counts; i++)
1642 {
1643 long v = 0;
1644
1645 if (__read_long (&v, da_file, 8) != 0)
1646 {
1647 fprintf (stderr, "arc profiling: Can't read output file %s.\n",
1648 ptr->filename);
1649 break;
1650 }
1651 ptr->counts[i] += v;
1652 }
1653 }
1654
1655 }
1656
1657 rewind (da_file);
1658
1659 /* ??? Should first write a header to the file. Preferably, a 4 byte
1660 magic number, 4 bytes containing the time the program was
1661 compiled, 4 bytes containing the last modification time of the
1662 source file, and 4 bytes indicating the compiler options used.
1663
1664 That way we can easily verify that the proper source/executable/
1665 data file combination is being used from gcov. */
1666
1667 if (__write_long (ptr->ncounts, da_file, 8) != 0)
1668 {
1669
1670 fprintf (stderr, "arc profiling: Error writing output file %s.\n",
1671 ptr->filename);
1672 }
1673 else
1674 {
1675 int j;
1676 long *count_ptr = ptr->counts;
1677 int ret = 0;
1678 for (j = ptr->ncounts; j > 0; j--)
1679 {
1680 if (__write_long (*count_ptr, da_file, 8) != 0)
1681 {
1682 ret=1;
1683 break;
1684 }
1685 count_ptr++;
1686 }
1687 if (ret)
1688 fprintf (stderr, "arc profiling: Error writing output file %s.\n",
1689 ptr->filename);
1690 }
1691
1692 if (fclose (da_file) == EOF)
1693 fprintf (stderr, "arc profiling: Error closing output file %s.\n",
1694 ptr->filename);
1695 }
1696
1697 return;
1698 }
1699
1700 /* Must be basic block profiling. Emit a human readable output file. */
1701
1702 file = fopen ("bb.out", "a");
1703
1704 if (!file)
1705 perror ("bb.out");
1706
1707 else
1708 {
1709 struct bb *ptr;
1710
1711 /* This is somewhat type incorrect, but it avoids worrying about
1712 exactly where time.h is included from. It should be ok unless
1713 a void * differs from other pointer formats, or if sizeof (long)
1714 is < sizeof (time_t). It would be nice if we could assume the
1715 use of rational standards here. */
1716
1717 time ((void *) &time_value);
1718 fprintf (file, "Basic block profiling finished on %s\n", ctime ((void *) &time_value));
1719
1720 /* We check the length field explicitly in order to allow compatibility
1721 with older GCC's which did not provide it. */
1722
1723 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1724 {
1725 int i;
1726 int func_p = (ptr->nwords >= (long) sizeof (struct bb)
1727 && ptr->nwords <= 1000
1728 && ptr->functions);
1729 int line_p = (func_p && ptr->line_nums);
1730 int file_p = (func_p && ptr->filenames);
1731 int addr_p = (ptr->addresses != 0);
1732 long ncounts = ptr->ncounts;
1733 long cnt_max = 0;
1734 long line_max = 0;
1735 long addr_max = 0;
1736 int file_len = 0;
1737 int func_len = 0;
1738 int blk_len = num_digits (ncounts, 10);
1739 int cnt_len;
1740 int line_len;
1741 int addr_len;
1742
1743 fprintf (file, "File %s, %ld basic blocks \n\n",
1744 ptr->filename, ncounts);
1745
1746 /* Get max values for each field. */
1747 for (i = 0; i < ncounts; i++)
1748 {
1749 const char *p;
1750 int len;
1751
1752 if (cnt_max < ptr->counts[i])
1753 cnt_max = ptr->counts[i];
1754
1755 if (addr_p && (unsigned long) addr_max < ptr->addresses[i])
1756 addr_max = ptr->addresses[i];
1757
1758 if (line_p && line_max < ptr->line_nums[i])
1759 line_max = ptr->line_nums[i];
1760
1761 if (func_p)
1762 {
1763 p = (ptr->functions[i]) ? (ptr->functions[i]) : "<none>";
1764 len = strlen (p);
1765 if (func_len < len)
1766 func_len = len;
1767 }
1768
1769 if (file_p)
1770 {
1771 p = (ptr->filenames[i]) ? (ptr->filenames[i]) : "<none>";
1772 len = strlen (p);
1773 if (file_len < len)
1774 file_len = len;
1775 }
1776 }
1777
1778 addr_len = num_digits (addr_max, 16);
1779 cnt_len = num_digits (cnt_max, 10);
1780 line_len = num_digits (line_max, 10);
1781
1782 /* Now print out the basic block information. */
1783 for (i = 0; i < ncounts; i++)
1784 {
1785 fprintf (file,
1786 " Block #%*d: executed %*ld time(s)",
1787 blk_len, i+1,
1788 cnt_len, ptr->counts[i]);
1789
1790 if (addr_p)
1791 fprintf (file, " address= 0x%.*lx", addr_len,
1792 ptr->addresses[i]);
1793
1794 if (func_p)
1795 fprintf (file, " function= %-*s", func_len,
1796 (ptr->functions[i]) ? ptr->functions[i] : "<none>");
1797
1798 if (line_p)
1799 fprintf (file, " line= %*ld", line_len, ptr->line_nums[i]);
1800
1801 if (file_p)
1802 fprintf (file, " file= %s",
1803 (ptr->filenames[i]) ? ptr->filenames[i] : "<none>");
1804
1805 fprintf (file, "\n");
1806 }
1807
1808 fprintf (file, "\n");
1809 fflush (file);
1810 }
1811
1812 fprintf (file, "\n\n");
1813 fclose (file);
1814 }
1815 }
1816
1817 void
1818 __bb_init_func (struct bb *blocks)
1819 {
1820 /* User is supposed to check whether the first word is non-0,
1821 but just in case.... */
1822
1823 if (blocks->zero_word)
1824 return;
1825
1826 /* Initialize destructor. */
1827 if (!bb_head)
1828 atexit (__bb_exit_func);
1829
1830 /* Set up linked list. */
1831 blocks->zero_word = 1;
1832 blocks->next = bb_head;
1833 bb_head = blocks;
1834 }
1835
1836 /* Called before fork or exec - write out profile information gathered so
1837 far and reset it to zero. This avoids duplication or loss of the
1838 profile information gathered so far. */
1839 void
1840 __bb_fork_func (void)
1841 {
1842 struct bb *ptr;
1843
1844 __bb_exit_func ();
1845 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1846 {
1847 long i;
1848 for (i = ptr->ncounts - 1; i >= 0; i--)
1849 ptr->counts[i] = 0;
1850 }
1851 }
1852
1853 #ifndef MACHINE_STATE_SAVE
1854 #define MACHINE_STATE_SAVE(ID)
1855 #endif
1856 #ifndef MACHINE_STATE_RESTORE
1857 #define MACHINE_STATE_RESTORE(ID)
1858 #endif
1859
1860 /* Number of buckets in hashtable of basic block addresses. */
1861
1862 #define BB_BUCKETS 311
1863
1864 /* Maximum length of string in file bb.in. */
1865
1866 #define BBINBUFSIZE 500
1867
1868 struct bb_edge
1869 {
1870 struct bb_edge *next;
1871 unsigned long src_addr;
1872 unsigned long dst_addr;
1873 unsigned long count;
1874 };
1875
1876 enum bb_func_mode
1877 {
1878 TRACE_KEEP = 0, TRACE_ON = 1, TRACE_OFF = 2
1879 };
1880
1881 struct bb_func
1882 {
1883 struct bb_func *next;
1884 char *funcname;
1885 char *filename;
1886 enum bb_func_mode mode;
1887 };
1888
1889 /* This is the connection to the outside world.
1890 The BLOCK_PROFILER macro must set __bb.blocks
1891 and __bb.blockno. */
1892
1893 struct {
1894 unsigned long blockno;
1895 struct bb *blocks;
1896 } __bb;
1897
1898 /* Vars to store addrs of source and destination basic blocks
1899 of a jump. */
1900
1901 static unsigned long bb_src = 0;
1902 static unsigned long bb_dst = 0;
1903
1904 static FILE *bb_tracefile = (FILE *) 0;
1905 static struct bb_edge **bb_hashbuckets = (struct bb_edge **) 0;
1906 static struct bb_func *bb_func_head = (struct bb_func *) 0;
1907 static unsigned long bb_callcount = 0;
1908 static int bb_mode = 0;
1909
1910 static unsigned long *bb_stack = (unsigned long *) 0;
1911 static size_t bb_stacksize = 0;
1912
1913 static int reported = 0;
1914
1915 /* Trace modes:
1916 Always : Print execution frequencies of basic blocks
1917 to file bb.out.
1918 bb_mode & 1 != 0 : Dump trace of basic blocks to file bbtrace[.gz]
1919 bb_mode & 2 != 0 : Print jump frequencies to file bb.out.
1920 bb_mode & 4 != 0 : Cut call instructions from basic block flow.
1921 bb_mode & 8 != 0 : Insert return instructions in basic block flow.
1922 */
1923
1924 #ifdef HAVE_POPEN
1925
1926 /*#include <sys/types.h>*/
1927 #include <sys/stat.h>
1928 /*#include <malloc.h>*/
1929
1930 /* Commands executed by gopen. */
1931
1932 #define GOPENDECOMPRESS "gzip -cd "
1933 #define GOPENCOMPRESS "gzip -c >"
1934
1935 /* Like fopen but pipes through gzip. mode may only be "r" or "w".
1936 If it does not compile, simply replace gopen by fopen and delete
1937 '.gz' from any first parameter to gopen. */
1938
1939 static FILE *
1940 gopen (char *fn, char *mode)
1941 {
1942 int use_gzip;
1943 char *p;
1944
1945 if (mode[1])
1946 return (FILE *) 0;
1947
1948 if (mode[0] != 'r' && mode[0] != 'w')
1949 return (FILE *) 0;
1950
1951 p = fn + strlen (fn)-1;
1952 use_gzip = ((p[-1] == '.' && (p[0] == 'Z' || p[0] == 'z'))
1953 || (p[-2] == '.' && p[-1] == 'g' && p[0] == 'z'));
1954
1955 if (use_gzip)
1956 {
1957 if (mode[0]=='r')
1958 {
1959 FILE *f;
1960 char *s = (char *) malloc (sizeof (char) * strlen (fn)
1961 + sizeof (GOPENDECOMPRESS));
1962 strcpy (s, GOPENDECOMPRESS);
1963 strcpy (s + (sizeof (GOPENDECOMPRESS)-1), fn);
1964 f = popen (s, mode);
1965 free (s);
1966 return f;
1967 }
1968
1969 else
1970 {
1971 FILE *f;
1972 char *s = (char *) malloc (sizeof (char) * strlen (fn)
1973 + sizeof (GOPENCOMPRESS));
1974 strcpy (s, GOPENCOMPRESS);
1975 strcpy (s + (sizeof (GOPENCOMPRESS)-1), fn);
1976 if (!(f = popen (s, mode)))
1977 f = fopen (s, mode);
1978 free (s);
1979 return f;
1980 }
1981 }
1982
1983 else
1984 return fopen (fn, mode);
1985 }
1986
1987 static int
1988 gclose (FILE *f)
1989 {
1990 struct stat buf;
1991
1992 if (f != 0)
1993 {
1994 if (!fstat (fileno (f), &buf) && S_ISFIFO (buf.st_mode))
1995 return pclose (f);
1996
1997 return fclose (f);
1998 }
1999 return 0;
2000 }
2001
2002 #endif /* HAVE_POPEN */
2003
2004 /* Called once per program. */
2005
2006 static void
2007 __bb_exit_trace_func (void)
2008 {
2009 FILE *file = fopen ("bb.out", "a");
2010 struct bb_func *f;
2011 struct bb *b;
2012
2013 if (!file)
2014 perror ("bb.out");
2015
2016 if (bb_mode & 1)
2017 {
2018 if (!bb_tracefile)
2019 perror ("bbtrace");
2020 else
2021 #ifdef HAVE_POPEN
2022 gclose (bb_tracefile);
2023 #else
2024 fclose (bb_tracefile);
2025 #endif /* HAVE_POPEN */
2026 }
2027
2028 /* Check functions in `bb.in'. */
2029
2030 if (file)
2031 {
2032 long time_value;
2033 const struct bb_func *p;
2034 int printed_something = 0;
2035 struct bb *ptr;
2036 long blk;
2037
2038 /* This is somewhat type incorrect. */
2039 time ((void *) &time_value);
2040
2041 for (p = bb_func_head; p != (struct bb_func *) 0; p = p->next)
2042 {
2043 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
2044 {
2045 if (!ptr->filename || (p->filename != (char *) 0 && strcmp (p->filename, ptr->filename)))
2046 continue;
2047 for (blk = 0; blk < ptr->ncounts; blk++)
2048 {
2049 if (!strcmp (p->funcname, ptr->functions[blk]))
2050 goto found;
2051 }
2052 }
2053
2054 if (!printed_something)
2055 {
2056 fprintf (file, "Functions in `bb.in' not executed during basic block profiling on %s\n", ctime ((void *) &time_value));
2057 printed_something = 1;
2058 }
2059
2060 fprintf (file, "\tFunction %s", p->funcname);
2061 if (p->filename)
2062 fprintf (file, " of file %s", p->filename);
2063 fprintf (file, "\n" );
2064
2065 found: ;
2066 }
2067
2068 if (printed_something)
2069 fprintf (file, "\n");
2070
2071 }
2072
2073 if (bb_mode & 2)
2074 {
2075 if (!bb_hashbuckets)
2076 {
2077 if (!reported)
2078 {
2079 fprintf (stderr, "Profiler: out of memory\n");
2080 reported = 1;
2081 }
2082 return;
2083 }
2084
2085 else if (file)
2086 {
2087 long time_value;
2088 int i;
2089 unsigned long addr_max = 0;
2090 unsigned long cnt_max = 0;
2091 int cnt_len;
2092 int addr_len;
2093
2094 /* This is somewhat type incorrect, but it avoids worrying about
2095 exactly where time.h is included from. It should be ok unless
2096 a void * differs from other pointer formats, or if sizeof (long)
2097 is < sizeof (time_t). It would be nice if we could assume the
2098 use of rational standards here. */
2099
2100 time ((void *) &time_value);
2101 fprintf (file, "Basic block jump tracing");
2102
2103 switch (bb_mode & 12)
2104 {
2105 case 0:
2106 fprintf (file, " (with call)");
2107 break;
2108
2109 case 4:
2110 /* Print nothing. */
2111 break;
2112
2113 case 8:
2114 fprintf (file, " (with call & ret)");
2115 break;
2116
2117 case 12:
2118 fprintf (file, " (with ret)");
2119 break;
2120 }
2121
2122 fprintf (file, " finished on %s\n", ctime ((void *) &time_value));
2123
2124 for (i = 0; i < BB_BUCKETS; i++)
2125 {
2126 struct bb_edge *bucket = bb_hashbuckets[i];
2127 for ( ; bucket; bucket = bucket->next )
2128 {
2129 if (addr_max < bucket->src_addr)
2130 addr_max = bucket->src_addr;
2131 if (addr_max < bucket->dst_addr)
2132 addr_max = bucket->dst_addr;
2133 if (cnt_max < bucket->count)
2134 cnt_max = bucket->count;
2135 }
2136 }
2137 addr_len = num_digits (addr_max, 16);
2138 cnt_len = num_digits (cnt_max, 10);
2139
2140 for ( i = 0; i < BB_BUCKETS; i++)
2141 {
2142 struct bb_edge *bucket = bb_hashbuckets[i];
2143 for ( ; bucket; bucket = bucket->next )
2144 {
2145 fprintf (file,
2146 "Jump from block 0x%.*lx to block 0x%.*lx executed %*lu time(s)\n",
2147 addr_len, bucket->src_addr,
2148 addr_len, bucket->dst_addr,
2149 cnt_len, bucket->count);
2150 }
2151 }
2152
2153 fprintf (file, "\n");
2154
2155 }
2156 }
2157
2158 if (file)
2159 fclose (file);
2160
2161 /* Free allocated memory. */
2162
2163 f = bb_func_head;
2164 while (f)
2165 {
2166 struct bb_func *old = f;
2167
2168 f = f->next;
2169 if (old->funcname) free (old->funcname);
2170 if (old->filename) free (old->filename);
2171 free (old);
2172 }
2173
2174 if (bb_stack)
2175 free (bb_stack);
2176
2177 if (bb_hashbuckets)
2178 {
2179 int i;
2180
2181 for (i = 0; i < BB_BUCKETS; i++)
2182 {
2183 struct bb_edge *old, *bucket = bb_hashbuckets[i];
2184
2185 while (bucket)
2186 {
2187 old = bucket;
2188 bucket = bucket->next;
2189 free (old);
2190 }
2191 }
2192 free (bb_hashbuckets);
2193 }
2194
2195 for (b = bb_head; b; b = b->next)
2196 if (b->flags) free (b->flags);
2197 }
2198
2199 /* Called once per program. */
2200
2201 static void
2202 __bb_init_prg (void)
2203 {
2204 FILE *file;
2205 char buf[BBINBUFSIZE];
2206 const char *p;
2207 const char *pos;
2208 enum bb_func_mode m;
2209 int i;
2210
2211 /* Initialize destructor. */
2212 atexit (__bb_exit_func);
2213
2214 if (!(file = fopen ("bb.in", "r")))
2215 return;
2216
2217 while(fgets (buf, BBINBUFSIZE, file) != 0)
2218 {
2219 i = strlen (buf);
2220 if (i > 0 && buf[i - 1] == '\n')
2221 buf[i - 1] = '\0';
2222
2223 p = buf;
2224 if (*p == '-')
2225 {
2226 m = TRACE_OFF;
2227 p++;
2228 }
2229 else
2230 {
2231 m = TRACE_ON;
2232 }
2233 if (!strcmp (p, "__bb_trace__"))
2234 bb_mode |= 1;
2235 else if (!strcmp (p, "__bb_jumps__"))
2236 bb_mode |= 2;
2237 else if (!strcmp (p, "__bb_hidecall__"))
2238 bb_mode |= 4;
2239 else if (!strcmp (p, "__bb_showret__"))
2240 bb_mode |= 8;
2241 else
2242 {
2243 struct bb_func *f = (struct bb_func *) malloc (sizeof (struct bb_func));
2244 if (f)
2245 {
2246 unsigned long l;
2247 f->next = bb_func_head;
2248 if ((pos = strchr (p, ':')))
2249 {
2250 if (!(f->funcname = (char *) malloc (strlen (pos+1)+1)))
2251 continue;
2252 strcpy (f->funcname, pos+1);
2253 l = pos-p;
2254 if ((f->filename = (char *) malloc (l+1)))
2255 {
2256 strncpy (f->filename, p, l);
2257 f->filename[l] = '\0';
2258 }
2259 else
2260 f->filename = (char *) 0;
2261 }
2262 else
2263 {
2264 if (!(f->funcname = (char *) malloc (strlen (p)+1)))
2265 continue;
2266 strcpy (f->funcname, p);
2267 f->filename = (char *) 0;
2268 }
2269 f->mode = m;
2270 bb_func_head = f;
2271 }
2272 }
2273 }
2274 fclose (file);
2275
2276 #ifdef HAVE_POPEN
2277
2278 if (bb_mode & 1)
2279 bb_tracefile = gopen ("bbtrace.gz", "w");
2280
2281 #else
2282
2283 if (bb_mode & 1)
2284 bb_tracefile = fopen ("bbtrace", "w");
2285
2286 #endif /* HAVE_POPEN */
2287
2288 if (bb_mode & 2)
2289 {
2290 bb_hashbuckets = (struct bb_edge **)
2291 malloc (BB_BUCKETS * sizeof (struct bb_edge *));
2292 if (bb_hashbuckets)
2293 /* Use a loop here rather than calling bzero to avoid having to
2294 conditionalize its existence. */
2295 for (i = 0; i < BB_BUCKETS; i++)
2296 bb_hashbuckets[i] = 0;
2297 }
2298
2299 if (bb_mode & 12)
2300 {
2301 bb_stacksize = 10;
2302 bb_stack = (unsigned long *) malloc (bb_stacksize * sizeof (*bb_stack));
2303 }
2304
2305 /* Initialize destructor. */
2306 atexit (__bb_exit_trace_func);
2307 }
2308
2309 /* Called upon entering a basic block. */
2310
2311 void
2312 __bb_trace_func (void)
2313 {
2314 struct bb_edge *bucket;
2315
2316 MACHINE_STATE_SAVE("1")
2317
2318 if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))
2319 goto skip;
2320
2321 bb_dst = __bb.blocks->addresses[__bb.blockno];
2322 __bb.blocks->counts[__bb.blockno]++;
2323
2324 if (bb_tracefile)
2325 {
2326 fwrite (&bb_dst, sizeof (unsigned long), 1, bb_tracefile);
2327 }
2328
2329 if (bb_hashbuckets)
2330 {
2331 struct bb_edge **startbucket, **oldnext;
2332
2333 oldnext = startbucket
2334 = & bb_hashbuckets[ (((int) bb_src*8) ^ (int) bb_dst) % BB_BUCKETS ];
2335 bucket = *startbucket;
2336
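/* Walk the hash chain for this (src, dst) pair; on a hit, bump the
   edge counter and splice the edge to the front of its bucket so
   that frequently taken jumps stay cheap to find.  */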
2337 for (bucket = *startbucket; bucket;
2338 oldnext = &(bucket->next), bucket = *oldnext)
2339 {
2340 if (bucket->src_addr == bb_src
2341 && bucket->dst_addr == bb_dst)
2342 {
2343 bucket->count++;
2344 *oldnext = bucket->next;
2345 bucket->next = *startbucket;
2346 *startbucket = bucket;
2347 goto ret;
2348 }
2349 }
2350
2351 bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
2352
2353 if (!bucket)
2354 {
2355 if (!reported)
2356 {
2357 fprintf (stderr, "Profiler: out of memory\n");
2358 reported = 1;
2359 }
2360 }
2361
2362 else
2363 {
2364 bucket->src_addr = bb_src;
2365 bucket->dst_addr = bb_dst;
2366 bucket->next = *startbucket;
2367 *startbucket = bucket;
2368 bucket->count = 1;
2369 }
2370 }
2371
2372 ret:
2373 bb_src = bb_dst;
2374
2375 skip:
2376 ;
2377
2378 MACHINE_STATE_RESTORE("1")
2379
2380 }
2381
2382 /* Called when returning from a function and `__bb_showret__' is set. */
2383
2384 static void
2385 __bb_trace_func_ret (void)
2386 {
2387 struct bb_edge *bucket;
2388
2389 if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))
2390 goto skip;
2391
2392 if (bb_hashbuckets)
2393 {
2394 struct bb_edge **startbucket, **oldnext;
2395
2396 oldnext = startbucket
2397 = & bb_hashbuckets[ (((int) bb_dst * 8) ^ (int) bb_src) % BB_BUCKETS ];
2398 bucket = *startbucket;
2399
2400 for (bucket = *startbucket; bucket;
2401 oldnext = &(bucket->next), bucket = *oldnext)
2402 {
2403 if (bucket->src_addr == bb_dst
2404 && bucket->dst_addr == bb_src)
2405 {
2406 bucket->count++;
2407 *oldnext = bucket->next;
2408 bucket->next = *startbucket;
2409 *startbucket = bucket;
2410 goto ret;
2411 }
2412 }
2413
2414 bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
2415
2416 if (!bucket)
2417 {
2418 if (!reported)
2419 {
2420 fprintf (stderr, "Profiler: out of memory\n");
2421 reported = 1;
2422 }
2423 }
2424
2425 else
2426 {
2427 bucket->src_addr = bb_dst;
2428 bucket->dst_addr = bb_src;
2429 bucket->next = *startbucket;
2430 *startbucket = bucket;
2431 bucket->count = 1;
2432 }
2433 }
2434
2435 ret:
2436 bb_dst = bb_src;
2437
2438 skip:
2439 ;
2440
2441 }
2442
2443 /* Called upon entering the first function of a file. */
2444
2445 static void
2446 __bb_init_file (struct bb *blocks)
2447 {
2448
2449 const struct bb_func *p;
2450 long blk, ncounts = blocks->ncounts;
2451 const char **functions = blocks->functions;
2452
2453 /* Set up linked list. */
2454 blocks->zero_word = 1;
2455 blocks->next = bb_head;
2456 bb_head = blocks;
2457
2458 blocks->flags = 0;
2459 if (!bb_func_head
2460 || !(blocks->flags = (char *) malloc (sizeof (char) * blocks->ncounts)))
2461 return;
2462
2463 for (blk = 0; blk < ncounts; blk++)
2464 blocks->flags[blk] = 0;
2465
2466 for (blk = 0; blk < ncounts; blk++)
2467 {
2468 for (p = bb_func_head; p; p = p->next)
2469 {
2470 if (!strcmp (p->funcname, functions[blk])
2471 && (!p->filename || !strcmp (p->filename, blocks->filename)))
2472 {
2473 blocks->flags[blk] |= p->mode;
2474 }
2475 }
2476 }
2477
2478 }
2479
2480 /* Called when exiting from a function. */
2481
2482 void
2483 __bb_trace_ret (void)
2484 {
2485
2486 MACHINE_STATE_SAVE("2")
2487
2488 if (bb_callcount)
2489 {
2490 if ((bb_mode & 12) && bb_stacksize > bb_callcount)
2491 {
2492 bb_src = bb_stack[bb_callcount];
2493 if (bb_mode & 8)
2494 __bb_trace_func_ret ();
2495 }
2496
2497 bb_callcount -= 1;
2498 }
2499
2500 MACHINE_STATE_RESTORE("2")
2501
2502 }
2503
2504 /* Called when entering a function. */
2505
2506 void
2507 __bb_init_trace_func (struct bb *blocks, unsigned long blockno)
2508 {
2509 static int trace_init = 0;
2510
2511 MACHINE_STATE_SAVE("3")
2512
2513 if (!blocks->zero_word)
2514 {
2515 if (!trace_init)
2516 {
2517 trace_init = 1;
2518 __bb_init_prg ();
2519 }
2520 __bb_init_file (blocks);
2521 }
2522
2523 if (bb_callcount)
2524 {
2525
2526 bb_callcount += 1;
2527
2528 if (bb_mode & 12)
2529 {
2530 if (bb_callcount >= bb_stacksize)
2531 {
2532 size_t newsize = bb_callcount + 100;
2533
2534 bb_stack = (unsigned long *) realloc (bb_stack, newsize * sizeof (*bb_stack));
2535 if (! bb_stack)
2536 {
2537 if (!reported)
2538 {
2539 fprintf (stderr, "Profiler: out of memory\n");
2540 reported = 1;
2541 }
2542 bb_stacksize = 0;
2543 goto stack_overflow;
2544 }
2545 bb_stacksize = newsize;
2546 }
2547 bb_stack[bb_callcount] = bb_src;
2548
2549 if (bb_mode & 4)
2550 bb_src = 0;
2551
2552 }
2553
2554 stack_overflow:;
2555
2556 }
2557
2558 else if (blocks->flags && (blocks->flags[blockno] & TRACE_ON))
2559 {
2560 bb_callcount = 1;
2561 bb_src = 0;
2562
2563 if (bb_stack)
2564 bb_stack[bb_callcount] = bb_src;
2565 }
2566
2567 MACHINE_STATE_RESTORE("3")
2568 }
2569
2570 #endif /* not inhibit_libc */
2571 #endif /* not BLOCK_PROFILER_CODE */
2572 #endif /* L_bb */
2573 \f
2574 #ifdef L_clear_cache
2575 /* Clear part of an instruction cache. */
2576
2577 #define INSN_CACHE_PLANE_SIZE (INSN_CACHE_SIZE / INSN_CACHE_DEPTH)
2578
2579 void
2580 __clear_cache (char *beg __attribute__((__unused__)),
2581 char *end __attribute__((__unused__)))
2582 {
2583 #ifdef CLEAR_INSN_CACHE
2584 CLEAR_INSN_CACHE (beg, end);
2585 #else
2586 #ifdef INSN_CACHE_SIZE
2587 static char array[INSN_CACHE_SIZE + INSN_CACHE_PLANE_SIZE + INSN_CACHE_LINE_WIDTH];
2588 static int initialized;
2589 int offset;
#if INSN_CACHE_DEPTH > 1
 int plane;
#endif
2590 void *start_addr;
2591 void *end_addr;
2592 typedef void (*function_ptr) (void);
2593
2594 #if (INSN_CACHE_SIZE / INSN_CACHE_LINE_WIDTH) < 16
2595 /* It's cheaper to clear the whole cache.
2596 Put in a series of jump instructions so that calling the beginning
2597 of the cache will clear the whole thing. */
2598
2599 if (! initialized)
2600 {
2601 int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2602 & -INSN_CACHE_LINE_WIDTH);
2603 int end_ptr = ptr + INSN_CACHE_SIZE;
2604
2605 while (ptr < end_ptr)
2606 {
2607 *(INSTRUCTION_TYPE *)ptr
2608 = JUMP_AHEAD_INSTRUCTION + INSN_CACHE_LINE_WIDTH;
2609 ptr += INSN_CACHE_LINE_WIDTH;
2610 }
2611 *(INSTRUCTION_TYPE *) (ptr - INSN_CACHE_LINE_WIDTH) = RETURN_INSTRUCTION;
2612
2613 initialized = 1;
2614 }
2615
2616 /* Call the beginning of the sequence. */
2617 (((function_ptr) (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2618 & -INSN_CACHE_LINE_WIDTH))
2619 ());
2620
2621 #else /* Cache is large. */
2622
2623 if (! initialized)
2624 {
2625 int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2626 & -INSN_CACHE_LINE_WIDTH);
2627
2628 while (ptr < (int) array + sizeof array)
2629 {
2630 *(INSTRUCTION_TYPE *)ptr = RETURN_INSTRUCTION;
2631 ptr += INSN_CACHE_LINE_WIDTH;
2632 }
2633
2634 initialized = 1;
2635 }
2636
2637 /* Find the location in array that occupies the same cache line as BEG. */
2638
2639 offset = ((int) beg & -INSN_CACHE_LINE_WIDTH) & (INSN_CACHE_PLANE_SIZE - 1);
2640 start_addr = (((int) (array + INSN_CACHE_PLANE_SIZE - 1)
2641 & -INSN_CACHE_PLANE_SIZE)
2642 + offset);
2643
2644 /* Compute the cache alignment of the place to stop clearing. */
2645 #if 0 /* This is not needed for gcc's purposes. */
2646 /* If the block to clear is bigger than a cache plane,
2647 we clear the entire cache, and OFFSET is already correct. */
2648 if (end < beg + INSN_CACHE_PLANE_SIZE)
2649 #endif
2650 offset = (((int) (end + INSN_CACHE_LINE_WIDTH - 1)
2651 & -INSN_CACHE_LINE_WIDTH)
2652 & (INSN_CACHE_PLANE_SIZE - 1));
2653
2654 #if INSN_CACHE_DEPTH > 1
2655 end_addr = (void *) (((int) start_addr & -INSN_CACHE_PLANE_SIZE) + offset);
2656 if (end_addr <= start_addr)
2657 end_addr += INSN_CACHE_PLANE_SIZE;
2658
2659 for (plane = 0; plane < INSN_CACHE_DEPTH; plane++)
2660 {
2661 int addr = (int) start_addr + plane * INSN_CACHE_PLANE_SIZE;
2662 int stop = (int) end_addr + plane * INSN_CACHE_PLANE_SIZE;
2663
2664 while (addr != stop)
2665 {
2666 /* Call the return instruction at ADDR. */
2667 ((function_ptr) addr) ();
2668
2669 addr += INSN_CACHE_LINE_WIDTH;
2670 }
2671 }
2672 #else /* just one plane */
2673 do
2674 {
2675 /* Call the return instruction at START_ADDR. */
2676 ((function_ptr) start_addr) ();
2677
2678 start_addr += INSN_CACHE_LINE_WIDTH;
2679 }
2680 while (((int) start_addr % INSN_CACHE_SIZE) != offset);
2681 #endif /* just one plane */
2682 #endif /* Cache is large */
2683 #endif /* Cache exists */
2684 #endif /* CLEAR_INSN_CACHE */
2685 }
2686
2687 #endif /* L_clear_cache */
2688 \f
2689 #ifdef L_trampoline
2690
2691 /* Jump to a trampoline, loading the static chain address. */
2692
2693 #if defined(WINNT) && ! defined(__CYGWIN__) && ! defined (_UWIN)
2694
2695 long
2696 getpagesize (void)
2697 {
2698 #ifdef _ALPHA_
2699 return 8192;
2700 #else
2701 return 4096;
2702 #endif
2703 }
2704
2705 #ifdef __i386__
2706 extern int VirtualProtect (char *, int, int, int *) __attribute__((stdcall));
2707 #endif
2708
2709 int
2710 mprotect (char *addr, int len, int prot)
2711 {
2712 int np, op;
2713
2714 if (prot == 7)
2715 np = 0x40;
2716 else if (prot == 5)
2717 np = 0x20;
2718 else if (prot == 4)
2719 np = 0x10;
2720 else if (prot == 3)
2721 np = 0x04;
2722 else if (prot == 1)
2723 np = 0x02;
2724 else if (prot == 0)
2725 np = 0x01;
 else
 return -1;
2726
2727 if (VirtualProtect (addr, len, np, &op))
2728 return 0;
2729 else
2730 return -1;
2731 }
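/* Editorial sketch, not part of the original source: one plausible way the
   getpagesize and mprotect wrappers above could be used on this target to
   make a trampoline on the stack executable.  The function name and the
   TRAMPOLINE_SIZE macro are assumptions borrowed from the other targets in
   this section; prot value 7 is read/write/execute in the encoding above.  */
#if 0
static void
example_enable_execute_stack (char *addr)
{
  long pagesize = getpagesize ();
  char *page = (char *) ((long) addr & -pagesize);

  /* Round down to a page boundary and cover up to the end of the
     trampoline, asking for read/write/execute access.  */
  if (mprotect (page, (addr - page) + TRAMPOLINE_SIZE, 7) != 0)
    abort ();
}
#endif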
2732
2733 #endif /* WINNT && ! __CYGWIN__ && ! _UWIN */
2734
2735 #ifdef TRANSFER_FROM_TRAMPOLINE
2736 TRANSFER_FROM_TRAMPOLINE
2737 #endif
2738
2739 #if defined (NeXT) && defined (__MACH__)
2740
2741 /* Make stack executable so we can call trampolines on stack.
2742 This is called from INITIALIZE_TRAMPOLINE in next.h. */
2743 #ifdef NeXTStep21
2744 #include <mach.h>
2745 #else
2746 #include <mach/mach.h>
2747 #endif
2748
2749 void
2750 __enable_execute_stack (char *addr)
2751 {
2752 kern_return_t r;
2753 char *eaddr = addr + TRAMPOLINE_SIZE;
2754 vm_address_t a = (vm_address_t) addr;
2755
2756 /* turn on execute access on stack */
2757 r = vm_protect (task_self (), a, TRAMPOLINE_SIZE, FALSE, VM_PROT_ALL);
2758 if (r != KERN_SUCCESS)
2759 {
2760 mach_error("vm_protect VM_PROT_ALL", r);
2761 exit(1);
2762 }
2763
2764 /* We inline the i-cache invalidation for speed */
2765
2766 #ifdef CLEAR_INSN_CACHE
2767 CLEAR_INSN_CACHE (addr, eaddr);
2768 #else
2769 __clear_cache (addr, eaddr);
2770 #endif
2771 }
2772
2773 #endif /* defined (NeXT) && defined (__MACH__) */
2774
2775 #ifdef __convex__
2776
2777 /* Make stack executable so we can call trampolines on stack.
2778 This is called from INITIALIZE_TRAMPOLINE in convex.h. */
2779
2780 #include <sys/mman.h>
2781 #include <sys/vmparam.h>
2782 #include <machine/machparam.h>
2783
2784 void
2785 __enable_execute_stack (void)
2786 {
2787 int fp;
2788 static unsigned lowest = USRSTACK;
2789 unsigned current = (unsigned) &fp & -NBPG;
2790
2791 if (lowest > current)
2792 {
2793 unsigned len = lowest - current;
2794 mremap (current, &len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE);
2795 lowest = current;
2796 }
2797
2798 /* Clear instruction cache in case an old trampoline is in it. */
2799 asm ("pich");
2800 }
2801 #endif /* __convex__ */
2802
2803 #ifdef __sysV88__
2804
2805 /* Modified from the convex code above. */
2806
2807 #include <sys/param.h>
2808 #include <errno.h>
2809 #include <sys/m88kbcs.h>
2810
2811 void
2812 __enable_execute_stack (void)
2813 {
2814 int save_errno;
2815 static unsigned long lowest = USRSTACK;
2816 unsigned long current = (unsigned long) &save_errno & -NBPC;
2817
2818 /* Ignore errno being set. memctl sets errno to EINVAL whenever the
2819 address is seen as 'negative'. That is the case with the stack. */
2820
2821 save_errno=errno;
2822 if (lowest > current)
2823 {
2824 unsigned len=lowest-current;
2825 memctl(current,len,MCT_TEXT);
2826 lowest = current;
2827 }
2828 else
2829 memctl(current,NBPC,MCT_TEXT);
2830 errno=save_errno;
2831 }
2832
2833 #endif /* __sysV88__ */
2834
2835 #ifdef __sysV68__
2836
2837 #include <sys/signal.h>
2838 #include <errno.h>
2839
2840 /* Motorola forgot to put memctl.o in the libp version of libc881.a,
2841 so define it here, because we need it in __clear_insn_cache below */
2842 /* On older versions of this OS, neither memctl nor MCT_TEXT is defined;
2843 hence we enable this stuff only if MCT_TEXT is #define'd. */
2844
2845 #ifdef MCT_TEXT
2846 asm("\n\
2847 global memctl\n\
2848 memctl:\n\
2849 movq &75,%d0\n\
2850 trap &0\n\
2851 bcc.b noerror\n\
2852 jmp cerror%\n\
2853 noerror:\n\
2854 movq &0,%d0\n\
2855 rts");
2856 #endif
2857
2858 /* Clear instruction cache so we can call trampolines on stack.
2859 This is called from FINALIZE_TRAMPOLINE in mot3300.h. */
2860
2861 void
2862 __clear_insn_cache (void)
2863 {
2864 #ifdef MCT_TEXT
2865 int save_errno;
2866
2867 /* Preserve errno, because users would be surprised to have
2868 errno changing without explicitly calling any system-call. */
2869 save_errno = errno;
2870
2871 /* Keep it simple: memctl (MCT_TEXT) always fully clears the insn cache.
2872 No need to use an address derived from _start or %sp, as 0 works also. */
2873 memctl(0, 4096, MCT_TEXT);
2874 errno = save_errno;
2875 #endif
2876 }
2877
2878 #endif /* __sysV68__ */
2879
2880 #ifdef __pyr__
2881
2882 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
2883 #include <stdio.h>
2884 #include <sys/mman.h>
2885 #include <sys/types.h>
2886 #include <sys/param.h>
2887 #include <sys/vmmac.h>
2888
2889 /* Modified from the convex code above.
2890 mremap promises to clear the i-cache. */
2891
2892 void
2893 __enable_execute_stack (void)
2894 {
2895 int fp;
2896 if (mprotect (((unsigned int)&fp/PAGSIZ)*PAGSIZ, PAGSIZ,
2897 PROT_READ|PROT_WRITE|PROT_EXEC))
2898 {
2899 perror ("mprotect in __enable_execute_stack");
2900 fflush (stderr);
2901 abort ();
2902 }
2903 }
2904 #endif /* __pyr__ */
2905
2906 #if defined (sony_news) && defined (SYSTYPE_BSD)
2907
2908 #include <stdio.h>
2909 #include <sys/types.h>
2910 #include <sys/param.h>
2911 #include <syscall.h>
2912 #include <machine/sysnews.h>
2913
2914 /* cacheflush function for NEWS-OS 4.2.
2915 This function is called from trampoline-initialize code
2916 defined in config/mips/mips.h. */
2917
2918 void
2919 cacheflush (char *beg, int size, int flag)
2920 {
2921 if (syscall (SYS_sysnews, NEWS_CACHEFLUSH, beg, size, FLUSH_BCACHE))
2922 {
2923 perror ("cache_flush");
2924 fflush (stderr);
2925 abort ();
2926 }
2927 }
2928
2929 #endif /* sony_news */
2930 #endif /* L_trampoline */
2931 \f
2932 #ifndef __CYGWIN__
2933 #ifdef L__main
2934
2935 #include "gbl-ctors.h"
2936 /* Some systems use __main in a way incompatible with its use in gcc; in these
2937 cases, use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
2938 give the same symbol without quotes for an alternative entry point. You
2939 must define both, or neither. */
2940 #ifndef NAME__MAIN
2941 #define NAME__MAIN "__main"
2942 #define SYMBOL__MAIN __main
2943 #endif
2944
2945 #ifdef INIT_SECTION_ASM_OP
2946 #undef HAS_INIT_SECTION
2947 #define HAS_INIT_SECTION
2948 #endif
2949
2950 #if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
2951
2952 /* Some ELF crosses use crtstuff.c to provide __CTOR_LIST__, but use this
2953 code to run constructors. In that case, we need to handle EH here, too. */
2954
2955 #ifdef EH_FRAME_SECTION
2956 #include "frame.h"
2957 extern unsigned char __EH_FRAME_BEGIN__[];
2958 #endif
2959
2960 /* Run all the global destructors on exit from the program. */
2961
2962 void
2963 __do_global_dtors (void)
2964 {
2965 #ifdef DO_GLOBAL_DTORS_BODY
2966 DO_GLOBAL_DTORS_BODY;
2967 #else
2968 static func_ptr *p = __DTOR_LIST__ + 1;
2969 while (*p)
2970 {
2971 p++;
2972 (*(p-1)) ();
2973 }
2974 #endif
2975 #if defined (EH_FRAME_SECTION) && !defined (HAS_INIT_SECTION)
2976 {
2977 static int completed = 0;
2978 if (! completed)
2979 {
2980 completed = 1;
2981 __deregister_frame_info (__EH_FRAME_BEGIN__);
2982 }
2983 }
2984 #endif
2985 }
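/* Editorial sketch, not part of the original source: the shape of the
   destructor list walked by the loop above.  Slot 0 is skipped (the walk
   starts at __DTOR_LIST__ + 1) and a null function pointer ends it.  The
   names some_dtor_a and some_dtor_b are made up for illustration.  */
#if 0
extern void some_dtor_a (void), some_dtor_b (void);

func_ptr example_dtor_list[] =
{
  (func_ptr) -1,		/* slot 0: not a real entry, skipped above */
  some_dtor_a,			/* called first */
  some_dtor_b,			/* called second */
  0				/* terminator for the while (*p) loop */
};
#endif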
2986 #endif
2987
2988 #ifndef HAS_INIT_SECTION
2989 /* Run all the global constructors on entry to the program. */
2990
2991 void
2992 __do_global_ctors (void)
2993 {
2994 #ifdef EH_FRAME_SECTION
2995 {
2996 static struct object object;
2997 __register_frame_info (__EH_FRAME_BEGIN__, &object);
2998 }
2999 #endif
3000 DO_GLOBAL_CTORS_BODY;
3001 atexit (__do_global_dtors);
3002 }
3003 #endif /* no HAS_INIT_SECTION */
3004
3005 #if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
3006 /* Subroutine called automatically by `main'.
3007 Compiling a global function named `main'
3008 produces an automatic call to this function at the beginning.
3009
3010 For many systems, this routine calls __do_global_ctors.
3011 For systems which support a .init section we use the .init section
3012 to run __do_global_ctors, so we need not do anything here. */
3013
3014 void
3015 SYMBOL__MAIN ()
3016 {
3017 /* Support recursive calls to `main': run initializers just once. */
3018 static int initialized;
3019 if (! initialized)
3020 {
3021 initialized = 1;
3022 __do_global_ctors ();
3023 }
3024 }
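/* Editorial sketch, not part of the original source: what a compiled `main'
   effectively does on targets that take this path.  The call to __main is
   inserted by the compiler, not written by the user; the body shown here is
   purely illustrative.  */
#if 0
extern void __main (void);

int
main (int argc, char **argv)
{
  __main ();			/* runs __do_global_ctors exactly once */
  /* ... user code ... */
  return 0;
}
#endif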
3025 #endif /* no HAS_INIT_SECTION or INVOKE__main */
3026
3027 #endif /* L__main */
3028 #endif /* __CYGWIN__ */
3029 \f
3030 #ifdef L_ctors
3031
3032 #include "gbl-ctors.h"
3033
3034 /* Provide default definitions for the lists of constructors and
3035 destructors, so that we don't get linker errors. These symbols are
3036 intentionally bss symbols, so that gld and/or collect will provide
3037 the right values. */
3038
3039 /* We declare the lists here with two elements each,
3040 so that they are valid empty lists if no other definition is loaded.
3041
3042 If we are using the old "set" extensions to have the gnu linker
3043 collect ctors and dtors, then __CTOR_LIST__ and __DTOR_LIST__
3044 must be in the bss/common section.
3045
3046 Long term no port should use those extensions. But many still do. */
3047 #if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
3048 #if defined (ASM_OUTPUT_CONSTRUCTOR) || defined (USE_COLLECT2)
3049 func_ptr __CTOR_LIST__[2] = {0, 0};
3050 func_ptr __DTOR_LIST__[2] = {0, 0};
3051 #else
3052 func_ptr __CTOR_LIST__[2];
3053 func_ptr __DTOR_LIST__[2];
3054 #endif
3055 #endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
3056 #endif /* L_ctors */
3057 \f
3058 #ifdef L_exit
3059
3060 #include "gbl-ctors.h"
3061
3062 #ifdef NEED_ATEXIT
3063
3064 #ifndef ON_EXIT
3065
3066 # include <errno.h>
3067
3068 static func_ptr *atexit_chain = 0;
3069 static long atexit_chain_length = 0;
3070 static volatile long last_atexit_chain_slot = -1;
3071
3072 int
3073 atexit (func_ptr func)
3074 {
3075 if (++last_atexit_chain_slot == atexit_chain_length)
3076 {
3077 atexit_chain_length += 32;
3078 if (atexit_chain)
3079 atexit_chain = (func_ptr *) realloc (atexit_chain, atexit_chain_length
3080 * sizeof (func_ptr));
3081 else
3082 atexit_chain = (func_ptr *) malloc (atexit_chain_length
3083 * sizeof (func_ptr));
3084 if (! atexit_chain)
3085 {
3086 atexit_chain_length = 0;
3087 last_atexit_chain_slot = -1;
3088 errno = ENOMEM;
3089 return (-1);
3090 }
3091 }
3092 atexit_chain[last_atexit_chain_slot] = func;
3093 return (0);
3094 }
3095
3096 extern void _cleanup (void);
3097 extern void _exit (int) __attribute__ ((__noreturn__));
3098
3099 void
3100 exit (int status)
3101 {
3102 if (atexit_chain)
3103 {
3104 for ( ; last_atexit_chain_slot-- >= 0; )
3105 {
3106 (*atexit_chain[last_atexit_chain_slot + 1]) ();
3107 atexit_chain[last_atexit_chain_slot + 1] = 0;
3108 }
3109 free (atexit_chain);
3110 atexit_chain = 0;
3111 }
3112 #ifdef EXIT_BODY
3113 EXIT_BODY;
3114 #else
3115 _cleanup ();
3116 #endif
3117 _exit (status);
3118 }
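/* Editorial sketch, not part of the original source: with the atexit and
   exit defined above, handlers run in reverse order of registration.  */
#if 0
#include <stdio.h>

static void first_registered (void)  { puts ("runs last"); }
static void second_registered (void) { puts ("runs first"); }

int
main (void)
{
  atexit (first_registered);
  atexit (second_registered);
  return 0;	/* exit runs second_registered, then first_registered */
}
#endif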
3119
3120 #else /* ON_EXIT */
3121
3122 /* Simple; we just need a wrapper for ON_EXIT. */
3123 int
3124 atexit (func_ptr func)
3125 {
3126 return ON_EXIT (func);
3127 }
3128
3129 #endif /* ON_EXIT */
3130 #endif /* NEED_ATEXIT */
3131
3132 #endif /* L_exit */
3133 \f
3134 #ifdef L_eh
3135
3136 #include "gthr.h"
3137
3138 /* Shared exception handling support routines. */
3139
3140 void
3141 __default_terminate (void)
3142 {
3143 abort ();
3144 }
3145
3146 static __terminate_func_ptr __terminate_func =
3147 __default_terminate;
3148
3149 void __attribute__((__noreturn__))
3150 __terminate (void)
3151 {
3152 (*__terminate_func)();
3153 }
3154
3155 __terminate_func_ptr
3156 __terminate_set_func (__terminate_func_ptr newfunc)
3157 {
3158 __terminate_func_ptr oldfunc = __terminate_func;
3159
3160 __terminate_func = newfunc;
3161 return (oldfunc);
3162 }
3163
3164 void *
3165 __throw_type_match (void *catch_type, void *throw_type, void *obj)
3166 {
3167 #if 0
3168 printf ("__throw_type_match (): catch_type = %s, throw_type = %s\n",
3169 catch_type, throw_type);
3170 #endif
3171 if (strcmp ((const char *)catch_type, (const char *)throw_type) == 0)
3172 return obj;
3173 return 0;
3174 }
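/* Editorial sketch, not part of the original source: this default matcher
   compares types purely as strings, so identical strings match and anything
   else does not.  The literals used here are arbitrary.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int obj = 42;
  void *hit  = __throw_type_match ("int", "int", &obj);	/* returns &obj */
  void *miss = __throw_type_match ("long", "int", &obj);	/* returns 0 */

  printf ("hit=%p miss=%p\n", hit, miss);
  return 0;
}
#endif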
3175
3176 void
3177 __empty (void)
3178 {
3179 }
3180 \f
3181
3182 /* Include definitions of EH context and table layout */
3183
3184 #include "eh-common.h"
3185 #ifndef inhibit_libc
3186 #include <stdio.h>
3187 #endif
3188
3189 /* Allocate and return a new EH context structure. */
3190
3191 #if __GTHREADS
3192 static void *
3193 new_eh_context (void)
3194 {
3195 struct eh_full_context {
3196 struct eh_context c;
3197 void *top_elt[2];
3198 } *ehfc = (struct eh_full_context *) malloc (sizeof *ehfc);
3199
3200 if (! ehfc)
3201 __terminate ();
3202
3203 memset (ehfc, 0, sizeof *ehfc);
3204
3205 ehfc->c.dynamic_handler_chain = (void **) ehfc->top_elt;
3206
3207 /* This should optimize out entirely. This should always be true,
3208 but just in case it ever isn't, don't allow bogus code to be
3209 generated. */
3210
3211 if ((void*)(&ehfc->c) != (void*)ehfc)
3212 __terminate ();
3213
3214 return &ehfc->c;
3215 }
3216
3217 static __gthread_key_t eh_context_key;
3218
3219 /* Destructor for struct eh_context. */
3220 static void
3221 eh_context_free (void *ptr)
3222 {
3223 __gthread_key_dtor (eh_context_key, ptr);
3224 if (ptr)
3225 free (ptr);
3226 }
3227 #endif
3228
3229 /* Pointer to function to return EH context. */
3230
3231 static struct eh_context *eh_context_initialize (void);
3232 static struct eh_context *eh_context_static (void);
3233 #if __GTHREADS
3234 static struct eh_context *eh_context_specific (void);
3235 #endif
3236
3237 static struct eh_context *(*get_eh_context) (void) = &eh_context_initialize;
3238
3239 /* Routine to get EH context.
3240 This one will simply call the function pointer. */
3241
3242 void *
3243 __get_eh_context (void)
3244 {
3245 return (void *) (*get_eh_context) ();
3246 }
3247
3248 /* Get and set the language specific info pointer. */
3249
3250 void **
3251 __get_eh_info (void)
3252 {
3253 struct eh_context *eh = (*get_eh_context) ();
3254 return &eh->info;
3255 }
3256 \f
3257 #ifdef DWARF2_UNWIND_INFO
3258 static int dwarf_reg_size_table_initialized = 0;
3259 static char dwarf_reg_size_table[DWARF_FRAME_REGISTERS];
3260
3261 static void
3262 init_reg_size_table (void)
3263 {
3264 __builtin_init_dwarf_reg_size_table (dwarf_reg_size_table);
3265 dwarf_reg_size_table_initialized = 1;
3266 }
3267 #endif
3268
3269 #if __GTHREADS
3270 static void
3271 eh_threads_initialize (void)
3272 {
3273 /* Try to create the key. If it fails, revert to static method,
3274 otherwise start using thread specific EH contexts. */
3275 if (__gthread_key_create (&eh_context_key, &eh_context_free) == 0)
3276 get_eh_context = &eh_context_specific;
3277 else
3278 get_eh_context = &eh_context_static;
3279 }
3280 #endif /* __GTHREADS */
3281
3282 /* Initialize EH context.
3283 This will be called only once, since we change GET_EH_CONTEXT
3284 pointer to another routine. */
3285
3286 static struct eh_context *
3287 eh_context_initialize (void)
3288 {
3289 #if __GTHREADS
3290
3291 static __gthread_once_t once = __GTHREAD_ONCE_INIT;
3292 /* Make sure that get_eh_context does not point to us anymore.
3293 Some systems have dummy thread routines in their libc that
3294 simply return success (Solaris 2.6 for example). */
3295 if (__gthread_once (&once, eh_threads_initialize) != 0
3296 || get_eh_context == &eh_context_initialize)
3297 {
3298 /* Use static version of EH context. */
3299 get_eh_context = &eh_context_static;
3300 }
3301 #ifdef DWARF2_UNWIND_INFO
3302 {
3303 static __gthread_once_t once_regsizes = __GTHREAD_ONCE_INIT;
3304 if (__gthread_once (&once_regsizes, init_reg_size_table) != 0
3305 || ! dwarf_reg_size_table_initialized)
3306 init_reg_size_table ();
3307 }
3308 #endif
3309
3310 #else /* no __GTHREADS */
3311
3312 /* Use static version of EH context. */
3313 get_eh_context = &eh_context_static;
3314
3315 #ifdef DWARF2_UNWIND_INFO
3316 init_reg_size_table ();
3317 #endif
3318
3319 #endif /* no __GTHREADS */
3320
3321 return (*get_eh_context) ();
3322 }
3323
3324 /* Return a static EH context. */
3325
3326 static struct eh_context *
3327 eh_context_static (void)
3328 {
3329 static struct eh_context eh;
3330 static int initialized;
3331 static void *top_elt[2];
3332
3333 if (! initialized)
3334 {
3335 initialized = 1;
3336 memset (&eh, 0, sizeof eh);
3337 eh.dynamic_handler_chain = top_elt;
3338 }
3339 return &eh;
3340 }
3341
3342 #if __GTHREADS
3343 /* Return a thread specific EH context. */
3344
3345 static struct eh_context *
3346 eh_context_specific (void)
3347 {
3348 struct eh_context *eh;
3349 eh = (struct eh_context *) __gthread_getspecific (eh_context_key);
3350 if (! eh)
3351 {
3352 eh = new_eh_context ();
3353 if (__gthread_setspecific (eh_context_key, (void *) eh) != 0)
3354 __terminate ();
3355 }
3356
3357 return eh;
3358 }
3359 #endif /* __GTHREADS */
3360 \f
3361 /* Support routines for alloc/free during exception handling */
3362
3363 /* __eh_alloc and __eh_free attempt allocation using malloc, but fall back to
3364 the small arena in the eh_context. This is needed because throwing an
3365 out-of-memory exception would fail otherwise. The emergency space is
3366 allocated in blocks of size EH_ALLOC_ALIGN, the
3367 minimum allocation being two blocks. A bitmask indicates which blocks
3368 have been allocated. To indicate the size of an allocation, the bit for
3369 the final block is not set. Hence each allocation is a run of 1s followed
3370 by a zero. */
3371 void *
3372 __eh_alloc (size_t size)
3373 {
3374 void *p;
3375
3376 if (!size)
3377 abort();
3378 p = malloc (size);
3379 if (p == 0)
3380 {
3381 struct eh_context *eh = __get_eh_context ();
3382 unsigned blocks = (size + EH_ALLOC_ALIGN - 1) / EH_ALLOC_ALIGN;
3383 unsigned real_mask = eh->alloc_mask | (eh->alloc_mask << 1);
3384 unsigned our_mask;
3385 unsigned ix;
3386
3387 if (blocks > EH_ALLOC_SIZE / EH_ALLOC_ALIGN)
3388 __terminate ();
3389 blocks += blocks == 1;
3390 our_mask = (1 << blocks) - 1;
3391
3392 for (ix = EH_ALLOC_SIZE / EH_ALLOC_ALIGN - blocks; ix; ix--)
3393 if (! ((real_mask >> ix) & our_mask))
3394 {
3395 /* found some space */
3396 p = &eh->alloc_buffer[ix * EH_ALLOC_ALIGN];
3397 eh->alloc_mask |= (our_mask >> 1) << ix;
3398 return p;
3399 }
3400 __terminate ();
3401 }
3402 return p;
3403 }
3404
3405 /* Free the memory for a cp_eh_info and associated exception, given
3406 a pointer to the cp_eh_info. */
3407 void
3408 __eh_free (void *p)
3409 {
3410 struct eh_context *eh = __get_eh_context ();
3411
3412 ptrdiff_t diff = (char *)p - &eh->alloc_buffer[0];
3413 if (diff >= 0 && diff < EH_ALLOC_SIZE)
3414 {
3415 unsigned mask = eh->alloc_mask;
3416 unsigned bit = 1 << (diff / EH_ALLOC_ALIGN);
3417
3418 do
3419 {
3420 mask ^= bit;
3421 bit <<= 1;
3422 }
3423 while (mask & bit);
3424 eh->alloc_mask = mask;
3425 }
3426 else
3427 free (p);
3428 }
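/* Editorial sketch, not part of the original source: a standalone
   walk-through of the alloc_mask encoding used by __eh_alloc and __eh_free
   above, assuming for concreteness an arena of 8 blocks so the mask fits in
   8 bits (the real sizes come from EH_ALLOC_SIZE and EH_ALLOC_ALIGN).  Each
   allocation is recorded as a run of 1s terminated by a 0 at its final
   block.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned mask = 0;
  unsigned blocks = 3, ix = 2;
  unsigned our_mask = (1 << blocks) - 1;	/* 0b111 */
  unsigned bit = 1 << ix;

  /* "Allocate" 3 blocks at index 2, the way __eh_alloc marks them:
     set bits 2 and 3 and leave bit 4 (the final block) clear.  */
  mask |= (our_mask >> 1) << ix;		/* mask is now 0b1100 */

  /* "Free" it again the way __eh_free walks the run of 1s.  */
  do
    {
      mask ^= bit;
      bit <<= 1;
    }
  while (mask & bit);

  printf ("mask after free: %#x\n", mask);	/* prints 0 */
  return 0;
}
#endif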
3429 \f
3430 /* Support routines for setjmp/longjmp exception handling. */
3431
3432 /* Calls to __sjthrow are generated by the compiler when an exception
3433 is raised when using the setjmp/longjmp exception handling codegen
3434 method. */
3435
3436 #ifdef DONT_USE_BUILTIN_SETJMP
3437 extern void longjmp (void *, int);
3438 #endif
3439
3440 /* Routine to get the head of the current thread's dynamic handler chain
3441 used for exception handling. */
3442
3443 void ***
3444 __get_dynamic_handler_chain (void)
3445 {
3446 struct eh_context *eh = (*get_eh_context) ();
3447 return &eh->dynamic_handler_chain;
3448 }
3449
3450 /* This is used to throw an exception when the setjmp/longjmp codegen
3451 method is used for exception handling.
3452
3453 We call __terminate if there are no handlers left. Otherwise we run the
3454 cleanup actions off the dynamic cleanup stack, and pop the top of the
3455 dynamic handler chain, and use longjmp to transfer back to the associated
3456 handler. */
3457
3458 void
3459 __sjthrow (void)
3460 {
3461 struct eh_context *eh = (*get_eh_context) ();
3462 void ***dhc = &eh->dynamic_handler_chain;
3463 void *jmpbuf;
3464 void (*func)(void *, int);
3465 void *arg;
3466 /* The cleanup chain is one word into the buffer. Get the cleanup chain. */
3467 void ***cleanup = (void***)&(*dhc)[1];
3468
3469 /* If there are any cleanups in the chain, run them now. */
3470 if (cleanup[0])
3471 {
3472 double store[200];
3473 void **buf = (void**)store;
3474 buf[1] = 0;
3475 buf[0] = (*dhc);
3476
3477 /* try { */
3478 #ifdef DONT_USE_BUILTIN_SETJMP
3479 if (! setjmp (&buf[2]))
3480 #else
3481 if (! __builtin_setjmp (&buf[2]))
3482 #endif
3483 {
3484 *dhc = buf;
3485 while (cleanup[0])
3486 {
3487 func = (void(*)(void*, int))cleanup[0][1];
3488 arg = (void*)cleanup[0][2];
3489
3490 /* Update this before running the cleanup. */
3491 cleanup[0] = (void **)cleanup[0][0];
3492
3493 (*func)(arg, 2);
3494 }
3495 *dhc = buf[0];
3496 }
3497 /* catch (...) */
3498 else
3499 {
3500 __terminate ();
3501 }
3502 }
3503
3504 /* We must call terminate if we try and rethrow an exception, when
3505 there is no exception currently active and when there are no
3506 handlers left. */
3507 if (! eh->info || (*dhc)[0] == 0)
3508 __terminate ();
3509
3510 /* Find the jmpbuf associated with the top element of the dynamic
3511 handler chain. The jumpbuf starts two words into the buffer. */
3512 jmpbuf = &(*dhc)[2];
3513
3514 /* Then we pop the top element off the dynamic handler chain. */
3515 *dhc = (void**)(*dhc)[0];
3516
3517 /* And then we jump to the handler. */
3518
3519 #ifdef DONT_USE_BUILTIN_SETJMP
3520 longjmp (jmpbuf, 1);
3521 #else
3522 __builtin_longjmp (jmpbuf, 1);
3523 #endif
3524 }
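/* Editorial sketch, not part of the original source: the word layout that
   __sjthrow (and __sjpopnthrow below) assume for a dynamic handler chain
   entry and for a cleanup record, spelled out as structs.  The struct and
   field names are invented; only the layout is taken from the index
   arithmetic above ((*dhc)[0], (*dhc)[1], &(*dhc)[2], cleanup[0][0..2]).  */
#if 0
struct sjlj_handler_entry
{
  struct sjlj_handler_entry *prev;	/* (*dhc)[0]: next-outer handler */
  struct sjlj_cleanup *cleanups;	/* (*dhc)[1]: head of cleanup chain */
  void *jmpbuf[1];			/* (*dhc)[2...]: setjmp buffer */
};

struct sjlj_cleanup
{
  struct sjlj_cleanup *next;		/* cleanup[0][0] */
  void (*fn) (void *, int);		/* cleanup[0][1]: cleanup routine */
  void *arg;				/* cleanup[0][2]: its argument */
};
#endif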
3525
3526 /* Run cleanups on the dynamic cleanup stack for the current dynamic
3527 handler, then pop the handler off the dynamic handler stack, and
3528 then throw. This is used to skip the first handler, and transfer
3529 control to the next handler in the dynamic handler stack. */
3530
3531 void
3532 __sjpopnthrow (void)
3533 {
3534 struct eh_context *eh = (*get_eh_context) ();
3535 void ***dhc = &eh->dynamic_handler_chain;
3536 void (*func)(void *, int);
3537 void *arg;
3538 /* The cleanup chain is one word into the buffer. Get the cleanup chain. */
3539 void ***cleanup = (void***)&(*dhc)[1];
3540
3541 /* If there are any cleanups in the chain, run them now. */
3542 if (cleanup[0])
3543 {
3544 double store[200];
3545 void **buf = (void**)store;
3546 buf[1] = 0;
3547 buf[0] = (*dhc);
3548
3549 /* try { */
3550 #ifdef DONT_USE_BUILTIN_SETJMP
3551 if (! setjmp (&buf[2]))
3552 #else
3553 if (! __builtin_setjmp (&buf[2]))
3554 #endif
3555 {
3556 *dhc = buf;
3557 while (cleanup[0])
3558 {
3559 func = (void(*)(void*, int))cleanup[0][1];
3560 arg = (void*)cleanup[0][2];
3561
3562 /* Update this before running the cleanup. */
3563 cleanup[0] = (void **)cleanup[0][0];
3564
3565 (*func)(arg, 2);
3566 }
3567 *dhc = buf[0];
3568 }
3569 /* catch (...) */
3570 else
3571 {
3572 __terminate ();
3573 }
3574 }
3575
3576 /* Then we pop the top element off the dynamic handler chain. */
3577 *dhc = (void**)(*dhc)[0];
3578
3579 __sjthrow ();
3580 }
3581 \f
3582 /* Support code for all exception region-based exception handling. */
3583
3584 int
3585 __eh_rtime_match (void *rtime)
3586 {
3587 void *info;
3588 __eh_matcher matcher;
3589 void *ret;
3590
3591 info = *(__get_eh_info ());
3592 matcher = ((__eh_info *)info)->match_function;
3593 if (! matcher)
3594 {
3595 #ifndef inhibit_libc
3596 fprintf (stderr, "Internal Compiler Bug: No runtime type matcher.\n");
3597 #endif
3598 return 0;
3599 }
3600 ret = (*matcher) (info, rtime, (void *)0);
3601 return (ret != NULL);
3602 }
3603
3604 /* This value identifies the place from which an exception is being
3605 thrown. */
3606
3607 #ifdef EH_TABLE_LOOKUP
3608
3609 EH_TABLE_LOOKUP
3610
3611 #else
3612
3613 #ifdef DWARF2_UNWIND_INFO
3614
3615 /* Return the table version of an exception descriptor */
3616
3617 short
3618 __get_eh_table_version (exception_descriptor *table)
3619 {
3620 return table->lang.version;
3621 }
3622
3623 /* Return the originating table language of an exception descriptor */
3624
3625 short
3626 __get_eh_table_language (exception_descriptor *table)
3627 {
3628 return table->lang.language;
3629 }
3630
3631 /* This routine takes a PC and a pointer to the exception region TABLE for
3632 its translation unit, and returns the address of the exception handler
3633 associated with the closest exception table handler entry associated
3634 with that PC, or 0 if there are no table entries the PC fits in.
3635
3636 In the event of a tie, we have to give the last entry, as it represents
3637 an inner block. */
3638
3639 static void *
3640 old_find_exception_handler (void *pc, old_exception_table *table)
3641 {
3642 if (table)
3643 {
3644 int pos;
3645 int best = -1;
3646
3647 /* We can't do a binary search because the table isn't guaranteed
3648 to be sorted from function to function. */
3649 for (pos = 0; table[pos].start_region != (void *) -1; ++pos)
3650 {
3651 if (table[pos].start_region <= pc && table[pos].end_region > pc)
3652 {
3653 /* This can apply. Make sure it is at least as small as
3654 the previous best. */
3655 if (best == -1 || (table[pos].end_region <= table[best].end_region
3656 && table[pos].start_region >= table[best].start_region))
3657 best = pos;
3658 }
3659 /* But it is sorted by starting PC within a function. */
3660 else if (best >= 0 && table[pos].start_region > pc)
3661 break;
3662 }
3663 if (best != -1)
3664 return table[best].exception_handler;
3665 }
3666
3667 return (void *) 0;
3668 }
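/* Editorial sketch, not part of the original source: the "closest enclosing
   region wins" rule of old_find_exception_handler on a tiny hand-built
   table, using a local stand-in struct and made-up addresses so the example
   is self-contained.  The early-exit optimization of the real routine is
   omitted.  */
#if 0
#include <stdio.h>

struct demo_region { long start, end; const char *handler; };

int
main (void)
{
  /* Outer region first, nested region second, end marker last --
     the same ordering the real tables use.  */
  struct demo_region tab[] =
  {
    { 0x1000, 0x1100, "outer" },
    { 0x1020, 0x1040, "inner" },
    { -1, -1, 0 }
  };
  long pc = 0x1030;
  int pos, best = -1;

  for (pos = 0; tab[pos].start != -1; ++pos)
    if (tab[pos].start <= pc && tab[pos].end > pc
	&& (best == -1
	    || (tab[pos].end <= tab[best].end
		&& tab[pos].start >= tab[best].start)))
      best = pos;

  /* Prints "inner": the nested region is at least as small, so it wins.
     A pc of 0x1080 would match only the outer entry.  */
  printf ("%s\n", best == -1 ? "none" : tab[best].handler);
  return 0;
}
#endif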
3669
3670 /* find_exception_handler finds the correct handler, if there is one, to
3671 handle an exception.
3672 It returns a pointer to the handler to which control should be transferred,
3673 or NULL if there is nothing left.
3674 Parameters:
3675 PC - pc where the exception originates. If this is a rethrow,
3676 then this starts out as a pointer to the exception table
3677 entry we wish to rethrow out of.
3678 TABLE - exception table for the current module.
3679 EH_INFO - eh info pointer for this exception.
3680 RETHROW - 1 if this is a rethrow. (see incoming value of PC).
3681 CLEANUP - returned flag indicating whether this is a cleanup handler.
3682 */
3683 static void *
3684 find_exception_handler (void *pc, exception_descriptor *table,
3685 __eh_info *eh_info, int rethrow, int *cleanup)
3686 {
3687
3688 void *retval = NULL;
3689 *cleanup = 1;
3690 if (table)
3691 {
3692 int pos = 0;
3693 /* The new model assumes the table is sorted innermost-out, so the
3694 first region we find which matches is the correct one. */
3695
3696 exception_table *tab = &(table->table[0]);
3697
3698 /* Subtract 1 from the PC to avoid hitting the next region */
3699 if (rethrow)
3700 {
3701 /* pc is actually the region table entry to rethrow out of */
3702 pos = ((exception_table *) pc) - tab;
3703 pc = ((exception_table *) pc)->end_region - 1;
3704
3705 /* The label is always on the LAST handler entry for a region,
3706 so we know the next entry is a different region, even if the
3707 addresses are the same. Make sure it's not the end of the table, though. */
3708 if (tab[pos].start_region != (void *) -1)
3709 pos++;
3710 }
3711 else
3712 pc--;
3713
3714 /* We can't do a binary search because the table is ordered innermost
3715 to outermost by address range within each function. */
3716 for ( ; tab[pos].start_region != (void *) -1; pos++)
3717 {
3718 if (tab[pos].start_region <= pc && tab[pos].end_region > pc)
3719 {
3720 if (tab[pos].match_info)
3721 {
3722 __eh_matcher matcher = eh_info->match_function;
3723 /* match info but no matcher is NOT a match */
3724 if (matcher)
3725 {
3726 void *ret = (*matcher)((void *) eh_info,
3727 tab[pos].match_info, table);
3728 if (ret)
3729 {
3730 if (retval == NULL)
3731 retval = tab[pos].exception_handler;
3732 *cleanup = 0;
3733 break;
3734 }
3735 }
3736 }
3737 else
3738 {
3739 if (retval == NULL)
3740 retval = tab[pos].exception_handler;
3741 }
3742 }
3743 }
3744 }
3745 return retval;
3746 }
3747 #endif /* DWARF2_UNWIND_INFO */
3748 #endif /* EH_TABLE_LOOKUP */
3749 \f
3750 #ifdef DWARF2_UNWIND_INFO
3751 /* Support code for exception handling using static unwind information. */
3752
3753 #include "frame.h"
3754
3755 /* This type is used in get_reg and put_reg to deal with ABIs where a void*
3756 is smaller than a word, such as the Irix 6 n32 ABI. We cast twice to
3757 avoid a warning about casting between int and pointer of different
3758 sizes. */
3759
3760 typedef int ptr_type __attribute__ ((mode (pointer)));
3761
3762 typedef struct
3763 {
3764 word_type *reg[DWARF_FRAME_REGISTERS];
3765 } saved_regs_t;
3766
3767 #ifdef INCOMING_REGNO
3768 /* Is the saved value for register REG in frame UDATA stored in a register
3769 window in the previous frame? */
3770
3771 /* ??? The Sparc INCOMING_REGNO references TARGET_FLAT. This allows us
3772 to use the macro here. One suspects, though, that TARGET_FLAT
3773 compiled functions won't work with the frame-unwind stuff here.
3774 Perhaps the entirety of in_reg_window should be conditional on having
3775 seen a DW_CFA_GNU_window_save? */
3776 #define target_flags 0
3777
3778 static int
3779 in_reg_window (int reg, frame_state *udata)
3780 {
3781 if (udata->saved[reg] == REG_SAVED_REG)
3782 return INCOMING_REGNO (reg) == reg;
3783 if (udata->saved[reg] != REG_SAVED_OFFSET)
3784 return 0;
3785
3786 #ifdef STACK_GROWS_DOWNWARD
3787 return udata->reg_or_offset[reg] > 0;
3788 #else
3789 return udata->reg_or_offset[reg] < 0;
3790 #endif
3791 }
3792 #else
3793 static inline int
3794 in_reg_window (int reg __attribute__ ((__unused__)),
3795 frame_state *udata __attribute__ ((__unused__)))
3796 {
3797 return 0;
3798 }
3799 #endif /* INCOMING_REGNO */
3800
3801 /* Get the address of register REG as saved in UDATA, where SUB_UDATA is a
3802 frame called by UDATA or 0. */
3803
3804 static word_type *
3805 get_reg_addr (unsigned reg, frame_state *udata, frame_state *sub_udata)
3806 {
3807 while (udata->saved[reg] == REG_SAVED_REG)
3808 {
3809 reg = udata->reg_or_offset[reg];
3810 if (in_reg_window (reg, udata))
3811 {
3812 udata = sub_udata;
3813 sub_udata = NULL;
3814 }
3815 }
3816 if (udata->saved[reg] == REG_SAVED_OFFSET)
3817 return (word_type *)(udata->cfa + udata->reg_or_offset[reg]);
3818 else
3819 /* We don't have a saved copy of this register. */
3820 return NULL;
3821 }
3822
3823 /* Get the value of register REG as saved in UDATA, where SUB_UDATA is a
3824 frame called by UDATA or 0. */
3825
3826 static inline void *
3827 get_reg (unsigned reg, frame_state *udata, frame_state *sub_udata)
3828 {
3829 return (void *)(ptr_type) *get_reg_addr (reg, udata, sub_udata);
3830 }
3831
3832 /* Overwrite the saved value for register REG in frame UDATA with VAL. */
3833
3834 static inline void
3835 put_reg (unsigned reg, void *val, frame_state *udata)
3836 {
3837 *get_reg_addr (reg, udata, NULL) = (word_type)(ptr_type) val;
3838 }
3839
3840 /* Copy the saved value for register REG from PREG to frame
3841 TARGET_UDATA. Unlike the previous two functions, this can handle
3842 registers that are not one word large. */
3843
3844 static void
3845 copy_reg (unsigned reg, word_type *preg, frame_state *target_udata)
3846 {
3847 word_type *ptreg = get_reg_addr (reg, target_udata, NULL);
3848 memcpy (ptreg, preg, dwarf_reg_size_table [reg]);
3849 }
3850
3851 /* Retrieve the return address for frame UDATA. */
3852
3853 static inline void *
3854 get_return_addr (frame_state *udata, frame_state *sub_udata)
3855 {
3856 return __builtin_extract_return_addr
3857 (get_reg (udata->retaddr_column, udata, sub_udata));
3858 }
3859
3860 /* Overwrite the return address for frame UDATA with VAL. */
3861
3862 static inline void
3863 put_return_addr (void *val, frame_state *udata)
3864 {
3865 val = __builtin_frob_return_addr (val);
3866 put_reg (udata->retaddr_column, val, udata);
3867 }
3868
3869 /* Given the current frame UDATA and its return address PC, return the
3870 information about the calling frame in CALLER_UDATA and update the
3871 register array in SAVED_REGS. */
3872
3873 static void *
3874 next_stack_level (void *pc, frame_state *udata, frame_state *caller_udata,
3875 saved_regs_t *saved_regs)
3876 {
3877 int i;
3878 word_type *p;
3879
3880 /* Collect all of the registers for the current frame. */
3881 for (i = 0; i < DWARF_FRAME_REGISTERS; i++)
3882 if (udata->saved[i])
3883 saved_regs->reg[i] = get_reg_addr (i, udata, caller_udata);
3884
3885 caller_udata = __frame_state_for (pc, caller_udata);
3886 if (! caller_udata)
3887 return 0;
3888
3889 /* Now go back to our caller's stack frame. If our caller's CFA was
3890 saved in a register in this stack frame or a previous one,
3891 restore it; otherwise, assume CFA register was saved in SP and
3892 restore it to our CFA value. */
3893
3894 p = saved_regs->reg[caller_udata->cfa_reg];
3895 if (p)
3896 caller_udata->cfa = (void *)(ptr_type)*p;
3897 else
3898 caller_udata->cfa = udata->cfa;
3899
3900 if (caller_udata->indirect)
3901 caller_udata->cfa = * (void **) ((unsigned char *)caller_udata->cfa
3902 + caller_udata->base_offset);
3903 caller_udata->cfa += caller_udata->cfa_offset;
3904
3905 return caller_udata;
3906 }
3907
3908 /* Hook to call before __terminate if only cleanup handlers remain. */
3909 void
3910 __unwinding_cleanup (void)
3911 {
3912 }
3913
3914 /* throw_helper performs some of the common grunt work for a throw. This
3915 routine is called by __throw and __rethrow. It is pretty much split
3916 out from the old __throw routine. One addition is a dummy call to
3917 __unwinding_cleanup() when nothing but cleanups remain. This allows
3918 a debugger to examine the state at which the throw was executed,
3919 before any cleanups run, rather than at the terminate point after
3920 the stack has been unwound.
3921
3922 EH is the current eh_context structure.
3923 PC is the address of the call to __throw.
3924 MY_UDATA is the unwind information for __throw.
3925 OFFSET_P is where we return the SP adjustment offset. */
3926
3927 static void *
3928 throw_helper (struct eh_context *eh, void *pc, frame_state *my_udata,
3929 long *offset_p)
3930 {
3931 frame_state ustruct2, *udata = &ustruct2;
3932 frame_state ustruct;
3933 frame_state *sub_udata = &ustruct;
3934 void *saved_pc = pc;
3935 void *handler;
3936 void *handler_p = 0;
3937 void *pc_p = 0;
3938 void *callee_cfa = 0;
3939 frame_state saved_ustruct;
3940 int new_eh_model;
3941 int cleanup = 0;
3942 int only_cleanup = 0;
3943 int rethrow = 0;
3944 int saved_state = 0;
3945 long args_size;
3946 saved_regs_t saved_regs, cleanup_regs;
3947 __eh_info *eh_info = (__eh_info *)eh->info;
3948 int i;
3949
3950 memset (saved_regs.reg, 0, sizeof saved_regs.reg);
3951 memset (sub_udata->saved, REG_UNSAVED, sizeof sub_udata->saved);
3952
3953 /* Do we find a handler based on a re-throw PC? */
3954 if (eh->table_index != (void *) 0)
3955 rethrow = 1;
3956
3957 memcpy (udata, my_udata, sizeof (*udata));
3958
3959 handler = (void *) 0;
3960 for (;;)
3961 {
3962 frame_state *p = udata;
3963
3964 udata = next_stack_level (pc, udata, sub_udata, &saved_regs);
3965 sub_udata = p;
3966
3967 /* If we couldn't find the next frame, we lose. */
3968 if (! udata)
3969 break;
3970
3971 if (udata->eh_ptr == NULL)
3972 new_eh_model = 0;
3973 else
3974 new_eh_model = (((exception_descriptor *)(udata->eh_ptr))->
3975 runtime_id_field == NEW_EH_RUNTIME);
3976
3977 if (rethrow)
3978 {
3979 rethrow = 0;
3980 handler = find_exception_handler (eh->table_index, udata->eh_ptr,
3981 eh_info, 1, &cleanup);
3982 eh->table_index = (void *)0;
3983 }
3984 else
3985 if (new_eh_model)
3986 handler = find_exception_handler (pc, udata->eh_ptr, eh_info,
3987 0, &cleanup);
3988 else
3989 handler = old_find_exception_handler (pc, udata->eh_ptr);
3990
3991 /* If we found one, we can stop searching, if it's not a cleanup.
3992 For cleanups, we save the state and keep looking. This allows
3993 us to call a debug hook if there is nothing but cleanups left. */
3994 if (handler)
3995 {
3996 /* sub_udata now refers to the frame called by the handler frame. */
3997
3998 if (cleanup)
3999 {
4000 if (!saved_state)
4001 {
4002 saved_ustruct = *udata;
4003 cleanup_regs = saved_regs;
4004 handler_p = handler;
4005 pc_p = pc;
4006 saved_state = 1;
4007 only_cleanup = 1;
4008 /* Save the CFA of the frame called by the handler
4009 frame. */
4010 callee_cfa = sub_udata->cfa;
4011 }
4012 }
4013 else
4014 {
4015 only_cleanup = 0;
4016 if (!saved_state)
4017 callee_cfa = sub_udata->cfa;
4018 break;
4019 }
4020 }
4021
4022 /* Otherwise, we continue searching. We subtract 1 from PC to avoid
4023 hitting the beginning of the next region. */
4024 pc = get_return_addr (udata, sub_udata) - 1;
4025 }
4026
4027 if (saved_state)
4028 {
4029 udata = &saved_ustruct;
4030 saved_regs = cleanup_regs;
4031 handler = handler_p;
4032 pc = pc_p;
4033 if (only_cleanup)
4034 __unwinding_cleanup ();
4035 }
4036
4037 /* If we haven't found a handler by now, this is an unhandled
4038 exception. */
4039 if (! handler)
4040 __terminate();
4041
4042 eh->handler_label = handler;
4043
4044 args_size = udata->args_size;
4045
4046 /* We adjust SP by the difference between __throw's CFA and the CFA for
4047 the frame called by the handler frame, because those CFAs correspond
4048 to the SP values at the two call sites. We need to further adjust by
4049 the args_size of the handler frame itself to get the handler frame's
4050 SP from before the args were pushed for that call. */
4051 #ifdef STACK_GROWS_DOWNWARD
4052 *offset_p = callee_cfa - my_udata->cfa + args_size;
4053 #else
4054 *offset_p = my_udata->cfa - callee_cfa - args_size;
4055 #endif
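  /* Editorial worked example, not part of the original source, assuming a
     downward-growing stack: if __throw's CFA is 0x7000, the CFA of the frame
     called by the handler frame is 0x7400, and the handler frame pushed
     0x20 bytes of outgoing args, then
	offset = 0x7400 - 0x7000 + 0x20 = 0x420,
     i.e. the stack pointer must move up by 0x420 bytes to recover the
     handler frame's pre-call SP.  */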
4056
4057 /* If we found a handler in the throw context there's no need to
4058 unwind. */
4059 if (pc != saved_pc)
4060 {
4061 /* Copy saved register values into our register save slots. */
4062 for (i = 0; i < DWARF_FRAME_REGISTERS; i++)
4063 if (i != udata->retaddr_column && saved_regs.reg[i])
4064 copy_reg (i, saved_regs.reg[i], my_udata);
4065 }
4066
4067 return handler;
4068 }
4069
4070
4071 /* We first search for an exception handler, and if we don't find
4072 it, we call __terminate on the current stack frame so that we may
4073 use the debugger to walk the stack and understand why no handler
4074 was found.
4075
4076 If we find one, then we unwind the frames down to the one that
4077 has the handler and transfer control into the handler. */
4078
4079 /*extern void __throw(void) __attribute__ ((__noreturn__));*/
4080
4081 void
4082 __throw (void)
4083 {
4084 struct eh_context *eh = (*get_eh_context) ();
4085 void *pc, *handler;
4086 long offset;
4087
4088 /* XXX maybe make my_ustruct static so we don't have to look it up for
4089 each throw. */
4090 frame_state my_ustruct, *my_udata = &my_ustruct;
4091
4092 /* This is required for C++ semantics. We must call terminate if we
4093 try and rethrow an exception, when there is no exception currently
4094 active. */
4095 if (! eh->info)
4096 __terminate ();
4097
4098 /* Start at our stack frame. */
4099 label:
4100 my_udata = __frame_state_for (&&label, my_udata);
4101 if (! my_udata)
4102 __terminate ();
4103
4104 /* We need to get the value from the CFA register. */
4105 my_udata->cfa = __builtin_dwarf_cfa ();
4106
4107 /* Do any necessary initialization to access arbitrary stack frames.
4108 On the SPARC, this means flushing the register windows. */
4109 __builtin_unwind_init ();
4110
4111 /* Now reset pc to the right throw point. */
4112 pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
4113
4114 handler = throw_helper (eh, pc, my_udata, &offset);
4115
4116 /* Now go! */
4117
4118 __builtin_eh_return ((void *)eh, offset, handler);
4119
4120 /* Epilogue: restore the handler frame's register values and return
4121 to the stub. */
4122 }
4123
4124 /*extern void __rethrow(void *) __attribute__ ((__noreturn__));*/
4125
4126 void
4127 __rethrow (void *index)
4128 {
4129 struct eh_context *eh = (*get_eh_context) ();
4130 void *pc, *handler;
4131 long offset;
4132
4133 /* XXX maybe make my_ustruct static so we don't have to look it up for
4134 each throw. */
4135 frame_state my_ustruct, *my_udata = &my_ustruct;
4136
4137 /* This is required for C++ semantics. We must call terminate if we
4138 try and rethrow an exception, when there is no exception currently
4139 active. */
4140 if (! eh->info)
4141 __terminate ();
4142
4143 /* This is the table index we want to rethrow from. The value of
4144 the END_REGION label is used for the PC of the throw, and the
4145 search begins with the next table entry. */
4146 eh->table_index = index;
4147
4148 /* Start at our stack frame. */
4149 label:
4150 my_udata = __frame_state_for (&&label, my_udata);
4151 if (! my_udata)
4152 __terminate ();
4153
4154 /* We need to get the value from the CFA register. */
4155 my_udata->cfa = __builtin_dwarf_cfa ();
4156
4157 /* Do any necessary initialization to access arbitrary stack frames.
4158 On the SPARC, this means flushing the register windows. */
4159 __builtin_unwind_init ();
4160
4161 /* Now reset pc to the right throw point. */
4162 pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
4163
4164 handler = throw_helper (eh, pc, my_udata, &offset);
4165
4166 /* Now go! */
4167
4168 __builtin_eh_return ((void *)eh, offset, handler);
4169
4170 /* Epilogue: restore the handler frame's register values and return
4171 to the stub. */
4172 }
4173 #endif /* DWARF2_UNWIND_INFO */
4174
4175 #ifdef IA64_UNWIND_INFO
4176 #include "frame.h"
4177
4178 /* Return handler to which we want to transfer control, NULL if we don't
4179 intend to handle this exception here. */
4180 void *
4181 __ia64_personality_v1 (void *pc, old_exception_table *table)
4182 {
4183 if (table)
4184 {
4185 int pos;
4186 int best = -1;
4187
4188 for (pos = 0; table[pos].start_region != (void *) -1; ++pos)
4189 {
4190 if (table[pos].start_region <= pc && table[pos].end_region > pc)
4191 {
4192 /* This can apply. Make sure it is at least as small as
4193 the previous best. */
4194 if (best == -1 || (table[pos].end_region <= table[best].end_region
4195 && table[pos].start_region >= table[best].start_region))
4196 best = pos;
4197 }
4198 /* It is sorted by starting PC within a function. */
4199 else if (best >= 0 && table[pos].start_region > pc)
4200 break;
4201 }
4202 if (best != -1)
4203 return table[best].exception_handler;
4204 }
4205 return (void *) 0;
4206 }
4207
4208 static void
4209 ia64_throw_helper (ia64_frame_state *throw_frame, ia64_frame_state *caller,
4210 void *throw_bsp, void *throw_sp)
4211 {
4212 void *throw_pc = __builtin_return_address (0);
4213 unwind_info_ptr *info;
4214 void *pc, *handler = NULL;
4215 void *pc_base;
4216 int frame_count;
4217 void *bsp;
4218
4219 __builtin_ia64_flushrs (); /* Make the local register stacks available. */
4220
4221 /* Start at our stack frame, get our state. */
4222 __build_ia64_frame_state (throw_pc, throw_frame, throw_bsp, throw_sp,
4223 &pc_base);
4224
4225 /* Now we have to find the proper frame for pc, and see if there
4226 is a handler for it. if not, we keep going back frames until
4227 we do find one. Otherwise we call uncaught (). */
4228
4229 frame_count = 0;
4230 memcpy (caller, throw_frame, sizeof (*caller));
4231 while (!handler)
4232 {
4233 void *(*personality) ();
4234 void *eh_table;
4235
4236 frame_count++;
4237 /* We only care about the RP right now, so we don't need to keep
4238 any other information about the call frame. */
4239 pc = __get_real_reg_value (&caller->rp) - 1;
4240 bsp = __calc_caller_bsp ((long)__get_real_reg_value (&caller->pfs),
4241 caller->my_bsp);
4242 info = __build_ia64_frame_state (pc, caller, bsp, caller->my_psp,
4243 &pc_base);
4244
4245 /* If we couldn't find the next frame, we lose. */
4246 if (! info)
4247 break;
4248
4249 personality = __get_personality (info);
4250 /* TODO Haven't figured out how to actually load the personality address
4251 yet, so just always default to the one we expect for now. */
4252 if (personality != 0)
4253 personality = __ia64_personality_v1;
4254 eh_table = __get_except_table (info);
4255 /* If there is no personality routine, we'll keep unwinding. */
4256 if (personality)
4257 /* Pass a segment relative PC address to the personality routine,
4258 because the unwind_info section uses segrel relocs. */
4259 handler = personality (pc - pc_base, eh_table);
4260 }
4261
4262 if (!handler)
4263 __terminate ();
4264
4265 /* Handler is a segment relative address, so we must adjust it here. */
4266 handler += (long) pc_base;
4267
4268 /* If we found a handler, we need to unwind the stack to that point.
4269 We do this by copying saved values from previous frames into
4270 throw_frame's save slots. When __throw returns,
4271 it'll pick up the correct values. */
4272
4273 /* Start with where __throw saved things, and copy each saved register
4274 of each previous frame until we get to the one before we're
4275 throwing back to. */
4276 memcpy (caller, throw_frame, sizeof (*caller));
4277 for ( ; frame_count > 0; frame_count--)
4278 {
4279 pc = __get_real_reg_value (&caller->rp) - 1;
4280 bsp = __calc_caller_bsp ((long)__get_real_reg_value (&caller->pfs),
4281 caller->my_bsp);
4282 __build_ia64_frame_state (pc, caller, bsp, caller->my_psp, &pc_base);
4283 /* Any regs that were saved can be put in the throw frame now. */
4284 /* We don't want to copy any saved register from the
4285 target destination, but we do want to load up its frame. */
4286 if (frame_count > 1)
4287 __copy_saved_reg_state (throw_frame, caller);
4288 }
4289
4290 /* Set return address of the throw frame to the handler. */
4291 __set_real_reg_value (&throw_frame->rp, handler);
4292
4293 /* TODO: do we need to do anything to make the values we wrote 'stick'? */
4294 /* Do we need to go through the whole loadrs sequence? */
4295 }
4296
4297
4298 void
4299 __throw ()
4300 {
4301 register void *stack_pointer __asm__("r12");
4302 struct eh_context *eh = (*get_eh_context) ();
4303 ia64_frame_state my_frame;
4304 ia64_frame_state originator; /* For the context handler is in. */
4305 void *bsp, *tmp_bsp;
4306 long offset;
4307
4308 /* This is required for C++ semantics. We must call terminate if we
4309 try and rethrow an exception, when there is no exception currently
4310 active. */
4311 if (! eh->info)
4312 __terminate ();
4313
4314 __builtin_unwind_init ();
4315
4316 /* We have to call another routine to actually process the frame
4317 information, which will force all of __throw's local registers into
4318 backing store. */
4319
4320 /* Get the value of ar.bsp while we're here. */
4321
4322 bsp = __builtin_ia64_bsp ();
4323 ia64_throw_helper (&my_frame, &originator, bsp, stack_pointer);
4324
4325 /* Now we have to fudge the bsp by the amount in our (__throw)
4326 frame marker, since the return is going to adjust it by that much. */
4327
4328 tmp_bsp = __calc_caller_bsp ((long)__get_real_reg_value (&my_frame.pfs),
4329 my_frame.my_bsp);
4330 offset = (char *)my_frame.my_bsp - (char *)tmp_bsp;
4331 tmp_bsp = (char *)originator.my_bsp + offset;
4332
4333 __builtin_eh_return (tmp_bsp, offset, originator.my_sp);
4334
4335 /* The return address was already set by throw_helper. */
4336 }
4337
4338 #endif /* IA64_UNWIND_INFO */
4339
4340 #endif /* L_eh */