/* gcc/libgcc2.c — runtime support routines emitted by GCC for targets
   lacking native double-word arithmetic.  */
1 /* More subroutines needed by GCC output code on some machines. */
2 /* Compile this one with gcc. */
3 /* Copyright (C) 1989, 92, 93, 94, 95, 96, 97, 98, 1999, 2000
4 Free Software Foundation, Inc.
5
6 This file is part of GNU CC.
7
8 GNU CC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19 the file, and distribution when not linked into a combine
20 executable.)
21
22 GNU CC is distributed in the hope that it will be useful,
23 but WITHOUT ANY WARRANTY; without even the implied warranty of
24 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 GNU General Public License for more details.
26
27 You should have received a copy of the GNU General Public License
28 along with GNU CC; see the file COPYING. If not, write to
29 the Free Software Foundation, 59 Temple Place - Suite 330,
30 Boston, MA 02111-1307, USA. */
31
32 /* It is incorrect to include config.h here, because this file is being
33 compiled for the target, and hence definitions concerning only the host
34 do not apply. */
35
36 #include "tconfig.h"
37 #include "tsystem.h"
38
39 #include "machmode.h"
40 #include "defaults.h"
41
42 /* Don't use `fancy_abort' here even if config.h says to use it. */
43 #ifdef abort
44 #undef abort
45 #endif
46
47 #include "libgcc2.h"
48 \f
#if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
#if defined (L_divdi3) || defined (L_moddi3)
static inline
#endif
/* Two's-complement negation of a double-word value: negate the low
   word, then negate the high word and propagate the borrow that a
   nonzero low word produces.  */
DWtype
__negdi2 (DWtype u)
{
  DWunion arg;
  DWunion res;

  arg.ll = u;

  res.s.low = -arg.s.low;
  res.s.high = -arg.s.high - ((UWtype) res.s.low > 0);

  return res.ll;
}
#endif
67 \f
/* Unless shift functions are defined with full ANSI prototypes,
   parameter b will be promoted to int if word_type is smaller than an int.  */
#ifdef L_lshrdi3
/* Logical (zero-filling) right shift of the double-word U by B bits.
   A shift count of zero is special-cased because the word-sized
   shifts below would otherwise be by the full word width, which is
   undefined behavior in C.  */
DWtype
__lshrdi3 (DWtype u, word_type b)
{
  DWunion w;
  word_type bm;
  DWunion uu;

  if (b == 0)
    return u;

  uu.ll = u;

  /* bm = bits B falls short of one word; negative or zero when the
     shift crosses entirely into the high word.  */
  bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
  if (bm <= 0)
    {
      /* Count >= one word: high word becomes zero, low word is the
         old high word shifted by the leftover amount.  */
      w.s.high = 0;
      w.s.low = (UWtype) uu.s.high >> -bm;
    }
  else
    {
      /* Count < one word: bits leaving the high word enter the top
         of the low word.  */
      UWtype carries = (UWtype) uu.s.high << bm;

      w.s.high = (UWtype) uu.s.high >> b;
      w.s.low = ((UWtype) uu.s.low >> b) | carries;
    }

  return w.ll;
}
#endif
100
#ifdef L_ashldi3
/* Left shift of the double-word U by B bits.  A zero shift returns U
   unchanged, since the word-width shifts below would otherwise be by
   the full word size, which is undefined.  */
DWtype
__ashldi3 (DWtype u, word_type b)
{
  DWunion result;
  DWunion src;
  word_type remain;

  if (b == 0)
    return u;

  src.ll = u;

  /* Bits left over inside one word; non-positive when the shift
     reaches entirely into the high word.  */
  remain = (sizeof (Wtype) * BITS_PER_UNIT) - b;

  if (remain > 0)
    {
      /* Bits that move from the low word into the high word.  */
      UWtype carries = (UWtype) src.s.low >> remain;

      result.s.low = (UWtype) src.s.low << b;
      result.s.high = ((UWtype) src.s.high << b) | carries;
    }
  else
    {
      result.s.low = 0;
      result.s.high = (UWtype) src.s.low << -remain;
    }

  return result.ll;
}
#endif
131
#ifdef L_ashrdi3
/* Arithmetic (sign-extending) right shift of the double-word U by B
   bits.  Zero is special-cased to avoid undefined full-width word
   shifts.  NOTE(review): relies on the compiler implementing >> on a
   signed operand as an arithmetic shift, as GCC guarantees for its
   targets.  */
DWtype
__ashrdi3 (DWtype u, word_type b)
{
  DWunion w;
  word_type bm;
  DWunion uu;

  if (b == 0)
    return u;

  uu.ll = u;

  /* Bits remaining below one full word; non-positive when the shift
     crosses into the high word.  */
  bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
  if (bm <= 0)
    {
      /* w.s.high = 1..1 or 0..0 */
      w.s.high = uu.s.high >> (sizeof (Wtype) * BITS_PER_UNIT - 1);
      w.s.low = uu.s.high >> -bm;
    }
  else
    {
      /* Bits shifted out of the (signed) high word feed into the top
         of the low word.  */
      UWtype carries = (UWtype) uu.s.high << bm;

      w.s.high = uu.s.high >> b;
      w.s.low = ((UWtype) uu.s.low >> b) | carries;
    }

  return w.ll;
}
#endif
163 \f
#ifdef L_ffsdi2
/* Find-first-set on a double word: return the 1-based index of the
   least significant set bit of U, or zero when U is zero.  */
DWtype
__ffsdi2 (DWtype u)
{
  DWunion input, result;

  input.ll = u;
  result.s.high = 0;

  /* A hit in the low word needs no bias.  */
  result.s.low = ffs (input.s.low);
  if (result.s.low != 0)
    return result.ll;

  /* Otherwise search the high word and bias by the word width; a
     miss there leaves the whole result at zero.  */
  result.s.low = ffs (input.s.high);
  if (result.s.low != 0)
    result.s.low += BITS_PER_UNIT * sizeof (Wtype);

  return result.ll;
}
#endif
183 \f
#ifdef L_muldi3
/* Double-word multiplication.  The full-width low*low product comes
   from __umulsidi3; the two cross products affect only the high word
   of the result (the high*high product would overflow entirely and
   is dropped, which is correct modulo 2^(2*W_TYPE_SIZE)).  */
DWtype
__muldi3 (DWtype u, DWtype v)
{
  DWunion w;
  DWunion uu, vv;

  uu.ll = u,
  vv.ll = v;

  w.ll = __umulsidi3 (uu.s.low, vv.s.low);
  w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high
	       + (UWtype) uu.s.high * (UWtype) vv.s.low);

  return w.ll;
}
#endif
201 \f
#ifdef L_udiv_w_sdiv
#if defined (sdiv_qrnnd)
/* Divide the two-word number A1:A0 by the single word D using only
   the *signed* division primitive sdiv_qrnnd.  The quotient is
   returned and the remainder stored through RP.  The case analysis
   below exists to keep every intermediate dividend/divisor
   representable as a signed word.  */
UWtype
__udiv_w_sdiv (UWtype *rp, UWtype a1, UWtype a0, UWtype d)
{
  UWtype q, r;
  UWtype c0, c1, b1;

  if ((Wtype) d >= 0)
    {
      if (a1 < d - a1 - (a0 >> (W_TYPE_SIZE - 1)))
	{
	  /* dividend, divisor, and quotient are nonnegative */
	  sdiv_qrnnd (q, r, a1, a0, d);
	}
      else
	{
	  /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
	  sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (W_TYPE_SIZE - 1));
	  /* Divide (c1*2^32 + c0) by d */
	  sdiv_qrnnd (q, r, c1, c0, d);
	  /* Add 2^31 to quotient */
	  q += (UWtype) 1 << (W_TYPE_SIZE - 1);
	}
    }
  else
    {
      /* The divisor has its top bit set, so it looks negative to the
         signed primitive: halve everything, divide, then repair the
         quotient and remainder for odd D.  */
      b1 = d >> 1;			/* d/2, between 2^30 and 2^31 - 1 */
      c1 = a1 >> 1;			/* A/2 */
      c0 = (a1 << (W_TYPE_SIZE - 1)) + (a0 >> 1);

      if (a1 < b1)			/* A < 2^32*b1, so A/2 < 2^31*b1 */
	{
	  sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */

	  r = 2*r + (a0 & 1);		/* Remainder from A/(2*b1) */
	  if ((d & 1) != 0)
	    {
	      if (r >= q)
		r = r - q;
	      else if (q - r <= d)
		{
		  r = r - q + d;
		  q--;
		}
	      else
		{
		  r = r - q + 2*d;
		  q -= 2;
		}
	    }
	}
      else if (c1 < b1)			/* So 2^31 <= (A/2)/b1 < 2^32 */
	{
	  c1 = (b1 - 1) - c1;
	  c0 = ~c0;			/* logical NOT */

	  sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */

	  q = ~q;			/* (A/2)/b1 */
	  r = (b1 - 1) - r;

	  r = 2*r + (a0 & 1);		/* A/(2*b1) */

	  if ((d & 1) != 0)
	    {
	      if (r >= q)
		r = r - q;
	      else if (q - r <= d)
		{
		  r = r - q + d;
		  q--;
		}
	      else
		{
		  r = r - q + 2*d;
		  q -= 2;
		}
	    }
	}
      else				/* Implies c1 = b1 */
	{				/* Hence a1 = d - 1 = 2*b1 - 1 */
	  if (a0 >= -d)
	    {
	      q = -1;
	      r = a0 + d;
	    }
	  else
	    {
	      q = -2;
	      r = a0 + 2*d;
	    }
	}
    }

  *rp = r;
  return q;
}
#else
/* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv.  */
/* NOTE(review): this stub never stores through RP and always returns
   zero; presumably it exists only so the symbol resolves on targets
   without sdiv_qrnnd -- confirm no such target actually calls it.  */
UWtype
__udiv_w_sdiv (UWtype *rp __attribute__ ((__unused__)),
	       UWtype a1 __attribute__ ((__unused__)),
	       UWtype a0 __attribute__ ((__unused__)),
	       UWtype d __attribute__ ((__unused__)))
{
  return 0;
}
#endif
#endif
312 \f
313 #if (defined (L_udivdi3) || defined (L_divdi3) || \
314 defined (L_umoddi3) || defined (L_moddi3))
315 #define L_udivmoddi4
316 #endif
317
318 #ifdef L_udivmoddi4
/* Lookup table for counting leading zeros one byte at a time:
   __clz_tab[x] is the position (1-based) just above the most
   significant set bit of byte x, with __clz_tab[0] == 0.  Consumed by
   the count_leading_zeros macro.  */
static const UQItype __clz_tab[] =
{
  0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
};
330
#if (defined (L_udivdi3) || defined (L_divdi3) || \
     defined (L_umoddi3) || defined (L_moddi3))
static inline
#endif
/* Divide the unsigned double word N by the unsigned double word D,
   returning the quotient; when RP is non-null the remainder is stored
   through it.  The algorithm is schoolbook long division in base
   2^W_TYPE_SIZE built on the word-level primitive udiv_qrnnd, with
   the divisor normalized (shifted so its top bit is set) where the
   primitive requires it.  Division by zero is triggered deliberately
   via "1 / d0" so the target's trap, if any, fires.  */
UDWtype
__udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
{
  DWunion ww;
  DWunion nn, dd;
  DWunion rr;
  UWtype d0, d1, n0, n1, n2;
  UWtype q0, q1;
  UWtype b, bm;

  nn.ll = n;
  dd.ll = d;

  d0 = dd.s.low;
  d1 = dd.s.high;
  n0 = nn.s.low;
  n1 = nn.s.high;

#if !UDIV_NEEDS_NORMALIZATION
  /* udiv_qrnnd on this target copes with an unnormalized divisor.  */
  if (d1 == 0)
    {
      if (d0 > n1)
	{
	  /* 0q = nn / 0D */

	  udiv_qrnnd (q0, n0, n1, n0, d0);
	  q1 = 0;

	  /* Remainder in n0.  */
	}
      else
	{
	  /* qq = NN / 0d */

	  if (d0 == 0)
	    d0 = 1 / d0;	/* Divide intentionally by zero.  */

	  udiv_qrnnd (q1, n1, 0, n1, d0);
	  udiv_qrnnd (q0, n0, n1, n0, d0);

	  /* Remainder in n0.  */
	}

      if (rp != 0)
	{
	  rr.s.low = n0;
	  rr.s.high = 0;
	  *rp = rr.ll;
	}
    }

#else /* UDIV_NEEDS_NORMALIZATION */

  if (d1 == 0)
    {
      if (d0 > n1)
	{
	  /* 0q = nn / 0D */

	  count_leading_zeros (bm, d0);

	  if (bm != 0)
	    {
	      /* Normalize, i.e. make the most significant bit of the
		 denominator set.  */

	      d0 = d0 << bm;
	      n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
	      n0 = n0 << bm;
	    }

	  udiv_qrnnd (q0, n0, n1, n0, d0);
	  q1 = 0;

	  /* Remainder in n0 >> bm.  */
	}
      else
	{
	  /* qq = NN / 0d */

	  if (d0 == 0)
	    d0 = 1 / d0;	/* Divide intentionally by zero.  */

	  count_leading_zeros (bm, d0);

	  if (bm == 0)
	    {
	      /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
		 conclude (the most significant bit of n1 is set) /\ (the
		 leading quotient digit q1 = 1).

		 This special case is necessary, not an optimization.
		 (Shifts counts of W_TYPE_SIZE are undefined.)  */

	      n1 -= d0;
	      q1 = 1;
	    }
	  else
	    {
	      /* Normalize.  */

	      b = W_TYPE_SIZE - bm;

	      d0 = d0 << bm;
	      n2 = n1 >> b;
	      n1 = (n1 << bm) | (n0 >> b);
	      n0 = n0 << bm;

	      udiv_qrnnd (q1, n1, n2, n1, d0);
	    }

	  /* n1 != d0...  */

	  udiv_qrnnd (q0, n0, n1, n0, d0);

	  /* Remainder in n0 >> bm.  */
	}

      if (rp != 0)
	{
	  rr.s.low = n0 >> bm;
	  rr.s.high = 0;
	  *rp = rr.ll;
	}
    }
#endif /* UDIV_NEEDS_NORMALIZATION */

  else
    {
      if (d1 > n1)
	{
	  /* 00 = nn / DD */

	  q0 = 0;
	  q1 = 0;

	  /* Remainder in n1n0.  */
	  if (rp != 0)
	    {
	      rr.s.low = n0;
	      rr.s.high = n1;
	      *rp = rr.ll;
	    }
	}
      else
	{
	  /* 0q = NN / dd */

	  count_leading_zeros (bm, d1);
	  if (bm == 0)
	    {
	      /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
		 conclude (the most significant bit of n1 is set) /\ (the
		 quotient digit q0 = 0 or 1).

		 This special case is necessary, not an optimization.  */

	      /* The condition on the next line takes advantage of that
		 n1 >= d1 (true due to program flow).  */
	      if (n1 > d1 || n0 >= d0)
		{
		  q0 = 1;
		  sub_ddmmss (n1, n0, n1, n0, d1, d0);
		}
	      else
		q0 = 0;

	      q1 = 0;

	      if (rp != 0)
		{
		  rr.s.low = n0;
		  rr.s.high = n1;
		  *rp = rr.ll;
		}
	    }
	  else
	    {
	      UWtype m1, m0;
	      /* Normalize.  */

	      b = W_TYPE_SIZE - bm;

	      d1 = (d1 << bm) | (d0 >> b);
	      d0 = d0 << bm;
	      n2 = n1 >> b;
	      n1 = (n1 << bm) | (n0 >> b);
	      n0 = n0 << bm;

	      udiv_qrnnd (q0, n1, n2, n1, d1);
	      umul_ppmm (m1, m0, q0, d0);

	      /* The estimated quotient digit q0 can be at most 2 too
		 large; one correction step suffices here because the
		 divisor was normalized.  */
	      if (m1 > n1 || (m1 == n1 && m0 > n0))
		{
		  q0--;
		  sub_ddmmss (m1, m0, m1, m0, d1, d0);
		}

	      q1 = 0;

	      /* Remainder in (n1n0 - m1m0) >> bm.  */
	      if (rp != 0)
		{
		  sub_ddmmss (n1, n0, n1, n0, m1, m0);
		  rr.s.low = (n1 << b) | (n0 >> bm);
		  rr.s.high = n1 >> bm;
		  *rp = rr.ll;
		}
	    }
	}
    }

  ww.s.low = q0;
  ww.s.high = q1;
  return ww.ll;
}
#endif
551 #endif
552
#ifdef L_divdi3
/* Signed double-word division, built on the unsigned routine: divide
   the magnitudes, then negate the quotient when exactly one operand
   was negative.  */
DWtype
__divdi3 (DWtype u, DWtype v)
{
  word_type sign_flip = 0;
  DWunion num, den;
  DWtype quot;

  num.ll = u;
  den.ll = v;

  if (num.s.high < 0)
    {
      sign_flip = ~sign_flip;
      num.ll = __negdi2 (num.ll);
    }
  if (den.s.high < 0)
    {
      sign_flip = ~sign_flip;
      den.ll = __negdi2 (den.ll);
    }

  quot = __udivmoddi4 (num.ll, den.ll, (UDWtype *) 0);
  if (sign_flip)
    quot = __negdi2 (quot);

  return quot;
}
#endif
578
#ifdef L_moddi3
/* Signed double-word modulus.  Per C truncated-division semantics
   the remainder takes the sign of the dividend U, so only U's sign
   is tracked in C; V is simply replaced by its absolute value.  */
DWtype
__moddi3 (DWtype u, DWtype v)
{
  word_type c = 0;
  DWunion uu, vv;
  DWtype w;

  uu.ll = u;
  vv.ll = v;

  if (uu.s.high < 0)
    c = ~c,
      uu.ll = __negdi2 (uu.ll);
  if (vv.s.high < 0)
    vv.ll = __negdi2 (vv.ll);

  /* NOTE(review): W is signed but __udivmoddi4's remainder pointer is
     UDWtype *; this assumes the two types share a representation --
     true on the two's-complement targets libgcc supports, but worth
     confirming against libgcc2.h.  */
  (void) __udivmoddi4 (uu.ll, vv.ll, &w);
  if (c)
    w = __negdi2 (w);

  return w;
}
#endif
603
#ifdef L_umoddi3
/* Unsigned double-word modulus: run the combined divide/modulus
   routine, discard the quotient, and return the remainder it stored.  */
UDWtype
__umoddi3 (UDWtype u, UDWtype v)
{
  UDWtype rem;

  (void) __udivmoddi4 (u, v, &rem);

  return rem;
}
#endif
615
#ifdef L_udivdi3
/* Unsigned double-word division; the remainder is not needed, so a
   null remainder pointer is passed through.  */
UDWtype
__udivdi3 (UDWtype n, UDWtype d)
{
  UDWtype *no_remainder = (UDWtype *) 0;

  return __udivmoddi4 (n, d, no_remainder);
}
#endif
623 \f
#ifdef L_cmpdi2
/* Three-way signed comparison of double words: returns 0 when A < B,
   1 when equal, 2 when A > B.  High words compare signed; low words
   compare unsigned.  */
word_type
__cmpdi2 (DWtype a, DWtype b)
{
  DWunion lhs, rhs;

  lhs.ll = a;
  rhs.ll = b;

  if (lhs.s.high != rhs.s.high)
    return lhs.s.high < rhs.s.high ? 0 : 2;

  if ((UWtype) lhs.s.low != (UWtype) rhs.s.low)
    return (UWtype) lhs.s.low < (UWtype) rhs.s.low ? 0 : 2;

  return 1;
}
#endif
643
#ifdef L_ucmpdi2
/* Three-way unsigned comparison of double words: returns 0 when
   A < B, 1 when equal, 2 when A > B.  Both words compare unsigned.  */
word_type
__ucmpdi2 (DWtype a, DWtype b)
{
  DWunion lhs, rhs;

  lhs.ll = a;
  rhs.ll = b;

  if ((UWtype) lhs.s.high != (UWtype) rhs.s.high)
    return (UWtype) lhs.s.high < (UWtype) rhs.s.high ? 0 : 2;

  if ((UWtype) lhs.s.low != (UWtype) rhs.s.low)
    return (UWtype) lhs.s.low < (UWtype) rhs.s.low ? 0 : 2;

  return 1;
}
#endif
663 \f
#if defined(L_fixunstfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)

/* Convert a TFmode value to an unsigned double-word integer.
   Negative inputs yield 0.  The value is split into a high word via
   floating-point division by 2^WORD_SIZE and a low word from the
   residue; the exact op order below matters for rounding.  */
DWtype
__fixunstfDI (TFtype a)
{
  TFtype b;
  UDWtype v;

  if (a < 0)
    return 0;

  /* Compute high word of result, as a flonum.  */
  b = (a / HIGH_WORD_COEFF);
  /* Convert that to fixed (but not to DWtype!),
     and shift it into the high word.  */
  v = (UWtype) b;
  v <<= WORD_SIZE;
  /* Remove high part from the TFtype, leaving the low part as flonum.  */
  a -= (TFtype)v;
  /* Convert that to fixed (but not to DWtype!) and add it in.
     Sometimes A comes out negative.  This is significant, since
     A has more bits than a long int does.  */
  if (a < 0)
    v -= (UWtype) (- a);
  else
    v += (UWtype) a;
  return v;
}
#endif
695
#if defined(L_fixtfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
/* Signed TFmode to double-word conversion: reduce to the unsigned
   conversion on the magnitude and restore the sign afterwards.  */
DWtype
__fixtfdi (TFtype a)
{
  if (a < 0)
    {
      DWtype magnitude = __fixunstfDI (-a);
      return -magnitude;
    }

  return __fixunstfDI (a);
}
#endif
705
#if defined(L_fixunsxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)

/* Convert an XFmode value to an unsigned double-word integer.
   Negative inputs yield 0.  High word is obtained by floating-point
   division by 2^WORD_SIZE; the residue gives the low word.  */
DWtype
__fixunsxfDI (XFtype a)
{
  XFtype b;
  UDWtype v;

  if (a < 0)
    return 0;

  /* Compute high word of result, as a flonum.  */
  b = (a / HIGH_WORD_COEFF);
  /* Convert that to fixed (but not to DWtype!),
     and shift it into the high word.  */
  v = (UWtype) b;
  v <<= WORD_SIZE;
  /* Remove high part from the XFtype, leaving the low part as flonum.  */
  a -= (XFtype)v;
  /* Convert that to fixed (but not to DWtype!) and add it in.
     Sometimes A comes out negative.  This is significant, since
     A has more bits than a long int does.  */
  if (a < 0)
    v -= (UWtype) (- a);
  else
    v += (UWtype) a;
  return v;
}
#endif
737
#if defined(L_fixxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
/* Signed XFmode to double-word conversion via the unsigned helper on
   the magnitude, restoring the sign afterwards.  */
DWtype
__fixxfdi (XFtype a)
{
  if (a < 0)
    {
      DWtype magnitude = __fixunsxfDI (-a);
      return -magnitude;
    }

  return __fixunsxfDI (a);
}
#endif
747
#ifdef L_fixunsdfdi
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)

/* Convert a DFmode value to an unsigned double-word integer.
   Negative inputs yield 0.  High word comes from floating-point
   division by 2^WORD_SIZE; the residue supplies the low word.  */
DWtype
__fixunsdfDI (DFtype a)
{
  DFtype b;
  UDWtype v;

  if (a < 0)
    return 0;

  /* Compute high word of result, as a flonum.  */
  b = (a / HIGH_WORD_COEFF);
  /* Convert that to fixed (but not to DWtype!),
     and shift it into the high word.  */
  v = (UWtype) b;
  v <<= WORD_SIZE;
  /* Remove high part from the DFtype, leaving the low part as flonum.  */
  a -= (DFtype)v;
  /* Convert that to fixed (but not to DWtype!) and add it in.
     Sometimes A comes out negative.  This is significant, since
     A has more bits than a long int does.  */
  if (a < 0)
    v -= (UWtype) (- a);
  else
    v += (UWtype) a;
  return v;
}
#endif
779
#ifdef L_fixdfdi
/* Signed DFmode to double-word conversion via the unsigned helper on
   the magnitude, restoring the sign afterwards.  */
DWtype
__fixdfdi (DFtype a)
{
  if (a < 0)
    {
      DWtype magnitude = __fixunsdfDI (-a);
      return -magnitude;
    }

  return __fixunsdfDI (a);
}
#endif
789
#ifdef L_fixunssfdi
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)

/* Convert an SFmode value to an unsigned double-word integer.
   Negative inputs yield 0.  Widens to DFmode first so no mantissa
   bits are lost, then splits as in __fixunsdfDI.  */
DWtype
__fixunssfDI (SFtype original_a)
{
  /* Convert the SFtype to a DFtype, because that is surely not going
     to lose any bits.  Some day someone else can write a faster version
     that avoids converting to DFtype, and verify it really works right.  */
  DFtype a = original_a;
  DFtype b;
  UDWtype v;

  if (a < 0)
    return 0;

  /* Compute high word of result, as a flonum.  */
  b = (a / HIGH_WORD_COEFF);
  /* Convert that to fixed (but not to DWtype!),
     and shift it into the high word.  */
  v = (UWtype) b;
  v <<= WORD_SIZE;
  /* Remove high part from the DFtype, leaving the low part as flonum.  */
  a -= (DFtype) v;
  /* Convert that to fixed (but not to DWtype!) and add it in.
     Sometimes A comes out negative.  This is significant, since
     A has more bits than a long int does.  */
  if (a < 0)
    v -= (UWtype) (- a);
  else
    v += (UWtype) a;
  return v;
}
#endif
825
#ifdef L_fixsfdi
/* Signed SFmode to double-word conversion via the unsigned helper on
   the magnitude, restoring the sign afterwards.  */
DWtype
__fixsfdi (SFtype a)
{
  if (a < 0)
    {
      DWtype magnitude = __fixunssfDI (-a);
      return -magnitude;
    }

  return __fixunssfDI (a);
}
#endif
835
#if defined(L_floatdixf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)

/* Convert a signed double-word integer to XFmode.  The high word is
   scaled by 2^WORD_SIZE using two half-word multiplies so the scale
   constant itself always fits in a word.  */
XFtype
__floatdixf (DWtype u)
{
  XFtype d;

  /* The cast keeps the sign; the low word is then added unsigned.  */
  d = (Wtype) (u >> WORD_SIZE);
  d *= HIGH_HALFWORD_COEFF;
  d *= HIGH_HALFWORD_COEFF;
  d += (UWtype) (u & (HIGH_WORD_COEFF - 1));

  return d;
}
#endif
854
#if defined(L_floatditf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)

/* Convert a signed double-word integer to TFmode.  Same half-word
   scaling scheme as __floatdixf.  */
TFtype
__floatditf (DWtype u)
{
  TFtype d;

  /* The cast keeps the sign; the low word is then added unsigned.  */
  d = (Wtype) (u >> WORD_SIZE);
  d *= HIGH_HALFWORD_COEFF;
  d *= HIGH_HALFWORD_COEFF;
  d += (UWtype) (u & (HIGH_WORD_COEFF - 1));

  return d;
}
#endif
873
#ifdef L_floatdidf
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)

/* Convert a signed double-word integer to DFmode.  Same half-word
   scaling scheme as __floatdixf.  */
DFtype
__floatdidf (DWtype u)
{
  DFtype d;

  /* The cast keeps the sign; the low word is then added unsigned.  */
  d = (Wtype) (u >> WORD_SIZE);
  d *= HIGH_HALFWORD_COEFF;
  d *= HIGH_HALFWORD_COEFF;
  d += (UWtype) (u & (HIGH_WORD_COEFF - 1));

  return d;
}
#endif
892
#ifdef L_floatdisf
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
#define DI_SIZE (sizeof (DWtype) * BITS_PER_UNIT)

/* Define codes for all the float formats that we know of.  Note
   that this is copied from real.h.  */

#define UNKNOWN_FLOAT_FORMAT 0
#define IEEE_FLOAT_FORMAT 1
#define VAX_FLOAT_FORMAT 2
#define IBM_FLOAT_FORMAT 3

/* Default to IEEE float if not specified.  Nearly all machines use it.  */
#ifndef HOST_FLOAT_FORMAT
#define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
#endif

/* DF_SIZE/SF_SIZE are the mantissa widths (in bits) of the host's
   double and single formats; they drive the sticky-bit fix below.  */
#if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
#define DF_SIZE 53
#define SF_SIZE 24
#endif

#if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
#define DF_SIZE 56
#define SF_SIZE 24
#endif

#if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
#define DF_SIZE 56
#define SF_SIZE 24
#endif

/* Convert a signed double-word integer to SFmode.  */
SFtype
__floatdisf (DWtype u)
{
  /* Do the calculation in DFmode
     so that we don't lose any of the precision of the high word
     while multiplying it.  */
  DFtype f;

  /* Protect against double-rounding error.
     Represent any low-order bits, that might be truncated in DFmode,
     by a bit that won't be lost.  The bit can go in anywhere below the
     rounding position of the SFmode.  A fixed mask and bit position
     handles all usual configurations.  It doesn't handle the case
     of 128-bit DImode, however.  */
  if (DF_SIZE < DI_SIZE
      && DF_SIZE > (DI_SIZE - DF_SIZE + SF_SIZE))
    {
#define REP_BIT ((UDWtype) 1 << (DI_SIZE - DF_SIZE))
      if (! (- ((DWtype) 1 << DF_SIZE) < u
	     && u < ((DWtype) 1 << DF_SIZE)))
	{
	  /* Set a "sticky" bit standing in for the bits the DFmode
	     intermediate would silently discard.  */
	  if ((UDWtype) u & (REP_BIT - 1))
	    u |= REP_BIT;
	}
    }
  f = (Wtype) (u >> WORD_SIZE);
  f *= HIGH_HALFWORD_COEFF;
  f *= HIGH_HALFWORD_COEFF;
  f += (UWtype) (u & (HIGH_WORD_COEFF - 1));

  return (SFtype) f;
}
#endif
960
#if defined(L_fixunsxfsi) && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
/* Reenable the normal types, in case limits.h needs them.  */
#undef char
#undef short
#undef int
#undef long
#undef unsigned
#undef float
#undef double
#undef MIN
#undef MAX
#include <limits.h>

/* Convert XFmode to an unsigned single word.  Values too large for a
   signed word are biased down by LONG_MIN before the signed
   conversion and un-biased after, so only a signed fix instruction is
   ever needed.  */
UWtype
__fixunsxfSI (XFtype a)
{
  if (a >= - (DFtype) LONG_MIN)
    return (Wtype) (a + LONG_MIN) - LONG_MIN;
  return (Wtype) a;
}
#endif
982
#ifdef L_fixunsdfsi
/* Reenable the normal types, in case limits.h needs them.  */
#undef char
#undef short
#undef int
#undef long
#undef unsigned
#undef float
#undef double
#undef MIN
#undef MAX
#include <limits.h>

/* Convert DFmode to an unsigned single word, using the LONG_MIN bias
   trick so only a signed conversion is required (see __fixunsxfSI).  */
UWtype
__fixunsdfSI (DFtype a)
{
  if (a >= - (DFtype) LONG_MIN)
    return (Wtype) (a + LONG_MIN) - LONG_MIN;
  return (Wtype) a;
}
#endif
1004
#ifdef L_fixunssfsi
/* Reenable the normal types, in case limits.h needs them.  */
#undef char
#undef short
#undef int
#undef long
#undef unsigned
#undef float
#undef double
#undef MIN
#undef MAX
#include <limits.h>

/* Convert SFmode to an unsigned single word, using the LONG_MIN bias
   trick so only a signed conversion is required (see __fixunsxfSI).  */
UWtype
__fixunssfSI (SFtype a)
{
  if (a >= - (SFtype) LONG_MIN)
    return (Wtype) (a + LONG_MIN) - LONG_MIN;
  return (Wtype) a;
}
#endif
1026 \f
1027 /* From here on down, the routines use normal data types. */
1028
1029 #define SItype bogus_type
1030 #define USItype bogus_type
1031 #define DItype bogus_type
1032 #define UDItype bogus_type
1033 #define SFtype bogus_type
1034 #define DFtype bogus_type
1035 #undef Wtype
1036 #undef UWtype
1037 #undef HWtype
1038 #undef UHWtype
1039 #undef DWtype
1040 #undef UDWtype
1041
1042 #undef char
1043 #undef short
1044 #undef int
1045 #undef long
1046 #undef unsigned
1047 #undef float
1048 #undef double
1049 \f
1050 #ifdef L__gcc_bcmp
1051
/* Like bcmp except the sign is meaningful.
   Result is negative if S1 is less than S2,
   positive if S1 is greater, 0 if S1 and S2 are equal.  */

int
__gcc_bcmp (const unsigned char *s1, const unsigned char *s2, size_t size)
{
  for (; size != 0; size--)
    {
      int delta = *s1++ - *s2++;

      if (delta != 0)
	return delta;
    }

  return 0;
}
1068
1069 #endif
1070 \f\f
#ifdef L__dummy
/* Deliberately empty function; its only purpose is to exist as a
   linkable symbol.  */
void
__dummy (void) {}
#endif
1075
1076 #ifdef L_varargs
1077 #ifdef __i860__
1078 #if defined(__svr4__) || defined(__alliant__)
1079 asm (" .text");
1080 asm (" .align 4");
1081
1082 /* The Alliant needs the added underscore. */
1083 asm (".globl __builtin_saveregs");
1084 asm ("__builtin_saveregs:");
1085 asm (".globl ___builtin_saveregs");
1086 asm ("___builtin_saveregs:");
1087
1088 asm (" andnot 0x0f,%sp,%sp"); /* round down to 16-byte boundary */
1089 asm (" adds -96,%sp,%sp"); /* allocate stack space for reg save
1090 area and also for a new va_list
1091 structure */
1092 /* Save all argument registers in the arg reg save area. The
1093 arg reg save area must have the following layout (according
1094 to the svr4 ABI):
1095
1096 struct {
1097 union {
1098 float freg[8];
1099 double dreg[4];
1100 } float_regs;
1101 long ireg[12];
1102 };
1103 */
1104
1105 asm (" fst.q %f8, 0(%sp)"); /* save floating regs (f8-f15) */
1106 asm (" fst.q %f12,16(%sp)");
1107
1108 asm (" st.l %r16,32(%sp)"); /* save integer regs (r16-r27) */
1109 asm (" st.l %r17,36(%sp)");
1110 asm (" st.l %r18,40(%sp)");
1111 asm (" st.l %r19,44(%sp)");
1112 asm (" st.l %r20,48(%sp)");
1113 asm (" st.l %r21,52(%sp)");
1114 asm (" st.l %r22,56(%sp)");
1115 asm (" st.l %r23,60(%sp)");
1116 asm (" st.l %r24,64(%sp)");
1117 asm (" st.l %r25,68(%sp)");
1118 asm (" st.l %r26,72(%sp)");
1119 asm (" st.l %r27,76(%sp)");
1120
1121 asm (" adds 80,%sp,%r16"); /* compute the address of the new
1122 va_list structure. Put in into
1123 r16 so that it will be returned
1124 to the caller. */
1125
1126 /* Initialize all fields of the new va_list structure. This
1127 structure looks like:
1128
1129 typedef struct {
1130 unsigned long ireg_used;
1131 unsigned long freg_used;
1132 long *reg_base;
1133 long *mem_ptr;
1134 } va_list;
1135 */
1136
1137 asm (" st.l %r0, 0(%r16)"); /* nfixed */
1138 asm (" st.l %r0, 4(%r16)"); /* nfloating */
1139 asm (" st.l %sp, 8(%r16)"); /* __va_ctl points to __va_struct. */
1140 asm (" bri %r1"); /* delayed return */
1141 asm (" st.l %r28,12(%r16)"); /* pointer to overflow args */
1142
1143 #else /* not __svr4__ */
1144 #if defined(__PARAGON__)
1145 /*
1146 * we'll use SVR4-ish varargs but need SVR3.2 assembler syntax,
1147 * and we stand a better chance of hooking into libraries
1148 * compiled by PGI. [andyp@ssd.intel.com]
1149 */
1150 asm (" .text");
1151 asm (" .align 4");
1152 asm (".globl __builtin_saveregs");
1153 asm ("__builtin_saveregs:");
1154 asm (".globl ___builtin_saveregs");
1155 asm ("___builtin_saveregs:");
1156
1157 asm (" andnot 0x0f,sp,sp"); /* round down to 16-byte boundary */
1158 asm (" adds -96,sp,sp"); /* allocate stack space for reg save
1159 area and also for a new va_list
1160 structure */
1161 /* Save all argument registers in the arg reg save area. The
1162 arg reg save area must have the following layout (according
1163 to the svr4 ABI):
1164
1165 struct {
1166 union {
1167 float freg[8];
1168 double dreg[4];
1169 } float_regs;
1170 long ireg[12];
1171 };
1172 */
1173
1174 asm (" fst.q f8, 0(sp)");
1175 asm (" fst.q f12,16(sp)");
1176 asm (" st.l r16,32(sp)");
1177 asm (" st.l r17,36(sp)");
1178 asm (" st.l r18,40(sp)");
1179 asm (" st.l r19,44(sp)");
1180 asm (" st.l r20,48(sp)");
1181 asm (" st.l r21,52(sp)");
1182 asm (" st.l r22,56(sp)");
1183 asm (" st.l r23,60(sp)");
1184 asm (" st.l r24,64(sp)");
1185 asm (" st.l r25,68(sp)");
1186 asm (" st.l r26,72(sp)");
1187 asm (" st.l r27,76(sp)");
1188
1189 asm (" adds 80,sp,r16"); /* compute the address of the new
1190 va_list structure. Put in into
1191 r16 so that it will be returned
1192 to the caller. */
1193
1194 /* Initialize all fields of the new va_list structure. This
1195 structure looks like:
1196
1197 typedef struct {
1198 unsigned long ireg_used;
1199 unsigned long freg_used;
1200 long *reg_base;
1201 long *mem_ptr;
1202 } va_list;
1203 */
1204
1205 asm (" st.l r0, 0(r16)"); /* nfixed */
1206 asm (" st.l r0, 4(r16)"); /* nfloating */
1207 asm (" st.l sp, 8(r16)"); /* __va_ctl points to __va_struct. */
1208 asm (" bri r1"); /* delayed return */
1209 asm (" st.l r28,12(r16)"); /* pointer to overflow args */
1210 #else /* not __PARAGON__ */
1211 asm (" .text");
1212 asm (" .align 4");
1213
1214 asm (".globl ___builtin_saveregs");
1215 asm ("___builtin_saveregs:");
1216 asm (" mov sp,r30");
1217 asm (" andnot 0x0f,sp,sp");
1218 asm (" adds -96,sp,sp"); /* allocate sufficient space on the stack */
1219
1220 /* Fill in the __va_struct. */
1221 asm (" st.l r16, 0(sp)"); /* save integer regs (r16-r27) */
1222 asm (" st.l r17, 4(sp)"); /* int fixed[12] */
1223 asm (" st.l r18, 8(sp)");
1224 asm (" st.l r19,12(sp)");
1225 asm (" st.l r20,16(sp)");
1226 asm (" st.l r21,20(sp)");
1227 asm (" st.l r22,24(sp)");
1228 asm (" st.l r23,28(sp)");
1229 asm (" st.l r24,32(sp)");
1230 asm (" st.l r25,36(sp)");
1231 asm (" st.l r26,40(sp)");
1232 asm (" st.l r27,44(sp)");
1233
1234 asm (" fst.q f8, 48(sp)"); /* save floating regs (f8-f15) */
1235 asm (" fst.q f12,64(sp)"); /* int floating[8] */
1236
1237 /* Fill in the __va_ctl. */
1238 asm (" st.l sp, 80(sp)"); /* __va_ctl points to __va_struct. */
1239 asm (" st.l r28,84(sp)"); /* pointer to more args */
1240 asm (" st.l r0, 88(sp)"); /* nfixed */
1241 asm (" st.l r0, 92(sp)"); /* nfloating */
1242
1243 asm (" adds 80,sp,r16"); /* return address of the __va_ctl. */
1244 asm (" bri r1");
1245 asm (" mov r30,sp");
1246 /* recover stack and pass address to start
1247 of data. */
1248 #endif /* not __PARAGON__ */
1249 #endif /* not __svr4__ */
1250 #else /* not __i860__ */
1251 #ifdef __sparc__
1252 asm (".global __builtin_saveregs");
1253 asm ("__builtin_saveregs:");
1254 asm (".global ___builtin_saveregs");
1255 asm ("___builtin_saveregs:");
1256 #ifdef NEED_PROC_COMMAND
1257 asm (".proc 020");
1258 #endif
1259 asm ("st %i0,[%fp+68]");
1260 asm ("st %i1,[%fp+72]");
1261 asm ("st %i2,[%fp+76]");
1262 asm ("st %i3,[%fp+80]");
1263 asm ("st %i4,[%fp+84]");
1264 asm ("retl");
1265 asm ("st %i5,[%fp+88]");
1266 #ifdef NEED_TYPE_COMMAND
1267 asm (".type __builtin_saveregs,#function");
1268 asm (".size __builtin_saveregs,.-__builtin_saveregs");
1269 #endif
1270 #else /* not __sparc__ */
1271 #if defined(__MIPSEL__) | defined(__R3000__) | defined(__R2000__) | defined(__mips__)
1272
1273 asm (" .text");
1274 #ifdef __mips16
1275 asm (" .set nomips16");
1276 #endif
1277 asm (" .ent __builtin_saveregs");
1278 asm (" .globl __builtin_saveregs");
1279 asm ("__builtin_saveregs:");
1280 asm (" sw $4,0($30)");
1281 asm (" sw $5,4($30)");
1282 asm (" sw $6,8($30)");
1283 asm (" sw $7,12($30)");
1284 asm (" j $31");
1285 asm (" .end __builtin_saveregs");
1286 #else /* not __mips__, etc. */
1287
/* Generic fallback for targets with no register-save sequence above:
   any runtime call to __builtin_saveregs is a fatal error.  */
void * __attribute__ ((__noreturn__))
__builtin_saveregs (void)
{
  abort ();
}
1293
1294 #endif /* not __mips__ */
1295 #endif /* not __sparc__ */
1296 #endif /* not __i860__ */
1297 #endif
1298 \f
1299 #ifdef L_eprintf
1300 #ifndef inhibit_libc
1301
1302 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1303 #include <stdio.h>
/* This is used by the `assert' macro.  */
/* Print the formatted assertion-failure message (STRING is the format
   carrying EXPRESSION, LINE, and FILENAME), flush so the message is
   not lost, and abort.  Never returns.  */
void
__eprintf (const char *string, const char *expression,
	   unsigned int line, const char *filename)
{
  fprintf (stderr, string, expression, line, filename);
  fflush (stderr);
  abort ();
}
1313
1314 #endif
1315 #endif
1316
1317 #ifdef L_bb
1318
/* Structure emitted by -a */
/* Per-object-file basic-block profiling record; instrumented code
   links these into the bb_head chain at startup.  */
struct bb
{
  long zero_word;		/* NOTE(review): presumably a sentinel;
				   confirm against the -a emitter.  */
  const char *filename;		/* name of the data/output file */
  long *counts;			/* execution counters, one per block */
  long ncounts;			/* number of entries in counts */
  struct bb *next;		/* next record in the global chain */
  const unsigned long *addresses;

  /* Older GCC's did not emit these fields.  */
  long nwords;
  const char **functions;	/* per-block function names */
  const long *line_nums;	/* per-block source line numbers */
  const char **filenames;	/* per-block source file names */
  char *flags;
};
1336
1337 #ifdef BLOCK_PROFILER_CODE
1338 BLOCK_PROFILER_CODE
1339 #else
1340 #ifndef inhibit_libc
1341
1342 /* Simple minded basic block profiling output dumper for
1343 systems that don't provide tcov support. At present,
1344 it requires atexit and stdio. */
1345
1346 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1347 #include <stdio.h>
1348 char *ctime ();
1349
1350 #include "gbl-ctors.h"
1351 #include "gcov-io.h"
1352 #include <string.h>
1353 #ifdef TARGET_HAS_F_SETLKW
1354 #include <fcntl.h>
1355 #include <errno.h>
1356 #endif
1357
1358 static struct bb *bb_head;
1359
1360 static int num_digits (long value, int base) __attribute__ ((const));
1361
1362 /* Return the number of digits needed to print a value */
1363 /* __inline__ */ static int num_digits (long value, int base)
1364 {
1365 int minus = (value < 0 && base != 16);
1366 unsigned long v = (minus) ? -value : value;
1367 int ret = minus;
1368
1369 do
1370 {
1371 v /= base;
1372 ret++;
1373 }
1374 while (v);
1375
1376 return ret;
1377 }
1378
1379 void
1380 __bb_exit_func (void)
1381 {
1382 FILE *da_file, *file;
1383 long time_value;
1384 int i;
1385
1386 if (bb_head == 0)
1387 return;
1388
1389 i = strlen (bb_head->filename) - 3;
1390
1391 if (!strcmp (bb_head->filename+i, ".da"))
1392 {
1393 /* Must be -fprofile-arcs not -a.
1394 Dump data in a form that gcov expects. */
1395
1396 struct bb *ptr;
1397
1398 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1399 {
1400 int firstchar;
1401
1402 /* Make sure the output file exists -
1403 but don't clobber exiting data. */
1404 if ((da_file = fopen (ptr->filename, "a")) != 0)
1405 fclose (da_file);
1406
1407 /* Need to re-open in order to be able to write from the start. */
1408 da_file = fopen (ptr->filename, "r+b");
1409 /* Some old systems might not allow the 'b' mode modifier.
1410 Therefore, try to open without it. This can lead to a race
1411 condition so that when you delete and re-create the file, the
1412 file might be opened in text mode, but then, you shouldn't
1413 delete the file in the first place. */
1414 if (da_file == 0)
1415 da_file = fopen (ptr->filename, "r+");
1416 if (da_file == 0)
1417 {
1418 fprintf (stderr, "arc profiling: Can't open output file %s.\n",
1419 ptr->filename);
1420 continue;
1421 }
1422
1423 /* After a fork, another process might try to read and/or write
1424 the same file simultanously. So if we can, lock the file to
1425 avoid race conditions. */
1426 #if defined (TARGET_HAS_F_SETLKW)
1427 {
1428 struct flock s_flock;
1429
1430 s_flock.l_type = F_WRLCK;
1431 s_flock.l_whence = SEEK_SET;
1432 s_flock.l_start = 0;
1433 s_flock.l_len = 1;
1434 s_flock.l_pid = getpid ();
1435
1436 while (fcntl (fileno (da_file), F_SETLKW, &s_flock)
1437 && errno == EINTR);
1438 }
1439 #endif
1440
1441 /* If the file is not empty, and the number of counts in it is the
1442 same, then merge them in. */
1443 firstchar = fgetc (da_file);
1444 if (firstchar == EOF)
1445 {
1446 if (ferror (da_file))
1447 {
1448 fprintf (stderr, "arc profiling: Can't read output file ");
1449 perror (ptr->filename);
1450 }
1451 }
1452 else
1453 {
1454 long n_counts = 0;
1455
1456 if (ungetc (firstchar, da_file) == EOF)
1457 rewind (da_file);
1458 if (__read_long (&n_counts, da_file, 8) != 0)
1459 {
1460 fprintf (stderr, "arc profiling: Can't read output file %s.\n",
1461 ptr->filename);
1462 continue;
1463 }
1464
1465 if (n_counts == ptr->ncounts)
1466 {
1467 int i;
1468
1469 for (i = 0; i < n_counts; i++)
1470 {
1471 long v = 0;
1472
1473 if (__read_long (&v, da_file, 8) != 0)
1474 {
1475 fprintf (stderr, "arc profiling: Can't read output file %s.\n",
1476 ptr->filename);
1477 break;
1478 }
1479 ptr->counts[i] += v;
1480 }
1481 }
1482
1483 }
1484
1485 rewind (da_file);
1486
1487 /* ??? Should first write a header to the file. Preferably, a 4 byte
1488 magic number, 4 bytes containing the time the program was
1489 compiled, 4 bytes containing the last modification time of the
1490 source file, and 4 bytes indicating the compiler options used.
1491
1492 That way we can easily verify that the proper source/executable/
1493 data file combination is being used from gcov. */
1494
1495 if (__write_long (ptr->ncounts, da_file, 8) != 0)
1496 {
1497
1498 fprintf (stderr, "arc profiling: Error writing output file %s.\n",
1499 ptr->filename);
1500 }
1501 else
1502 {
1503 int j;
1504 long *count_ptr = ptr->counts;
1505 int ret = 0;
1506 for (j = ptr->ncounts; j > 0; j--)
1507 {
1508 if (__write_long (*count_ptr, da_file, 8) != 0)
1509 {
1510 ret=1;
1511 break;
1512 }
1513 count_ptr++;
1514 }
1515 if (ret)
1516 fprintf (stderr, "arc profiling: Error writing output file %s.\n",
1517 ptr->filename);
1518 }
1519
1520 if (fclose (da_file) == EOF)
1521 fprintf (stderr, "arc profiling: Error closing output file %s.\n",
1522 ptr->filename);
1523 }
1524
1525 return;
1526 }
1527
1528 /* Must be basic block profiling. Emit a human readable output file. */
1529
1530 file = fopen ("bb.out", "a");
1531
1532 if (!file)
1533 perror ("bb.out");
1534
1535 else
1536 {
1537 struct bb *ptr;
1538
1539 /* This is somewhat type incorrect, but it avoids worrying about
1540 exactly where time.h is included from. It should be ok unless
1541 a void * differs from other pointer formats, or if sizeof (long)
1542 is < sizeof (time_t). It would be nice if we could assume the
1543 use of rationale standards here. */
1544
1545 time ((void *) &time_value);
1546 fprintf (file, "Basic block profiling finished on %s\n", ctime ((void *) &time_value));
1547
1548 /* We check the length field explicitly in order to allow compatibility
1549 with older GCC's which did not provide it. */
1550
1551 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1552 {
1553 int i;
1554 int func_p = (ptr->nwords >= (long) sizeof (struct bb)
1555 && ptr->nwords <= 1000
1556 && ptr->functions);
1557 int line_p = (func_p && ptr->line_nums);
1558 int file_p = (func_p && ptr->filenames);
1559 int addr_p = (ptr->addresses != 0);
1560 long ncounts = ptr->ncounts;
1561 long cnt_max = 0;
1562 long line_max = 0;
1563 long addr_max = 0;
1564 int file_len = 0;
1565 int func_len = 0;
1566 int blk_len = num_digits (ncounts, 10);
1567 int cnt_len;
1568 int line_len;
1569 int addr_len;
1570
1571 fprintf (file, "File %s, %ld basic blocks \n\n",
1572 ptr->filename, ncounts);
1573
1574 /* Get max values for each field. */
1575 for (i = 0; i < ncounts; i++)
1576 {
1577 const char *p;
1578 int len;
1579
1580 if (cnt_max < ptr->counts[i])
1581 cnt_max = ptr->counts[i];
1582
1583 if (addr_p && (unsigned long) addr_max < ptr->addresses[i])
1584 addr_max = ptr->addresses[i];
1585
1586 if (line_p && line_max < ptr->line_nums[i])
1587 line_max = ptr->line_nums[i];
1588
1589 if (func_p)
1590 {
1591 p = (ptr->functions[i]) ? (ptr->functions[i]) : "<none>";
1592 len = strlen (p);
1593 if (func_len < len)
1594 func_len = len;
1595 }
1596
1597 if (file_p)
1598 {
1599 p = (ptr->filenames[i]) ? (ptr->filenames[i]) : "<none>";
1600 len = strlen (p);
1601 if (file_len < len)
1602 file_len = len;
1603 }
1604 }
1605
1606 addr_len = num_digits (addr_max, 16);
1607 cnt_len = num_digits (cnt_max, 10);
1608 line_len = num_digits (line_max, 10);
1609
1610 /* Now print out the basic block information. */
1611 for (i = 0; i < ncounts; i++)
1612 {
1613 fprintf (file,
1614 " Block #%*d: executed %*ld time(s)",
1615 blk_len, i+1,
1616 cnt_len, ptr->counts[i]);
1617
1618 if (addr_p)
1619 fprintf (file, " address= 0x%.*lx", addr_len,
1620 ptr->addresses[i]);
1621
1622 if (func_p)
1623 fprintf (file, " function= %-*s", func_len,
1624 (ptr->functions[i]) ? ptr->functions[i] : "<none>");
1625
1626 if (line_p)
1627 fprintf (file, " line= %*ld", line_len, ptr->line_nums[i]);
1628
1629 if (file_p)
1630 fprintf (file, " file= %s",
1631 (ptr->filenames[i]) ? ptr->filenames[i] : "<none>");
1632
1633 fprintf (file, "\n");
1634 }
1635
1636 fprintf (file, "\n");
1637 fflush (file);
1638 }
1639
1640 fprintf (file, "\n\n");
1641 fclose (file);
1642 }
1643 }
1644
1645 void
1646 __bb_init_func (struct bb *blocks)
1647 {
1648 /* User is supposed to check whether the first word is non-0,
1649 but just in case.... */
1650
1651 if (blocks->zero_word)
1652 return;
1653
1654 /* Initialize destructor. */
1655 if (!bb_head)
1656 atexit (__bb_exit_func);
1657
1658 /* Set up linked list. */
1659 blocks->zero_word = 1;
1660 blocks->next = bb_head;
1661 bb_head = blocks;
1662 }
1663
1664 /* Called before fork or exec - write out profile information gathered so
1665 far and reset it to zero. This avoids duplication or loss of the
1666 profile information gathered so far. */
1667 void
1668 __bb_fork_func (void)
1669 {
1670 struct bb *ptr;
1671
1672 __bb_exit_func ();
1673 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1674 {
1675 long i;
1676 for (i = ptr->ncounts - 1; i >= 0; i--)
1677 ptr->counts[i] = 0;
1678 }
1679 }
1680
1681 #ifndef MACHINE_STATE_SAVE
1682 #define MACHINE_STATE_SAVE(ID)
1683 #endif
1684 #ifndef MACHINE_STATE_RESTORE
1685 #define MACHINE_STATE_RESTORE(ID)
1686 #endif
1687
1688 /* Number of buckets in hashtable of basic block addresses. */
1689
1690 #define BB_BUCKETS 311
1691
1692 /* Maximum length of string in file bb.in. */
1693
1694 #define BBINBUFSIZE 500
1695
1696 struct bb_edge
1697 {
1698 struct bb_edge *next;
1699 unsigned long src_addr;
1700 unsigned long dst_addr;
1701 unsigned long count;
1702 };
1703
1704 enum bb_func_mode
1705 {
1706 TRACE_KEEP = 0, TRACE_ON = 1, TRACE_OFF = 2
1707 };
1708
1709 struct bb_func
1710 {
1711 struct bb_func *next;
1712 char *funcname;
1713 char *filename;
1714 enum bb_func_mode mode;
1715 };
1716
1717 /* This is the connection to the outside world.
1718 The BLOCK_PROFILER macro must set __bb.blocks
1719 and __bb.blockno. */
1720
1721 struct {
1722 unsigned long blockno;
1723 struct bb *blocks;
1724 } __bb;
1725
1726 /* Vars to store addrs of source and destination basic blocks
1727 of a jump. */
1728
1729 static unsigned long bb_src = 0;
1730 static unsigned long bb_dst = 0;
1731
1732 static FILE *bb_tracefile = (FILE *) 0;
1733 static struct bb_edge **bb_hashbuckets = (struct bb_edge **) 0;
1734 static struct bb_func *bb_func_head = (struct bb_func *) 0;
1735 static unsigned long bb_callcount = 0;
1736 static int bb_mode = 0;
1737
1738 static unsigned long *bb_stack = (unsigned long *) 0;
1739 static size_t bb_stacksize = 0;
1740
1741 static int reported = 0;
1742
/* Trace modes:
Always               : Print execution frequencies of basic blocks
                       to file bb.out.
(bb_mode & 1) != 0   : Dump trace of basic blocks to file bbtrace[.gz]
(bb_mode & 2) != 0   : Print jump frequencies to file bb.out.
(bb_mode & 4) != 0   : Cut call instructions from basic block flow.
(bb_mode & 8) != 0   : Insert return instructions in basic block flow.
*/
1751
1752 #ifdef HAVE_POPEN
1753
1754 /*#include <sys/types.h>*/
1755 #include <sys/stat.h>
1756 /*#include <malloc.h>*/
1757
1758 /* Commands executed by gopen. */
1759
1760 #define GOPENDECOMPRESS "gzip -cd "
1761 #define GOPENCOMPRESS "gzip -c >"
1762
1763 /* Like fopen but pipes through gzip. mode may only be "r" or "w".
1764 If it does not compile, simply replace gopen by fopen and delete
1765 '.gz' from any first parameter to gopen. */
1766
1767 static FILE *
1768 gopen (char *fn, char *mode)
1769 {
1770 int use_gzip;
1771 char *p;
1772
1773 if (mode[1])
1774 return (FILE *) 0;
1775
1776 if (mode[0] != 'r' && mode[0] != 'w')
1777 return (FILE *) 0;
1778
1779 p = fn + strlen (fn)-1;
1780 use_gzip = ((p[-1] == '.' && (p[0] == 'Z' || p[0] == 'z'))
1781 || (p[-2] == '.' && p[-1] == 'g' && p[0] == 'z'));
1782
1783 if (use_gzip)
1784 {
1785 if (mode[0]=='r')
1786 {
1787 FILE *f;
1788 char *s = (char *) malloc (sizeof (char) * strlen (fn)
1789 + sizeof (GOPENDECOMPRESS));
1790 strcpy (s, GOPENDECOMPRESS);
1791 strcpy (s + (sizeof (GOPENDECOMPRESS)-1), fn);
1792 f = popen (s, mode);
1793 free (s);
1794 return f;
1795 }
1796
1797 else
1798 {
1799 FILE *f;
1800 char *s = (char *) malloc (sizeof (char) * strlen (fn)
1801 + sizeof (GOPENCOMPRESS));
1802 strcpy (s, GOPENCOMPRESS);
1803 strcpy (s + (sizeof (GOPENCOMPRESS)-1), fn);
1804 if (!(f = popen (s, mode)))
1805 f = fopen (s, mode);
1806 free (s);
1807 return f;
1808 }
1809 }
1810
1811 else
1812 return fopen (fn, mode);
1813 }
1814
1815 static int
1816 gclose (FILE *f)
1817 {
1818 struct stat buf;
1819
1820 if (f != 0)
1821 {
1822 if (!fstat (fileno (f), &buf) && S_ISFIFO (buf.st_mode))
1823 return pclose (f);
1824
1825 return fclose (f);
1826 }
1827 return 0;
1828 }
1829
1830 #endif /* HAVE_POPEN */
1831
1832 /* Called once per program. */
1833
1834 static void
1835 __bb_exit_trace_func (void)
1836 {
1837 FILE *file = fopen ("bb.out", "a");
1838 struct bb_func *f;
1839 struct bb *b;
1840
1841 if (!file)
1842 perror ("bb.out");
1843
1844 if (bb_mode & 1)
1845 {
1846 if (!bb_tracefile)
1847 perror ("bbtrace");
1848 else
1849 #ifdef HAVE_POPEN
1850 gclose (bb_tracefile);
1851 #else
1852 fclose (bb_tracefile);
1853 #endif /* HAVE_POPEN */
1854 }
1855
1856 /* Check functions in `bb.in'. */
1857
1858 if (file)
1859 {
1860 long time_value;
1861 const struct bb_func *p;
1862 int printed_something = 0;
1863 struct bb *ptr;
1864 long blk;
1865
1866 /* This is somewhat type incorrect. */
1867 time ((void *) &time_value);
1868
1869 for (p = bb_func_head; p != (struct bb_func *) 0; p = p->next)
1870 {
1871 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1872 {
1873 if (!ptr->filename || (p->filename != (char *) 0 && strcmp (p->filename, ptr->filename)))
1874 continue;
1875 for (blk = 0; blk < ptr->ncounts; blk++)
1876 {
1877 if (!strcmp (p->funcname, ptr->functions[blk]))
1878 goto found;
1879 }
1880 }
1881
1882 if (!printed_something)
1883 {
1884 fprintf (file, "Functions in `bb.in' not executed during basic block profiling on %s\n", ctime ((void *) &time_value));
1885 printed_something = 1;
1886 }
1887
1888 fprintf (file, "\tFunction %s", p->funcname);
1889 if (p->filename)
1890 fprintf (file, " of file %s", p->filename);
1891 fprintf (file, "\n" );
1892
1893 found: ;
1894 }
1895
1896 if (printed_something)
1897 fprintf (file, "\n");
1898
1899 }
1900
1901 if (bb_mode & 2)
1902 {
1903 if (!bb_hashbuckets)
1904 {
1905 if (!reported)
1906 {
1907 fprintf (stderr, "Profiler: out of memory\n");
1908 reported = 1;
1909 }
1910 return;
1911 }
1912
1913 else if (file)
1914 {
1915 long time_value;
1916 int i;
1917 unsigned long addr_max = 0;
1918 unsigned long cnt_max = 0;
1919 int cnt_len;
1920 int addr_len;
1921
1922 /* This is somewhat type incorrect, but it avoids worrying about
1923 exactly where time.h is included from. It should be ok unless
1924 a void * differs from other pointer formats, or if sizeof (long)
1925 is < sizeof (time_t). It would be nice if we could assume the
1926 use of rationale standards here. */
1927
1928 time ((void *) &time_value);
1929 fprintf (file, "Basic block jump tracing");
1930
1931 switch (bb_mode & 12)
1932 {
1933 case 0:
1934 fprintf (file, " (with call)");
1935 break;
1936
1937 case 4:
1938 /* Print nothing. */
1939 break;
1940
1941 case 8:
1942 fprintf (file, " (with call & ret)");
1943 break;
1944
1945 case 12:
1946 fprintf (file, " (with ret)");
1947 break;
1948 }
1949
1950 fprintf (file, " finished on %s\n", ctime ((void *) &time_value));
1951
1952 for (i = 0; i < BB_BUCKETS; i++)
1953 {
1954 struct bb_edge *bucket = bb_hashbuckets[i];
1955 for ( ; bucket; bucket = bucket->next )
1956 {
1957 if (addr_max < bucket->src_addr)
1958 addr_max = bucket->src_addr;
1959 if (addr_max < bucket->dst_addr)
1960 addr_max = bucket->dst_addr;
1961 if (cnt_max < bucket->count)
1962 cnt_max = bucket->count;
1963 }
1964 }
1965 addr_len = num_digits (addr_max, 16);
1966 cnt_len = num_digits (cnt_max, 10);
1967
1968 for ( i = 0; i < BB_BUCKETS; i++)
1969 {
1970 struct bb_edge *bucket = bb_hashbuckets[i];
1971 for ( ; bucket; bucket = bucket->next )
1972 {
1973 fprintf (file,
1974 "Jump from block 0x%.*lx to block 0x%.*lx executed %*lu time(s)\n",
1975 addr_len, bucket->src_addr,
1976 addr_len, bucket->dst_addr,
1977 cnt_len, bucket->count);
1978 }
1979 }
1980
1981 fprintf (file, "\n");
1982
1983 }
1984 }
1985
1986 if (file)
1987 fclose (file);
1988
1989 /* Free allocated memory. */
1990
1991 f = bb_func_head;
1992 while (f)
1993 {
1994 struct bb_func *old = f;
1995
1996 f = f->next;
1997 if (old->funcname) free (old->funcname);
1998 if (old->filename) free (old->filename);
1999 free (old);
2000 }
2001
2002 if (bb_stack)
2003 free (bb_stack);
2004
2005 if (bb_hashbuckets)
2006 {
2007 int i;
2008
2009 for (i = 0; i < BB_BUCKETS; i++)
2010 {
2011 struct bb_edge *old, *bucket = bb_hashbuckets[i];
2012
2013 while (bucket)
2014 {
2015 old = bucket;
2016 bucket = bucket->next;
2017 free (old);
2018 }
2019 }
2020 free (bb_hashbuckets);
2021 }
2022
2023 for (b = bb_head; b; b = b->next)
2024 if (b->flags) free (b->flags);
2025 }
2026
2027 /* Called once per program. */
2028
2029 static void
2030 __bb_init_prg (void)
2031 {
2032 FILE *file;
2033 char buf[BBINBUFSIZE];
2034 const char *p;
2035 const char *pos;
2036 enum bb_func_mode m;
2037 int i;
2038
2039 /* Initialize destructor. */
2040 atexit (__bb_exit_func);
2041
2042 if (!(file = fopen ("bb.in", "r")))
2043 return;
2044
2045 while(fgets (buf, BBINBUFSIZE, file) != 0)
2046 {
2047 i = strlen (buf);
2048 if (buf[i] == '\n')
2049 buf[i--] = '\0';
2050
2051 p = buf;
2052 if (*p == '-')
2053 {
2054 m = TRACE_OFF;
2055 p++;
2056 }
2057 else
2058 {
2059 m = TRACE_ON;
2060 }
2061 if (!strcmp (p, "__bb_trace__"))
2062 bb_mode |= 1;
2063 else if (!strcmp (p, "__bb_jumps__"))
2064 bb_mode |= 2;
2065 else if (!strcmp (p, "__bb_hidecall__"))
2066 bb_mode |= 4;
2067 else if (!strcmp (p, "__bb_showret__"))
2068 bb_mode |= 8;
2069 else
2070 {
2071 struct bb_func *f = (struct bb_func *) malloc (sizeof (struct bb_func));
2072 if (f)
2073 {
2074 unsigned long l;
2075 f->next = bb_func_head;
2076 if ((pos = strchr (p, ':')))
2077 {
2078 if (!(f->funcname = (char *) malloc (strlen (pos+1)+1)))
2079 continue;
2080 strcpy (f->funcname, pos+1);
2081 l = pos-p;
2082 if ((f->filename = (char *) malloc (l+1)))
2083 {
2084 strncpy (f->filename, p, l);
2085 f->filename[l] = '\0';
2086 }
2087 else
2088 f->filename = (char *) 0;
2089 }
2090 else
2091 {
2092 if (!(f->funcname = (char *) malloc (strlen (p)+1)))
2093 continue;
2094 strcpy (f->funcname, p);
2095 f->filename = (char *) 0;
2096 }
2097 f->mode = m;
2098 bb_func_head = f;
2099 }
2100 }
2101 }
2102 fclose (file);
2103
2104 #ifdef HAVE_POPEN
2105
2106 if (bb_mode & 1)
2107 bb_tracefile = gopen ("bbtrace.gz", "w");
2108
2109 #else
2110
2111 if (bb_mode & 1)
2112 bb_tracefile = fopen ("bbtrace", "w");
2113
2114 #endif /* HAVE_POPEN */
2115
2116 if (bb_mode & 2)
2117 {
2118 bb_hashbuckets = (struct bb_edge **)
2119 malloc (BB_BUCKETS * sizeof (struct bb_edge *));
2120 if (bb_hashbuckets)
2121 /* Use a loop here rather than calling bzero to avoid having to
2122 conditionalize its existance. */
2123 for (i = 0; i < BB_BUCKETS; i++)
2124 bb_hashbuckets[i] = 0;
2125 }
2126
2127 if (bb_mode & 12)
2128 {
2129 bb_stacksize = 10;
2130 bb_stack = (unsigned long *) malloc (bb_stacksize * sizeof (*bb_stack));
2131 }
2132
2133 /* Initialize destructor. */
2134 atexit (__bb_exit_trace_func);
2135 }
2136
2137 /* Called upon entering a basic block. */
2138
2139 void
2140 __bb_trace_func (void)
2141 {
2142 struct bb_edge *bucket;
2143
2144 MACHINE_STATE_SAVE("1")
2145
2146 if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))
2147 goto skip;
2148
2149 bb_dst = __bb.blocks->addresses[__bb.blockno];
2150 __bb.blocks->counts[__bb.blockno]++;
2151
2152 if (bb_tracefile)
2153 {
2154 fwrite (&bb_dst, sizeof (unsigned long), 1, bb_tracefile);
2155 }
2156
2157 if (bb_hashbuckets)
2158 {
2159 struct bb_edge **startbucket, **oldnext;
2160
2161 oldnext = startbucket
2162 = & bb_hashbuckets[ (((int) bb_src*8) ^ (int) bb_dst) % BB_BUCKETS ];
2163 bucket = *startbucket;
2164
2165 for (bucket = *startbucket; bucket;
2166 oldnext = &(bucket->next), bucket = *oldnext)
2167 {
2168 if (bucket->src_addr == bb_src
2169 && bucket->dst_addr == bb_dst)
2170 {
2171 bucket->count++;
2172 *oldnext = bucket->next;
2173 bucket->next = *startbucket;
2174 *startbucket = bucket;
2175 goto ret;
2176 }
2177 }
2178
2179 bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
2180
2181 if (!bucket)
2182 {
2183 if (!reported)
2184 {
2185 fprintf (stderr, "Profiler: out of memory\n");
2186 reported = 1;
2187 }
2188 }
2189
2190 else
2191 {
2192 bucket->src_addr = bb_src;
2193 bucket->dst_addr = bb_dst;
2194 bucket->next = *startbucket;
2195 *startbucket = bucket;
2196 bucket->count = 1;
2197 }
2198 }
2199
2200 ret:
2201 bb_src = bb_dst;
2202
2203 skip:
2204 ;
2205
2206 MACHINE_STATE_RESTORE("1")
2207
2208 }
2209
2210 /* Called when returning from a function and `__bb_showret__' is set. */
2211
2212 static void
2213 __bb_trace_func_ret (void)
2214 {
2215 struct bb_edge *bucket;
2216
2217 if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))
2218 goto skip;
2219
2220 if (bb_hashbuckets)
2221 {
2222 struct bb_edge **startbucket, **oldnext;
2223
2224 oldnext = startbucket
2225 = & bb_hashbuckets[ (((int) bb_dst * 8) ^ (int) bb_src) % BB_BUCKETS ];
2226 bucket = *startbucket;
2227
2228 for (bucket = *startbucket; bucket;
2229 oldnext = &(bucket->next), bucket = *oldnext)
2230 {
2231 if (bucket->src_addr == bb_dst
2232 && bucket->dst_addr == bb_src)
2233 {
2234 bucket->count++;
2235 *oldnext = bucket->next;
2236 bucket->next = *startbucket;
2237 *startbucket = bucket;
2238 goto ret;
2239 }
2240 }
2241
2242 bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
2243
2244 if (!bucket)
2245 {
2246 if (!reported)
2247 {
2248 fprintf (stderr, "Profiler: out of memory\n");
2249 reported = 1;
2250 }
2251 }
2252
2253 else
2254 {
2255 bucket->src_addr = bb_dst;
2256 bucket->dst_addr = bb_src;
2257 bucket->next = *startbucket;
2258 *startbucket = bucket;
2259 bucket->count = 1;
2260 }
2261 }
2262
2263 ret:
2264 bb_dst = bb_src;
2265
2266 skip:
2267 ;
2268
2269 }
2270
2271 /* Called upon entering the first function of a file. */
2272
2273 static void
2274 __bb_init_file (struct bb *blocks)
2275 {
2276
2277 const struct bb_func *p;
2278 long blk, ncounts = blocks->ncounts;
2279 const char **functions = blocks->functions;
2280
2281 /* Set up linked list. */
2282 blocks->zero_word = 1;
2283 blocks->next = bb_head;
2284 bb_head = blocks;
2285
2286 blocks->flags = 0;
2287 if (!bb_func_head
2288 || !(blocks->flags = (char *) malloc (sizeof (char) * blocks->ncounts)))
2289 return;
2290
2291 for (blk = 0; blk < ncounts; blk++)
2292 blocks->flags[blk] = 0;
2293
2294 for (blk = 0; blk < ncounts; blk++)
2295 {
2296 for (p = bb_func_head; p; p = p->next)
2297 {
2298 if (!strcmp (p->funcname, functions[blk])
2299 && (!p->filename || !strcmp (p->filename, blocks->filename)))
2300 {
2301 blocks->flags[blk] |= p->mode;
2302 }
2303 }
2304 }
2305
2306 }
2307
2308 /* Called when exiting from a function. */
2309
2310 void
2311 __bb_trace_ret (void)
2312 {
2313
2314 MACHINE_STATE_SAVE("2")
2315
2316 if (bb_callcount)
2317 {
2318 if ((bb_mode & 12) && bb_stacksize > bb_callcount)
2319 {
2320 bb_src = bb_stack[bb_callcount];
2321 if (bb_mode & 8)
2322 __bb_trace_func_ret ();
2323 }
2324
2325 bb_callcount -= 1;
2326 }
2327
2328 MACHINE_STATE_RESTORE("2")
2329
2330 }
2331
2332 /* Called when entering a function. */
2333
2334 void
2335 __bb_init_trace_func (struct bb *blocks, unsigned long blockno)
2336 {
2337 static int trace_init = 0;
2338
2339 MACHINE_STATE_SAVE("3")
2340
2341 if (!blocks->zero_word)
2342 {
2343 if (!trace_init)
2344 {
2345 trace_init = 1;
2346 __bb_init_prg ();
2347 }
2348 __bb_init_file (blocks);
2349 }
2350
2351 if (bb_callcount)
2352 {
2353
2354 bb_callcount += 1;
2355
2356 if (bb_mode & 12)
2357 {
2358 if (bb_callcount >= bb_stacksize)
2359 {
2360 size_t newsize = bb_callcount + 100;
2361
2362 bb_stack = (unsigned long *) realloc (bb_stack, newsize);
2363 if (! bb_stack)
2364 {
2365 if (!reported)
2366 {
2367 fprintf (stderr, "Profiler: out of memory\n");
2368 reported = 1;
2369 }
2370 bb_stacksize = 0;
2371 goto stack_overflow;
2372 }
2373 bb_stacksize = newsize;
2374 }
2375 bb_stack[bb_callcount] = bb_src;
2376
2377 if (bb_mode & 4)
2378 bb_src = 0;
2379
2380 }
2381
2382 stack_overflow:;
2383
2384 }
2385
2386 else if (blocks->flags && (blocks->flags[blockno] & TRACE_ON))
2387 {
2388 bb_callcount = 1;
2389 bb_src = 0;
2390
2391 if (bb_stack)
2392 bb_stack[bb_callcount] = bb_src;
2393 }
2394
2395 MACHINE_STATE_RESTORE("3")
2396 }
2397
2398 #endif /* not inhibit_libc */
2399 #endif /* not BLOCK_PROFILER_CODE */
2400 #endif /* L_bb */
2401 \f
2402 #ifdef L_shtab
2403 unsigned int __shtab[] = {
2404 0x00000001, 0x00000002, 0x00000004, 0x00000008,
2405 0x00000010, 0x00000020, 0x00000040, 0x00000080,
2406 0x00000100, 0x00000200, 0x00000400, 0x00000800,
2407 0x00001000, 0x00002000, 0x00004000, 0x00008000,
2408 0x00010000, 0x00020000, 0x00040000, 0x00080000,
2409 0x00100000, 0x00200000, 0x00400000, 0x00800000,
2410 0x01000000, 0x02000000, 0x04000000, 0x08000000,
2411 0x10000000, 0x20000000, 0x40000000, 0x80000000
2412 };
2413 #endif
2414 \f
2415 #ifdef L_clear_cache
2416 /* Clear part of an instruction cache. */
2417
2418 #define INSN_CACHE_PLANE_SIZE (INSN_CACHE_SIZE / INSN_CACHE_DEPTH)
2419
2420 void
2421 __clear_cache (char *beg __attribute__((__unused__)),
2422 char *end __attribute__((__unused__)))
2423 {
2424 #ifdef CLEAR_INSN_CACHE
2425 CLEAR_INSN_CACHE (beg, end);
2426 #else
2427 #ifdef INSN_CACHE_SIZE
2428 static char array[INSN_CACHE_SIZE + INSN_CACHE_PLANE_SIZE + INSN_CACHE_LINE_WIDTH];
2429 static int initialized;
2430 int offset;
2431 void *start_addr
2432 void *end_addr;
2433 typedef (*function_ptr) (void);
2434
2435 #if (INSN_CACHE_SIZE / INSN_CACHE_LINE_WIDTH) < 16
2436 /* It's cheaper to clear the whole cache.
2437 Put in a series of jump instructions so that calling the beginning
2438 of the cache will clear the whole thing. */
2439
2440 if (! initialized)
2441 {
2442 int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2443 & -INSN_CACHE_LINE_WIDTH);
2444 int end_ptr = ptr + INSN_CACHE_SIZE;
2445
2446 while (ptr < end_ptr)
2447 {
2448 *(INSTRUCTION_TYPE *)ptr
2449 = JUMP_AHEAD_INSTRUCTION + INSN_CACHE_LINE_WIDTH;
2450 ptr += INSN_CACHE_LINE_WIDTH;
2451 }
2452 *(INSTRUCTION_TYPE *) (ptr - INSN_CACHE_LINE_WIDTH) = RETURN_INSTRUCTION;
2453
2454 initialized = 1;
2455 }
2456
2457 /* Call the beginning of the sequence. */
2458 (((function_ptr) (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2459 & -INSN_CACHE_LINE_WIDTH))
2460 ());
2461
2462 #else /* Cache is large. */
2463
2464 if (! initialized)
2465 {
2466 int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2467 & -INSN_CACHE_LINE_WIDTH);
2468
2469 while (ptr < (int) array + sizeof array)
2470 {
2471 *(INSTRUCTION_TYPE *)ptr = RETURN_INSTRUCTION;
2472 ptr += INSN_CACHE_LINE_WIDTH;
2473 }
2474
2475 initialized = 1;
2476 }
2477
2478 /* Find the location in array that occupies the same cache line as BEG. */
2479
2480 offset = ((int) beg & -INSN_CACHE_LINE_WIDTH) & (INSN_CACHE_PLANE_SIZE - 1);
2481 start_addr = (((int) (array + INSN_CACHE_PLANE_SIZE - 1)
2482 & -INSN_CACHE_PLANE_SIZE)
2483 + offset);
2484
2485 /* Compute the cache alignment of the place to stop clearing. */
2486 #if 0 /* This is not needed for gcc's purposes. */
2487 /* If the block to clear is bigger than a cache plane,
2488 we clear the entire cache, and OFFSET is already correct. */
2489 if (end < beg + INSN_CACHE_PLANE_SIZE)
2490 #endif
2491 offset = (((int) (end + INSN_CACHE_LINE_WIDTH - 1)
2492 & -INSN_CACHE_LINE_WIDTH)
2493 & (INSN_CACHE_PLANE_SIZE - 1));
2494
2495 #if INSN_CACHE_DEPTH > 1
2496 end_addr = (start_addr & -INSN_CACHE_PLANE_SIZE) + offset;
2497 if (end_addr <= start_addr)
2498 end_addr += INSN_CACHE_PLANE_SIZE;
2499
2500 for (plane = 0; plane < INSN_CACHE_DEPTH; plane++)
2501 {
2502 int addr = start_addr + plane * INSN_CACHE_PLANE_SIZE;
2503 int stop = end_addr + plane * INSN_CACHE_PLANE_SIZE;
2504
2505 while (addr != stop)
2506 {
2507 /* Call the return instruction at ADDR. */
2508 ((function_ptr) addr) ();
2509
2510 addr += INSN_CACHE_LINE_WIDTH;
2511 }
2512 }
2513 #else /* just one plane */
2514 do
2515 {
2516 /* Call the return instruction at START_ADDR. */
2517 ((function_ptr) start_addr) ();
2518
2519 start_addr += INSN_CACHE_LINE_WIDTH;
2520 }
2521 while ((start_addr % INSN_CACHE_SIZE) != offset);
2522 #endif /* just one plane */
2523 #endif /* Cache is large */
2524 #endif /* Cache exists */
2525 #endif /* CLEAR_INSN_CACHE */
2526 }
2527
2528 #endif /* L_clear_cache */
2529 \f
2530 #ifdef L_trampoline
2531
2532 /* Jump to a trampoline, loading the static chain address. */
2533
2534 #if defined(WINNT) && ! defined(__CYGWIN__) && ! defined (_UWIN)
2535
2536 long
2537 getpagesize (void)
2538 {
2539 #ifdef _ALPHA_
2540 return 8192;
2541 #else
2542 return 4096;
2543 #endif
2544 }
2545
2546 #ifdef __i386__
2547 extern int VirtualProtect (char *, int, int, int *) __attribute__((stdcall));
2548 #endif
2549
2550 int
2551 mprotect (char *addr, int len, int prot)
2552 {
2553 int np, op;
2554
2555 if (prot == 7)
2556 np = 0x40;
2557 else if (prot == 5)
2558 np = 0x20;
2559 else if (prot == 4)
2560 np = 0x10;
2561 else if (prot == 3)
2562 np = 0x04;
2563 else if (prot == 1)
2564 np = 0x02;
2565 else if (prot == 0)
2566 np = 0x01;
2567
2568 if (VirtualProtect (addr, len, np, &op))
2569 return 0;
2570 else
2571 return -1;
2572 }
2573
2574 #endif /* WINNT && ! __CYGWIN__ && ! _UWIN */
2575
2576 #ifdef TRANSFER_FROM_TRAMPOLINE
2577 TRANSFER_FROM_TRAMPOLINE
2578 #endif
2579
2580 #if defined (NeXT) && defined (__MACH__)
2581
2582 /* Make stack executable so we can call trampolines on stack.
2583 This is called from INITIALIZE_TRAMPOLINE in next.h. */
2584 #ifdef NeXTStep21
2585 #include <mach.h>
2586 #else
2587 #include <mach/mach.h>
2588 #endif
2589
2590 void
2591 __enable_execute_stack (char *addr)
2592 {
2593 kern_return_t r;
2594 char *eaddr = addr + TRAMPOLINE_SIZE;
2595 vm_address_t a = (vm_address_t) addr;
2596
2597 /* turn on execute access on stack */
2598 r = vm_protect (task_self (), a, TRAMPOLINE_SIZE, FALSE, VM_PROT_ALL);
2599 if (r != KERN_SUCCESS)
2600 {
2601 mach_error("vm_protect VM_PROT_ALL", r);
2602 exit(1);
2603 }
2604
2605 /* We inline the i-cache invalidation for speed */
2606
2607 #ifdef CLEAR_INSN_CACHE
2608 CLEAR_INSN_CACHE (addr, eaddr);
2609 #else
2610 __clear_cache ((int) addr, (int) eaddr);
2611 #endif
2612 }
2613
2614 #endif /* defined (NeXT) && defined (__MACH__) */
2615
2616 #ifdef __convex__
2617
2618 /* Make stack executable so we can call trampolines on stack.
2619 This is called from INITIALIZE_TRAMPOLINE in convex.h. */
2620
2621 #include <sys/mman.h>
2622 #include <sys/vmparam.h>
2623 #include <machine/machparam.h>
2624
2625 void
2626 __enable_execute_stack (void)
2627 {
2628 int fp;
2629 static unsigned lowest = USRSTACK;
2630 unsigned current = (unsigned) &fp & -NBPG;
2631
2632 if (lowest > current)
2633 {
2634 unsigned len = lowest - current;
2635 mremap (current, &len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE);
2636 lowest = current;
2637 }
2638
2639 /* Clear instruction cache in case an old trampoline is in it. */
2640 asm ("pich");
2641 }
2642 #endif /* __convex__ */
2643
2644 #ifdef __sysV88__
2645
2646 /* Modified from the convex -code above. */
2647
2648 #include <sys/param.h>
2649 #include <errno.h>
2650 #include <sys/m88kbcs.h>
2651
2652 void
2653 __enable_execute_stack (void)
2654 {
2655 int save_errno;
2656 static unsigned long lowest = USRSTACK;
2657 unsigned long current = (unsigned long) &save_errno & -NBPC;
2658
2659 /* Ignore errno being set. memctl sets errno to EINVAL whenever the
2660 address is seen as 'negative'. That is the case with the stack. */
2661
2662 save_errno=errno;
2663 if (lowest > current)
2664 {
2665 unsigned len=lowest-current;
2666 memctl(current,len,MCT_TEXT);
2667 lowest = current;
2668 }
2669 else
2670 memctl(current,NBPC,MCT_TEXT);
2671 errno=save_errno;
2672 }
2673
2674 #endif /* __sysV88__ */
2675
2676 #ifdef __sysV68__
2677
2678 #include <sys/signal.h>
2679 #include <errno.h>
2680
2681 /* Motorola forgot to put memctl.o in the libp version of libc881.a,
2682 so define it here, because we need it in __clear_insn_cache below */
2683 /* On older versions of this OS, no memctl or MCT_TEXT are defined;
2684 hence we enable this stuff only if MCT_TEXT is #define'd. */
2685
#ifdef MCT_TEXT
/* Hand-written syscall stub for memctl: load trap number 75 into %d0,
   trap into the kernel, branch past the error path when the carry bit
   is clear, otherwise jump to the C library's cerror handler.  On
   success the stub returns 0.  */
asm("\n\
	global memctl\n\
memctl:\n\
	movq &75,%d0\n\
	trap &0\n\
	bcc.b noerror\n\
	jmp cerror%\n\
noerror:\n\
	movq &0,%d0\n\
	rts");
#endif
2698
2699 /* Clear instruction cache so we can call trampolines on stack.
2700 This is called from FINALIZE_TRAMPOLINE in mot3300.h. */
2701
/* Flush the instruction cache so trampolines written to the stack can
   be executed safely.  Called from FINALIZE_TRAMPOLINE in mot3300.h.
   A no-op on OS versions without MCT_TEXT.  */
void
__clear_insn_cache (void)
{
#ifdef MCT_TEXT
  int saved_errno = errno;

  /* memctl (MCT_TEXT) always fully clears the insn cache, whatever
     address is passed, so address 0 with a nominal length suffices.
     Save and restore errno so callers never observe it changing
     without an explicit system call of their own.  */
  memctl (0, 4096, MCT_TEXT);

  errno = saved_errno;
#endif
}
2718
2719 #endif /* __sysV68__ */
2720
2721 #ifdef __pyr__
2722
2723 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
2724 #include <stdio.h>
2725 #include <sys/mman.h>
2726 #include <sys/types.h>
2727 #include <sys/param.h>
2728 #include <sys/vmmac.h>
2729
2730 /* Modified from the convex -code above.
2731 mremap promises to clear the i-cache. */
2732
2733 void
2734 __enable_execute_stack (void)
2735 {
2736 int fp;
2737 if (mprotect (((unsigned int)&fp/PAGSIZ)*PAGSIZ, PAGSIZ,
2738 PROT_READ|PROT_WRITE|PROT_EXEC))
2739 {
2740 perror ("mprotect in __enable_execute_stack");
2741 fflush (stderr);
2742 abort ();
2743 }
2744 }
2745 #endif /* __pyr__ */
2746
2747 #if defined (sony_news) && defined (SYSTYPE_BSD)
2748
2749 #include <stdio.h>
2750 #include <sys/types.h>
2751 #include <sys/param.h>
2752 #include <syscall.h>
2753 #include <machine/sysnews.h>
2754
2755 /* cacheflush function for NEWS-OS 4.2.
2756 This function is called from trampoline-initialize code
2757 defined in config/mips/mips.h. */
2758
2759 void
2760 cacheflush (char *beg, int size, int flag)
2761 {
2762 if (syscall (SYS_sysnews, NEWS_CACHEFLUSH, beg, size, FLUSH_BCACHE))
2763 {
2764 perror ("cache_flush");
2765 fflush (stderr);
2766 abort ();
2767 }
2768 }
2769
2770 #endif /* sony_news */
2771 #endif /* L_trampoline */
2772 \f
2773 #ifndef __CYGWIN__
2774 #ifdef L__main
2775
2776 #include "gbl-ctors.h"
2777 /* Some systems use __main in a way incompatible with its use in gcc, in these
2778 cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
2779 give the same symbol without quotes for an alternative entry point. You
2780 must define both, or neither. */
2781 #ifndef NAME__MAIN
2782 #define NAME__MAIN "__main"
2783 #define SYMBOL__MAIN __main
2784 #endif
2785
2786 #ifdef INIT_SECTION_ASM_OP
2787 #undef HAS_INIT_SECTION
2788 #define HAS_INIT_SECTION
2789 #endif
2790
2791 #if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
2792
2793 /* Some ELF crosses use crtstuff.c to provide __CTOR_LIST__, but use this
2794 code to run constructors. In that case, we need to handle EH here, too. */
2795
2796 #ifdef EH_FRAME_SECTION
2797 #include "frame.h"
2798 extern unsigned char __EH_FRAME_BEGIN__[];
2799 #endif
2800
2801 /* Run all the global destructors on exit from the program. */
2802
void
__do_global_dtors (void)
{
#ifdef DO_GLOBAL_DTORS_BODY
  /* Let the target supply the entire traversal when it defines one.  */
  DO_GLOBAL_DTORS_BODY;
#else
  /* Walk __DTOR_LIST__, skipping slot 0, calling each function pointer
     until the terminating null.  P is static, so a nested or repeated
     call resumes where the previous one stopped rather than running
     destructors twice.  */
  static func_ptr *p = __DTOR_LIST__ + 1;
  while (*p)
    {
      p++;
      (*(p-1)) ();
    }
#endif
#if defined (EH_FRAME_SECTION) && !defined (HAS_INIT_SECTION)
  /* Deregister this module's EH frame info exactly once; it was
     registered in __do_global_ctors.  */
  {
    static int completed = 0;
    if (! completed)
      {
	completed = 1;
	__deregister_frame_info (__EH_FRAME_BEGIN__);
      }
  }
#endif
}
2827 #endif
2828
2829 #ifndef HAS_INIT_SECTION
2830 /* Run all the global constructors on entry to the program. */
2831
void
__do_global_ctors (void)
{
#ifdef EH_FRAME_SECTION
  /* Register this module's EH frame info before any constructor runs;
     __do_global_dtors deregisters it at exit.  OBJECT is static
     storage handed to the unwinder for its bookkeeping.  */
  {
    static struct object object;
    __register_frame_info (__EH_FRAME_BEGIN__, &object);
  }
#endif
  /* The target supplies the actual constructor traversal.  */
  DO_GLOBAL_CTORS_BODY;
  /* Arrange for the global destructors to run when the program exits.  */
  atexit (__do_global_dtors);
}
2844 #endif /* no HAS_INIT_SECTION */
2845
2846 #if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
2847 /* Subroutine called automatically by `main'.
2848 Compiling a global function named `main'
2849 produces an automatic call to this function at the beginning.
2850
2851 For many systems, this routine calls __do_global_ctors.
2852 For systems which support a .init section we use the .init section
2853 to run __do_global_ctors, so we need not do anything here. */
2854
void
SYMBOL__MAIN ()
{
  /* `main' may be entered recursively; run the global constructors
     only on the first entry.  */
  static int done;

  if (done)
    return;

  done = 1;
  __do_global_ctors ();
}
2866 #endif /* no HAS_INIT_SECTION or INVOKE__main */
2867
2868 #endif /* L__main */
2869 #endif /* __CYGWIN__ */
2870 \f
2871 #ifdef L_ctors
2872
2873 #include "gbl-ctors.h"
2874
2875 /* Provide default definitions for the lists of constructors and
2876 destructors, so that we don't get linker errors. These symbols are
2877 intentionally bss symbols, so that gld and/or collect will provide
2878 the right values. */
2879
2880 /* We declare the lists here with two elements each,
2881 so that they are valid empty lists if no other definition is loaded.
2882
2883 If we are using the old "set" extensions to have the gnu linker
2884 collect ctors and dtors, then we __CTOR_LIST__ and __DTOR_LIST__
2885 must be in the bss/common section.
2886
2887 Long term no port should use those extensions. But many still do. */
#if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
#if defined (ASM_OUTPUT_CONSTRUCTOR) || defined (USE_COLLECT2)
/* Explicitly initialized: two null entries form a valid empty list.  */
func_ptr __CTOR_LIST__[2] = {0, 0};
func_ptr __DTOR_LIST__[2] = {0, 0};
#else
/* Left uninitialized so these become bss/common symbols, letting gld
   and/or collect provide the real values at link time.  */
func_ptr __CTOR_LIST__[2];
func_ptr __DTOR_LIST__[2];
#endif
#endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
2897 #endif /* L_ctors */
2898 \f
2899 #ifdef L_exit
2900
2901 #include "gbl-ctors.h"
2902
2903 #ifdef NEED_ATEXIT
2904
2905 #ifndef ON_EXIT
2906
2907 # include <errno.h>
2908
/* Dynamically grown array of registered atexit handlers.  */
static func_ptr *atexit_chain = 0;
/* Allocated capacity of ATEXIT_CHAIN, in elements.  */
static long atexit_chain_length = 0;
/* Index of the most recently registered handler; -1 when empty.
   Volatile -- presumably so exit () re-reads it on every loop
   iteration in case a handler registers further handlers; confirm
   against exit () below.  */
static volatile long last_atexit_chain_slot = -1;
2912
2913 int
2914 atexit (func_ptr func)
2915 {
2916 if (++last_atexit_chain_slot == atexit_chain_length)
2917 {
2918 atexit_chain_length += 32;
2919 if (atexit_chain)
2920 atexit_chain = (func_ptr *) realloc (atexit_chain, atexit_chain_length
2921 * sizeof (func_ptr));
2922 else
2923 atexit_chain = (func_ptr *) malloc (atexit_chain_length
2924 * sizeof (func_ptr));
2925 if (! atexit_chain)
2926 {
2927 atexit_chain_length = 0;
2928 last_atexit_chain_slot = -1;
2929 errno = ENOMEM;
2930 return (-1);
2931 }
2932 }
2933 atexit_chain[last_atexit_chain_slot] = func;
2934 return (0);
2935 }
2936
2937 extern void _cleanup (void);
2938 extern void _exit (int) __attribute__ ((__noreturn__));
2939
/* Replacement exit: run the registered atexit handlers newest-first,
   perform target/system cleanup, then terminate via _exit.  */
void
exit (int status)
{
  if (atexit_chain)
    {
      /* The loop condition decrements LAST_ATEXIT_CHAIN_SLOT, so the
	 handler to run inside the body is at index slot + 1; each
	 entry is cleared after it has run.  The slot counter is
	 volatile, presumably so a handler that itself calls atexit
	 extends the chain and is picked up here -- confirm against
	 atexit above.  */
      for ( ; last_atexit_chain_slot-- >= 0; )
	{
	  (*atexit_chain[last_atexit_chain_slot + 1]) ();
	  atexit_chain[last_atexit_chain_slot + 1] = 0;
	}
      free (atexit_chain);
      atexit_chain = 0;
    }
#ifdef EXIT_BODY
  /* Target-specific shutdown actions.  */
  EXIT_BODY;
#else
  _cleanup ();
#endif
  _exit (status);
}
2960
2961 #else /* ON_EXIT */
2962
2963 /* Simple; we just need a wrapper for ON_EXIT. */
2964 int
2965 atexit (func_ptr func)
2966 {
2967 return ON_EXIT (func);
2968 }
2969
2970 #endif /* ON_EXIT */
2971 #endif /* NEED_ATEXIT */
2972
2973 #endif /* L_exit */
2974 \f
2975 #ifdef L_eh
2976
2977 #include "gthr.h"
2978
2979 /* Shared exception handling support routines. */
2980
/* Default terminate handler: abort the process.  Installed as the
   initial value of __terminate_func below.  */
void
__default_terminate (void)
{
  abort ();
}
2986
/* Hook through which termination is dispatched; starts out as
   __default_terminate and is declared noreturn.  */
void (*__terminate_func)(void) __attribute__ ((__noreturn__)) =
  __default_terminate;

/* Terminate exception processing by calling the current hook.  */
void
__terminate (void)
{
  (*__terminate_func)();
}
2995
/* Runtime type matcher for types identified by name strings: OBJ
   matches when CATCH_TYPE and THROW_TYPE compare equal as strings, in
   which case OBJ itself is returned; otherwise a null pointer.  */
void *
__throw_type_match (void *catch_type, void *throw_type, void *obj)
{
  const char *want = (const char *) catch_type;
  const char *have = (const char *) throw_type;

  return strcmp (want, have) == 0 ? obj : 0;
}
3007
/* Intentionally empty function; presumably used as a no-op callback
   or branch target -- its callers are not visible in this file.  */
void
__empty (void)
{
}
3012 \f
3013
3014 /* Include definitions of EH context and table layout */
3015
3016 #include "eh-common.h"
3017 #ifndef inhibit_libc
3018 #include <stdio.h>
3019 #endif
3020
3021 /* Allocate and return a new EH context structure. */
3022
3023 #if __GTHREADS
3024 static void *
3025 new_eh_context (void)
3026 {
3027 struct eh_full_context {
3028 struct eh_context c;
3029 void *top_elt[2];
3030 } *ehfc = (struct eh_full_context *) malloc (sizeof *ehfc);
3031
3032 if (! ehfc)
3033 __terminate ();
3034
3035 memset (ehfc, 0, sizeof *ehfc);
3036
3037 ehfc->c.dynamic_handler_chain = (void **) ehfc->top_elt;
3038
3039 /* This should optimize out entirely. This should always be true,
3040 but just in case it ever isn't, don't allow bogus code to be
3041 generated. */
3042
3043 if ((void*)(&ehfc->c) != (void*)ehfc)
3044 __terminate ();
3045
3046 return &ehfc->c;
3047 }
3048
3049 static __gthread_key_t eh_context_key;
3050
3051 /* Destructor for struct eh_context. */
3052 static void
3053 eh_context_free (void *ptr)
3054 {
3055 __gthread_key_dtor (eh_context_key, ptr);
3056 if (ptr)
3057 free (ptr);
3058 }
3059 #endif
3060
/* Pointer to function to return EH context.  It starts out as
   eh_context_initialize, which installs the real accessor (static or
   thread-specific) on first use; see eh_context_initialize below.  */

static struct eh_context *eh_context_initialize (void);
static struct eh_context *eh_context_static (void);
#if __GTHREADS
static struct eh_context *eh_context_specific (void);
#endif

static struct eh_context *(*get_eh_context) (void) = &eh_context_initialize;
3070
3071 /* Routine to get EH context.
3072 This one will simply call the function pointer. */
3073
3074 void *
3075 __get_eh_context (void)
3076 {
3077 return (void *) (*get_eh_context) ();
3078 }
3079
3080 /* Get and set the language specific info pointer. */
3081
3082 void **
3083 __get_eh_info (void)
3084 {
3085 struct eh_context *eh = (*get_eh_context) ();
3086 return &eh->info;
3087 }
3088 \f
#ifdef DWARF2_UNWIND_INFO
/* Nonzero once DWARF_REG_SIZE_TABLE has been filled in.  */
static int dwarf_reg_size_table_initialized = 0;
/* Size in bytes of each DWARF frame register; filled in lazily by
   init_reg_size_table and consumed by copy_reg below.  */
static char dwarf_reg_size_table[DWARF_FRAME_REGISTERS];

/* Populate the register size table.  Run once, either via
   __gthread_once or directly when threads are unavailable.  */
static void
init_reg_size_table (void)
{
  __builtin_init_dwarf_reg_size_table (dwarf_reg_size_table);
  dwarf_reg_size_table_initialized = 1;
}
#endif
3100
3101 #if __GTHREADS
3102 static void
3103 eh_threads_initialize (void)
3104 {
3105 /* Try to create the key. If it fails, revert to static method,
3106 otherwise start using thread specific EH contexts. */
3107 if (__gthread_key_create (&eh_context_key, &eh_context_free) == 0)
3108 get_eh_context = &eh_context_specific;
3109 else
3110 get_eh_context = &eh_context_static;
3111 }
3112 #endif /* no __GTHREADS */
3113
3114 /* Initialize EH context.
3115 This will be called only once, since we change GET_EH_CONTEXT
3116 pointer to another routine. */
3117
static struct eh_context *
eh_context_initialize (void)
{
#if __GTHREADS

  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  /* Make sure that get_eh_context does not point to us anymore.
     Some systems have dummy thread routines in their libc that
     return a success (Solaris 2.6 for example).  */
  if (__gthread_once (&once, eh_threads_initialize) != 0
      || get_eh_context == &eh_context_initialize)
    {
      /* Use static version of EH context.  */
      get_eh_context = &eh_context_static;
    }
#ifdef DWARF2_UNWIND_INFO
  {
    /* Same defensive pattern for the DWARF register size table: fall
       back to a direct call if the once-routine did not really run.  */
    static __gthread_once_t once_regsizes = __GTHREAD_ONCE_INIT;
    if (__gthread_once (&once_regsizes, init_reg_size_table) != 0
	|| ! dwarf_reg_size_table_initialized)
      init_reg_size_table ();
  }
#endif

#else /* no __GTHREADS */

  /* Use static version of EH context.  */
  get_eh_context = &eh_context_static;

#ifdef DWARF2_UNWIND_INFO
  init_reg_size_table ();
#endif

#endif /* no __GTHREADS */

  /* Dispatch through whichever accessor was just installed.  */
  return (*get_eh_context) ();
}
3155
3156 /* Return a static EH context. */
3157
3158 static struct eh_context *
3159 eh_context_static (void)
3160 {
3161 static struct eh_context eh;
3162 static int initialized;
3163 static void *top_elt[2];
3164
3165 if (! initialized)
3166 {
3167 initialized = 1;
3168 memset (&eh, 0, sizeof eh);
3169 eh.dynamic_handler_chain = top_elt;
3170 }
3171 return &eh;
3172 }
3173
3174 #if __GTHREADS
3175 /* Return a thread specific EH context. */
3176
3177 static struct eh_context *
3178 eh_context_specific (void)
3179 {
3180 struct eh_context *eh;
3181 eh = (struct eh_context *) __gthread_getspecific (eh_context_key);
3182 if (! eh)
3183 {
3184 eh = new_eh_context ();
3185 if (__gthread_setspecific (eh_context_key, (void *) eh) != 0)
3186 __terminate ();
3187 }
3188
3189 return eh;
3190 }
3191 #endif /* __GTHREADS */
3192 \f
3193 /* Support routines for alloc/free during exception handling */
3194
3195 /* __eh_alloc and __eh_free attempt allocation using malloc, but fall back to
3196 the small arena in the eh_context. This is needed because throwing an
3197 out-of-memory exception would fail otherwise. The emergency space is
3198 allocated in blocks of size EH_ALLOC_ALIGN, the
3199 minimum allocation being two blocks. A bitmask indicates which blocks
3200 have been allocated. To indicate the size of an allocation, the bit for
3201 the final block is not set. Hence each allocation is a run of 1s followed
3202 by a zero. */
/* Allocate SIZE bytes for exception handling.  Tries malloc first;
   on failure falls back to the emergency arena in the EH context so
   that an out-of-memory exception can still be thrown.  The arena is
   handed out in EH_ALLOC_ALIGN-sized blocks (minimum two); in
   ALLOC_MASK each allocation is a run of 1 bits whose final block's
   bit is left clear as a terminator.  Terminates if no space can be
   found.  */
void *
__eh_alloc (size_t size)
{
  void *p;

  if (!size)
    abort();
  p = malloc (size);
  if (p == 0)
    {
      struct eh_context *eh = __get_eh_context ();
      unsigned blocks = (size + EH_ALLOC_ALIGN - 1) / EH_ALLOC_ALIGN;
      /* A block whose predecessor's bit is set may be the clear
	 "terminator" block of an existing allocation, so REAL_MASK
	 also marks such blocks as in use.  */
      unsigned real_mask = eh->alloc_mask | (eh->alloc_mask << 1);
      unsigned our_mask;
      unsigned ix;

      if (blocks > EH_ALLOC_SIZE / EH_ALLOC_ALIGN)
	__terminate ();
      /* Enforce the two-block minimum allocation.  */
      blocks += blocks == 1;
      our_mask = (1 << blocks) - 1;

      for (ix = EH_ALLOC_SIZE / EH_ALLOC_ALIGN - blocks; ix; ix--)
	if (! ((real_mask >> ix) & our_mask))
	  {
	    /* found some space */
	    p = &eh->alloc_buffer[ix * EH_ALLOC_ALIGN];
	    /* Set all of the run's bits except the final block's,
	       whose clear bit terminates the run.  */
	    eh->alloc_mask |= (our_mask >> 1) << ix;
	    return p;
	  }
      __terminate ();
    }
  return p;
}
3236
3237 /* Free the memory for an cp_eh_info and associated exception, given
3238 a pointer to the cp_eh_info. */
3239 void
3240 __eh_free (void *p)
3241 {
3242 struct eh_context *eh = __get_eh_context ();
3243
3244 ptrdiff_t diff = (char *)p - &eh->alloc_buffer[0];
3245 if (diff >= 0 && diff < EH_ALLOC_SIZE)
3246 {
3247 unsigned mask = eh->alloc_mask;
3248 unsigned bit = 1 << (diff / EH_ALLOC_ALIGN);
3249
3250 do
3251 {
3252 mask ^= bit;
3253 bit <<= 1;
3254 }
3255 while (mask & bit);
3256 eh->alloc_mask = mask;
3257 }
3258 else
3259 free (p);
3260 }
3261 \f
3262 /* Support routines for setjmp/longjmp exception handling. */
3263
3264 /* Calls to __sjthrow are generated by the compiler when an exception
3265 is raised when using the setjmp/longjmp exception handling codegen
3266 method. */
3267
3268 #ifdef DONT_USE_BUILTIN_SETJMP
3269 extern void longjmp (void *, int);
3270 #endif
3271
3272 /* Routine to get the head of the current thread's dynamic handler chain
3273 use for exception handling. */
3274
3275 void ***
3276 __get_dynamic_handler_chain (void)
3277 {
3278 struct eh_context *eh = (*get_eh_context) ();
3279 return &eh->dynamic_handler_chain;
3280 }
3281
3282 /* This is used to throw an exception when the setjmp/longjmp codegen
3283 method is used for exception handling.
3284
3285 We call __terminate if there are no handlers left. Otherwise we run the
3286 cleanup actions off the dynamic cleanup stack, and pop the top of the
3287 dynamic handler chain, and use longjmp to transfer back to the associated
3288 handler. */
3289
void
__sjthrow (void)
{
  struct eh_context *eh = (*get_eh_context) ();
  void ***dhc = &eh->dynamic_handler_chain;
  void *jmpbuf;
  void (*func)(void *, int);
  void *arg;
  /* The cleanup chain is one word into the buffer.  Get the cleanup chain.  */
  void ***cleanup = (void***)&(*dhc)[1];

  /* If there are any cleanups in the chain, run them now.  */
  if (cleanup[0])
    {
      /* STORE serves as a temporary handler buffer pushed onto the
	 chain while the cleanups run; a throw from within a cleanup
	 lands in the setjmp below and terminates.  */
      double store[200];
      void **buf = (void**)store;
      buf[1] = 0;
      buf[0] = (*dhc);

      /* try { */
#ifdef DONT_USE_BUILTIN_SETJMP
      if (! setjmp (&buf[2]))
#else
      if (! __builtin_setjmp (&buf[2]))
#endif
	{
	  *dhc = buf;
	  while (cleanup[0])
	    {
	      func = (void(*)(void*, int))cleanup[0][1];
	      arg = (void*)cleanup[0][2];

	      /* Update this before running the cleanup.  */
	      cleanup[0] = (void **)cleanup[0][0];

	      (*func)(arg, 2);
	    }
	  *dhc = buf[0];
	}
      /* catch (...) */
      else
	{
	  __terminate ();
	}
    }

  /* We must call terminate if we try and rethrow an exception, when
     there is no exception currently active and when there are no
     handlers left.  */
  if (! eh->info || (*dhc)[0] == 0)
    __terminate ();

  /* Find the jmpbuf associated with the top element of the dynamic
     handler chain.  The jumpbuf starts two words into the buffer.  */
  jmpbuf = &(*dhc)[2];

  /* Then we pop the top element off the dynamic handler chain.  */
  *dhc = (void**)(*dhc)[0];

  /* And then we jump to the handler.  */

#ifdef DONT_USE_BUILTIN_SETJMP
  longjmp (jmpbuf, 1);
#else
  __builtin_longjmp (jmpbuf, 1);
#endif
}
3357
3358 /* Run cleanups on the dynamic cleanup stack for the current dynamic
3359 handler, then pop the handler off the dynamic handler stack, and
3360 then throw. This is used to skip the first handler, and transfer
3361 control to the next handler in the dynamic handler stack. */
3362
void
__sjpopnthrow (void)
{
  struct eh_context *eh = (*get_eh_context) ();
  void ***dhc = &eh->dynamic_handler_chain;
  void (*func)(void *, int);
  void *arg;
  /* The cleanup chain is one word into the buffer.  Get the cleanup chain.  */
  void ***cleanup = (void***)&(*dhc)[1];

  /* If there are any cleanups in the chain, run them now.  This loop
     mirrors the one in __sjthrow: a temporary handler buffer is
     pushed while cleanups run, and a throw from a cleanup lands in
     the setjmp below and terminates.  */
  if (cleanup[0])
    {
      double store[200];
      void **buf = (void**)store;
      buf[1] = 0;
      buf[0] = (*dhc);

      /* try { */
#ifdef DONT_USE_BUILTIN_SETJMP
      if (! setjmp (&buf[2]))
#else
      if (! __builtin_setjmp (&buf[2]))
#endif
	{
	  *dhc = buf;
	  while (cleanup[0])
	    {
	      func = (void(*)(void*, int))cleanup[0][1];
	      arg = (void*)cleanup[0][2];

	      /* Update this before running the cleanup.  */
	      cleanup[0] = (void **)cleanup[0][0];

	      (*func)(arg, 2);
	    }
	  *dhc = buf[0];
	}
      /* catch (...) */
      else
	{
	  __terminate ();
	}
    }

  /* Then we pop the top element off the dynamic handler chain.  */
  *dhc = (void**)(*dhc)[0];

  /* Re-throw from the next handler down.  */
  __sjthrow ();
}
3413 \f
3414 /* Support code for all exception region-based exception handling. */
3415
3416 int
3417 __eh_rtime_match (void *rtime)
3418 {
3419 void *info;
3420 __eh_matcher matcher;
3421 void *ret;
3422
3423 info = *(__get_eh_info ());
3424 matcher = ((__eh_info *)info)->match_function;
3425 if (! matcher)
3426 {
3427 #ifndef inhibit_libc
3428 fprintf (stderr, "Internal Compiler Bug: No runtime type matcher.");
3429 #endif
3430 return 0;
3431 }
3432 ret = (*matcher) (info, rtime, (void *)0);
3433 return (ret != NULL);
3434 }
3435
3436 /* This value identifies the place from which an exception is being
3437 thrown. */
3438
3439 #ifdef EH_TABLE_LOOKUP
3440
3441 EH_TABLE_LOOKUP
3442
3443 #else
3444
3445 #ifdef DWARF2_UNWIND_INFO
3446
3447 /* Return the table version of an exception descriptor */
3448
/* Return the table version recorded in an exception descriptor.  */
short
__get_eh_table_version (exception_descriptor *table)
{
  return table->lang.version;
}
3454
3455 /* Return the originating table language of an exception descriptor */
3456
/* Return the originating language recorded in an exception descriptor.  */
short
__get_eh_table_language (exception_descriptor *table)
{
  return table->lang.language;
}
3462
3463 /* This routine takes a PC and a pointer to the exception region TABLE for
3464 its translation unit, and returns the address of the exception handler
3465 associated with the closest exception table handler entry associated
3466 with that PC, or 0 if there are no table entries the PC fits in.
3467
3468 In the advent of a tie, we have to give the last entry, as it represents
3469 an inner block. */
3470
/* Old-model handler lookup: given PC and the old-format exception
   TABLE, return the handler for the smallest (innermost) region
   containing PC, or null if none.  The table is terminated by a
   start_region of (void *) -1.  */
static void *
old_find_exception_handler (void *pc, old_exception_table *table)
{
  if (table)
    {
      int pos;
      int best = -1;

      /* We can't do a binary search because the table isn't guaranteed
	 to be sorted from function to function.  */
      for (pos = 0; table[pos].start_region != (void *) -1; ++pos)
	{
	  if (table[pos].start_region <= pc && table[pos].end_region > pc)
	    {
	      /* This can apply.  Make sure it is at least as small as
		 the previous best.  A later entry of equal extent wins
		 (<=/>= below), since it represents an inner block.  */
	      if (best == -1 || (table[pos].end_region <= table[best].end_region
			&& table[pos].start_region >= table[best].start_region))
		best = pos;
	    }
	  /* But it is sorted by starting PC within a function.  */
	  else if (best >= 0 && table[pos].start_region > pc)
	    break;
	}
      if (best != -1)
	return table[best].exception_handler;
    }

  return (void *) 0;
}
3501
3502 /* find_exception_handler finds the correct handler, if there is one, to
3503 handle an exception.
3504 returns a pointer to the handler which controlled should be transferred
3505 to, or NULL if there is nothing left.
3506 Parameters:
3507 PC - pc where the exception originates. If this is a rethrow,
3508 then this starts out as a pointer to the exception table
3509 entry we wish to rethrow out of.
3510 TABLE - exception table for the current module.
3511 EH_INFO - eh info pointer for this exception.
3512 RETHROW - 1 if this is a rethrow. (see incoming value of PC).
3513 CLEANUP - returned flag indicating whether this is a cleanup handler.
3514 */
static void *
find_exception_handler (void *pc, exception_descriptor *table,
			__eh_info *eh_info, int rethrow, int *cleanup)
{

  void *retval = NULL;
  /* Assume the first match is a cleanup until a typed match is found.  */
  *cleanup = 1;
  if (table)
    {
      int pos = 0;
      /* The new model assumed the table is sorted inner-most out so the
	 first region we find which matches is the correct one */

      exception_table *tab = &(table->table[0]);

      /* Subtract 1 from the PC to avoid hitting the next region */
      if (rethrow)
	{
	  /* pc is actually the region table entry to rethrow out of */
	  pos = ((exception_table *) pc) - tab;
	  pc = ((exception_table *) pc)->end_region - 1;

	  /* The label is always on the LAST handler entry for a region,
	     so we know the next entry is a different region, even if the
	     addresses are the same. Make sure its not end of table tho. */
	  if (tab[pos].start_region != (void *) -1)
	    pos++;
	}
      else
	pc--;

      /* We can't do a binary search because the table is in inner-most
	 to outermost address ranges within functions */
      for ( ; tab[pos].start_region != (void *) -1; pos++)
	{
	  if (tab[pos].start_region <= pc && tab[pos].end_region > pc)
	    {
	      if (tab[pos].match_info)
		{
		  __eh_matcher matcher = eh_info->match_function;
		  /* match info but no matcher is NOT a match */
		  if (matcher)
		    {
		      void *ret = (*matcher)((void *) eh_info,
					     tab[pos].match_info, table);
		      if (ret)
			{
			  /* A typed match: prefer any earlier cleanup
			     handler already recorded, but stop looking.  */
			  if (retval == NULL)
			    retval = tab[pos].exception_handler;
			  *cleanup = 0;
			  break;
			}
		    }
		}
	      else
		{
		  /* An untyped (cleanup) handler: record the first one
		     seen and keep scanning for a typed match.  */
		  if (retval == NULL)
		    retval = tab[pos].exception_handler;
		}
	    }
	}
    }
  return retval;
}
3579 #endif /* DWARF2_UNWIND_INFO */
3580 #endif /* EH_TABLE_LOOKUP */
3581 \f
3582 #ifdef DWARF2_UNWIND_INFO
3583 /* Support code for exception handling using static unwind information. */
3584
3585 #include "frame.h"
3586
3587 /* This type is used in get_reg and put_reg to deal with ABIs where a void*
3588 is smaller than a word, such as the Irix 6 n32 ABI. We cast twice to
3589 avoid a warning about casting between int and pointer of different
3590 sizes. */
3591
3592 typedef int ptr_type __attribute__ ((mode (pointer)));
3593
3594 #ifdef INCOMING_REGNO
3595 /* Is the saved value for register REG in frame UDATA stored in a register
3596 window in the previous frame? */
3597
3598 /* ??? The Sparc INCOMING_REGNO references TARGET_FLAT. This allows us
3599 to use the macro here. One wonders, though, that perhaps TARGET_FLAT
3600 compiled functions won't work with the frame-unwind stuff here.
3601 Perhaps the entireity of in_reg_window should be conditional on having
3602 seen a DW_CFA_GNU_window_save? */
3603 #define target_flags 0
3604
static int
in_reg_window (int reg, frame_state *udata)
{
  /* A register saved in another register counts as windowed only when
     the save maps it to its own incoming number.  */
  if (udata->saved[reg] == REG_SAVED_REG)
    return INCOMING_REGNO (reg) == reg;
  if (udata->saved[reg] != REG_SAVED_OFFSET)
    return 0;

  /* Saved at an offset from the CFA: the sign of the offset
     distinguishes a register-window save, depending on which way the
     stack grows.  */
#ifdef STACK_GROWS_DOWNWARD
  return udata->reg_or_offset[reg] > 0;
#else
  return udata->reg_or_offset[reg] < 0;
#endif
}
3619 #else
/* Without INCOMING_REGNO (no register windows on this target),
   nothing is ever saved in a register window.  */
static inline int
in_reg_window (int reg __attribute__ ((__unused__)),
	       frame_state *udata __attribute__ ((__unused__)))
{
  return 0;
}
3626 #endif /* INCOMING_REGNO */
3627
3628 /* Get the address of register REG as saved in UDATA, where SUB_UDATA is a
3629 frame called by UDATA or 0. */
3630
static word_type *
get_reg_addr (unsigned reg, frame_state *udata, frame_state *sub_udata)
{
  /* Follow REG_SAVED_REG indirections: the value lives in another
     register's save slot.  When that register sits in a register
     window, its save slot belongs to the called frame, so switch to
     SUB_UDATA -- at most once, since it is cleared after use.  */
  while (udata->saved[reg] == REG_SAVED_REG)
    {
      reg = udata->reg_or_offset[reg];
      if (in_reg_window (reg, udata))
	{
	  udata = sub_udata;
	  sub_udata = NULL;
	}
    }
  /* The chain must end at a memory save slot; anything else means the
     caller asked for a register that was not saved.  */
  if (udata->saved[reg] == REG_SAVED_OFFSET)
    return (word_type *)(udata->cfa + udata->reg_or_offset[reg]);
  else
    abort ();
}
3648
3649 /* Get the value of register REG as saved in UDATA, where SUB_UDATA is a
3650 frame called by UDATA or 0. */
3651
static inline void *
get_reg (unsigned reg, frame_state *udata, frame_state *sub_udata)
{
  /* Read the word-sized save slot; the double cast through ptr_type
     avoids int/pointer size-mismatch warnings (see ptr_type above).  */
  return (void *)(ptr_type) *get_reg_addr (reg, udata, sub_udata);
}
3657
3658 /* Overwrite the saved value for register REG in frame UDATA with VAL. */
3659
static inline void
put_reg (unsigned reg, void *val, frame_state *udata)
{
  /* Store VAL into REG's save slot; the double cast mirrors get_reg.  */
  *get_reg_addr (reg, udata, NULL) = (word_type)(ptr_type) val;
}
3665
3666 /* Copy the saved value for register REG from frame UDATA to frame
3667 TARGET_UDATA. Unlike the previous two functions, this can handle
3668 registers that are not one word large. */
3669
3670 static void
3671 copy_reg (unsigned reg, frame_state *udata, frame_state *target_udata)
3672 {
3673 word_type *preg = get_reg_addr (reg, udata, NULL);
3674 word_type *ptreg = get_reg_addr (reg, target_udata, NULL);
3675
3676 memcpy (ptreg, preg, dwarf_reg_size_table [reg]);
3677 }
3678
3679 /* Retrieve the return address for frame UDATA. */
3680
static inline void *
get_return_addr (frame_state *udata, frame_state *sub_udata)
{
  /* Undo any target-specific encoding of the saved return address.  */
  return __builtin_extract_return_addr
    (get_reg (udata->retaddr_column, udata, sub_udata));
}
3687
3688 /* Overwrite the return address for frame UDATA with VAL. */
3689
static inline void
put_return_addr (void *val, frame_state *udata)
{
  /* Re-apply the target-specific encoding before storing it back.  */
  val = __builtin_frob_return_addr (val);
  put_reg (udata->retaddr_column, val, udata);
}
3696
3697 /* Given the current frame UDATA and its return address PC, return the
3698 information about the calling frame in CALLER_UDATA. */
3699
/* Given the current frame UDATA and its return address PC, fill in
   CALLER_UDATA with the calling frame's unwind state and return it,
   or return null when no unwind info exists for PC (end of stack or
   foreign frame).  */
static void *
next_stack_level (void *pc, frame_state *udata, frame_state *caller_udata)
{
  caller_udata = __frame_state_for (pc, caller_udata);
  if (! caller_udata)
    return 0;

  /* Now go back to our caller's stack frame.  If our caller's CFA register
     was saved in our stack frame, restore it; otherwise, assume the CFA
     register is SP and restore it to our CFA value.  */
  if (udata->saved[caller_udata->cfa_reg])
    caller_udata->cfa = get_reg (caller_udata->cfa_reg, udata, 0);
  else
    caller_udata->cfa = udata->cfa;
  /* An indirect CFA is fetched through memory before the offset is
     applied.  */
  if (caller_udata->indirect)
    caller_udata->cfa = * (void **) ((unsigned char *)caller_udata->cfa
				     + caller_udata->base_offset);
  caller_udata->cfa += caller_udata->cfa_offset;

  return caller_udata;
}
3721
3722 /* Hook to call before __terminate if only cleanup handlers remain. */
/* Debugger hook called (see throw_helper) before __terminate when
   only cleanup handlers remain for an exception; deliberately empty,
   existing only as a place to set a breakpoint.  */
void
__unwinding_cleanup (void)
{
}
3727
3728 /* throw_helper performs some of the common grunt work for a throw. This
3729 routine is called by throw and rethrows. This is pretty much split
3730 out from the old __throw routine. An addition has been added which allows
3731 for a dummy call to a routine __unwinding_cleanup() when there are nothing
3732 but cleanups remaining. This allows a debugger to examine the state
3733 at which the throw was executed, before any cleanups, rather than
3734 at the terminate point after the stack has been unwound.
3735
3736 EH is the current eh_context structure.
3737 PC is the address of the call to __throw.
3738 MY_UDATA is the unwind information for __throw.
3739 OFFSET_P is where we return the SP adjustment offset. */
3740
static void *
throw_helper (struct eh_context *eh, void *pc, frame_state *my_udata,
	      long *offset_p)
{
  /* Two frame_state buffers; UDATA and SUB_UDATA are swapped at each
     step of the walk so that next_stack_level can read the current
     frame while writing the next one.  */
  frame_state ustruct2, *udata = &ustruct2;
  frame_state ustruct;
  frame_state *sub_udata = &ustruct;
  void *saved_pc = pc;
  void *handler;
  /* Saved handler/PC for the first cleanup-only match found while we
     keep searching for a real handler.  */
  void *handler_p = 0;
  void *pc_p = 0;
  frame_state saved_ustruct;
  int new_eh_model;
  int cleanup = 0;
  int only_cleanup = 0;
  int rethrow = 0;
  int saved_state = 0;
  long args_size;
  __eh_info *eh_info = (__eh_info *)eh->info;

  /* Do we find a handler based on a re-throw PC?  A non-null
     table_index was stashed by __rethrow.  */
  if (eh->table_index != (void *) 0)
    rethrow = 1;

  /* Start the walk from a copy of __throw's own frame state.  */
  memcpy (udata, my_udata, sizeof (*udata));

  handler = (void *) 0;
  for (;;)
    {
      frame_state *p = udata;
      udata = next_stack_level (pc, udata, sub_udata);
      sub_udata = p;

      /* If we couldn't find the next frame, we lose.  */
      if (! udata)
	break;

      /* Distinguish the new-model EH tables (tagged with
	 NEW_EH_RUNTIME in the descriptor) from the old format.  */
      if (udata->eh_ptr == NULL)
	new_eh_model = 0;
      else
	new_eh_model = (((exception_descriptor *)(udata->eh_ptr))->
			runtime_id_field == NEW_EH_RUNTIME);

      if (rethrow)
	{
	  /* A rethrow only consults the table index once; subsequent
	     frames are searched by PC as usual.  */
	  rethrow = 0;
	  handler = find_exception_handler (eh->table_index, udata->eh_ptr,
					    eh_info, 1, &cleanup);
	  eh->table_index = (void *)0;
	}
      else
	if (new_eh_model)
	  handler = find_exception_handler (pc, udata->eh_ptr, eh_info,
					    0, &cleanup);
	else
	  handler = old_find_exception_handler (pc, udata->eh_ptr);

      /* If we found one, we can stop searching, if it's not a cleanup.
	 For cleanups, we save the state, and keep looking.  This allows
	 us to call a debug hook if there is nothing but cleanups left.  */
      if (handler)
	{
	  if (cleanup)
	    {
	      /* Only the innermost cleanup is remembered.  */
	      if (!saved_state)
		{
		  saved_ustruct = *udata;
		  handler_p = handler;
		  pc_p = pc;
		  saved_state = 1;
		  only_cleanup = 1;
		}
	    }
	  else
	    {
	      only_cleanup = 0;
	      break;
	    }
	}

      /* Otherwise, we continue searching.  We subtract 1 from PC to avoid
	 hitting the beginning of the next region.  */
      pc = get_return_addr (udata, sub_udata) - 1;
    }

  /* No real handler: fall back to the saved cleanup, and notify the
     debugger hook if cleanups are all that remain.  */
  if (saved_state)
    {
      udata = &saved_ustruct;
      handler = handler_p;
      pc = pc_p;
      if (only_cleanup)
	__unwinding_cleanup ();
    }

  /* If we haven't found a handler by now, this is an unhandled
     exception.  */
  if (! handler)
    __terminate();

  eh->handler_label = handler;

  args_size = udata->args_size;

  if (pc == saved_pc)
    /* We found a handler in the throw context, no need to unwind.  */
    udata = my_udata;
  else
    {
      int i;

      /* Unwind all the frames between this one and the handler by copying
	 their saved register values into our register save slots.  */

      /* Remember the PC where we found the handler.  */
      void *handler_pc = pc;

      /* Start from the throw context again.  */
      pc = saved_pc;
      memcpy (udata, my_udata, sizeof (*udata));

      while (pc != handler_pc)
	{
	  frame_state *p = udata;
	  udata = next_stack_level (pc, udata, sub_udata);
	  sub_udata = p;

	  for (i = 0; i < DWARF_FRAME_REGISTERS; ++i)
	    if (i != udata->retaddr_column && udata->saved[i])
	      {
		/* If you modify the saved value of the return address
		   register on the SPARC, you modify the return address for
		   your caller's frame.  Don't do that here, as it will
		   confuse get_return_addr.  */
		if (in_reg_window (i, udata)
		    && udata->saved[udata->retaddr_column] == REG_SAVED_REG
		    && udata->reg_or_offset[udata->retaddr_column] == i)
		  continue;
		copy_reg (i, udata, my_udata);
	      }

	  pc = get_return_addr (udata, sub_udata) - 1;
	}

      /* But we do need to update the saved return address register from
	 the last frame we unwind, or the handler frame will have the wrong
	 return address.  */
      if (udata->saved[udata->retaddr_column] == REG_SAVED_REG)
	{
	  i = udata->reg_or_offset[udata->retaddr_column];
	  if (in_reg_window (i, udata))
	    copy_reg (i, udata, my_udata);
	}
    }
  /* udata now refers to the frame called by the handler frame.  */

  /* We adjust SP by the difference between __throw's CFA and the CFA for
     the frame called by the handler frame, because those CFAs correspond
     to the SP values at the two call sites.  We need to further adjust by
     the args_size of the handler frame itself to get the handler frame's
     SP from before the args were pushed for that call.  */
#ifdef STACK_GROWS_DOWNWARD
  *offset_p = udata->cfa - my_udata->cfa + args_size;
#else
  *offset_p = my_udata->cfa - udata->cfa - args_size;
#endif

  return handler;
}
3909
3910
3911 /* We first search for an exception handler, and if we don't find
3912 it, we call __terminate on the current stack frame so that we may
3913 use the debugger to walk the stack and understand why no handler
3914 was found.
3915
3916 If we find one, then we unwind the frames down to the one that
3917 has the handler and transfer control into the handler. */
3918
3919 /*extern void __throw(void) __attribute__ ((__noreturn__));*/
3920
void
__throw (void)
{
  struct eh_context *eh = (*get_eh_context) ();
  void *pc, *handler;
  long offset;

  /* XXX maybe make my_ustruct static so we don't have to look it up for
     each throw.  */
  frame_state my_ustruct, *my_udata = &my_ustruct;

  /* This is required for C++ semantics.  We must call terminate if we
     try and rethrow an exception, when there is no exception currently
     active.  */
  if (! eh->info)
    __terminate ();

  /* Start at our stack frame.  The address of the label (taken via the
     GNU &&label extension) gives __frame_state_for a PC known to lie
     inside __throw itself.  */
label:
  my_udata = __frame_state_for (&&label, my_udata);
  if (! my_udata)
    __terminate ();

  /* We need to get the value from the CFA register.  */
  my_udata->cfa = __builtin_dwarf_cfa ();

  /* Do any necessary initialization to access arbitrary stack frames.
     On the SPARC, this means flushing the register windows.  */
  __builtin_unwind_init ();

  /* Now reset pc to the right throw point: the call site in our caller,
     minus one so we stay inside the calling region.  */
  pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;

  handler = throw_helper (eh, pc, my_udata, &offset);

  /* Now go!  Transfers control to the handler; does not return.  */

  __builtin_eh_return ((void *)eh, offset, handler);

  /* Epilogue: restore the handler frame's register values and return
     to the stub.  */
}
3963
3964 /*extern void __rethrow(void *) __attribute__ ((__noreturn__));*/
3965
/* Rethrow the current exception, searching from table entry INDEX.
   Identical to __throw except that the handler search is seeded with
   INDEX instead of starting from the throw PC.  Does not return.  */
void
__rethrow (void *index)
{
  struct eh_context *eh = (*get_eh_context) ();
  void *pc, *handler;
  long offset;

  /* XXX maybe make my_ustruct static so we don't have to look it up for
     each throw.  */
  frame_state my_ustruct, *my_udata = &my_ustruct;

  /* This is required for C++ semantics.  We must call terminate if we
     try and rethrow an exception, when there is no exception currently
     active.  */
  if (! eh->info)
    __terminate ();

  /* This is the table index we want to rethrow from.  The value of
     the END_REGION label is used for the PC of the throw, and the
     search begins with the next table entry.  (throw_helper consumes
     and clears table_index.)  */
  eh->table_index = index;

  /* Start at our stack frame.  */
label:
  my_udata = __frame_state_for (&&label, my_udata);
  if (! my_udata)
    __terminate ();

  /* We need to get the value from the CFA register.  */
  my_udata->cfa = __builtin_dwarf_cfa ();

  /* Do any necessary initialization to access arbitrary stack frames.
     On the SPARC, this means flushing the register windows.  */
  __builtin_unwind_init ();

  /* Now reset pc to the right throw point.  */
  pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;

  handler = throw_helper (eh, pc, my_udata, &offset);

  /* Now go!  Transfers control to the handler; does not return.  */

  __builtin_eh_return ((void *)eh, offset, handler);

  /* Epilogue: restore the handler frame's register values and return
     to the stub.  */
}
4013 #endif /* DWARF2_UNWIND_INFO */
4014
4015 #ifdef IA64_UNWIND_INFO
4016 #include "frame.h"
4017
4018 /* Return handler to which we want to transfer control, NULL if we don't
4019 intend to handle this exception here. */
4020 void *
4021 __ia64_personality_v1 (void *pc, old_exception_table *table)
4022 {
4023 if (table)
4024 {
4025 int pos;
4026 int best = -1;
4027
4028 for (pos = 0; table[pos].start_region != (void *) -1; ++pos)
4029 {
4030 if (table[pos].start_region <= pc && table[pos].end_region > pc)
4031 {
4032 /* This can apply. Make sure it is at least as small as
4033 the previous best. */
4034 if (best == -1 || (table[pos].end_region <= table[best].end_region
4035 && table[pos].start_region >= table[best].start_region))
4036 best = pos;
4037 }
4038 /* It is sorted by starting PC within a function. */
4039 else if (best >= 0 && table[pos].start_region > pc)
4040 break;
4041 }
4042 if (best != -1)
4043 return table[best].exception_handler;
4044 }
4045 return (void *) 0;
4046 }
4047
/* Find the handler for the current exception and unwind the register
   state in THROW_FRAME down to it.  CALLER receives the frame state of
   the frame containing the handler.  THROW_BSP and THROW_SP are the
   backing-store and stack pointers of __throw.  Calls __terminate if
   no handler is found.  */
static void
ia64_throw_helper (ia64_frame_state *throw_frame, ia64_frame_state *caller,
		   void *throw_bsp, void *throw_sp)
{
  void *throw_pc = __builtin_return_address (0);
  unwind_info_ptr *info;
  void *pc, *handler = NULL;
  void *pc_base;
  int frame_count;
  void *bsp;

  __builtin_ia64_flushrs ();	/* Make the local register stacks available.  */

  /* Start at our stack frame, get our state.  */
  __build_ia64_frame_state (throw_pc, throw_frame, throw_bsp, throw_sp,
			    &pc_base);

  /* Now we have to find the proper frame for pc, and see if there
     is a handler for it.  If not, we keep going back frames until
     we do find one.  Otherwise we call uncaught ().  */

  frame_count = 0;
  memcpy (caller, throw_frame, sizeof (*caller));
  while (!handler)
    {
      void *(*personality) ();
      void *eh_table;

      frame_count++;
      /* We only care about the RP right now, so we don't need to keep
	 any other information about a call frame right now.  */
      pc = __get_real_reg_value (&caller->rp) - 1;
      bsp = __calc_caller_bsp ((long)__get_real_reg_value (&caller->pfs),
			       caller->my_bsp);
      info = __build_ia64_frame_state (pc, caller, bsp, caller->my_psp,
				       &pc_base);

      /* If we couldn't find the next frame, we lose.  */
      if (! info)
	break;

      personality = __get_personality (info);
      /* TODO Haven't figured out how to actually load the personality address
	 yet, so just always default to the one we expect for now.
	 (Note: any non-null personality is deliberately overridden here.)  */
      if (personality != 0)
	personality = __ia64_personality_v1;
      eh_table = __get_except_table (info);
      /* If there is no personality routine, we'll keep unwinding.  */
      if (personality)
	/* Pass a segment relative PC address to the personality routine,
	   because the unwind_info section uses segrel relocs.  */
	handler = personality (pc - pc_base, eh_table);
    }

  if (!handler)
    __terminate ();

  /* Handler is a segment relative address, so we must adjust it here.  */
  handler += (long) pc_base;

  /* If we found a handler, we need to unwind the stack to that point.
     We do this by copying saved values from previous frames into the
     save slot for the throw_frame saved slots.  When __throw returns,
     it'll pick up the correct values.  */

  /* Start with where __throw saved things, and copy each saved register
     of each previous frame until we get to the one before we're
     throwing back to.  */
  memcpy (caller, throw_frame, sizeof (*caller));
  for ( ; frame_count > 0; frame_count--)
    {
      pc = __get_real_reg_value (&caller->rp) - 1;
      bsp = __calc_caller_bsp ((long)__get_real_reg_value (&caller->pfs),
			       caller->my_bsp);
      __build_ia64_frame_state (pc, caller, bsp, caller->my_psp, &pc_base);
      /* Any regs that were saved can be put in the throw frame now.  */
      /* We don't want to copy any saved register from the
	 target destination, but we do want to load up its frame.  */
      if (frame_count > 1)
	__copy_saved_reg_state (throw_frame, caller);
    }

  /* Set return address of the throw frame to the handler.  */
  __set_real_reg_value (&throw_frame->rp, handler);

  /* TODO, do we need to do anything to make the values we wrote 'stick'?  */
  /* Do we need to go through the whole loadrs sequence?  */
}
4136
4137
/* IA64 variant of __throw: find the handler, unwind to its frame, and
   transfer control via __builtin_eh_return.  Does not return.  */
void
__throw ()
{
  /* Pin the stack pointer: r12 is the IA64 stack pointer register.  */
  register void *stack_pointer __asm__("r12");
  struct eh_context *eh = (*get_eh_context) ();
  ia64_frame_state my_frame;
  ia64_frame_state originator;	/* For the context the handler is in.  */
  void *bsp, *tmp_bsp;
  long offset;

  /* This is required for C++ semantics.  We must call terminate if we
     try and rethrow an exception, when there is no exception currently
     active.  */
  if (! eh->info)
    __terminate ();

  __builtin_unwind_init ();

  /* We have to call another routine to actually process the frame
     information, which will force all of __throw's local registers into
     backing store.  */

  /* Get the value of ar.bsp while we're here.  */

  bsp = __builtin_ia64_bsp ();
  ia64_throw_helper (&my_frame, &originator, bsp, stack_pointer);

  /* Now we have to fudge the bsp by the amount in our (__throw)
     frame marker, since the return is going to adjust it by that much.  */

  tmp_bsp = __calc_caller_bsp ((long)__get_real_reg_value (&my_frame.pfs),
			       my_frame.my_bsp);
  offset = (char *)my_frame.my_bsp - (char *)tmp_bsp;
  tmp_bsp = (char *)originator.my_bsp + offset;

  __builtin_eh_return (tmp_bsp, offset, originator.my_sp);

  /* The return address was already set by throw_helper.  */
}
4177
4178 #endif /* IA64_UNWIND_INFO */
4179
4180 #endif /* L_eh */
4181 \f
4182 #ifdef L_pure
4183 #ifndef inhibit_libc
4184 /* This gets us __GNU_LIBRARY__. */
4185 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
4186 #include <stdio.h>
4187
4188 #ifdef __GNU_LIBRARY__
4189 /* Avoid forcing the library's meaning of `write' on the user program
4190 by using the "internal" name (for use within the library) */
4191 #define write(fd, buf, n) __write((fd), (buf), (n))
4192 #endif
#endif /* ! inhibit_libc */
4194
4195 #define MESSAGE "pure virtual method called\n"
4196
4197 void
4198 __pure_virtual (void)
4199 {
4200 #ifndef inhibit_libc
4201 write (2, MESSAGE, sizeof (MESSAGE) - 1);
4202 #endif
4203 __terminate ();
4204 }
4205 #endif