1 /* More subroutines needed by GCC output code on some machines. */
2 /* Compile this one with gcc. */
3 /* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
4
5 This file is part of GNU CC.
6
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22 /* As a special exception, if you link this library with other files,
23 some of which are compiled with GCC, to produce an executable,
24 this library does not by itself cause the resulting executable
25 to be covered by the GNU General Public License.
26 This exception does not however invalidate any other reasons why
27 the executable file might be covered by the GNU General Public License. */
28
29 /* It is incorrect to include config.h here, because this file is being
30 compiled for the target, and hence definitions concerning only the host
31 do not apply. */
32
33 #include "tconfig.h"
34
35 /* We disable this when inhibit_libc, so that gcc can still be built without
36 needing header files first. */
37 /* ??? This is not a good solution, since prototypes may be required in
38 some cases for correct code. See also frame.c. */
39 #ifndef inhibit_libc
40 /* fixproto guarantees these system headers exist. */
41 #include <stdlib.h>
42 #include <unistd.h>
43 #endif
44
45 #include "machmode.h"
46 #include "defaults.h"
47 #ifndef L_trampoline
48 #include <stddef.h>
49 #endif
50
51 /* Don't use `fancy_abort' here even if config.h says to use it. */
52 #ifdef abort
53 #undef abort
54 #endif
55
56 #if (SUPPORTS_WEAK == 1) && (defined (ASM_OUTPUT_DEF) || defined (ASM_OUTPUT_WEAK_ALIAS))
57 #define WEAK_ALIAS
58 #endif
59
60 /* In a cross-compilation situation, default to inhibiting compilation
61 of routines that use libc. */
62
63 #if defined(CROSS_COMPILE) && !defined(inhibit_libc)
64 #define inhibit_libc
65 #endif
66
67 /* Permit the tm.h file to select the endianness to use just for this
68 file. This is used when the endianness is determined when the
69 compiler is run. */
70
71 #ifndef LIBGCC2_WORDS_BIG_ENDIAN
72 #define LIBGCC2_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
73 #endif
74
75 #ifndef LIBGCC2_LONG_DOUBLE_TYPE_SIZE
76 #define LIBGCC2_LONG_DOUBLE_TYPE_SIZE LONG_DOUBLE_TYPE_SIZE
77 #endif
78
79 /* In the first part of this file, we are interfacing to calls generated
80 by the compiler itself. These calls pass values into these routines
81 which have very specific modes (rather than very specific types), and
82 these compiler-generated calls also expect any return values to have
83 very specific modes (rather than very specific types). Thus, we need
84 to avoid using regular C language type names in this part of the file
85 because the sizes for those types can be configured to be anything.
86 Instead we use the following special type names. */
87
88 typedef unsigned int UQItype __attribute__ ((mode (QI)));
89 typedef int SItype __attribute__ ((mode (SI)));
90 typedef unsigned int USItype __attribute__ ((mode (SI)));
91 typedef int DItype __attribute__ ((mode (DI)));
92 typedef unsigned int UDItype __attribute__ ((mode (DI)));
93
94 typedef float SFtype __attribute__ ((mode (SF)));
95 typedef float DFtype __attribute__ ((mode (DF)));
96
97 #if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
98 typedef float XFtype __attribute__ ((mode (XF)));
99 #endif
100 #if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128
101 typedef float TFtype __attribute__ ((mode (TF)));
102 #endif
103
104 typedef int word_type __attribute__ ((mode (__word__)));
105
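/* Illustrative note (an annotation, not part of the original source):
   the mode attribute pins each typedef to a machine mode rather than a
   C type, so `mode (SI)' always names the 4-byte integer mode and
   `mode (DI)' the 8-byte one, no matter how wide the target's `int' or
   `long' happen to be.  That is why the arithmetic below is written in
   terms of SItype/DItype instead of the ordinary C type names.  */
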
106 /* Make sure that we don't accidentally use any normal C language built-in
107 type names in the first part of this file. Instead we want to use *only*
108 the type names defined above. The following macro definitions ensure
109 that if we *do* accidentally use some normal C language built-in type name,
110 we will get a syntax error. */
111
112 #define char bogus_type
113 #define short bogus_type
114 #define int bogus_type
115 #define long bogus_type
116 #define unsigned bogus_type
117 #define float bogus_type
118 #define double bogus_type
119
120 #define SI_TYPE_SIZE (sizeof (SItype) * BITS_PER_UNIT)
121
122 /* DIstructs are pairs of SItype values in the order determined by
123 LIBGCC2_WORDS_BIG_ENDIAN. */
124
125 #if LIBGCC2_WORDS_BIG_ENDIAN
126 struct DIstruct {SItype high, low;};
127 #else
128 struct DIstruct {SItype low, high;};
129 #endif
130
131 /* We need this union to unpack/pack DImode values, since we don't have
132 any arithmetic yet. Incoming DImode parameters are stored into the
133 `ll' field, and the unpacked result is read from the struct `s'. */
134
135 typedef union
136 {
137 struct DIstruct s;
138 DItype ll;
139 } DIunion;
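
/* Worked illustration (annotation only, assuming 32-bit SItype and
   little-endian word order): storing the DImode value
   0x0000000100000002 into the `ll' field makes s.low == 0x00000002 and
   s.high == 0x00000001.  On a big-endian target the two struct fields
   swap places, so the same member names still pick out the numerically
   low and high words.  */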
140
141 #if (defined (L_udivmoddi4) || defined (L_muldi3) || defined (L_udiv_w_sdiv)\
142 || defined (L_divdi3) || defined (L_udivdi3) \
143 || defined (L_moddi3) || defined (L_umoddi3))
144
145 #include "longlong.h"
146
147 #endif /* udiv or mul */
148
149 extern DItype __fixunssfdi (SFtype a);
150 extern DItype __fixunsdfdi (DFtype a);
151 #if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
152 extern DItype __fixunsxfdi (XFtype a);
153 #endif
154 #if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128
155 extern DItype __fixunstfdi (TFtype a);
156 #endif
157 \f
158 #if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
159 #if defined (L_divdi3) || defined (L_moddi3)
160 static inline
161 #endif
162 DItype
163 __negdi2 (DItype u)
164 {
165 DIunion w;
166 DIunion uu;
167
168 uu.ll = u;
169
170 w.s.low = -uu.s.low;
171 w.s.high = -uu.s.high - ((USItype) w.s.low > 0);
172
173 return w.ll;
174 }
175 #endif
176 \f
177 /* Unless shift functions are defined with full ANSI prototypes,
178 parameter b will be promoted to int if word_type is smaller than an int. */
179 #ifdef L_lshrdi3
180 DItype
181 __lshrdi3 (DItype u, word_type b)
182 {
183 DIunion w;
184 word_type bm;
185 DIunion uu;
186
187 if (b == 0)
188 return u;
189
190 uu.ll = u;
191
192 bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
193 if (bm <= 0)
194 {
195 w.s.high = 0;
196 w.s.low = (USItype)uu.s.high >> -bm;
197 }
198 else
199 {
200 USItype carries = (USItype)uu.s.high << bm;
201 w.s.high = (USItype)uu.s.high >> b;
202 w.s.low = ((USItype)uu.s.low >> b) | carries;
203 }
204
205 return w.ll;
206 }
207 #endif
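
/* Worked example for the decomposition above (annotation only, assuming
   32-bit SItype): __lshrdi3 (0x0000000300000004, 8) takes the `else'
   branch with bm == 24, so carries == 0x00000003 << 24 == 0x03000000,
   w.s.high == 0x00000003 >> 8 == 0, and
   w.s.low == (0x00000004 >> 8) | 0x03000000 == 0x03000000,
   i.e. the expected 64-bit result 0x0000000003000000.  */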
208
209 #ifdef L_ashldi3
210 DItype
211 __ashldi3 (DItype u, word_type b)
212 {
213 DIunion w;
214 word_type bm;
215 DIunion uu;
216
217 if (b == 0)
218 return u;
219
220 uu.ll = u;
221
222 bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
223 if (bm <= 0)
224 {
225 w.s.low = 0;
226 w.s.high = (USItype)uu.s.low << -bm;
227 }
228 else
229 {
230 USItype carries = (USItype)uu.s.low >> bm;
231 w.s.low = (USItype)uu.s.low << b;
232 w.s.high = ((USItype)uu.s.high << b) | carries;
233 }
234
235 return w.ll;
236 }
237 #endif
238
239 #ifdef L_ashrdi3
240 DItype
241 __ashrdi3 (DItype u, word_type b)
242 {
243 DIunion w;
244 word_type bm;
245 DIunion uu;
246
247 if (b == 0)
248 return u;
249
250 uu.ll = u;
251
252 bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
253 if (bm <= 0)
254 {
255 /* w.s.high = 1..1 or 0..0 */
256 w.s.high = uu.s.high >> (sizeof (SItype) * BITS_PER_UNIT - 1);
257 w.s.low = uu.s.high >> -bm;
258 }
259 else
260 {
261 USItype carries = (USItype)uu.s.high << bm;
262 w.s.high = uu.s.high >> b;
263 w.s.low = ((USItype)uu.s.low >> b) | carries;
264 }
265
266 return w.ll;
267 }
268 #endif
269 \f
270 #ifdef L_ffsdi2
271 DItype
272 __ffsdi2 (DItype u)
273 {
274 DIunion uu, w;
275 uu.ll = u;
276 w.s.high = 0;
277 w.s.low = ffs (uu.s.low);
278 if (w.s.low != 0)
279 return w.ll;
280 w.s.low = ffs (uu.s.high);
281 if (w.s.low != 0)
282 {
283 w.s.low += BITS_PER_UNIT * sizeof (SItype);
284 return w.ll;
285 }
286 return w.ll;
287 }
288 #endif
289 \f
290 #ifdef L_muldi3
291 DItype
292 __muldi3 (DItype u, DItype v)
293 {
294 DIunion w;
295 DIunion uu, vv;
296
297 uu.ll = u,
298 vv.ll = v;
299
300 w.ll = __umulsidi3 (uu.s.low, vv.s.low);
301 w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high
302 + (USItype) uu.s.high * (USItype) vv.s.low);
303
304 return w.ll;
305 }
306 #endif
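
/* Annotation: __muldi3 relies on the identity
     (u1*2^32 + u0) * (v1*2^32 + v0)
       == u0*v0 + (u0*v1 + u1*v0)*2^32   (mod 2^64)
   with 32-bit words.  __umulsidi3 supplies the full 64-bit u0*v0
   product, and the u1*v1 term is dropped because it only affects bits
   above the 64-bit result.  */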
307 \f
308 #ifdef L_udiv_w_sdiv
309 #if defined (sdiv_qrnnd)
310 USItype
311 __udiv_w_sdiv (USItype *rp, USItype a1, USItype a0, USItype d)
312 {
313 USItype q, r;
314 USItype c0, c1, b1;
315
316 if ((SItype) d >= 0)
317 {
318 if (a1 < d - a1 - (a0 >> (SI_TYPE_SIZE - 1)))
319 {
320 /* dividend, divisor, and quotient are nonnegative */
321 sdiv_qrnnd (q, r, a1, a0, d);
322 }
323 else
324 {
325 /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
326 sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (SI_TYPE_SIZE - 1));
327 /* Divide (c1*2^32 + c0) by d */
328 sdiv_qrnnd (q, r, c1, c0, d);
329 /* Add 2^31 to quotient */
330 q += (USItype) 1 << (SI_TYPE_SIZE - 1);
331 }
332 }
333 else
334 {
335 b1 = d >> 1; /* d/2, between 2^30 and 2^31 - 1 */
336 c1 = a1 >> 1; /* A/2 */
337 c0 = (a1 << (SI_TYPE_SIZE - 1)) + (a0 >> 1);
338
339 if (a1 < b1) /* A < 2^32*b1, so A/2 < 2^31*b1 */
340 {
341 sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
342
343 r = 2*r + (a0 & 1); /* Remainder from A/(2*b1) */
344 if ((d & 1) != 0)
345 {
346 if (r >= q)
347 r = r - q;
348 else if (q - r <= d)
349 {
350 r = r - q + d;
351 q--;
352 }
353 else
354 {
355 r = r - q + 2*d;
356 q -= 2;
357 }
358 }
359 }
360 else if (c1 < b1) /* So 2^31 <= (A/2)/b1 < 2^32 */
361 {
362 c1 = (b1 - 1) - c1;
363 c0 = ~c0; /* logical NOT */
364
365 sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
366
367 q = ~q; /* (A/2)/b1 */
368 r = (b1 - 1) - r;
369
370 r = 2*r + (a0 & 1); /* A/(2*b1) */
371
372 if ((d & 1) != 0)
373 {
374 if (r >= q)
375 r = r - q;
376 else if (q - r <= d)
377 {
378 r = r - q + d;
379 q--;
380 }
381 else
382 {
383 r = r - q + 2*d;
384 q -= 2;
385 }
386 }
387 }
388 else /* Implies c1 = b1 */
389 { /* Hence a1 = d - 1 = 2*b1 - 1 */
390 if (a0 >= -d)
391 {
392 q = -1;
393 r = a0 + d;
394 }
395 else
396 {
397 q = -2;
398 r = a0 + 2*d;
399 }
400 }
401 }
402
403 *rp = r;
404 return q;
405 }
406 #else
407 /* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */
408 USItype
409 __udiv_w_sdiv (USItype *rp __attribute__ ((__unused__)),
410 USItype a1 __attribute__ ((__unused__)),
411 USItype a0 __attribute__ ((__unused__)),
412 USItype d __attribute__ ((__unused__)))
413 {
414 return 0;
415 }
416 #endif
417 #endif
418 \f
419 #if (defined (L_udivdi3) || defined (L_divdi3) || \
420 defined (L_umoddi3) || defined (L_moddi3))
421 #define L_udivmoddi4
422 #endif
423
424 #ifdef L_udivmoddi4
425 static const UQItype __clz_tab[] =
426 {
427 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
428 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
429 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
430 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
431 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
432 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
433 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
434 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
435 };
436
437 #if (defined (L_udivdi3) || defined (L_divdi3) || \
438 defined (L_umoddi3) || defined (L_moddi3))
439 static inline
440 #endif
441 UDItype
442 __udivmoddi4 (UDItype n, UDItype d, UDItype *rp)
443 {
444 DIunion ww;
445 DIunion nn, dd;
446 DIunion rr;
447 USItype d0, d1, n0, n1, n2;
448 USItype q0, q1;
449 USItype b, bm;
450
451 nn.ll = n;
452 dd.ll = d;
453
454 d0 = dd.s.low;
455 d1 = dd.s.high;
456 n0 = nn.s.low;
457 n1 = nn.s.high;
458
459 #if !UDIV_NEEDS_NORMALIZATION
460 if (d1 == 0)
461 {
462 if (d0 > n1)
463 {
464 /* 0q = nn / 0D */
465
466 udiv_qrnnd (q0, n0, n1, n0, d0);
467 q1 = 0;
468
469 /* Remainder in n0. */
470 }
471 else
472 {
473 /* qq = NN / 0d */
474
475 if (d0 == 0)
476 d0 = 1 / d0; /* Divide intentionally by zero. */
477
478 udiv_qrnnd (q1, n1, 0, n1, d0);
479 udiv_qrnnd (q0, n0, n1, n0, d0);
480
481 /* Remainder in n0. */
482 }
483
484 if (rp != 0)
485 {
486 rr.s.low = n0;
487 rr.s.high = 0;
488 *rp = rr.ll;
489 }
490 }
491
492 #else /* UDIV_NEEDS_NORMALIZATION */
493
494 if (d1 == 0)
495 {
496 if (d0 > n1)
497 {
498 /* 0q = nn / 0D */
499
500 count_leading_zeros (bm, d0);
501
502 if (bm != 0)
503 {
504 /* Normalize, i.e. make the most significant bit of the
505 denominator set. */
506
507 d0 = d0 << bm;
508 n1 = (n1 << bm) | (n0 >> (SI_TYPE_SIZE - bm));
509 n0 = n0 << bm;
510 }
511
512 udiv_qrnnd (q0, n0, n1, n0, d0);
513 q1 = 0;
514
515 /* Remainder in n0 >> bm. */
516 }
517 else
518 {
519 /* qq = NN / 0d */
520
521 if (d0 == 0)
522 d0 = 1 / d0; /* Divide intentionally by zero. */
523
524 count_leading_zeros (bm, d0);
525
526 if (bm == 0)
527 {
528 /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
529 conclude (the most significant bit of n1 is set) /\ (the
530 leading quotient digit q1 = 1).
531
532 This special case is necessary, not an optimization.
533 (Shift counts of SI_TYPE_SIZE are undefined.) */
534
535 n1 -= d0;
536 q1 = 1;
537 }
538 else
539 {
540 /* Normalize. */
541
542 b = SI_TYPE_SIZE - bm;
543
544 d0 = d0 << bm;
545 n2 = n1 >> b;
546 n1 = (n1 << bm) | (n0 >> b);
547 n0 = n0 << bm;
548
549 udiv_qrnnd (q1, n1, n2, n1, d0);
550 }
551
552 /* n1 != d0... */
553
554 udiv_qrnnd (q0, n0, n1, n0, d0);
555
556 /* Remainder in n0 >> bm. */
557 }
558
559 if (rp != 0)
560 {
561 rr.s.low = n0 >> bm;
562 rr.s.high = 0;
563 *rp = rr.ll;
564 }
565 }
566 #endif /* UDIV_NEEDS_NORMALIZATION */
567
568 else
569 {
570 if (d1 > n1)
571 {
572 /* 00 = nn / DD */
573
574 q0 = 0;
575 q1 = 0;
576
577 /* Remainder in n1n0. */
578 if (rp != 0)
579 {
580 rr.s.low = n0;
581 rr.s.high = n1;
582 *rp = rr.ll;
583 }
584 }
585 else
586 {
587 /* 0q = NN / dd */
588
589 count_leading_zeros (bm, d1);
590 if (bm == 0)
591 {
592 /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
593 conclude (the most significant bit of n1 is set) /\ (the
594 quotient digit q0 = 0 or 1).
595
596 This special case is necessary, not an optimization. */
597
598 /* The condition on the next line takes advantage of the fact that
599 n1 >= d1 (true due to program flow). */
600 if (n1 > d1 || n0 >= d0)
601 {
602 q0 = 1;
603 sub_ddmmss (n1, n0, n1, n0, d1, d0);
604 }
605 else
606 q0 = 0;
607
608 q1 = 0;
609
610 if (rp != 0)
611 {
612 rr.s.low = n0;
613 rr.s.high = n1;
614 *rp = rr.ll;
615 }
616 }
617 else
618 {
619 USItype m1, m0;
620 /* Normalize. */
621
622 b = SI_TYPE_SIZE - bm;
623
624 d1 = (d1 << bm) | (d0 >> b);
625 d0 = d0 << bm;
626 n2 = n1 >> b;
627 n1 = (n1 << bm) | (n0 >> b);
628 n0 = n0 << bm;
629
630 udiv_qrnnd (q0, n1, n2, n1, d1);
631 umul_ppmm (m1, m0, q0, d0);
632
633 if (m1 > n1 || (m1 == n1 && m0 > n0))
634 {
635 q0--;
636 sub_ddmmss (m1, m0, m1, m0, d1, d0);
637 }
638
639 q1 = 0;
640
641 /* Remainder in (n1n0 - m1m0) >> bm. */
642 if (rp != 0)
643 {
644 sub_ddmmss (n1, n0, n1, n0, m1, m0);
645 rr.s.low = (n1 << b) | (n0 >> bm);
646 rr.s.high = n1 >> bm;
647 *rp = rr.ll;
648 }
649 }
650 }
651 }
652
653 ww.s.low = q0;
654 ww.s.high = q1;
655 return ww.ll;
656 }
657 #endif
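
/* Annotation: where the code above normalizes (shifts both dividend and
   divisor left by bm, the count of leading zero bits of the divisor),
   the quotient is unchanged while the remainder is scaled by 2^bm; that
   is why the remainder is recovered as n0 >> bm, or by the double-word
   right shift in the two-word-divisor case.  The normalization is what
   gives udiv_qrnnd a divisor with its most significant bit set.  */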
658
659 #ifdef L_divdi3
660 UDItype __udivmoddi4 ();
661
662 DItype
663 __divdi3 (DItype u, DItype v)
664 {
665 word_type c = 0;
666 DIunion uu, vv;
667 DItype w;
668
669 uu.ll = u;
670 vv.ll = v;
671
672 if (uu.s.high < 0)
673 c = ~c,
674 uu.ll = __negdi2 (uu.ll);
675 if (vv.s.high < 0)
676 c = ~c,
677 vv.ll = __negdi2 (vv.ll);
678
679 w = __udivmoddi4 (uu.ll, vv.ll, (UDItype *) 0);
680 if (c)
681 w = __negdi2 (w);
682
683 return w;
684 }
685 #endif
686
687 #ifdef L_moddi3
688 UDItype __udivmoddi4 ();
689 DItype
690 __moddi3 (DItype u, DItype v)
691 {
692 word_type c = 0;
693 DIunion uu, vv;
694 DItype w;
695
696 uu.ll = u;
697 vv.ll = v;
698
699 if (uu.s.high < 0)
700 c = ~c,
701 uu.ll = __negdi2 (uu.ll);
702 if (vv.s.high < 0)
703 vv.ll = __negdi2 (vv.ll);
704
705 (void) __udivmoddi4 (uu.ll, vv.ll, &w);
706 if (c)
707 w = __negdi2 (w);
708
709 return w;
710 }
711 #endif
712
713 #ifdef L_umoddi3
714 UDItype __udivmoddi4 ();
715 UDItype
716 __umoddi3 (UDItype u, UDItype v)
717 {
718 UDItype w;
719
720 (void) __udivmoddi4 (u, v, &w);
721
722 return w;
723 }
724 #endif
725
726 #ifdef L_udivdi3
727 UDItype __udivmoddi4 ();
728 UDItype
729 __udivdi3 (UDItype n, UDItype d)
730 {
731 return __udivmoddi4 (n, d, (UDItype *) 0);
732 }
733 #endif
734 \f
735 #ifdef L_cmpdi2
736 word_type
737 __cmpdi2 (DItype a, DItype b)
738 {
739 DIunion au, bu;
740
741 au.ll = a, bu.ll = b;
742
743 if (au.s.high < bu.s.high)
744 return 0;
745 else if (au.s.high > bu.s.high)
746 return 2;
747 if ((USItype) au.s.low < (USItype) bu.s.low)
748 return 0;
749 else if ((USItype) au.s.low > (USItype) bu.s.low)
750 return 2;
751 return 1;
752 }
753 #endif
754
755 #ifdef L_ucmpdi2
756 word_type
757 __ucmpdi2 (DItype a, DItype b)
758 {
759 DIunion au, bu;
760
761 au.ll = a, bu.ll = b;
762
763 if ((USItype) au.s.high < (USItype) bu.s.high)
764 return 0;
765 else if ((USItype) au.s.high > (USItype) bu.s.high)
766 return 2;
767 if ((USItype) au.s.low < (USItype) bu.s.low)
768 return 0;
769 else if ((USItype) au.s.low > (USItype) bu.s.low)
770 return 2;
771 return 1;
772 }
773 #endif
774 \f
775 #if defined(L_fixunstfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
776 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
777 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
778
779 DItype
780 __fixunstfdi (TFtype a)
781 {
782 TFtype b;
783 UDItype v;
784
785 if (a < 0)
786 return 0;
787
788 /* Compute high word of result, as a flonum. */
789 b = (a / HIGH_WORD_COEFF);
790 /* Convert that to fixed (but not to DItype!),
791 and shift it into the high word. */
792 v = (USItype) b;
793 v <<= WORD_SIZE;
794 /* Remove high part from the TFtype, leaving the low part as flonum. */
795 a -= (TFtype)v;
796 /* Convert that to fixed (but not to DItype!) and add it in.
797 Sometimes A comes out negative. This is significant, since
798 A has more bits than a long int does. */
799 if (a < 0)
800 v -= (USItype) (- a);
801 else
802 v += (USItype) a;
803 return v;
804 }
805 #endif
806
807 #if defined(L_fixtfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
808 DItype
809 __fixtfdi (TFtype a)
810 {
811 if (a < 0)
812 return - __fixunstfdi (-a);
813 return __fixunstfdi (a);
814 }
815 #endif
816
817 #if defined(L_fixunsxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
818 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
819 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
820
821 DItype
822 __fixunsxfdi (XFtype a)
823 {
824 XFtype b;
825 UDItype v;
826
827 if (a < 0)
828 return 0;
829
830 /* Compute high word of result, as a flonum. */
831 b = (a / HIGH_WORD_COEFF);
832 /* Convert that to fixed (but not to DItype!),
833 and shift it into the high word. */
834 v = (USItype) b;
835 v <<= WORD_SIZE;
836 /* Remove high part from the XFtype, leaving the low part as flonum. */
837 a -= (XFtype)v;
838 /* Convert that to fixed (but not to DItype!) and add it in.
839 Sometimes A comes out negative. This is significant, since
840 A has more bits than a long int does. */
841 if (a < 0)
842 v -= (USItype) (- a);
843 else
844 v += (USItype) a;
845 return v;
846 }
847 #endif
848
849 #if defined(L_fixxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
850 DItype
851 __fixxfdi (XFtype a)
852 {
853 if (a < 0)
854 return - __fixunsxfdi (-a);
855 return __fixunsxfdi (a);
856 }
857 #endif
858
859 #ifdef L_fixunsdfdi
860 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
861 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
862
863 DItype
864 __fixunsdfdi (DFtype a)
865 {
866 DFtype b;
867 UDItype v;
868
869 if (a < 0)
870 return 0;
871
872 /* Compute high word of result, as a flonum. */
873 b = (a / HIGH_WORD_COEFF);
874 /* Convert that to fixed (but not to DItype!),
875 and shift it into the high word. */
876 v = (USItype) b;
877 v <<= WORD_SIZE;
878 /* Remove high part from the DFtype, leaving the low part as flonum. */
879 a -= (DFtype)v;
880 /* Convert that to fixed (but not to DItype!) and add it in.
881 Sometimes A comes out negative. This is significant, since
882 A has more bits than a long int does. */
883 if (a < 0)
884 v -= (USItype) (- a);
885 else
886 v += (USItype) a;
887 return v;
888 }
889 #endif
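
/* Worked example for the conversion above (annotation only, 32-bit
   words): for a == 8589934597.0 (2^33 + 5), b == a / 2^32 == 2.0...,
   (USItype) b == 2, v == 2 << 32 == 2^33, a -= v leaves 5.0, and
   v += 5 gives the exact result 8589934597.  The `a < 0' branch covers
   the case where the floating divide rounded b upward, so after the
   subtraction the low part is slightly negative and must be taken back
   out of v.  */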
890
891 #ifdef L_fixdfdi
892 DItype
893 __fixdfdi (DFtype a)
894 {
895 if (a < 0)
896 return - __fixunsdfdi (-a);
897 return __fixunsdfdi (a);
898 }
899 #endif
900
901 #ifdef L_fixunssfdi
902 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
903 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
904
905 DItype
906 __fixunssfdi (SFtype original_a)
907 {
908 /* Convert the SFtype to a DFtype, because that is surely not going
909 to lose any bits. Some day someone else can write a faster version
910 that avoids converting to DFtype, and verify it really works right. */
911 DFtype a = original_a;
912 DFtype b;
913 UDItype v;
914
915 if (a < 0)
916 return 0;
917
918 /* Compute high word of result, as a flonum. */
919 b = (a / HIGH_WORD_COEFF);
920 /* Convert that to fixed (but not to DItype!),
921 and shift it into the high word. */
922 v = (USItype) b;
923 v <<= WORD_SIZE;
924 /* Remove high part from the DFtype, leaving the low part as flonum. */
925 a -= (DFtype)v;
926 /* Convert that to fixed (but not to DItype!) and add it in.
927 Sometimes A comes out negative. This is significant, since
928 A has more bits than a long int does. */
929 if (a < 0)
930 v -= (USItype) (- a);
931 else
932 v += (USItype) a;
933 return v;
934 }
935 #endif
936
937 #ifdef L_fixsfdi
938 DItype
939 __fixsfdi (SFtype a)
940 {
941 if (a < 0)
942 return - __fixunssfdi (-a);
943 return __fixunssfdi (a);
944 }
945 #endif
946
947 #if defined(L_floatdixf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
948 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
949 #define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
950 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
951
952 XFtype
953 __floatdixf (DItype u)
954 {
955 XFtype d;
956
957 d = (SItype) (u >> WORD_SIZE);
958 d *= HIGH_HALFWORD_COEFF;
959 d *= HIGH_HALFWORD_COEFF;
960 d += (USItype) (u & (HIGH_WORD_COEFF - 1));
961
962 return d;
963 }
964 #endif
965
966 #if defined(L_floatditf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
967 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
968 #define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
969 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
970
971 TFtype
972 __floatditf (DItype u)
973 {
974 TFtype d;
975
976 d = (SItype) (u >> WORD_SIZE);
977 d *= HIGH_HALFWORD_COEFF;
978 d *= HIGH_HALFWORD_COEFF;
979 d += (USItype) (u & (HIGH_WORD_COEFF - 1));
980
981 return d;
982 }
983 #endif
984
985 #ifdef L_floatdidf
986 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
987 #define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
988 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
989
990 DFtype
991 __floatdidf (DItype u)
992 {
993 DFtype d;
994
995 d = (SItype) (u >> WORD_SIZE);
996 d *= HIGH_HALFWORD_COEFF;
997 d *= HIGH_HALFWORD_COEFF;
998 d += (USItype) (u & (HIGH_WORD_COEFF - 1));
999
1000 return d;
1001 }
1002 #endif
1003
1004 #ifdef L_floatdisf
1005 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
1006 #define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
1007 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
1008 #define DI_SIZE (sizeof (DItype) * BITS_PER_UNIT)
1009
1010 /* Define codes for all the float formats that we know of. Note
1011 that this is copied from real.h. */
1012
1013 #define UNKNOWN_FLOAT_FORMAT 0
1014 #define IEEE_FLOAT_FORMAT 1
1015 #define VAX_FLOAT_FORMAT 2
1016 #define IBM_FLOAT_FORMAT 3
1017
1018 /* Default to IEEE float if not specified. Nearly all machines use it. */
1019 #ifndef HOST_FLOAT_FORMAT
1020 #define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
1021 #endif
1022
1023 #if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1024 #define DF_SIZE 53
1025 #define SF_SIZE 24
1026 #endif
1027
1028 #if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
1029 #define DF_SIZE 56
1030 #define SF_SIZE 24
1031 #endif
1032
1033 #if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
1034 #define DF_SIZE 56
1035 #define SF_SIZE 24
1036 #endif
1037
1038 SFtype
1039 __floatdisf (DItype u)
1040 {
1041 /* Do the calculation in DFmode
1042 so that we don't lose any of the precision of the high word
1043 while multiplying it. */
1044 DFtype f;
1045
1046 /* Protect against double-rounding error.
1047 Represent any low-order bits, that might be truncated in DFmode,
1048 by a bit that won't be lost. The bit can go in anywhere below the
1049 rounding position of the SFmode. A fixed mask and bit position
1050 handles all usual configurations. It doesn't handle the case
1051 of 128-bit DImode, however. */
1052 if (DF_SIZE < DI_SIZE
1053 && DF_SIZE > (DI_SIZE - DF_SIZE + SF_SIZE))
1054 {
1055 #define REP_BIT ((USItype) 1 << (DI_SIZE - DF_SIZE))
1056 if (! (- ((DItype) 1 << DF_SIZE) < u
1057 && u < ((DItype) 1 << DF_SIZE)))
1058 {
1059 if ((USItype) u & (REP_BIT - 1))
1060 u |= REP_BIT;
1061 }
1062 }
1063 f = (SItype) (u >> WORD_SIZE);
1064 f *= HIGH_HALFWORD_COEFF;
1065 f *= HIGH_HALFWORD_COEFF;
1066 f += (USItype) (u & (HIGH_WORD_COEFF - 1));
1067
1068 return (SFtype) f;
1069 }
1070 #endif
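
/* Annotation on the double-rounding guard above: when |u| does not fit
   in DF_SIZE bits, any nonzero low-order bits that DFmode would drop
   are ORed into a single "sticky" bit (REP_BIT) that lies below
   SFmode's rounding position but within DFmode's precision.  The
   intermediate DFtype value then rounds to SFtype the same way the
   original DImode value would have, avoiding a second, compounding
   rounding step.  */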
1071
1072 #if defined(L_fixunsxfsi) && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
1073 /* Reenable the normal types, in case limits.h needs them. */
1074 #undef char
1075 #undef short
1076 #undef int
1077 #undef long
1078 #undef unsigned
1079 #undef float
1080 #undef double
1081 #undef MIN
1082 #undef MAX
1083 #include <limits.h>
1084
1085 USItype
1086 __fixunsxfsi (XFtype a)
1087 {
1088 if (a >= - (DFtype) LONG_MIN)
1089 return (SItype) (a + LONG_MIN) - LONG_MIN;
1090 return (SItype) a;
1091 }
1092 #endif
1093
1094 #ifdef L_fixunsdfsi
1095 /* Reenable the normal types, in case limits.h needs them. */
1096 #undef char
1097 #undef short
1098 #undef int
1099 #undef long
1100 #undef unsigned
1101 #undef float
1102 #undef double
1103 #undef MIN
1104 #undef MAX
1105 #include <limits.h>
1106
1107 USItype
1108 __fixunsdfsi (DFtype a)
1109 {
1110 if (a >= - (DFtype) LONG_MIN)
1111 return (SItype) (a + LONG_MIN) - LONG_MIN;
1112 return (SItype) a;
1113 }
1114 #endif
1115
1116 #ifdef L_fixunssfsi
1117 /* Reenable the normal types, in case limits.h needs them. */
1118 #undef char
1119 #undef short
1120 #undef int
1121 #undef long
1122 #undef unsigned
1123 #undef float
1124 #undef double
1125 #undef MIN
1126 #undef MAX
1127 #include <limits.h>
1128
1129 USItype
1130 __fixunssfsi (SFtype a)
1131 {
1132 if (a >= - (SFtype) LONG_MIN)
1133 return (SItype) (a + LONG_MIN) - LONG_MIN;
1134 return (SItype) a;
1135 }
1136 #endif
1137 \f
1138 /* From here on down, the routines use normal data types. */
1139
1140 #define SItype bogus_type
1141 #define USItype bogus_type
1142 #define DItype bogus_type
1143 #define UDItype bogus_type
1144 #define SFtype bogus_type
1145 #define DFtype bogus_type
1146
1147 #undef char
1148 #undef short
1149 #undef int
1150 #undef long
1151 #undef unsigned
1152 #undef float
1153 #undef double
1154 \f
1155 #ifdef L__gcc_bcmp
1156
1157 /* Like bcmp except the sign is meaningful.
1158 Result is negative if S1 is less than S2,
1159 positive if S1 is greater, 0 if S1 and S2 are equal. */
1160
1161 int
1162 __gcc_bcmp (unsigned char *s1, unsigned char *s2, size_t size)
1163 {
1164 while (size > 0)
1165 {
1166 unsigned char c1 = *s1++, c2 = *s2++;
1167 if (c1 != c2)
1168 return c1 - c2;
1169 size--;
1170 }
1171 return 0;
1172 }
1173
1174 #endif
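
/* Usage illustration (annotation only): __gcc_bcmp ("abc", "abd", 3)
   compares the bytes in order and returns 'c' - 'd', a negative value,
   whereas plain bcmp would only report that the buffers differ.  */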
1175 \f\f
1176 #ifdef L__dummy
1177 void
1178 __dummy () {}
1179 #endif
1180
1181 #ifdef L_varargs
1182 #ifdef __i860__
1183 #if defined(__svr4__) || defined(__alliant__)
1184 asm (" .text");
1185 asm (" .align 4");
1186
1187 /* The Alliant needs the added underscore. */
1188 asm (".globl __builtin_saveregs");
1189 asm ("__builtin_saveregs:");
1190 asm (".globl ___builtin_saveregs");
1191 asm ("___builtin_saveregs:");
1192
1193 asm (" andnot 0x0f,%sp,%sp"); /* round down to 16-byte boundary */
1194 asm (" adds -96,%sp,%sp"); /* allocate stack space for reg save
1195 area and also for a new va_list
1196 structure */
1197 /* Save all argument registers in the arg reg save area. The
1198 arg reg save area must have the following layout (according
1199 to the svr4 ABI):
1200
1201 struct {
1202 union {
1203 float freg[8];
1204 double dreg[4];
1205 } float_regs;
1206 long ireg[12];
1207 };
1208 */
1209
1210 asm (" fst.q %f8, 0(%sp)"); /* save floating regs (f8-f15) */
1211 asm (" fst.q %f12,16(%sp)");
1212
1213 asm (" st.l %r16,32(%sp)"); /* save integer regs (r16-r27) */
1214 asm (" st.l %r17,36(%sp)");
1215 asm (" st.l %r18,40(%sp)");
1216 asm (" st.l %r19,44(%sp)");
1217 asm (" st.l %r20,48(%sp)");
1218 asm (" st.l %r21,52(%sp)");
1219 asm (" st.l %r22,56(%sp)");
1220 asm (" st.l %r23,60(%sp)");
1221 asm (" st.l %r24,64(%sp)");
1222 asm (" st.l %r25,68(%sp)");
1223 asm (" st.l %r26,72(%sp)");
1224 asm (" st.l %r27,76(%sp)");
1225
1226 asm (" adds 80,%sp,%r16"); /* compute the address of the new
1227 va_list structure. Put it into
1228 r16 so that it will be returned
1229 to the caller. */
1230
1231 /* Initialize all fields of the new va_list structure. This
1232 structure looks like:
1233
1234 typedef struct {
1235 unsigned long ireg_used;
1236 unsigned long freg_used;
1237 long *reg_base;
1238 long *mem_ptr;
1239 } va_list;
1240 */
1241
1242 asm (" st.l %r0, 0(%r16)"); /* nfixed */
1243 asm (" st.l %r0, 4(%r16)"); /* nfloating */
1244 asm (" st.l %sp, 8(%r16)"); /* __va_ctl points to __va_struct. */
1245 asm (" bri %r1"); /* delayed return */
1246 asm (" st.l %r28,12(%r16)"); /* pointer to overflow args */
1247
1248 #else /* not __svr4__ */
1249 #if defined(__PARAGON__)
1250 /*
1251 * we'll use SVR4-ish varargs but need SVR3.2 assembler syntax,
1252 * and we stand a better chance of hooking into libraries
1253 * compiled by PGI. [andyp@ssd.intel.com]
1254 */
1255 asm (" .text");
1256 asm (" .align 4");
1257 asm (".globl __builtin_saveregs");
1258 asm ("__builtin_saveregs:");
1259 asm (".globl ___builtin_saveregs");
1260 asm ("___builtin_saveregs:");
1261
1262 asm (" andnot 0x0f,sp,sp"); /* round down to 16-byte boundary */
1263 asm (" adds -96,sp,sp"); /* allocate stack space for reg save
1264 area and also for a new va_list
1265 structure */
1266 /* Save all argument registers in the arg reg save area. The
1267 arg reg save area must have the following layout (according
1268 to the svr4 ABI):
1269
1270 struct {
1271 union {
1272 float freg[8];
1273 double dreg[4];
1274 } float_regs;
1275 long ireg[12];
1276 };
1277 */
1278
1279 asm (" fst.q f8, 0(sp)");
1280 asm (" fst.q f12,16(sp)");
1281 asm (" st.l r16,32(sp)");
1282 asm (" st.l r17,36(sp)");
1283 asm (" st.l r18,40(sp)");
1284 asm (" st.l r19,44(sp)");
1285 asm (" st.l r20,48(sp)");
1286 asm (" st.l r21,52(sp)");
1287 asm (" st.l r22,56(sp)");
1288 asm (" st.l r23,60(sp)");
1289 asm (" st.l r24,64(sp)");
1290 asm (" st.l r25,68(sp)");
1291 asm (" st.l r26,72(sp)");
1292 asm (" st.l r27,76(sp)");
1293
1294 asm (" adds 80,sp,r16"); /* compute the address of the new
1295 va_list structure. Put it into
1296 r16 so that it will be returned
1297 to the caller. */
1298
1299 /* Initialize all fields of the new va_list structure. This
1300 structure looks like:
1301
1302 typedef struct {
1303 unsigned long ireg_used;
1304 unsigned long freg_used;
1305 long *reg_base;
1306 long *mem_ptr;
1307 } va_list;
1308 */
1309
1310 asm (" st.l r0, 0(r16)"); /* nfixed */
1311 asm (" st.l r0, 4(r16)"); /* nfloating */
1312 asm (" st.l sp, 8(r16)"); /* __va_ctl points to __va_struct. */
1313 asm (" bri r1"); /* delayed return */
1314 asm (" st.l r28,12(r16)"); /* pointer to overflow args */
1315 #else /* not __PARAGON__ */
1316 asm (" .text");
1317 asm (" .align 4");
1318
1319 asm (".globl ___builtin_saveregs");
1320 asm ("___builtin_saveregs:");
1321 asm (" mov sp,r30");
1322 asm (" andnot 0x0f,sp,sp");
1323 asm (" adds -96,sp,sp"); /* allocate sufficient space on the stack */
1324
1325 /* Fill in the __va_struct. */
1326 asm (" st.l r16, 0(sp)"); /* save integer regs (r16-r27) */
1327 asm (" st.l r17, 4(sp)"); /* int fixed[12] */
1328 asm (" st.l r18, 8(sp)");
1329 asm (" st.l r19,12(sp)");
1330 asm (" st.l r20,16(sp)");
1331 asm (" st.l r21,20(sp)");
1332 asm (" st.l r22,24(sp)");
1333 asm (" st.l r23,28(sp)");
1334 asm (" st.l r24,32(sp)");
1335 asm (" st.l r25,36(sp)");
1336 asm (" st.l r26,40(sp)");
1337 asm (" st.l r27,44(sp)");
1338
1339 asm (" fst.q f8, 48(sp)"); /* save floating regs (f8-f15) */
1340 asm (" fst.q f12,64(sp)"); /* int floating[8] */
1341
1342 /* Fill in the __va_ctl. */
1343 asm (" st.l sp, 80(sp)"); /* __va_ctl points to __va_struct. */
1344 asm (" st.l r28,84(sp)"); /* pointer to more args */
1345 asm (" st.l r0, 88(sp)"); /* nfixed */
1346 asm (" st.l r0, 92(sp)"); /* nfloating */
1347
1348 asm (" adds 80,sp,r16"); /* return address of the __va_ctl. */
1349 asm (" bri r1");
1350 asm (" mov r30,sp");
1351 /* recover stack and pass address to start
1352 of data. */
1353 #endif /* not __PARAGON__ */
1354 #endif /* not __svr4__ */
1355 #else /* not __i860__ */
1356 #ifdef __sparc__
1357 asm (".global __builtin_saveregs");
1358 asm ("__builtin_saveregs:");
1359 asm (".global ___builtin_saveregs");
1360 asm ("___builtin_saveregs:");
1361 #ifdef NEED_PROC_COMMAND
1362 asm (".proc 020");
1363 #endif
1364 asm ("st %i0,[%fp+68]");
1365 asm ("st %i1,[%fp+72]");
1366 asm ("st %i2,[%fp+76]");
1367 asm ("st %i3,[%fp+80]");
1368 asm ("st %i4,[%fp+84]");
1369 asm ("retl");
1370 asm ("st %i5,[%fp+88]");
1371 #ifdef NEED_TYPE_COMMAND
1372 asm (".type __builtin_saveregs,#function");
1373 asm (".size __builtin_saveregs,.-__builtin_saveregs");
1374 #endif
1375 #else /* not __sparc__ */
1376 #if defined(__MIPSEL__) || defined(__R3000__) || defined(__R2000__) || defined(__mips__)
1377
1378 asm (" .text");
1379 #ifdef __mips16
1380 asm (" .set nomips16");
1381 #endif
1382 asm (" .ent __builtin_saveregs");
1383 asm (" .globl __builtin_saveregs");
1384 asm ("__builtin_saveregs:");
1385 asm (" sw $4,0($30)");
1386 asm (" sw $5,4($30)");
1387 asm (" sw $6,8($30)");
1388 asm (" sw $7,12($30)");
1389 asm (" j $31");
1390 asm (" .end __builtin_saveregs");
1391 #else /* not __mips__, etc. */
1392
1393 void *
1394 __builtin_saveregs ()
1395 {
1396 abort ();
1397 }
1398
1399 #endif /* not __mips__ */
1400 #endif /* not __sparc__ */
1401 #endif /* not __i860__ */
1402 #endif
1403 \f
1404 #ifdef L_eprintf
1405 #ifndef inhibit_libc
1406
1407 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1408 #include <stdio.h>
1409 /* This is used by the `assert' macro. */
1410 extern void __eprintf (const char *, const char *, unsigned int, const char *)
1411 __attribute__ ((__noreturn__));
1412
1413 void
1414 __eprintf (const char *string, const char *expression,
1415 unsigned int line, const char *filename)
1416 {
1417 fprintf (stderr, string, expression, line, filename);
1418 fflush (stderr);
1419 abort ();
1420 }
1421
1422 #endif
1423 #endif
1424
1425 #ifdef L_bb
1426
1427 /* Structure emitted by -a */
1428 struct bb
1429 {
1430 long zero_word;
1431 const char *filename;
1432 long *counts;
1433 long ncounts;
1434 struct bb *next;
1435 const unsigned long *addresses;
1436
1437 /* Older GCC's did not emit these fields. */
1438 long nwords;
1439 const char **functions;
1440 const long *line_nums;
1441 const char **filenames;
1442 char *flags;
1443 };
1444
1445 #ifdef BLOCK_PROFILER_CODE
1446 BLOCK_PROFILER_CODE
1447 #else
1448 #ifndef inhibit_libc
1449
1450 /* Simple minded basic block profiling output dumper for
1451 systems that don't provide tcov support. At present,
1452 it requires atexit and stdio. */
1453
1454 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1455 #include <stdio.h>
1456 char *ctime ();
1457
1458 #include "gbl-ctors.h"
1459 #include "gcov-io.h"
1460 #include <string.h>
1461
1462 static struct bb *bb_head;
1463
1464 /* Return the number of digits needed to print a value */
1465 /* __inline__ */ static int num_digits (long value, int base)
1466 {
1467 int minus = (value < 0 && base != 16);
1468 unsigned long v = (minus) ? -value : value;
1469 int ret = minus;
1470
1471 do
1472 {
1473 v /= base;
1474 ret++;
1475 }
1476 while (v);
1477
1478 return ret;
1479 }
1480
1481 void
1482 __bb_exit_func (void)
1483 {
1484 FILE *da_file, *file;
1485 long time_value;
1486 int i;
1487
1488 if (bb_head == 0)
1489 return;
1490
1491 i = strlen (bb_head->filename) - 3;
1492
1493 if (!strcmp (bb_head->filename+i, ".da"))
1494 {
1495 /* Must be -fprofile-arcs not -a.
1496 Dump data in a form that gcov expects. */
1497
1498 struct bb *ptr;
1499
1500 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1501 {
1502 /* If the file exists, and the number of counts in it is the same,
1503 then merge them in. */
1504
1505 if ((da_file = fopen (ptr->filename, "r")) != 0)
1506 {
1507 long n_counts = 0;
1508
1509 if (__read_long (&n_counts, da_file, 8) != 0)
1510 {
1511 fprintf (stderr, "arc profiling: Can't read output file %s.\n",
1512 ptr->filename);
1513 continue;
1514 }
1515
1516 if (n_counts == ptr->ncounts)
1517 {
1518 int i;
1519
1520 for (i = 0; i < n_counts; i++)
1521 {
1522 long v = 0;
1523
1524 if (__read_long (&v, da_file, 8) != 0)
1525 {
1526 fprintf (stderr, "arc profiling: Can't read output file %s.\n",
1527 ptr->filename);
1528 break;
1529 }
1530 ptr->counts[i] += v;
1531 }
1532 }
1533
1534 if (fclose (da_file) == EOF)
1535 fprintf (stderr, "arc profiling: Error closing output file %s.\n",
1536 ptr->filename);
1537 }
1538 if ((da_file = fopen (ptr->filename, "w")) == 0)
1539 {
1540 fprintf (stderr, "arc profiling: Can't open output file %s.\n",
1541 ptr->filename);
1542 continue;
1543 }
1544
1545 /* ??? Should first write a header to the file. Preferably, a 4 byte
1546 magic number, 4 bytes containing the time the program was
1547 compiled, 4 bytes containing the last modification time of the
1548 source file, and 4 bytes indicating the compiler options used.
1549
1550 That way we can easily verify that the proper source/executable/
1551 data file combination is being used from gcov. */
1552
1553 if (__write_long (ptr->ncounts, da_file, 8) != 0)
1554 {
1555
1556 fprintf (stderr, "arc profiling: Error writing output file %s.\n",
1557 ptr->filename);
1558 }
1559 else
1560 {
1561 int j;
1562 long *count_ptr = ptr->counts;
1563 int ret = 0;
1564 for (j = ptr->ncounts; j > 0; j--)
1565 {
1566 if (__write_long (*count_ptr, da_file, 8) != 0)
1567 {
1568 ret=1;
1569 break;
1570 }
1571 count_ptr++;
1572 }
1573 if (ret)
1574 fprintf (stderr, "arc profiling: Error writing output file %s.\n",
1575 ptr->filename);
1576 }
1577
1578 if (fclose (da_file) == EOF)
1579 fprintf (stderr, "arc profiling: Error closing output file %s.\n",
1580 ptr->filename);
1581 }
1582
1583 return;
1584 }
1585
1586 /* Must be basic block profiling. Emit a human readable output file. */
1587
1588 file = fopen ("bb.out", "a");
1589
1590 if (!file)
1591 perror ("bb.out");
1592
1593 else
1594 {
1595 struct bb *ptr;
1596
1597 /* This is somewhat type incorrect, but it avoids worrying about
1598 exactly where time.h is included from. It should be ok unless
1599 a void * differs from other pointer formats, or if sizeof (long)
1600 is < sizeof (time_t). It would be nice if we could assume the
1601 use of rational standards here. */
1602
1603 time ((void *) &time_value);
1604 fprintf (file, "Basic block profiling finished on %s\n", ctime ((void *) &time_value));
1605
1606 /* We check the length field explicitly in order to allow compatibility
1607 with older GCC's which did not provide it. */
1608
1609 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1610 {
1611 int i;
1612 int func_p = (ptr->nwords >= sizeof (struct bb)
1613 && ptr->nwords <= 1000
1614 && ptr->functions);
1615 int line_p = (func_p && ptr->line_nums);
1616 int file_p = (func_p && ptr->filenames);
1617 int addr_p = (ptr->addresses != 0);
1618 long ncounts = ptr->ncounts;
1619 long cnt_max = 0;
1620 long line_max = 0;
1621 long addr_max = 0;
1622 int file_len = 0;
1623 int func_len = 0;
1624 int blk_len = num_digits (ncounts, 10);
1625 int cnt_len;
1626 int line_len;
1627 int addr_len;
1628
1629 fprintf (file, "File %s, %ld basic blocks \n\n",
1630 ptr->filename, ncounts);
1631
1632 /* Get max values for each field. */
1633 for (i = 0; i < ncounts; i++)
1634 {
1635 const char *p;
1636 int len;
1637
1638 if (cnt_max < ptr->counts[i])
1639 cnt_max = ptr->counts[i];
1640
1641 if (addr_p && addr_max < ptr->addresses[i])
1642 addr_max = ptr->addresses[i];
1643
1644 if (line_p && line_max < ptr->line_nums[i])
1645 line_max = ptr->line_nums[i];
1646
1647 if (func_p)
1648 {
1649 p = (ptr->functions[i]) ? (ptr->functions[i]) : "<none>";
1650 len = strlen (p);
1651 if (func_len < len)
1652 func_len = len;
1653 }
1654
1655 if (file_p)
1656 {
1657 p = (ptr->filenames[i]) ? (ptr->filenames[i]) : "<none>";
1658 len = strlen (p);
1659 if (file_len < len)
1660 file_len = len;
1661 }
1662 }
1663
1664 addr_len = num_digits (addr_max, 16);
1665 cnt_len = num_digits (cnt_max, 10);
1666 line_len = num_digits (line_max, 10);
1667
1668 /* Now print out the basic block information. */
1669 for (i = 0; i < ncounts; i++)
1670 {
1671 fprintf (file,
1672 " Block #%*d: executed %*ld time(s)",
1673 blk_len, i+1,
1674 cnt_len, ptr->counts[i]);
1675
1676 if (addr_p)
1677 fprintf (file, " address= 0x%.*lx", addr_len,
1678 ptr->addresses[i]);
1679
1680 if (func_p)
1681 fprintf (file, " function= %-*s", func_len,
1682 (ptr->functions[i]) ? ptr->functions[i] : "<none>");
1683
1684 if (line_p)
1685 fprintf (file, " line= %*ld", line_len, ptr->line_nums[i]);
1686
1687 if (file_p)
1688 fprintf (file, " file= %s",
1689 (ptr->filenames[i]) ? ptr->filenames[i] : "<none>");
1690
1691 fprintf (file, "\n");
1692 }
1693
1694 fprintf (file, "\n");
1695 fflush (file);
1696 }
1697
1698 fprintf (file, "\n\n");
1699 fclose (file);
1700 }
1701 }
1702
1703 void
1704 __bb_init_func (struct bb *blocks)
1705 {
1706 /* User is supposed to check whether the first word is non-0,
1707 but just in case.... */
1708
1709 if (blocks->zero_word)
1710 return;
1711
1712 #ifdef ON_EXIT
1713 /* Initialize destructor. */
1714 if (!bb_head)
1715 ON_EXIT (__bb_exit_func, 0);
1716 #endif
1717
1718 /* Set up linked list. */
1719 blocks->zero_word = 1;
1720 blocks->next = bb_head;
1721 bb_head = blocks;
1722 }
1723
1724 #ifndef MACHINE_STATE_SAVE
1725 #define MACHINE_STATE_SAVE(ID)
1726 #endif
1727 #ifndef MACHINE_STATE_RESTORE
1728 #define MACHINE_STATE_RESTORE(ID)
1729 #endif
1730
1731 /* Number of buckets in hashtable of basic block addresses. */
1732
1733 #define BB_BUCKETS 311
1734
1735 /* Maximum length of string in file bb.in. */
1736
1737 #define BBINBUFSIZE 500
1738
1739 /* BBINBUFSIZE-1 with double quotes. We could use #BBINBUFSIZE or
1740 "BBINBUFSIZE" but want to avoid trouble with preprocessors. */
1741
1742 #define BBINBUFSIZESTR "499"
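
/* Annotation: BBINBUFSIZESTR is spliced into the fscanf format below as
   " %" BBINBUFSIZESTR "s ", i.e. the format string " %499s ", so at
   most BBINBUFSIZE-1 characters are read into buf, leaving room for the
   terminating NUL.  */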
1743
1744 struct bb_edge
1745 {
1746 struct bb_edge *next;
1747 unsigned long src_addr;
1748 unsigned long dst_addr;
1749 unsigned long count;
1750 };
1751
1752 enum bb_func_mode
1753 {
1754 TRACE_KEEP = 0, TRACE_ON = 1, TRACE_OFF = 2
1755 };
1756
1757 struct bb_func
1758 {
1759 struct bb_func *next;
1760 char *funcname;
1761 char *filename;
1762 enum bb_func_mode mode;
1763 };
1764
1765 /* This is the connection to the outside world.
1766 The BLOCK_PROFILER macro must set __bb.blocks
1767 and __bb.blockno. */
1768
1769 struct {
1770 unsigned long blockno;
1771 struct bb *blocks;
1772 } __bb;
1773
1774 /* Vars to store addrs of source and destination basic blocks
1775 of a jump. */
1776
1777 static unsigned long bb_src = 0;
1778 static unsigned long bb_dst = 0;
1779
1780 static FILE *bb_tracefile = (FILE *) 0;
1781 static struct bb_edge **bb_hashbuckets = (struct bb_edge **) 0;
1782 static struct bb_func *bb_func_head = (struct bb_func *) 0;
1783 static unsigned long bb_callcount = 0;
1784 static int bb_mode = 0;
1785
1786 static unsigned long *bb_stack = (unsigned long *) 0;
1787 static size_t bb_stacksize = 0;
1788
1789 static int reported = 0;
1790
1791 /* Trace modes:
1792 Always : Print execution frequencies of basic blocks
1793 to file bb.out.
1794 bb_mode & 1 != 0 : Dump trace of basic blocks to file bbtrace[.gz]
1795 bb_mode & 2 != 0 : Print jump frequencies to file bb.out.
1796 bb_mode & 4 != 0 : Cut call instructions from basic block flow.
1797 bb_mode & 8 != 0 : Insert return instructions in basic block flow.
1798 */
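
/* Example bb.in contents (hypothetical, for illustration only):

       __bb_jumps__
       __bb_hidecall__
       foo.c:main
       -quicksort

   This would set bb_mode to 2|4 (jump frequencies, calls cut from the
   flow), trace the function `main' defined in foo.c, and explicitly
   exclude `quicksort'.  See __bb_init_prg below for the parsing.  */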
1799
1800 #ifdef HAVE_POPEN
1801
1802 /*#include <sys/types.h>*/
1803 #include <sys/stat.h>
1804 /*#include <malloc.h>*/
1805
1806 /* Commands executed by gopen. */
1807
1808 #define GOPENDECOMPRESS "gzip -cd "
1809 #define GOPENCOMPRESS "gzip -c >"
1810
1811 /* Like fopen but pipes through gzip. mode may only be "r" or "w".
1812 If it does not compile, simply replace gopen by fopen and delete
1813 '.gz' from any first parameter to gopen. */
1814
1815 static FILE *
1816 gopen (char *fn, char *mode)
1817 {
1818 int use_gzip;
1819 char *p;
1820
1821 if (mode[1])
1822 return (FILE *) 0;
1823
1824 if (mode[0] != 'r' && mode[0] != 'w')
1825 return (FILE *) 0;
1826
1827 p = fn + strlen (fn)-1;
1828 use_gzip = ((p[-1] == '.' && (p[0] == 'Z' || p[0] == 'z'))
1829 || (p[-2] == '.' && p[-1] == 'g' && p[0] == 'z'));
1830
1831 if (use_gzip)
1832 {
1833 if (mode[0]=='r')
1834 {
1835 FILE *f;
1836 char *s = (char *) malloc (sizeof (char) * strlen (fn)
1837 + sizeof (GOPENDECOMPRESS));
1838 strcpy (s, GOPENDECOMPRESS);
1839 strcpy (s + (sizeof (GOPENDECOMPRESS)-1), fn);
1840 f = popen (s, mode);
1841 free (s);
1842 return f;
1843 }
1844
1845 else
1846 {
1847 FILE *f;
1848 char *s = (char *) malloc (sizeof (char) * strlen (fn)
1849 + sizeof (GOPENCOMPRESS));
1850 strcpy (s, GOPENCOMPRESS);
1851 strcpy (s + (sizeof (GOPENCOMPRESS)-1), fn);
1852 if (!(f = popen (s, mode)))
1853 f = fopen (s, mode);
1854 free (s);
1855 return f;
1856 }
1857 }
1858
1859 else
1860 return fopen (fn, mode);
1861 }
1862
1863 static int
1864 gclose (FILE *f)
1865 {
1866 struct stat buf;
1867
1868 if (f != 0)
1869 {
1870 if (!fstat (fileno (f), &buf) && S_ISFIFO (buf.st_mode))
1871 return pclose (f);
1872
1873 return fclose (f);
1874 }
1875 return 0;
1876 }
1877
1878 #endif /* HAVE_POPEN */
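
/* Usage note (annotation only): gopen ("bbtrace.gz", "w") runs
   "gzip -c >bbtrace.gz" through popen, while gopen ("bbtrace", "w")
   falls back to a plain fopen; gclose picks pclose or fclose by
   checking whether the stream is a pipe (FIFO).  */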
1879
1880 /* Called once per program. */
1881
1882 static void
1883 __bb_exit_trace_func ()
1884 {
1885 FILE *file = fopen ("bb.out", "a");
1886 struct bb_func *f;
1887 struct bb *b;
1888
1889 if (!file)
1890 perror ("bb.out");
1891
1892 if (bb_mode & 1)
1893 {
1894 if (!bb_tracefile)
1895 perror ("bbtrace");
1896 else
1897 #ifdef HAVE_POPEN
1898 gclose (bb_tracefile);
1899 #else
1900 fclose (bb_tracefile);
1901 #endif /* HAVE_POPEN */
1902 }
1903
1904 /* Check functions in `bb.in'. */
1905
1906 if (file)
1907 {
1908 long time_value;
1909 const struct bb_func *p;
1910 int printed_something = 0;
1911 struct bb *ptr;
1912 long blk;
1913
1914 /* This is somewhat type incorrect. */
1915 time ((void *) &time_value);
1916
1917 for (p = bb_func_head; p != (struct bb_func *) 0; p = p->next)
1918 {
1919 for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
1920 {
1921 if (!ptr->filename || (p->filename != (char *) 0 && strcmp (p->filename, ptr->filename)))
1922 continue;
1923 for (blk = 0; blk < ptr->ncounts; blk++)
1924 {
1925 if (!strcmp (p->funcname, ptr->functions[blk]))
1926 goto found;
1927 }
1928 }
1929
1930 if (!printed_something)
1931 {
1932 fprintf (file, "Functions in `bb.in' not executed during basic block profiling on %s\n", ctime ((void *) &time_value));
1933 printed_something = 1;
1934 }
1935
1936 fprintf (file, "\tFunction %s", p->funcname);
1937 if (p->filename)
1938 fprintf (file, " of file %s", p->filename);
1939 fprintf (file, "\n" );
1940
1941 found: ;
1942 }
1943
1944 if (printed_something)
1945 fprintf (file, "\n");
1946
1947 }
1948
1949 if (bb_mode & 2)
1950 {
1951 if (!bb_hashbuckets)
1952 {
1953 if (!reported)
1954 {
1955 fprintf (stderr, "Profiler: out of memory\n");
1956 reported = 1;
1957 }
1958 return;
1959 }
1960
1961 else if (file)
1962 {
1963 long time_value;
1964 int i;
1965 unsigned long addr_max = 0;
1966 unsigned long cnt_max = 0;
1967 int cnt_len;
1968 int addr_len;
1969
1970 /* This is somewhat type incorrect, but it avoids worrying about
1971 exactly where time.h is included from. It should be ok unless
1972 a void * differs from other pointer formats, or if sizeof (long)
1973 is < sizeof (time_t). It would be nice if we could assume the
1974 use of rational standards here. */
1975
1976 time ((void *) &time_value);
1977 fprintf (file, "Basic block jump tracing");
1978
1979 switch (bb_mode & 12)
1980 {
1981 case 0:
1982 fprintf (file, " (with call)");
1983 break;
1984
1985 case 4:
1986 /* Print nothing. */
1987 break;
1988
1989 case 8:
1990 fprintf (file, " (with call & ret)");
1991 break;
1992
1993 case 12:
1994 fprintf (file, " (with ret)");
1995 break;
1996 }
1997
1998 fprintf (file, " finished on %s\n", ctime ((void *) &time_value));
1999
2000 for (i = 0; i < BB_BUCKETS; i++)
2001 {
2002 struct bb_edge *bucket = bb_hashbuckets[i];
2003 for ( ; bucket; bucket = bucket->next )
2004 {
2005 if (addr_max < bucket->src_addr)
2006 addr_max = bucket->src_addr;
2007 if (addr_max < bucket->dst_addr)
2008 addr_max = bucket->dst_addr;
2009 if (cnt_max < bucket->count)
2010 cnt_max = bucket->count;
2011 }
2012 }
2013 addr_len = num_digits (addr_max, 16);
2014 cnt_len = num_digits (cnt_max, 10);
2015
2016 for ( i = 0; i < BB_BUCKETS; i++)
2017 {
2018 struct bb_edge *bucket = bb_hashbuckets[i];
2019 for ( ; bucket; bucket = bucket->next )
2020 {
2021 fprintf (file, "Jump from block 0x%.*lx to "
2022 "block 0x%.*lx executed %*lu time(s)\n",
2023 addr_len, bucket->src_addr,
2024 addr_len, bucket->dst_addr,
2025 cnt_len, bucket->count);
2026 }
2027 }
2028
2029 fprintf (file, "\n");
2030
2031 }
2032 }
2033
2034 if (file)
2035 fclose (file);
2036
2037 /* Free allocated memory. */
2038
2039 f = bb_func_head;
2040 while (f)
2041 {
2042 struct bb_func *old = f;
2043
2044 f = f->next;
2045 if (old->funcname) free (old->funcname);
2046 if (old->filename) free (old->filename);
2047 free (old);
2048 }
2049
2050 if (bb_stack)
2051 free (bb_stack);
2052
2053 if (bb_hashbuckets)
2054 {
2055 int i;
2056
2057 for (i = 0; i < BB_BUCKETS; i++)
2058 {
2059 struct bb_edge *old, *bucket = bb_hashbuckets[i];
2060
2061 while (bucket)
2062 {
2063 old = bucket;
2064 bucket = bucket->next;
2065 free (old);
2066 }
2067 }
2068 free (bb_hashbuckets);
2069 }
2070
2071 for (b = bb_head; b; b = b->next)
2072 if (b->flags) free (b->flags);
2073 }
2074
2075 /* Called once per program. */
2076
2077 static void
2078 __bb_init_prg ()
2079 {
2080 FILE *file;
2081 char buf[BBINBUFSIZE];
2082 const char *p;
2083 const char *pos;
2084 enum bb_func_mode m;
2085 int i;
2086
2087 #ifdef ON_EXIT
2088 /* Initialize destructor. */
2089 ON_EXIT (__bb_exit_func, 0);
2090 #endif
2091
2092 if (!(file = fopen ("bb.in", "r")))
2093 return;
2094
2095 while(fscanf (file, " %" BBINBUFSIZESTR "s ", buf) != EOF)
2096 {
2097 p = buf;
2098 if (*p == '-')
2099 {
2100 m = TRACE_OFF;
2101 p++;
2102 }
2103 else
2104 {
2105 m = TRACE_ON;
2106 }
2107 if (!strcmp (p, "__bb_trace__"))
2108 bb_mode |= 1;
2109 else if (!strcmp (p, "__bb_jumps__"))
2110 bb_mode |= 2;
2111 else if (!strcmp (p, "__bb_hidecall__"))
2112 bb_mode |= 4;
2113 else if (!strcmp (p, "__bb_showret__"))
2114 bb_mode |= 8;
2115 else
2116 {
2117 struct bb_func *f = (struct bb_func *) malloc (sizeof (struct bb_func));
2118 if (f)
2119 {
2120 unsigned long l;
2121 f->next = bb_func_head;
2122 if ((pos = strchr (p, ':')))
2123 {
2124 if (!(f->funcname = (char *) malloc (strlen (pos+1)+1)))
2125 continue;
2126 strcpy (f->funcname, pos+1);
2127 l = pos-p;
2128 if ((f->filename = (char *) malloc (l+1)))
2129 {
2130 strncpy (f->filename, p, l);
2131 f->filename[l] = '\0';
2132 }
2133 else
2134 f->filename = (char *) 0;
2135 }
2136 else
2137 {
2138 if (!(f->funcname = (char *) malloc (strlen (p)+1)))
2139 continue;
2140 strcpy (f->funcname, p);
2141 f->filename = (char *) 0;
2142 }
2143 f->mode = m;
2144 bb_func_head = f;
2145 }
2146 }
2147 }
2148 fclose (file);
2149
2150 #ifdef HAVE_POPEN
2151
2152 if (bb_mode & 1)
2153 bb_tracefile = gopen ("bbtrace.gz", "w");
2154
2155 #else
2156
2157 if (bb_mode & 1)
2158 bb_tracefile = fopen ("bbtrace", "w");
2159
2160 #endif /* HAVE_POPEN */
2161
2162 if (bb_mode & 2)
2163 {
2164 bb_hashbuckets = (struct bb_edge **)
2165 malloc (BB_BUCKETS * sizeof (struct bb_edge *));
2166 if (bb_hashbuckets)
2167 /* Use a loop here rather than calling bzero to avoid having to
2168 conditionalize its existence. */
2169 for (i = 0; i < BB_BUCKETS; i++)
2170 bb_hashbuckets[i] = 0;
2171 }
2172
2173 if (bb_mode & 12)
2174 {
2175 bb_stacksize = 10;
2176 bb_stack = (unsigned long *) malloc (bb_stacksize * sizeof (*bb_stack));
2177 }
2178
2179 #ifdef ON_EXIT
2180 /* Initialize destructor. */
2181 ON_EXIT (__bb_exit_trace_func, 0);
2182 #endif
2183
2184 }
2185
2186 /* Called upon entering a basic block. */
2187
2188 void
2189 __bb_trace_func ()
2190 {
2191 struct bb_edge *bucket;
2192
2193 MACHINE_STATE_SAVE("1")
2194
2195 if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))
2196 goto skip;
2197
2198 bb_dst = __bb.blocks->addresses[__bb.blockno];
2199 __bb.blocks->counts[__bb.blockno]++;
2200
2201 if (bb_tracefile)
2202 {
2203 fwrite (&bb_dst, sizeof (unsigned long), 1, bb_tracefile);
2204 }
2205
2206 if (bb_hashbuckets)
2207 {
2208 struct bb_edge **startbucket, **oldnext;
2209
2210 oldnext = startbucket
2211 = & bb_hashbuckets[ (((int) bb_src*8) ^ (int) bb_dst) % BB_BUCKETS ];
2212 bucket = *startbucket;
2213
2214 for (bucket = *startbucket; bucket;
2215 oldnext = &(bucket->next), bucket = *oldnext)
2216 {
2217 if (bucket->src_addr == bb_src
2218 && bucket->dst_addr == bb_dst)
2219 {
2220 bucket->count++;
2221 *oldnext = bucket->next;
2222 bucket->next = *startbucket;
2223 *startbucket = bucket;
2224 goto ret;
2225 }
2226 }
2227
2228 bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
2229
2230 if (!bucket)
2231 {
2232 if (!reported)
2233 {
2234 fprintf (stderr, "Profiler: out of memory\n");
2235 reported = 1;
2236 }
2237 }
2238
2239 else
2240 {
2241 bucket->src_addr = bb_src;
2242 bucket->dst_addr = bb_dst;
2243 bucket->next = *startbucket;
2244 *startbucket = bucket;
2245 bucket->count = 1;
2246 }
2247 }
2248
2249 ret:
2250 bb_src = bb_dst;
2251
2252 skip:
2253 ;
2254
2255 MACHINE_STATE_RESTORE("1")
2256
2257 }
2258
2259 /* Called when returning from a function and `__bb_showret__' is set. */
2260
2261 static void
2262 __bb_trace_func_ret ()
2263 {
2264 struct bb_edge *bucket;
2265
2266 if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))
2267 goto skip;
2268
2269 if (bb_hashbuckets)
2270 {
2271 struct bb_edge **startbucket, **oldnext;
2272
2273 oldnext = startbucket
2274 = & bb_hashbuckets[ (((int) bb_dst * 8) ^ (int) bb_src) % BB_BUCKETS ];
2275 bucket = *startbucket;
2276
2277 for (bucket = *startbucket; bucket;
2278 oldnext = &(bucket->next), bucket = *oldnext)
2279 {
2280 if (bucket->src_addr == bb_dst
2281 && bucket->dst_addr == bb_src)
2282 {
2283 bucket->count++;
2284 *oldnext = bucket->next;
2285 bucket->next = *startbucket;
2286 *startbucket = bucket;
2287 goto ret;
2288 }
2289 }
2290
2291 bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
2292
2293 if (!bucket)
2294 {
2295 if (!reported)
2296 {
2297 fprintf (stderr, "Profiler: out of memory\n");
2298 reported = 1;
2299 }
2300 }
2301
2302 else
2303 {
2304 bucket->src_addr = bb_dst;
2305 bucket->dst_addr = bb_src;
2306 bucket->next = *startbucket;
2307 *startbucket = bucket;
2308 bucket->count = 1;
2309 }
2310 }
2311
2312 ret:
2313 bb_dst = bb_src;
2314
2315 skip:
2316 ;
2317
2318 }
2319
2320 /* Called upon entering the first function of a file. */
2321
2322 static void
2323 __bb_init_file (struct bb *blocks)
2324 {
2325
2326 const struct bb_func *p;
2327 long blk, ncounts = blocks->ncounts;
2328 const char **functions = blocks->functions;
2329
2330 /* Set up linked list. */
2331 blocks->zero_word = 1;
2332 blocks->next = bb_head;
2333 bb_head = blocks;
2334
2335 blocks->flags = 0;
2336 if (!bb_func_head
2337 || !(blocks->flags = (char *) malloc (sizeof (char) * blocks->ncounts)))
2338 return;
2339
2340 for (blk = 0; blk < ncounts; blk++)
2341 blocks->flags[blk] = 0;
2342
2343 for (blk = 0; blk < ncounts; blk++)
2344 {
2345 for (p = bb_func_head; p; p = p->next)
2346 {
2347 if (!strcmp (p->funcname, functions[blk])
2348 && (!p->filename || !strcmp (p->filename, blocks->filename)))
2349 {
2350 blocks->flags[blk] |= p->mode;
2351 }
2352 }
2353 }
2354
2355 }
2356
2357 /* Called when exiting from a function. */
2358
2359 void
2360 __bb_trace_ret ()
2361 {
2362
2363 MACHINE_STATE_SAVE("2")
2364
2365 if (bb_callcount)
2366 {
2367 if ((bb_mode & 12) && bb_stacksize > bb_callcount)
2368 {
2369 bb_src = bb_stack[bb_callcount];
2370 if (bb_mode & 8)
2371 __bb_trace_func_ret ();
2372 }
2373
2374 bb_callcount -= 1;
2375 }
2376
2377 MACHINE_STATE_RESTORE("2")
2378
2379 }
2380
2381 /* Called when entering a function. */
2382
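/* Note on the mode bits used below: when (bb_mode & 12) is set, the
   caller's bb_src is pushed on bb_stack so that __bb_trace_ret can
   restore it on return; (bb_mode & 8) additionally records the return
   arc via __bb_trace_func_ret, and (bb_mode & 4) clears bb_src on
   function entry.  */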
2383 void
2384 __bb_init_trace_func (struct bb *blocks, unsigned long blockno)
2385 {
2386 static int trace_init = 0;
2387
2388 MACHINE_STATE_SAVE("3")
2389
2390 if (!blocks->zero_word)
2391 {
2392 if (!trace_init)
2393 {
2394 trace_init = 1;
2395 __bb_init_prg ();
2396 }
2397 __bb_init_file (blocks);
2398 }
2399
2400 if (bb_callcount)
2401 {
2402
2403 bb_callcount += 1;
2404
2405 if (bb_mode & 12)
2406 {
2407 if (bb_callcount >= bb_stacksize)
2408 {
2409 size_t newsize = bb_callcount + 100;
2410
2411 bb_stack = (unsigned long *) realloc (bb_stack, newsize * sizeof (*bb_stack));
2412 if (! bb_stack)
2413 {
2414 if (!reported)
2415 {
2416 fprintf (stderr, "Profiler: out of memory\n");
2417 reported = 1;
2418 }
2419 bb_stacksize = 0;
2420 goto stack_overflow;
2421 }
2422 bb_stacksize = newsize;
2423 }
2424 bb_stack[bb_callcount] = bb_src;
2425
2426 if (bb_mode & 4)
2427 bb_src = 0;
2428
2429 }
2430
2431 stack_overflow:;
2432
2433 }
2434
2435 else if (blocks->flags && (blocks->flags[blockno] & TRACE_ON))
2436 {
2437 bb_callcount = 1;
2438 bb_src = 0;
2439
2440 if (bb_stack)
2441 bb_stack[bb_callcount] = bb_src;
2442 }
2443
2444 MACHINE_STATE_RESTORE("3")
2445 }
2446
2447 #endif /* not inhibit_libc */
2448 #endif /* not BLOCK_PROFILER_CODE */
2449 #endif /* L_bb */
2450 \f
2451 #ifdef L_shtab
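/* Table of the 32 single-bit masks, so that __shtab[n] == 1U << n.  */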
2452 unsigned int __shtab[] = {
2453 0x00000001, 0x00000002, 0x00000004, 0x00000008,
2454 0x00000010, 0x00000020, 0x00000040, 0x00000080,
2455 0x00000100, 0x00000200, 0x00000400, 0x00000800,
2456 0x00001000, 0x00002000, 0x00004000, 0x00008000,
2457 0x00010000, 0x00020000, 0x00040000, 0x00080000,
2458 0x00100000, 0x00200000, 0x00400000, 0x00800000,
2459 0x01000000, 0x02000000, 0x04000000, 0x08000000,
2460 0x10000000, 0x20000000, 0x40000000, 0x80000000
2461 };
2462 #endif
2463 \f
2464 #ifdef L_clear_cache
2465 /* Clear part of an instruction cache. */
2466
2467 #define INSN_CACHE_PLANE_SIZE (INSN_CACHE_SIZE / INSN_CACHE_DEPTH)
2468
2469 void
2470 __clear_cache (char *beg, char *end)
2471 {
2472 #ifdef CLEAR_INSN_CACHE
2473 CLEAR_INSN_CACHE (beg, end);
2474 #else
2475 #ifdef INSN_CACHE_SIZE
2476 static char array[INSN_CACHE_SIZE + INSN_CACHE_PLANE_SIZE + INSN_CACHE_LINE_WIDTH];
2477 static int initialized;
2478 int offset;
2479 void *start_addr;
2480 void *end_addr;
2481 typedef void (*function_ptr) ();
2482
2483 #if (INSN_CACHE_SIZE / INSN_CACHE_LINE_WIDTH) < 16
2484 /* It's cheaper to clear the whole cache.
2485 Put in a series of jump instructions so that calling the beginning
2486 of the cache will clear the whole thing. */
2487
2488 if (! initialized)
2489 {
2490 int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2491 & -INSN_CACHE_LINE_WIDTH);
2492 int end_ptr = ptr + INSN_CACHE_SIZE;
2493
2494 while (ptr < end_ptr)
2495 {
2496 *(INSTRUCTION_TYPE *)ptr
2497 = JUMP_AHEAD_INSTRUCTION + INSN_CACHE_LINE_WIDTH;
2498 ptr += INSN_CACHE_LINE_WIDTH;
2499 }
2500 *(INSTRUCTION_TYPE *) (ptr - INSN_CACHE_LINE_WIDTH) = RETURN_INSTRUCTION;
2501
2502 initialized = 1;
2503 }
2504
2505 /* Call the beginning of the sequence. */
2506 (((function_ptr) (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2507 & -INSN_CACHE_LINE_WIDTH))
2508 ());
2509
2510 #else /* Cache is large. */
2511
2512 if (! initialized)
2513 {
2514 int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
2515 & -INSN_CACHE_LINE_WIDTH);
2516
2517 while (ptr < (int) array + sizeof array)
2518 {
2519 *(INSTRUCTION_TYPE *)ptr = RETURN_INSTRUCTION;
2520 ptr += INSN_CACHE_LINE_WIDTH;
2521 }
2522
2523 initialized = 1;
2524 }
2525
2526 /* Find the location in array that occupies the same cache line as BEG. */
2527
2528 offset = ((int) beg & -INSN_CACHE_LINE_WIDTH) & (INSN_CACHE_PLANE_SIZE - 1);
2529 start_addr = (((int) (array + INSN_CACHE_PLANE_SIZE - 1)
2530 & -INSN_CACHE_PLANE_SIZE)
2531 + offset);
2532
2533 /* Compute the cache alignment of the place to stop clearing. */
2534 #if 0 /* This is not needed for gcc's purposes. */
2535 /* If the block to clear is bigger than a cache plane,
2536 we clear the entire cache, and OFFSET is already correct. */
2537 if (end < beg + INSN_CACHE_PLANE_SIZE)
2538 #endif
2539 offset = (((int) (end + INSN_CACHE_LINE_WIDTH - 1)
2540 & -INSN_CACHE_LINE_WIDTH)
2541 & (INSN_CACHE_PLANE_SIZE - 1));
2542
2543 #if INSN_CACHE_DEPTH > 1
2544 end_addr = (start_addr & -INSN_CACHE_PLANE_SIZE) + offset;
2545 if (end_addr <= start_addr)
2546 end_addr += INSN_CACHE_PLANE_SIZE;
2547
2548 for (plane = 0; plane < INSN_CACHE_DEPTH; plane++)
2549 {
2550 int addr = start_addr + plane * INSN_CACHE_PLANE_SIZE;
2551 int stop = end_addr + plane * INSN_CACHE_PLANE_SIZE;
2552
2553 while (addr != stop)
2554 {
2555 /* Call the return instruction at ADDR. */
2556 ((function_ptr) addr) ();
2557
2558 addr += INSN_CACHE_LINE_WIDTH;
2559 }
2560 }
2561 #else /* just one plane */
2562 do
2563 {
2564 /* Call the return instruction at START_ADDR. */
2565 ((function_ptr) start_addr) ();
2566
2567 start_addr += INSN_CACHE_LINE_WIDTH;
2568 }
2569 while ((start_addr % INSN_CACHE_SIZE) != offset);
2570 #endif /* just one plane */
2571 #endif /* Cache is large */
2572 #endif /* Cache exists */
2573 #endif /* CLEAR_INSN_CACHE */
2574 }
2575
2576 #endif /* L_clear_cache */
2577 \f
2578 #ifdef L_trampoline
2579
2580 /* Jump to a trampoline, loading the static chain address. */
2581
2582 #if defined(WINNT) && ! defined(__CYGWIN__) && ! defined (_UWIN)
2583
2584 long getpagesize()
2585 {
2586 #ifdef _ALPHA_
2587 return 8192;
2588 #else
2589 return 4096;
2590 #endif
2591 }
2592
2593 #ifdef i386
2594 extern int VirtualProtect (char *, int, int, int *) __attribute__((stdcall));
2595 #endif
2596
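/* Emulate the Unix mprotect call using VirtualProtect.  The np values
   are the Win32 PAGE_* protection constants: PAGE_EXECUTE_READWRITE
   (0x40) for rwx, PAGE_EXECUTE_READ (0x20) for r-x, PAGE_EXECUTE (0x10)
   for --x, PAGE_READWRITE (0x04) for rw-, PAGE_READONLY (0x02) for r--,
   and PAGE_NOACCESS (0x01) for no access.  */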
2597 int
2598 mprotect (char *addr, int len, int prot)
2599 {
2600 int np, op;
2601
2602 if (prot == 7)
2603 np = 0x40;
2604 else if (prot == 5)
2605 np = 0x20;
2606 else if (prot == 4)
2607 np = 0x10;
2608 else if (prot == 3)
2609 np = 0x04;
2610 else if (prot == 1)
2611 np = 0x02;
2612 else /* prot == 0, or an unexpected value: default to no access. */
2613 np = 0x01;
2614
2615 if (VirtualProtect (addr, len, np, &op))
2616 return 0;
2617 else
2618 return -1;
2619 }
2620
2621 #endif /* WINNT && ! __CYGWIN__ && ! _UWIN */
2622
2623 #ifdef TRANSFER_FROM_TRAMPOLINE
2624 TRANSFER_FROM_TRAMPOLINE
2625 #endif
2626
2627 #if defined (NeXT) && defined (__MACH__)
2628
2629 /* Make stack executable so we can call trampolines on stack.
2630 This is called from INITIALIZE_TRAMPOLINE in next.h. */
2631 #ifdef NeXTStep21
2632 #include <mach.h>
2633 #else
2634 #include <mach/mach.h>
2635 #endif
2636
2637 void
2638 __enable_execute_stack (char *addr)
2639 {
2640 kern_return_t r;
2641 char *eaddr = addr + TRAMPOLINE_SIZE;
2642 vm_address_t a = (vm_address_t) addr;
2643
2644 /* turn on execute access on stack */
2645 r = vm_protect (task_self (), a, TRAMPOLINE_SIZE, FALSE, VM_PROT_ALL);
2646 if (r != KERN_SUCCESS)
2647 {
2648 mach_error("vm_protect VM_PROT_ALL", r);
2649 exit(1);
2650 }
2651
2652 /* We inline the i-cache invalidation for speed */
2653
2654 #ifdef CLEAR_INSN_CACHE
2655 CLEAR_INSN_CACHE (addr, eaddr);
2656 #else
2657 __clear_cache (addr, eaddr);
2658 #endif
2659 }
2660
2661 #endif /* defined (NeXT) && defined (__MACH__) */
2662
2663 #ifdef __convex__
2664
2665 /* Make stack executable so we can call trampolines on stack.
2666 This is called from INITIALIZE_TRAMPOLINE in convex.h. */
2667
2668 #include <sys/mman.h>
2669 #include <sys/vmparam.h>
2670 #include <machine/machparam.h>
2671
2672 void
2673 __enable_execute_stack ()
2674 {
2675 int fp;
2676 static unsigned lowest = USRSTACK;
2677 unsigned current = (unsigned) &fp & -NBPG;
2678
2679 if (lowest > current)
2680 {
2681 unsigned len = lowest - current;
2682 mremap (current, &len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE);
2683 lowest = current;
2684 }
2685
2686 /* Clear instruction cache in case an old trampoline is in it. */
2687 asm ("pich");
2688 }
2689 #endif /* __convex__ */
2690
2691 #ifdef __sysV88__
2692
2693 /* Modified from the Convex code above. */
2694
2695 #include <sys/param.h>
2696 #include <errno.h>
2697 #include <sys/m88kbcs.h>
2698
2699 void
2700 __enable_execute_stack ()
2701 {
2702 int save_errno;
2703 static unsigned long lowest = USRSTACK;
2704 unsigned long current = (unsigned long) &save_errno & -NBPC;
2705
2706 /* Ignore errno being set. memctl sets errno to EINVAL whenever the
2707 address is seen as 'negative'. That is the case with the stack. */
2708
2709 save_errno=errno;
2710 if (lowest > current)
2711 {
2712 unsigned len=lowest-current;
2713 memctl(current,len,MCT_TEXT);
2714 lowest = current;
2715 }
2716 else
2717 memctl(current,NBPC,MCT_TEXT);
2718 errno=save_errno;
2719 }
2720
2721 #endif /* __sysV88__ */
2722
2723 #ifdef __sysV68__
2724
2725 #include <sys/signal.h>
2726 #include <errno.h>
2727
2728 /* Motorola forgot to put memctl.o in the libp version of libc881.a,
2729 so define it here, because we need it in __clear_insn_cache below */
2730 /* On older versions of this OS, no memctl or MCT_TEXT are defined;
2731 hence we enable this stuff only if MCT_TEXT is #define'd. */
2732
2733 #ifdef MCT_TEXT
2734 asm("\n\
2735 global memctl\n\
2736 memctl:\n\
2737 movq &75,%d0\n\
2738 trap &0\n\
2739 bcc.b noerror\n\
2740 jmp cerror%\n\
2741 noerror:\n\
2742 movq &0,%d0\n\
2743 rts");
2744 #endif
2745
2746 /* Clear instruction cache so we can call trampolines on stack.
2747 This is called from FINALIZE_TRAMPOLINE in mot3300.h. */
2748
2749 void
2750 __clear_insn_cache ()
2751 {
2752 #ifdef MCT_TEXT
2753 int save_errno;
2754
2755 /* Preserve errno, because users would be surprised to have
2756 errno changing without explicitly calling any system-call. */
2757 save_errno = errno;
2758
2759 /* Keep it simple: memctl (MCT_TEXT) always fully clears the insn cache.
2760 No need to use an address derived from _start or %sp, as 0 works also. */
2761 memctl(0, 4096, MCT_TEXT);
2762 errno = save_errno;
2763 #endif
2764 }
2765
2766 #endif /* __sysV68__ */
2767
2768 #ifdef __pyr__
2769
2770 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
2771 #include <stdio.h>
2772 #include <sys/mman.h>
2773 #include <sys/types.h>
2774 #include <sys/param.h>
2775 #include <sys/vmmac.h>
2776
2777 /* Modified from the Convex code above.
2778 mremap promises to clear the i-cache. */
2779
2780 void
2781 __enable_execute_stack ()
2782 {
2783 int fp;
2784 if (mprotect (((unsigned int)&fp/PAGSIZ)*PAGSIZ, PAGSIZ,
2785 PROT_READ|PROT_WRITE|PROT_EXEC))
2786 {
2787 perror ("mprotect in __enable_execute_stack");
2788 fflush (stderr);
2789 abort ();
2790 }
2791 }
2792 #endif /* __pyr__ */
2793
2794 #if defined (sony_news) && defined (SYSTYPE_BSD)
2795
2796 #include <stdio.h>
2797 #include <sys/types.h>
2798 #include <sys/param.h>
2799 #include <syscall.h>
2800 #include <machine/sysnews.h>
2801
2802 /* cacheflush function for NEWS-OS 4.2.
2803 This function is called from the trampoline initialization code
2804 defined in config/mips/mips.h. */
2805
2806 void
2807 cacheflush (char *beg, int size, int flag)
2808 {
2809 if (syscall (SYS_sysnews, NEWS_CACHEFLUSH, beg, size, FLUSH_BCACHE))
2810 {
2811 perror ("cache_flush");
2812 fflush (stderr);
2813 abort ();
2814 }
2815 }
2816
2817 #endif /* sony_news */
2818 #endif /* L_trampoline */
2819 \f
2820 #ifndef __CYGWIN__
2821 #ifdef L__main
2822
2823 #include "gbl-ctors.h"
2824 /* Some systems use __main in a way incompatible with its use in gcc; in these
2825 cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
2826 give the same symbol without quotes for an alternative entry point. You
2827 must define both, or neither. */
2828 #ifndef NAME__MAIN
2829 #define NAME__MAIN "__main"
2830 #define SYMBOL__MAIN __main
2831 #endif
2832
2833 #ifdef INIT_SECTION_ASM_OP
2834 #undef HAS_INIT_SECTION
2835 #define HAS_INIT_SECTION
2836 #endif
2837
2838 #if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
2839 /* Run all the global destructors on exit from the program. */
2840
2841 void
2842 __do_global_dtors ()
2843 {
2844 #ifdef DO_GLOBAL_DTORS_BODY
2845 DO_GLOBAL_DTORS_BODY;
2846 #else
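/* The first word of __DTOR_LIST__ holds an entry count (or -1) rather
   than a function pointer; skip it and call each function in turn until
   the terminating null entry.  See gbl-ctors.h for the list layout.  */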
2847 static func_ptr *p = __DTOR_LIST__ + 1;
2848 while (*p)
2849 {
2850 p++;
2851 (*(p-1)) ();
2852 }
2853 #endif
2854 }
2855 #endif
2856
2857 #ifndef HAS_INIT_SECTION
2858 /* Run all the global constructors on entry to the program. */
2859
2860 #ifndef ON_EXIT
2861 #define ON_EXIT(a, b)
2862 #else
2863 /* Make sure the exit routine is pulled in to define the globals as
2864 bss symbols, just in case the linker does not automatically pull
2865 bss definitions from the library. */
2866
2867 extern int _exit_dummy_decl;
2868 int *_exit_dummy_ref = &_exit_dummy_decl;
2869 #endif /* ON_EXIT */
2870
2871 void
2872 __do_global_ctors ()
2873 {
2874 DO_GLOBAL_CTORS_BODY;
2875 ON_EXIT (__do_global_dtors, 0);
2876 }
2877 #endif /* no HAS_INIT_SECTION */
2878
2879 #if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
2880 /* Subroutine called automatically by `main'.
2881 Compiling a global function named `main'
2882 produces an automatic call to this function at the beginning.
2883
2884 For many systems, this routine calls __do_global_ctors.
2885 For systems which support a .init section we use the .init section
2886 to run __do_global_ctors, so we need not do anything here. */
2887
2888 void
2889 SYMBOL__MAIN ()
2890 {
2891 /* Support recursive calls to `main': run initializers just once. */
2892 static int initialized;
2893 if (! initialized)
2894 {
2895 initialized = 1;
2896 __do_global_ctors ();
2897 }
2898 }
2899 #endif /* no HAS_INIT_SECTION or INVOKE__main */
2900
2901 #endif /* L__main */
2902 #endif /* __CYGWIN__ */
2903 \f
2904 #ifdef L_ctors
2905
2906 #include "gbl-ctors.h"
2907
2908 /* Provide default definitions for the lists of constructors and
2909 destructors, so that we don't get linker errors. These symbols are
2910 intentionally bss symbols, so that gld and/or collect will provide
2911 the right values. */
2912
2913 /* We declare the lists here with two elements each,
2914 so that they are valid empty lists if no other definition is loaded.
2915
2916 If we are using the old "set" extensions to have the gnu linker
2917 collect ctors and dtors, then __CTOR_LIST__ and __DTOR_LIST__
2918 must be in the bss/common section.
2919
2920 Long term no port should use those extensions. But many still do. */
2921 #if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
2922 #if defined (ASM_OUTPUT_CONSTRUCTOR) || defined (USE_COLLECT2)
2923 func_ptr __CTOR_LIST__[2] = {0, 0};
2924 func_ptr __DTOR_LIST__[2] = {0, 0};
2925 #else
2926 func_ptr __CTOR_LIST__[2];
2927 func_ptr __DTOR_LIST__[2];
2928 #endif
2929 #endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
2930 #endif /* L_ctors */
2931 \f
2932 #ifdef L_exit
2933
2934 #include "gbl-ctors.h"
2935
2936 #ifdef NEED_ATEXIT
2937 # ifdef ON_EXIT
2938 # undef ON_EXIT
2939 # endif
2940 int _exit_dummy_decl = 0; /* prevent compiler & linker warnings */
2941 #endif
2942
2943 #ifndef ON_EXIT
2944
2945 #ifdef NEED_ATEXIT
2946 # include <errno.h>
2947
2948 static func_ptr *atexit_chain = 0;
2949 static long atexit_chain_length = 0;
2950 static volatile long last_atexit_chain_slot = -1;
2951
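/* Register FUNC to be called at exit.  The chain grows in blocks of 32
   entries; exit () below runs the registered functions in reverse order
   of registration.  Returns 0 on success, or -1 with errno set to
   ENOMEM if the chain cannot be grown.  */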
2952 int atexit (func_ptr func)
2953 {
2954 if (++last_atexit_chain_slot == atexit_chain_length)
2955 {
2956 atexit_chain_length += 32;
2957 if (atexit_chain)
2958 atexit_chain = (func_ptr *) realloc (atexit_chain, atexit_chain_length
2959 * sizeof (func_ptr));
2960 else
2961 atexit_chain = (func_ptr *) malloc (atexit_chain_length
2962 * sizeof (func_ptr));
2963 if (! atexit_chain)
2964 {
2965 atexit_chain_length = 0;
2966 last_atexit_chain_slot = -1;
2967 errno = ENOMEM;
2968 return (-1);
2969 }
2970 }
2971 atexit_chain[last_atexit_chain_slot] = func;
2972 return (0);
2973 }
2974 #endif /* NEED_ATEXIT */
2975
2976 /* If we have no known way of registering our own __do_global_dtors
2977 routine so that it will be invoked at program exit time, then we
2978 have to define our own exit routine which will get this to happen. */
2979
2980 extern void __do_global_dtors ();
2981 extern void __bb_exit_func ();
2982 extern void _cleanup ();
2983 extern void _exit () __attribute__ ((noreturn));
2984
2985 void
2986 exit (int status)
2987 {
2988 #if !defined (INIT_SECTION_ASM_OP) || !defined (OBJECT_FORMAT_ELF)
2989 #ifdef NEED_ATEXIT
2990 if (atexit_chain)
2991 {
2992 for ( ; last_atexit_chain_slot-- >= 0; )
2993 {
2994 (*atexit_chain[last_atexit_chain_slot + 1]) ();
2995 atexit_chain[last_atexit_chain_slot + 1] = 0;
2996 }
2997 free (atexit_chain);
2998 atexit_chain = 0;
2999 }
3000 #else /* No NEED_ATEXIT */
3001 __do_global_dtors ();
3002 #endif /* No NEED_ATEXIT */
3003 #endif /* !defined (INIT_SECTION_ASM_OP) || !defined (OBJECT_FORMAT_ELF) */
3004 /* In gbl-ctors.h, ON_EXIT is defined if HAVE_ATEXIT is defined. In
3005 __bb_init_func and __bb_init_prg, __bb_exit_func is registered with
3006 ON_EXIT if ON_EXIT is defined. Thus we must not call __bb_exit_func here
3007 if HAVE_ATEXIT is defined. */
3008 #ifndef HAVE_ATEXIT
3009 #ifndef inhibit_libc
3010 __bb_exit_func ();
3011 #endif
3012 #endif /* !HAVE_ATEXIT */
3013 #ifdef EXIT_BODY
3014 EXIT_BODY;
3015 #else
3016 _cleanup ();
3017 #endif
3018 _exit (status);
3019 }
3020
3021 #else /* ON_EXIT defined */
3022 int _exit_dummy_decl = 0; /* prevent compiler & linker warnings */
3023
3024 # ifndef HAVE_ATEXIT
3025 /* Provide a fake for atexit() using ON_EXIT. */
3026 int atexit (func_ptr func)
3027 {
3028 return ON_EXIT (func, NULL);
3029 }
3030 # endif /* HAVE_ATEXIT */
3031 #endif /* ON_EXIT defined */
3032
3033 #endif /* L_exit */
3034 \f
3035 #ifdef L_eh
3036
3037 #include "gthr.h"
3038
3039 /* Shared exception handling support routines. */
3040
3041 extern void __default_terminate (void) __attribute__ ((__noreturn__));
3042
3043 void
3044 __default_terminate ()
3045 {
3046 abort ();
3047 }
3048
3049 void (*__terminate_func)() = __default_terminate;
3050
3051 void
3052 __terminate ()
3053 {
3054 (*__terminate_func)();
3055 }
3056
3057 void *
3058 __throw_type_match (void *catch_type, void *throw_type, void *obj)
3059 {
3060 #if 0
3061 printf ("__throw_type_match (): catch_type = %s, throw_type = %s\n",
3062 catch_type, throw_type);
3063 #endif
3064 if (strcmp ((const char *)catch_type, (const char *)throw_type) == 0)
3065 return obj;
3066 return 0;
3067 }
3068
3069 void
3070 __empty ()
3071 {
3072 }
3073 \f
3074
3075 /* Include definitions of EH context and table layout */
3076
3077 #include "eh-common.h"
3078 #ifndef inhibit_libc
3079 #include <stdio.h>
3080 #endif
3081
3082 /* Allocate and return a new EH context structure. */
3083
3084 extern void __throw ();
3085
3086 static void *
3087 new_eh_context ()
3088 {
3089 struct eh_full_context {
3090 struct eh_context c;
3091 void *top_elt[2];
3092 } *ehfc = (struct eh_full_context *) malloc (sizeof *ehfc);
3093
3094 if (! ehfc)
3095 __terminate ();
3096
3097 memset (ehfc, 0, sizeof *ehfc);
3098
3099 ehfc->c.dynamic_handler_chain = (void **) ehfc->top_elt;
3100
3101 /* This should optimize out entirely. This should always be true,
3102 but just in case it ever isn't, don't allow bogus code to be
3103 generated. */
3104
3105 if ((void*)(&ehfc->c) != (void*)ehfc)
3106 __terminate ();
3107
3108 return &ehfc->c;
3109 }
3110
3111 #if __GTHREADS
3112 static __gthread_key_t eh_context_key;
3113
3114 /* Destructor for struct eh_context. */
3115 static void
3116 eh_context_free (void *ptr)
3117 {
3118 __gthread_key_dtor (eh_context_key, ptr);
3119 if (ptr)
3120 free (ptr);
3121 }
3122 #endif
3123
3124 /* Pointer to function to return EH context. */
3125
3126 static struct eh_context *eh_context_initialize ();
3127 static struct eh_context *eh_context_static ();
3128 #if __GTHREADS
3129 static struct eh_context *eh_context_specific ();
3130 #endif
3131
3132 static struct eh_context *(*get_eh_context) () = &eh_context_initialize;
3133
3134 /* Routine to get EH context.
3135 This one will simply call the function pointer. */
3136
3137 void *
3138 __get_eh_context ()
3139 {
3140 return (void *) (*get_eh_context) ();
3141 }
3142
3143 /* Get and set the language specific info pointer. */
3144
3145 void **
3146 __get_eh_info ()
3147 {
3148 struct eh_context *eh = (*get_eh_context) ();
3149 return &eh->info;
3150 }
3151 \f
3152 #if __GTHREADS
3153 static void
3154 eh_threads_initialize ()
3155 {
3156 /* Try to create the key. If it fails, revert to static method,
3157 otherwise start using thread specific EH contexts. */
3158 if (__gthread_key_create (&eh_context_key, &eh_context_free) == 0)
3159 get_eh_context = &eh_context_specific;
3160 else
3161 get_eh_context = &eh_context_static;
3162 }
3163 #endif /* __GTHREADS */
3164
3165 /* Initialize EH context.
3166 This will be called only once, since we change GET_EH_CONTEXT
3167 pointer to another routine. */
3168
3169 static struct eh_context *
3170 eh_context_initialize ()
3171 {
3172 #if __GTHREADS
3173
3174 static __gthread_once_t once = __GTHREAD_ONCE_INIT;
3175 /* Make sure that get_eh_context does not point to us anymore.
3176 Some systems have dummy thread routines in their libc that
3177 return a success (Solaris 2.6 for example). */
3178 if (__gthread_once (&once, eh_threads_initialize) != 0
3179 || get_eh_context == &eh_context_initialize)
3180 {
3181 /* Use static version of EH context. */
3182 get_eh_context = &eh_context_static;
3183 }
3184
3185 #else /* no __GTHREADS */
3186
3187 /* Use static version of EH context. */
3188 get_eh_context = &eh_context_static;
3189
3190 #endif /* no __GTHREADS */
3191
3192 return (*get_eh_context) ();
3193 }
3194
3195 /* Return a static EH context. */
3196
3197 static struct eh_context *
3198 eh_context_static ()
3199 {
3200 static struct eh_context eh;
3201 static int initialized;
3202 static void *top_elt[2];
3203
3204 if (! initialized)
3205 {
3206 initialized = 1;
3207 memset (&eh, 0, sizeof eh);
3208 eh.dynamic_handler_chain = top_elt;
3209 }
3210 return &eh;
3211 }
3212
3213 #if __GTHREADS
3214 /* Return a thread specific EH context. */
3215
3216 static struct eh_context *
3217 eh_context_specific ()
3218 {
3219 struct eh_context *eh;
3220 eh = (struct eh_context *) __gthread_getspecific (eh_context_key);
3221 if (! eh)
3222 {
3223 eh = new_eh_context ();
3224 if (__gthread_setspecific (eh_context_key, (void *) eh) != 0)
3225 __terminate ();
3226 }
3227
3228 return eh;
3229 }
3230 #endif /* __GTHREADS */
3231 \f
3232 /* Support routines for setjmp/longjmp exception handling. */
3233
3234 /* Calls to __sjthrow are generated by the compiler when an exception
3235 is raised when using the setjmp/longjmp exception handling codegen
3236 method. */
3237
3238 #ifdef DONT_USE_BUILTIN_SETJMP
3239 extern void longjmp (void *, int);
3240 #endif
3241
3242 /* Routine to get the head of the current thread's dynamic handler chain
3243 used for exception handling. */
3244
3245 void ***
3246 __get_dynamic_handler_chain ()
3247 {
3248 struct eh_context *eh = (*get_eh_context) ();
3249 return &eh->dynamic_handler_chain;
3250 }
3251
3252 /* This is used to throw an exception when the setjmp/longjmp codegen
3253 method is used for exception handling.
3254
3255 We call __terminate if there are no handlers left. Otherwise we run the
3256 cleanup actions off the dynamic cleanup stack, and pop the top of the
3257 dynamic handler chain, and use longjmp to transfer back to the associated
3258 handler. */
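/* Each element of the dynamic handler chain is a buffer laid out as:
   word 0 - link to the previous (outer) handler buffer,
   word 1 - head of that handler's dynamic cleanup chain,
   word 2 and up - the setjmp/longjmp jump buffer itself.  */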
3259
3260 extern void __sjthrow (void) __attribute__ ((__noreturn__));
3261
3262 void
3263 __sjthrow ()
3264 {
3265 struct eh_context *eh = (*get_eh_context) ();
3266 void ***dhc = &eh->dynamic_handler_chain;
3267 void *jmpbuf;
3268 void (*func)(void *, int);
3269 void *arg;
3270 void ***cleanup;
3271
3272 /* The cleanup chain is one word into the buffer. Get the cleanup
3273 chain. */
3274 cleanup = (void***)&(*dhc)[1];
3275
3276 /* If there are any cleanups in the chain, run them now. */
3277 if (cleanup[0])
3278 {
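/* Run the cleanups under a temporary handler buffer built on the
   stack: buf[0] links back to the outer chain, buf[1] starts an empty
   cleanup chain, and buf[2] onward holds the jump buffer.  If a cleanup
   itself throws, __sjthrow longjmps back here and lands in the else
   branch below, which calls __terminate.  The double array simply
   provides amply sized, aligned storage.  */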
3279 double store[200];
3280 void **buf = (void**)store;
3281 buf[1] = 0;
3282 buf[0] = (*dhc);
3283
3284 /* try { */
3285 #ifdef DONT_USE_BUILTIN_SETJMP
3286 if (! setjmp (&buf[2]))
3287 #else
3288 if (! __builtin_setjmp (&buf[2]))
3289 #endif
3290 {
3291 *dhc = buf;
3292 while (cleanup[0])
3293 {
3294 func = (void(*)(void*, int))cleanup[0][1];
3295 arg = (void*)cleanup[0][2];
3296
3297 /* Update this before running the cleanup. */
3298 cleanup[0] = (void **)cleanup[0][0];
3299
3300 (*func)(arg, 2);
3301 }
3302 *dhc = buf[0];
3303 }
3304 /* catch (...) */
3305 else
3306 {
3307 __terminate ();
3308 }
3309 }
3310
3311 /* We must call terminate if we try to rethrow an exception when
3312 there is no exception currently active, or when there are no
3313 handlers left. */
3314 if (! eh->info || (*dhc)[0] == 0)
3315 __terminate ();
3316
3317 /* Find the jmpbuf associated with the top element of the dynamic
3318 handler chain. The jumpbuf starts two words into the buffer. */
3319 jmpbuf = &(*dhc)[2];
3320
3321 /* Then we pop the top element off the dynamic handler chain. */
3322 *dhc = (void**)(*dhc)[0];
3323
3324 /* And then we jump to the handler. */
3325
3326 #ifdef DONT_USE_BUILTIN_SETJMP
3327 longjmp (jmpbuf, 1);
3328 #else
3329 __builtin_longjmp (jmpbuf, 1);
3330 #endif
3331 }
3332
3333 /* Run cleanups on the dynamic cleanup stack for the current dynamic
3334 handler, then pop the handler off the dynamic handler stack, and
3335 then throw. This is used to skip the first handler, and transfer
3336 control to the next handler in the dynamic handler stack. */
3337
3338 extern void __sjpopnthrow (void) __attribute__ ((__noreturn__));
3339
3340 void
3341 __sjpopnthrow ()
3342 {
3343 struct eh_context *eh = (*get_eh_context) ();
3344 void ***dhc = &eh->dynamic_handler_chain;
3345 void (*func)(void *, int);
3346 void *arg;
3347 void ***cleanup;
3348
3349 /* The cleanup chain is one word into the buffer. Get the cleanup
3350 chain. */
3351 cleanup = (void***)&(*dhc)[1];
3352
3353 /* If there are any cleanups in the chain, run them now. */
3354 if (cleanup[0])
3355 {
3356 double store[200];
3357 void **buf = (void**)store;
3358 buf[1] = 0;
3359 buf[0] = (*dhc);
3360
3361 /* try { */
3362 #ifdef DONT_USE_BUILTIN_SETJMP
3363 if (! setjmp (&buf[2]))
3364 #else
3365 if (! __builtin_setjmp (&buf[2]))
3366 #endif
3367 {
3368 *dhc = buf;
3369 while (cleanup[0])
3370 {
3371 func = (void(*)(void*, int))cleanup[0][1];
3372 arg = (void*)cleanup[0][2];
3373
3374 /* Update this before running the cleanup. */
3375 cleanup[0] = (void **)cleanup[0][0];
3376
3377 (*func)(arg, 2);
3378 }
3379 *dhc = buf[0];
3380 }
3381 /* catch (...) */
3382 else
3383 {
3384 __terminate ();
3385 }
3386 }
3387
3388 /* Then we pop the top element off the dynamic handler chain. */
3389 *dhc = (void**)(*dhc)[0];
3390
3391 __sjthrow ();
3392 }
3393 \f
3394 /* Support code for all exception region-based exception handling. */
3395
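/* Test whether the currently active exception matches the handler's
   runtime type information RTIME, using the language-specific matcher
   recorded in the exception info.  Returns nonzero on a match.  */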
3396 int
3397 __eh_rtime_match (void *rtime)
3398 {
3399 void *info;
3400 __eh_matcher matcher;
3401 void *ret;
3402
3403 info = *(__get_eh_info ());
3404 matcher = ((__eh_info *)info)->match_function;
3405 if (! matcher)
3406 {
3407 #ifndef inhibit_libc
3408 fprintf (stderr, "Internal Compiler Bug: No runtime type matcher.");
3409 #endif
3410 return 0;
3411 }
3412 ret = (*matcher) (info, rtime, (void *)0);
3413 return (ret != NULL);
3414 }
3415
3416 /* This value identifies the place from which an exception is being
3417 thrown. */
3418
3419 #ifdef EH_TABLE_LOOKUP
3420
3421 EH_TABLE_LOOKUP
3422
3423 #else
3424
3425 #ifdef DWARF2_UNWIND_INFO
3426
3427
3428 /* Return the table version of an exception descriptor */
3429
3430 short
3431 __get_eh_table_version (exception_descriptor *table)
3432 {
3433 return table->lang.version;
3434 }
3435
3436 /* Return the originating table language of an exception descriptor */
3437
3438 short
3439 __get_eh_table_language (exception_descriptor *table)
3440 {
3441 return table->lang.language;
3442 }
3443
3444 /* This routine takes a PC and a pointer to the exception region TABLE for
3445 its translation unit, and returns the address of the exception handler
3446 for the closest exception table entry that contains that PC,
3447 or 0 if the PC does not fit in any table entry.
3448
3449 In the event of a tie, we have to give the last entry, as it represents
3450 an inner block. */
3451
3452 static void *
3453 old_find_exception_handler (void *pc, old_exception_table *table)
3454 {
3455 if (table)
3456 {
3457 int pos;
3458 int best = -1;
3459
3460 /* We can't do a binary search because the table isn't guaranteed
3461 to be sorted from function to function. */
3462 for (pos = 0; table[pos].start_region != (void *) -1; ++pos)
3463 {
3464 if (table[pos].start_region <= pc && table[pos].end_region > pc)
3465 {
3466 /* This can apply. Make sure it is at least as small as
3467 the previous best. */
3468 if (best == -1 || (table[pos].end_region <= table[best].end_region
3469 && table[pos].start_region >= table[best].start_region))
3470 best = pos;
3471 }
3472 /* But it is sorted by starting PC within a function. */
3473 else if (best >= 0 && table[pos].start_region > pc)
3474 break;
3475 }
3476 if (best != -1)
3477 return table[best].exception_handler;
3478 }
3479
3480 return (void *) 0;
3481 }
3482
3483 /* find_exception_handler finds the correct handler, if there is one, to
3484 handle an exception.
3485 It returns a pointer to the handler to which control should be
3486 transferred, or NULL if there is nothing left.
3487 Parameters:
3488 PC - pc where the exception originates. If this is a rethrow,
3489 then this starts out as a pointer to the exception table
3490 entry we wish to rethrow out of.
3491 TABLE - exception table for the current module.
3492 EH_INFO - eh info pointer for this exception.
3493 RETHROW - 1 if this is a rethrow. (see incoming value of PC).
3494 CLEANUP - returned flag indicating whether this is a cleanup handler.
3495 */
3496 static void *
3497 find_exception_handler (void *pc, exception_descriptor *table,
3498 __eh_info *eh_info, int rethrow, int *cleanup)
3499 {
3500
3501 void *retval = NULL;
3502 *cleanup = 1;
3503 if (table)
3504 {
3505 int pos = 0;
3506 /* The new model assumes the table is sorted innermost-out, so the
3507 first region we find which matches is the correct one. */
3508
3509 exception_table *tab = &(table->table[0]);
3510
3511 /* Subtract 1 from the PC to avoid hitting the next region */
3512 if (rethrow)
3513 {
3514 /* pc is actually the region table entry to rethrow out of */
3515 pos = ((exception_table *) pc) - tab;
3516 pc = ((exception_table *) pc)->end_region - 1;
3517
3518 /* The label is always on the LAST handler entry for a region,
3519 so we know the next entry is a different region, even if the
3520 addresses are the same. Make sure it's not the end of the table, though. */
3521 if (tab[pos].start_region != (void *) -1)
3522 pos++;
3523 }
3524 else
3525 pc--;
3526
3527 /* We can't do a binary search because the table is ordered from
3528 innermost to outermost address ranges within functions. */
3529 for ( ; tab[pos].start_region != (void *) -1; pos++)
3530 {
3531 if (tab[pos].start_region <= pc && tab[pos].end_region > pc)
3532 {
3533 if (tab[pos].match_info)
3534 {
3535 __eh_matcher matcher = eh_info->match_function;
3536 /* match info but no matcher is NOT a match */
3537 if (matcher)
3538 {
3539 void *ret = (*matcher)((void *) eh_info,
3540 tab[pos].match_info, table);
3541 if (ret)
3542 {
3543 if (retval == NULL)
3544 retval = tab[pos].exception_handler;
3545 *cleanup = 0;
3546 break;
3547 }
3548 }
3549 }
3550 else
3551 {
3552 if (retval == NULL)
3553 retval = tab[pos].exception_handler;
3554 }
3555 }
3556 }
3557 }
3558 return retval;
3559 }
3560 #endif /* DWARF2_UNWIND_INFO */
3561 #endif /* EH_TABLE_LOOKUP */
3562 \f
3563 #ifdef DWARF2_UNWIND_INFO
3564 /* Support code for exception handling using static unwind information. */
3565
3566 #include "frame.h"
3567
3568 /* This type is used in get_reg and put_reg to deal with ABIs where a void*
3569 is smaller than a word, such as the Irix 6 n32 ABI. We cast twice to
3570 avoid a warning about casting between int and pointer of different
3571 sizes. */
3572
3573 typedef int ptr_type __attribute__ ((mode (pointer)));
3574
3575 #ifdef INCOMING_REGNO
3576 /* Is the saved value for register REG in frame UDATA stored in a register
3577 window in the previous frame? */
3578
3579 /* ??? The Sparc INCOMING_REGNO references TARGET_FLAT. This allows us
3580 to use the macro here. One wonders, though, whether TARGET_FLAT
3581 compiled functions will work with the frame-unwind stuff here.
3582 Perhaps the entirety of in_reg_window should be conditional on having
3583 seen a DW_CFA_GNU_window_save? */
3584 #define target_flags 0
3585
3586 static int
3587 in_reg_window (int reg, frame_state *udata)
3588 {
3589 if (udata->saved[reg] == REG_SAVED_REG)
3590 return INCOMING_REGNO (reg) == reg;
3591 if (udata->saved[reg] != REG_SAVED_OFFSET)
3592 return 0;
3593
3594 #ifdef STACK_GROWS_DOWNWARD
3595 return udata->reg_or_offset[reg] > 0;
3596 #else
3597 return udata->reg_or_offset[reg] < 0;
3598 #endif
3599 }
3600 #else
3601 static inline int in_reg_window (int reg, frame_state *udata) { return 0; }
3602 #endif /* INCOMING_REGNO */
3603
3604 /* Get the address of register REG as saved in UDATA, where SUB_UDATA is a
3605 frame called by UDATA or 0. */
3606
3607 static word_type *
3608 get_reg_addr (unsigned reg, frame_state *udata, frame_state *sub_udata)
3609 {
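/* Follow the chain of registers saved in other registers.  If a save
   lives in a register window, the value is actually found via the
   frame called by UDATA (SUB_UDATA), so switch to that frame state;
   SUB_UDATA is then cleared so we do not switch twice.  */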
3610 while (udata->saved[reg] == REG_SAVED_REG)
3611 {
3612 reg = udata->reg_or_offset[reg];
3613 if (in_reg_window (reg, udata))
3614 {
3615 udata = sub_udata;
3616 sub_udata = NULL;
3617 }
3618 }
3619 if (udata->saved[reg] == REG_SAVED_OFFSET)
3620 return (word_type *)(udata->cfa + udata->reg_or_offset[reg]);
3621 else
3622 abort ();
3623 }
3624
3625 /* Get the value of register REG as saved in UDATA, where SUB_UDATA is a
3626 frame called by UDATA or 0. */
3627
3628 static inline void *
3629 get_reg (unsigned reg, frame_state *udata, frame_state *sub_udata)
3630 {
3631 return (void *)(ptr_type) *get_reg_addr (reg, udata, sub_udata);
3632 }
3633
3634 /* Overwrite the saved value for register REG in frame UDATA with VAL. */
3635
3636 static inline void
3637 put_reg (unsigned reg, void *val, frame_state *udata)
3638 {
3639 *get_reg_addr (reg, udata, NULL) = (word_type)(ptr_type) val;
3640 }
3641
3642 /* Copy the saved value for register REG from frame UDATA to frame
3643 TARGET_UDATA. Unlike the previous two functions, this can handle
3644 registers that are not one word large. */
3645
3646 static void
3647 copy_reg (unsigned reg, frame_state *udata, frame_state *target_udata)
3648 {
3649 word_type *preg = get_reg_addr (reg, udata, NULL);
3650 word_type *ptreg = get_reg_addr (reg, target_udata, NULL);
3651
3652 memcpy (ptreg, preg, __builtin_dwarf_reg_size (reg));
3653 }
3654
3655 /* Retrieve the return address for frame UDATA. */
3656
3657 static inline void *
3658 get_return_addr (frame_state *udata, frame_state *sub_udata)
3659 {
3660 return __builtin_extract_return_addr
3661 (get_reg (udata->retaddr_column, udata, sub_udata));
3662 }
3663
3664 /* Overwrite the return address for frame UDATA with VAL. */
3665
3666 static inline void
3667 put_return_addr (void *val, frame_state *udata)
3668 {
3669 val = __builtin_frob_return_addr (val);
3670 put_reg (udata->retaddr_column, val, udata);
3671 }
3672
3673 /* Given the current frame UDATA and its return address PC, return the
3674 information about the calling frame in CALLER_UDATA. */
3675
3676 static void *
3677 next_stack_level (void *pc, frame_state *udata, frame_state *caller_udata)
3678 {
3679 caller_udata = __frame_state_for (pc, caller_udata);
3680 if (! caller_udata)
3681 return 0;
3682
3683 /* Now go back to our caller's stack frame. If our caller's CFA register
3684 was saved in our stack frame, restore it; otherwise, assume the CFA
3685 register is SP and restore it to our CFA value. */
3686 if (udata->saved[caller_udata->cfa_reg])
3687 caller_udata->cfa = get_reg (caller_udata->cfa_reg, udata, 0);
3688 else
3689 caller_udata->cfa = udata->cfa;
3690 caller_udata->cfa += caller_udata->cfa_offset;
3691
3692 return caller_udata;
3693 }
3694
3695 /* Hook to call before __terminate if only cleanup handlers remain. */
3696 void
3697 __unwinding_cleanup ()
3698 {
3699 }
3700
3701 /* throw_helper performs some of the common grunt work for a throw. This
3702 routine is called by __throw and __rethrow. It is pretty much split
3703 out from the old __throw routine. An addition has been made which allows
3704 for a dummy call to the routine __unwinding_cleanup () when there is nothing
3705 but cleanups remaining. This allows a debugger to examine the state
3706 at which the throw was executed, before any cleanups run, rather than
3707 at the terminate point after the stack has been unwound.
3708
3709 EH is the current eh_context structure.
3710 PC is the address of the call to __throw.
3711 MY_UDATA is the unwind information for __throw.
3712 OFFSET_P is where we return the SP adjustment offset. */
3713
3714 static void *
3715 throw_helper (eh, pc, my_udata, offset_p)
3716 struct eh_context *eh;
3717 void *pc;
3718 frame_state *my_udata;
3719 long *offset_p;
3720 {
3721 frame_state ustruct2, *udata = &ustruct2;
3722 frame_state ustruct;
3723 frame_state *sub_udata = &ustruct;
3724 void *saved_pc = pc;
3725 void *handler;
3726 void *handler_p;
3727 void *pc_p;
3728 frame_state saved_ustruct;
3729 int new_eh_model;
3730 int cleanup = 0;
3731 int only_cleanup = 0;
3732 int rethrow = 0;
3733 int saved_state = 0;
3734 long args_size;
3735 __eh_info *eh_info = (__eh_info *)eh->info;
3736
3737 /* Do we find a handler based on a re-throw PC? */
3738 if (eh->table_index != (void *) 0)
3739 rethrow = 1;
3740
3741 memcpy (udata, my_udata, sizeof (*udata));
3742
3743 handler = (void *) 0;
3744 for (;;)
3745 {
3746 frame_state *p = udata;
3747 udata = next_stack_level (pc, udata, sub_udata);
3748 sub_udata = p;
3749
3750 /* If we couldn't find the next frame, we lose. */
3751 if (! udata)
3752 break;
3753
3754 if (udata->eh_ptr == NULL)
3755 new_eh_model = 0;
3756 else
3757 new_eh_model = (((exception_descriptor *)(udata->eh_ptr))->
3758 runtime_id_field == NEW_EH_RUNTIME);
3759
3760 if (rethrow)
3761 {
3762 rethrow = 0;
3763 handler = find_exception_handler (eh->table_index, udata->eh_ptr,
3764 eh_info, 1, &cleanup);
3765 eh->table_index = (void *)0;
3766 }
3767 else
3768 if (new_eh_model)
3769 handler = find_exception_handler (pc, udata->eh_ptr, eh_info,
3770 0, &cleanup);
3771 else
3772 handler = old_find_exception_handler (pc, udata->eh_ptr);
3773
3774 /* If we found one, we can stop searching if it's not a cleanup.
3775 For cleanups, we save the state and keep looking. This allows
3776 us to call a debug hook if there is nothing but cleanups left. */
3777 if (handler)
3778 {
3779 if (cleanup)
3780 {
3781 if (!saved_state)
3782 {
3783 saved_ustruct = *udata;
3784 handler_p = handler;
3785 pc_p = pc;
3786 saved_state = 1;
3787 only_cleanup = 1;
3788 }
3789 }
3790 else
3791 {
3792 only_cleanup = 0;
3793 break;
3794 }
3795 }
3796
3797 /* Otherwise, we continue searching. We subtract 1 from PC to avoid
3798 hitting the beginning of the next region. */
3799 pc = get_return_addr (udata, sub_udata) - 1;
3800 }
3801
3802 if (saved_state)
3803 {
3804 udata = &saved_ustruct;
3805 handler = handler_p;
3806 pc = pc_p;
3807 if (only_cleanup)
3808 __unwinding_cleanup ();
3809 }
3810
3811 /* If we haven't found a handler by now, this is an unhandled
3812 exception. */
3813 if (! handler)
3814 __terminate();
3815
3816 eh->handler_label = handler;
3817
3818 args_size = udata->args_size;
3819
3820 if (pc == saved_pc)
3821 /* We found a handler in the throw context, no need to unwind. */
3822 udata = my_udata;
3823 else
3824 {
3825 int i;
3826
3827 /* Unwind all the frames between this one and the handler by copying
3828 their saved register values into our register save slots. */
3829
3830 /* Remember the PC where we found the handler. */
3831 void *handler_pc = pc;
3832
3833 /* Start from the throw context again. */
3834 pc = saved_pc;
3835 memcpy (udata, my_udata, sizeof (*udata));
3836
3837 while (pc != handler_pc)
3838 {
3839 frame_state *p = udata;
3840 udata = next_stack_level (pc, udata, sub_udata);
3841 sub_udata = p;
3842
3843 for (i = 0; i < FIRST_PSEUDO_REGISTER; ++i)
3844 if (i != udata->retaddr_column && udata->saved[i])
3845 {
3846 /* If you modify the saved value of the return address
3847 register on the SPARC, you modify the return address for
3848 your caller's frame. Don't do that here, as it will
3849 confuse get_return_addr. */
3850 if (in_reg_window (i, udata)
3851 && udata->saved[udata->retaddr_column] == REG_SAVED_REG
3852 && udata->reg_or_offset[udata->retaddr_column] == i)
3853 continue;
3854 copy_reg (i, udata, my_udata);
3855 }
3856
3857 pc = get_return_addr (udata, sub_udata) - 1;
3858 }
3859
3860 /* But we do need to update the saved return address register from
3861 the last frame we unwind, or the handler frame will have the wrong
3862 return address. */
3863 if (udata->saved[udata->retaddr_column] == REG_SAVED_REG)
3864 {
3865 i = udata->reg_or_offset[udata->retaddr_column];
3866 if (in_reg_window (i, udata))
3867 copy_reg (i, udata, my_udata);
3868 }
3869 }
3870 /* udata now refers to the frame called by the handler frame. */
3871
3872 /* We adjust SP by the difference between __throw's CFA and the CFA for
3873 the frame called by the handler frame, because those CFAs correspond
3874 to the SP values at the two call sites. We need to further adjust by
3875 the args_size of the handler frame itself to get the handler frame's
3876 SP from before the args were pushed for that call. */
3877 #ifdef STACK_GROWS_DOWNWARD
3878 *offset_p = udata->cfa - my_udata->cfa + args_size;
3879 #else
3880 *offset_p = my_udata->cfa - udata->cfa - args_size;
3881 #endif
3882
3883 return handler;
3884 }
3885
3886
3887 /* We first search for an exception handler, and if we don't find
3888 it, we call __terminate on the current stack frame so that we may
3889 use the debugger to walk the stack and understand why no handler
3890 was found.
3891
3892 If we find one, then we unwind the frames down to the one that
3893 has the handler and transfer control into the handler. */
3894
3895 /*extern void __throw(void) __attribute__ ((__noreturn__));*/
3896
3897 void
3898 __throw ()
3899 {
3900 struct eh_context *eh = (*get_eh_context) ();
3901 void *pc, *handler;
3902 long offset;
3903
3904 /* XXX maybe make my_ustruct static so we don't have to look it up for
3905 each throw. */
3906 frame_state my_ustruct, *my_udata = &my_ustruct;
3907
3908 /* This is required for C++ semantics. We must call terminate if we
3909 try to rethrow an exception when there is no exception currently
3910 active. */
3911 if (! eh->info)
3912 __terminate ();
3913
3914 /* Start at our stack frame. */
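/* The GNU C address-of-label value &&label is an address inside this
   function; __frame_state_for uses it to look up the unwind
   information for __throw's own frame.  */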
3915 label:
3916 my_udata = __frame_state_for (&&label, my_udata);
3917 if (! my_udata)
3918 __terminate ();
3919
3920 /* We need to get the value from the CFA register. */
3921 my_udata->cfa = __builtin_dwarf_cfa ();
3922
3923 /* Do any necessary initialization to access arbitrary stack frames.
3924 On the SPARC, this means flushing the register windows. */
3925 __builtin_unwind_init ();
3926
3927 /* Now reset pc to the right throw point. */
3928 pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
3929
3930 handler = throw_helper (eh, pc, my_udata, &offset);
3931
3932 /* Now go! */
3933
3934 __builtin_eh_return ((void *)eh, offset, handler);
3935
3936 /* Epilogue: restore the handler frame's register values and return
3937 to the stub. */
3938 }
3939
3940 /*extern void __rethrow(void *) __attribute__ ((__noreturn__));*/
3941
3942 void
3943 __rethrow (index)
3944 void *index;
3945 {
3946 struct eh_context *eh = (*get_eh_context) ();
3947 void *pc, *handler;
3948 long offset;
3949
3950 /* XXX maybe make my_ustruct static so we don't have to look it up for
3951 each throw. */
3952 frame_state my_ustruct, *my_udata = &my_ustruct;
3953
3954 /* This is required for C++ semantics. We must call terminate if we
3955 try to rethrow an exception when there is no exception currently
3956 active. */
3957 if (! eh->info)
3958 __terminate ();
3959
3960 /* This is the table index we want to rethrow from. The value of
3961 the END_REGION label is used for the PC of the throw, and the
3962 search begins with the next table entry. */
3963 eh->table_index = index;
3964
3965 /* Start at our stack frame. */
3966 label:
3967 my_udata = __frame_state_for (&&label, my_udata);
3968 if (! my_udata)
3969 __terminate ();
3970
3971 /* We need to get the value from the CFA register. */
3972 my_udata->cfa = __builtin_dwarf_cfa ();
3973
3974 /* Do any necessary initialization to access arbitrary stack frames.
3975 On the SPARC, this means flushing the register windows. */
3976 __builtin_unwind_init ();
3977
3978 /* Now reset pc to the right throw point. */
3979 pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
3980
3981 handler = throw_helper (eh, pc, my_udata, &offset);
3982
3983 /* Now go! */
3984
3985 __builtin_eh_return ((void *)eh, offset, handler);
3986
3987 /* Epilogue: restore the handler frame's register values and return
3988 to the stub. */
3989 }
3990 #endif /* DWARF2_UNWIND_INFO */
3991
3992 #endif /* L_eh */
3993 \f
3994 #ifdef L_pure
3995 #ifndef inhibit_libc
3996 /* This gets us __GNU_LIBRARY__. */
3997 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
3998 #include <stdio.h>
3999
4000 #ifdef __GNU_LIBRARY__
4001 /* Avoid forcing the library's meaning of `write' on the user program
4002 by using the "internal" name (for use within the library) */
4003 #define write(fd, buf, n) __write((fd), (buf), (n))
4004 #endif
4005 #endif /* inhibit_libc */
4006
4007 #define MESSAGE "pure virtual method called\n"
4008
4009 void
4010 __pure_virtual ()
4011 {
4012 #ifndef inhibit_libc
4013 write (2, MESSAGE, sizeof (MESSAGE) - 1);
4014 #endif
4015 __terminate ();
4016 }
4017 #endif