1 /* More subroutines needed by GCC output code on some machines. */
2 /* Compile this one with gcc. */
3 /* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
22 /* As a special exception, if you link this library with other files,
23 some of which are compiled with GCC, to produce an executable,
24 this library does not by itself cause the resulting executable
25 to be covered by the GNU General Public License.
26 This exception does not however invalidate any other reasons why
27 the executable file might be covered by the GNU General Public License. */
29 /* It is incorrect to include config.h here, because this file is being
30 compiled for the target, and hence definitions concerning only the host
35 /* We disable this when inhibit_libc, so that gcc can still be built without
36 needing header files first. */
37 /* ??? This is not a good solution, since prototypes may be required in
38 some cases for correct code. See also frame.c. */
40 /* fixproto guarantees these system headers exist. */
51 /* Don't use `fancy_abort' here even if config.h says to use it. */
56 #if (SUPPORTS_WEAK == 1) && (defined (ASM_OUTPUT_DEF) || defined (ASM_OUTPUT_WEAK_ALIAS))
60 /* In a cross-compilation situation, default to inhibiting compilation
61 of routines that use libc. */
63 #if defined(CROSS_COMPILE) && !defined(inhibit_libc)
67 /* Permit the tm.h file to select the endianness to use just for this
68 file. This is used when the endianness is determined when the
71 #ifndef LIBGCC2_WORDS_BIG_ENDIAN
72 #define LIBGCC2_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
75 #ifndef LIBGCC2_LONG_DOUBLE_TYPE_SIZE
76 #define LIBGCC2_LONG_DOUBLE_TYPE_SIZE LONG_DOUBLE_TYPE_SIZE
79 /* In the first part of this file, we are interfacing to calls generated
80 by the compiler itself. These calls pass values into these routines
81 which have very specific modes (rather than very specific types), and
82 these compiler-generated calls also expect any return values to have
83 very specific modes (rather than very specific types). Thus, we need
84 to avoid using regular C language type names in this part of the file
85 because the sizes for those types can be configured to be anything.
86 Instead we use the following special type names. */
/* Special type names keyed to machine modes (not to configurable C type
   sizes), used by the compiler-interface routines in the first part of
   this file.  Restores the #endif lines lost in extraction.  */
typedef unsigned int UQItype	__attribute__ ((mode (QI)));
typedef 	 int SItype	__attribute__ ((mode (SI)));
typedef unsigned int USItype	__attribute__ ((mode (SI)));
typedef		 int DItype	__attribute__ ((mode (DI)));
typedef unsigned int UDItype	__attribute__ ((mode (DI)));

typedef 	float SFtype	__attribute__ ((mode (SF)));
typedef		float DFtype	__attribute__ ((mode (DF)));

#if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
typedef		float XFtype	__attribute__ ((mode (XF)));
#endif
#if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128
typedef		float TFtype	__attribute__ ((mode (TF)));
#endif

typedef int word_type __attribute__ ((mode (__word__)));
106 /* Make sure that we don't accidentally use any normal C language built-in
107 type names in the first part of this file. Instead we want to use *only*
108 the type names defined above. The following macro definitions insure
109 that if we *do* accidentally use some normal C language built-in type name,
110 we will get a syntax error. */
112 #define char bogus_type
113 #define short bogus_type
114 #define int bogus_type
115 #define long bogus_type
116 #define unsigned bogus_type
117 #define float bogus_type
118 #define double bogus_type
120 #define SI_TYPE_SIZE (sizeof (SItype) * BITS_PER_UNIT)
122 /* DIstructs are pairs of SItype values in the order determined by
123 LIBGCC2_WORDS_BIG_ENDIAN. */
125 #if LIBGCC2_WORDS_BIG_ENDIAN
126 struct DIstruct
{SItype high
, low
;};
128 struct DIstruct
{SItype low
, high
;};
131 /* We need this union to unpack/pack DImode values, since we don't have
132 any arithmetic yet. Incoming DImode parameters are stored into the
133 `ll' field, and the unpacked result is read from the struct `s'. */
141 #if (defined (L_udivmoddi4) || defined (L_muldi3) || defined (L_udiv_w_sdiv)\
142 || defined (L_divdi3) || defined (L_udivdi3) \
143 || defined (L_moddi3) || defined (L_umoddi3))
145 #include "longlong.h"
147 #endif /* udiv or mul */
149 extern DItype
__fixunssfdi (SFtype a
);
150 extern DItype
__fixunsdfdi (DFtype a
);
151 #if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
152 extern DItype
__fixunsxfdi (XFtype a
);
154 #if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128
155 extern DItype
__fixunstfdi (TFtype a
);
#if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
#if defined (L_divdi3) || defined (L_moddi3)
static inline
#endif
/* Negate a DImode value using word arithmetic: negate both words and
   borrow into the high word when the negated low word is nonzero.  */
DItype
__negdi2 (DItype u)
{
  DIunion w;
  DIunion uu;

  uu.ll = u;

  w.s.low = -uu.s.low;
  /* Borrow: -low wrapped to a nonzero value iff low was nonzero.  */
  w.s.high = -uu.s.high - ((USItype) w.s.low > 0);

  return w.ll;
}
#endif
177 /* Unless shift functions are defined with full ANSI prototypes,
178 parameter b will be promoted to int if word_type is smaller than an int. */
181 __lshrdi3 (DItype u
, word_type b
)
192 bm
= (sizeof (SItype
) * BITS_PER_UNIT
) - b
;
196 w
.s
.low
= (USItype
)uu
.s
.high
>> -bm
;
200 USItype carries
= (USItype
)uu
.s
.high
<< bm
;
201 w
.s
.high
= (USItype
)uu
.s
.high
>> b
;
202 w
.s
.low
= ((USItype
)uu
.s
.low
>> b
) | carries
;
211 __ashldi3 (DItype u
, word_type b
)
222 bm
= (sizeof (SItype
) * BITS_PER_UNIT
) - b
;
226 w
.s
.high
= (USItype
)uu
.s
.low
<< -bm
;
230 USItype carries
= (USItype
)uu
.s
.low
>> bm
;
231 w
.s
.low
= (USItype
)uu
.s
.low
<< b
;
232 w
.s
.high
= ((USItype
)uu
.s
.high
<< b
) | carries
;
241 __ashrdi3 (DItype u
, word_type b
)
252 bm
= (sizeof (SItype
) * BITS_PER_UNIT
) - b
;
255 /* w.s.high = 1..1 or 0..0 */
256 w
.s
.high
= uu
.s
.high
>> (sizeof (SItype
) * BITS_PER_UNIT
- 1);
257 w
.s
.low
= uu
.s
.high
>> -bm
;
261 USItype carries
= (USItype
)uu
.s
.high
<< bm
;
262 w
.s
.high
= uu
.s
.high
>> b
;
263 w
.s
.low
= ((USItype
)uu
.s
.low
>> b
) | carries
;
277 w
.s
.low
= ffs (uu
.s
.low
);
280 w
.s
.low
= ffs (uu
.s
.high
);
283 w
.s
.low
+= BITS_PER_UNIT
* sizeof (SItype
);
292 __muldi3 (DItype u
, DItype v
)
300 w
.ll
= __umulsidi3 (uu
.s
.low
, vv
.s
.low
);
301 w
.s
.high
+= ((USItype
) uu
.s
.low
* (USItype
) vv
.s
.high
302 + (USItype
) uu
.s
.high
* (USItype
) vv
.s
.low
);
309 #if defined (sdiv_qrnnd)
311 __udiv_w_sdiv (USItype
*rp
, USItype a1
, USItype a0
, USItype d
)
318 if (a1
< d
- a1
- (a0
>> (SI_TYPE_SIZE
- 1)))
320 /* dividend, divisor, and quotient are nonnegative */
321 sdiv_qrnnd (q
, r
, a1
, a0
, d
);
325 /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
326 sub_ddmmss (c1
, c0
, a1
, a0
, d
>> 1, d
<< (SI_TYPE_SIZE
- 1));
327 /* Divide (c1*2^32 + c0) by d */
328 sdiv_qrnnd (q
, r
, c1
, c0
, d
);
329 /* Add 2^31 to quotient */
330 q
+= (USItype
) 1 << (SI_TYPE_SIZE
- 1);
335 b1
= d
>> 1; /* d/2, between 2^30 and 2^31 - 1 */
336 c1
= a1
>> 1; /* A/2 */
337 c0
= (a1
<< (SI_TYPE_SIZE
- 1)) + (a0
>> 1);
339 if (a1
< b1
) /* A < 2^32*b1, so A/2 < 2^31*b1 */
341 sdiv_qrnnd (q
, r
, c1
, c0
, b1
); /* (A/2) / (d/2) */
343 r
= 2*r
+ (a0
& 1); /* Remainder from A/(2*b1) */
360 else if (c1
< b1
) /* So 2^31 <= (A/2)/b1 < 2^32 */
363 c0
= ~c0
; /* logical NOT */
365 sdiv_qrnnd (q
, r
, c1
, c0
, b1
); /* (A/2) / (d/2) */
367 q
= ~q
; /* (A/2)/b1 */
370 r
= 2*r
+ (a0
& 1); /* A/(2*b1) */
388 else /* Implies c1 = b1 */
389 { /* Hence a1 = d - 1 = 2*b1 - 1 */
407 /* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */
409 __udiv_w_sdiv (USItype
*rp
__attribute__ ((__unused__
)),
410 USItype a1
__attribute__ ((__unused__
)),
411 USItype a0
__attribute__ ((__unused__
)),
412 USItype d
__attribute__ ((__unused__
)))
#if (defined (L_udivdi3) || defined (L_divdi3) || \
     defined (L_umoddi3) || defined (L_moddi3))
/* Bit-length lookup table for 8-bit values (0 maps to 0, 1 to 1, 2-3 to 2,
   4-7 to 3, ...), used by longlong.h's count_leading_zeros fallback.  */
static const UQItype __clz_tab[] =
{
  0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
};
#endif
437 #if (defined (L_udivdi3) || defined (L_divdi3) || \
438 defined (L_umoddi3) || defined (L_moddi3))
442 __udivmoddi4 (UDItype n
, UDItype d
, UDItype
*rp
)
447 USItype d0
, d1
, n0
, n1
, n2
;
459 #if !UDIV_NEEDS_NORMALIZATION
466 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
469 /* Remainder in n0. */
476 d0
= 1 / d0
; /* Divide intentionally by zero. */
478 udiv_qrnnd (q1
, n1
, 0, n1
, d0
);
479 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
481 /* Remainder in n0. */
492 #else /* UDIV_NEEDS_NORMALIZATION */
500 count_leading_zeros (bm
, d0
);
504 /* Normalize, i.e. make the most significant bit of the
508 n1
= (n1
<< bm
) | (n0
>> (SI_TYPE_SIZE
- bm
));
512 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
515 /* Remainder in n0 >> bm. */
522 d0
= 1 / d0
; /* Divide intentionally by zero. */
524 count_leading_zeros (bm
, d0
);
528 /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
529 conclude (the most significant bit of n1 is set) /\ (the
530 leading quotient digit q1 = 1).
532 This special case is necessary, not an optimization.
533 (Shifts counts of SI_TYPE_SIZE are undefined.) */
542 b
= SI_TYPE_SIZE
- bm
;
546 n1
= (n1
<< bm
) | (n0
>> b
);
549 udiv_qrnnd (q1
, n1
, n2
, n1
, d0
);
554 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
556 /* Remainder in n0 >> bm. */
566 #endif /* UDIV_NEEDS_NORMALIZATION */
577 /* Remainder in n1n0. */
589 count_leading_zeros (bm
, d1
);
592 /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
593 conclude (the most significant bit of n1 is set) /\ (the
594 quotient digit q0 = 0 or 1).
596 This special case is necessary, not an optimization. */
598 /* The condition on the next line takes advantage of that
599 n1 >= d1 (true due to program flow). */
600 if (n1
> d1
|| n0
>= d0
)
603 sub_ddmmss (n1
, n0
, n1
, n0
, d1
, d0
);
622 b
= SI_TYPE_SIZE
- bm
;
624 d1
= (d1
<< bm
) | (d0
>> b
);
627 n1
= (n1
<< bm
) | (n0
>> b
);
630 udiv_qrnnd (q0
, n1
, n2
, n1
, d1
);
631 umul_ppmm (m1
, m0
, q0
, d0
);
633 if (m1
> n1
|| (m1
== n1
&& m0
> n0
))
636 sub_ddmmss (m1
, m0
, m1
, m0
, d1
, d0
);
641 /* Remainder in (n1n0 - m1m0) >> bm. */
644 sub_ddmmss (n1
, n0
, n1
, n0
, m1
, m0
);
645 rr
.s
.low
= (n1
<< b
) | (n0
>> bm
);
646 rr
.s
.high
= n1
>> bm
;
660 UDItype
__udivmoddi4 ();
663 __divdi3 (DItype u
, DItype v
)
674 uu
.ll
= __negdi2 (uu
.ll
);
677 vv
.ll
= __negdi2 (vv
.ll
);
679 w
= __udivmoddi4 (uu
.ll
, vv
.ll
, (UDItype
*) 0);
688 UDItype
__udivmoddi4 ();
690 __moddi3 (DItype u
, DItype v
)
701 uu
.ll
= __negdi2 (uu
.ll
);
703 vv
.ll
= __negdi2 (vv
.ll
);
705 (void) __udivmoddi4 (uu
.ll
, vv
.ll
, &w
);
714 UDItype
__udivmoddi4 ();
716 __umoddi3 (UDItype u
, UDItype v
)
720 (void) __udivmoddi4 (u
, v
, &w
);
727 UDItype
__udivmoddi4 ();
729 __udivdi3 (UDItype n
, UDItype d
)
731 return __udivmoddi4 (n
, d
, (UDItype
*) 0);
737 __cmpdi2 (DItype a
, DItype b
)
741 au
.ll
= a
, bu
.ll
= b
;
743 if (au
.s
.high
< bu
.s
.high
)
745 else if (au
.s
.high
> bu
.s
.high
)
747 if ((USItype
) au
.s
.low
< (USItype
) bu
.s
.low
)
749 else if ((USItype
) au
.s
.low
> (USItype
) bu
.s
.low
)
757 __ucmpdi2 (DItype a
, DItype b
)
761 au
.ll
= a
, bu
.ll
= b
;
763 if ((USItype
) au
.s
.high
< (USItype
) bu
.s
.high
)
765 else if ((USItype
) au
.s
.high
> (USItype
) bu
.s
.high
)
767 if ((USItype
) au
.s
.low
< (USItype
) bu
.s
.low
)
769 else if ((USItype
) au
.s
.low
> (USItype
) bu
.s
.low
)
#if defined(L_fixunstfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)

/* Convert a nonnegative TFmode value to unsigned DImode, one SImode word
   at a time.  Negative inputs yield 0.  */
DItype
__fixunstfdi (TFtype a)
{
  TFtype b;
  UDItype v;

  if (a < 0)
    return 0;

  /* Compute high word of result, as a flonum.  */
  b = (a / HIGH_WORD_COEFF);
  /* Convert that to fixed (but not to DItype!),
     and shift it into the high word.  */
  v = (USItype) b;
  v <<= WORD_SIZE;
  /* Remove high part from the TFtype, leaving the low part as flonum.  */
  a -= (TFtype)v;
  /* Convert that to fixed (but not to DItype!) and add it in.
     Sometimes A comes out negative.  This is significant, since
     A has more bits than a long int does.  */
  if (a < 0)
    v -= (USItype) (- a);
  else
    v += (USItype) a;
  return v;
}
#endif
#if defined(L_fixtfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
/* Convert a TFmode value to signed DImode via the unsigned routine,
   negating around it for negative inputs.  */
DItype
__fixtfdi (TFtype a)
{
  if (a < 0)
    return - __fixunstfdi (-a);
  return __fixunstfdi (a);
}
#endif
#if defined(L_fixunsxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)

/* Convert a nonnegative XFmode value to unsigned DImode, one SImode word
   at a time.  Negative inputs yield 0.  */
DItype
__fixunsxfdi (XFtype a)
{
  XFtype b;
  UDItype v;

  if (a < 0)
    return 0;

  /* Compute high word of result, as a flonum.  */
  b = (a / HIGH_WORD_COEFF);
  /* Convert that to fixed (but not to DItype!),
     and shift it into the high word.  */
  v = (USItype) b;
  v <<= WORD_SIZE;
  /* Remove high part from the XFtype, leaving the low part as flonum.  */
  a -= (XFtype)v;
  /* Convert that to fixed (but not to DItype!) and add it in.
     Sometimes A comes out negative.  This is significant, since
     A has more bits than a long int does.  */
  if (a < 0)
    v -= (USItype) (- a);
  else
    v += (USItype) a;
  return v;
}
#endif
#if defined(L_fixxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
/* Convert an XFmode value to signed DImode via the unsigned routine,
   negating around it for negative inputs.  */
DItype
__fixxfdi (XFtype a)
{
  if (a < 0)
    return - __fixunsxfdi (-a);
  return __fixunsxfdi (a);
}
#endif
860 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
861 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
864 __fixunsdfdi (DFtype a
)
872 /* Compute high word of result, as a flonum. */
873 b
= (a
/ HIGH_WORD_COEFF
);
874 /* Convert that to fixed (but not to DItype!),
875 and shift it into the high word. */
878 /* Remove high part from the DFtype, leaving the low part as flonum. */
880 /* Convert that to fixed (but not to DItype!) and add it in.
881 Sometimes A comes out negative. This is significant, since
882 A has more bits than a long int does. */
884 v
-= (USItype
) (- a
);
896 return - __fixunsdfdi (-a
);
897 return __fixunsdfdi (a
);
902 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
903 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
906 __fixunssfdi (SFtype original_a
)
908 /* Convert the SFtype to a DFtype, because that is surely not going
909 to lose any bits. Some day someone else can write a faster version
910 that avoids converting to DFtype, and verify it really works right. */
911 DFtype a
= original_a
;
918 /* Compute high word of result, as a flonum. */
919 b
= (a
/ HIGH_WORD_COEFF
);
920 /* Convert that to fixed (but not to DItype!),
921 and shift it into the high word. */
924 /* Remove high part from the DFtype, leaving the low part as flonum. */
926 /* Convert that to fixed (but not to DItype!) and add it in.
927 Sometimes A comes out negative. This is significant, since
928 A has more bits than a long int does. */
930 v
-= (USItype
) (- a
);
942 return - __fixunssfdi (-a
);
943 return __fixunssfdi (a
);
#if defined(L_floatdixf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)

/* Convert a signed DImode value to XFmode: scale the (signed) high word
   up by 2^WORD_SIZE in two half-word steps, then add the unsigned low
   word.  */
XFtype
__floatdixf (DItype u)
{
  XFtype d;

  d = (SItype) (u >> WORD_SIZE);
  d *= HIGH_HALFWORD_COEFF;
  d *= HIGH_HALFWORD_COEFF;
  d += (USItype) (u & (HIGH_WORD_COEFF - 1));

  return d;
}
#endif
#if defined(L_floatditf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)

/* Convert a signed DImode value to TFmode: scale the (signed) high word
   up by 2^WORD_SIZE in two half-word steps, then add the unsigned low
   word.  */
TFtype
__floatditf (DItype u)
{
  TFtype d;

  d = (SItype) (u >> WORD_SIZE);
  d *= HIGH_HALFWORD_COEFF;
  d *= HIGH_HALFWORD_COEFF;
  d += (USItype) (u & (HIGH_WORD_COEFF - 1));

  return d;
}
#endif
986 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
987 #define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
988 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
991 __floatdidf (DItype u
)
995 d
= (SItype
) (u
>> WORD_SIZE
);
996 d
*= HIGH_HALFWORD_COEFF
;
997 d
*= HIGH_HALFWORD_COEFF
;
998 d
+= (USItype
) (u
& (HIGH_WORD_COEFF
- 1));
1005 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
1006 #define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
1007 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
1008 #define DI_SIZE (sizeof (DItype) * BITS_PER_UNIT)
1010 /* Define codes for all the float formats that we know of. Note
1011 that this is copied from real.h. */
1013 #define UNKNOWN_FLOAT_FORMAT 0
1014 #define IEEE_FLOAT_FORMAT 1
1015 #define VAX_FLOAT_FORMAT 2
1016 #define IBM_FLOAT_FORMAT 3
1018 /* Default to IEEE float if not specified. Nearly all machines use it. */
1019 #ifndef HOST_FLOAT_FORMAT
1020 #define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
1023 #if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1028 #if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
1033 #if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
1039 __floatdisf (DItype u
)
1041 /* Do the calculation in DFmode
1042 so that we don't lose any of the precision of the high word
1043 while multiplying it. */
1046 /* Protect against double-rounding error.
1047 Represent any low-order bits, that might be truncated in DFmode,
1048 by a bit that won't be lost. The bit can go in anywhere below the
1049 rounding position of the SFmode. A fixed mask and bit position
1050 handles all usual configurations. It doesn't handle the case
1051 of 128-bit DImode, however. */
1052 if (DF_SIZE
< DI_SIZE
1053 && DF_SIZE
> (DI_SIZE
- DF_SIZE
+ SF_SIZE
))
1055 #define REP_BIT ((USItype) 1 << (DI_SIZE - DF_SIZE))
1056 if (! (- ((DItype
) 1 << DF_SIZE
) < u
1057 && u
< ((DItype
) 1 << DF_SIZE
)))
1059 if ((USItype
) u
& (REP_BIT
- 1))
1063 f
= (SItype
) (u
>> WORD_SIZE
);
1064 f
*= HIGH_HALFWORD_COEFF
;
1065 f
*= HIGH_HALFWORD_COEFF
;
1066 f
+= (USItype
) (u
& (HIGH_WORD_COEFF
- 1));
#if defined(L_fixunsxfsi) && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
/* Reenable the normal types, in case limits.h needs them.  */
#undef char
#undef short
#undef int
#undef long
#undef unsigned
#undef float
#undef double
#include <limits.h>

/* Convert a nonnegative XFmode value to unsigned SImode.  Values with the
   top bit set are biased through LONG_MIN so the SImode conversion stays
   in range.  */
USItype
__fixunsxfsi (XFtype a)
{
  if (a >= - (DFtype) LONG_MIN)
    return (SItype) (a + LONG_MIN) - LONG_MIN;
  return (SItype) a;
}
#endif
1095 /* Reenable the normal types, in case limits.h needs them. */
1108 __fixunsdfsi (DFtype a
)
1110 if (a
>= - (DFtype
) LONG_MIN
)
1111 return (SItype
) (a
+ LONG_MIN
) - LONG_MIN
;
1117 /* Reenable the normal types, in case limits.h needs them. */
1130 __fixunssfsi (SFtype a
)
1132 if (a
>= - (SFtype
) LONG_MIN
)
1133 return (SItype
) (a
+ LONG_MIN
) - LONG_MIN
;
1138 /* From here on down, the routines use normal data types. */
1140 #define SItype bogus_type
1141 #define USItype bogus_type
1142 #define DItype bogus_type
1143 #define UDItype bogus_type
1144 #define SFtype bogus_type
1145 #define DFtype bogus_type
/* Like bcmp except the sign is meaningful.
   Result is negative if S1 is less than S2,
   positive if S1 is greater, 0 if S1 and S2 are equal.  */

int
__gcc_bcmp (unsigned char *s1, unsigned char *s2, size_t size)
{
  while (size > 0)
    {
      unsigned char c1 = *s1++, c2 = *s2++;
      if (c1 != c2)
	return c1 - c2;
      size--;
    }
  return 0;
}
1183 #if defined(__svr4__) || defined(__alliant__)
1187 /* The Alliant needs the added underscore. */
1188 asm (".globl __builtin_saveregs");
1189 asm ("__builtin_saveregs:");
1190 asm (".globl ___builtin_saveregs");
1191 asm ("___builtin_saveregs:");
1193 asm (" andnot 0x0f,%sp,%sp"); /* round down to 16-byte boundary */
1194 asm (" adds -96,%sp,%sp"); /* allocate stack space for reg save
1195 area and also for a new va_list
1197 /* Save all argument registers in the arg reg save area. The
1198 arg reg save area must have the following layout (according
1210 asm (" fst.q %f8, 0(%sp)"); /* save floating regs (f8-f15) */
1211 asm (" fst.q %f12,16(%sp)");
1213 asm (" st.l %r16,32(%sp)"); /* save integer regs (r16-r27) */
1214 asm (" st.l %r17,36(%sp)");
1215 asm (" st.l %r18,40(%sp)");
1216 asm (" st.l %r19,44(%sp)");
1217 asm (" st.l %r20,48(%sp)");
1218 asm (" st.l %r21,52(%sp)");
1219 asm (" st.l %r22,56(%sp)");
1220 asm (" st.l %r23,60(%sp)");
1221 asm (" st.l %r24,64(%sp)");
1222 asm (" st.l %r25,68(%sp)");
1223 asm (" st.l %r26,72(%sp)");
1224 asm (" st.l %r27,76(%sp)");
1226 asm (" adds 80,%sp,%r16"); /* compute the address of the new
1227 va_list structure. Put in into
1228 r16 so that it will be returned
1231 /* Initialize all fields of the new va_list structure. This
1232 structure looks like:
1235 unsigned long ireg_used;
1236 unsigned long freg_used;
1242 asm (" st.l %r0, 0(%r16)"); /* nfixed */
1243 asm (" st.l %r0, 4(%r16)"); /* nfloating */
1244 asm (" st.l %sp, 8(%r16)"); /* __va_ctl points to __va_struct. */
1245 asm (" bri %r1"); /* delayed return */
1246 asm (" st.l %r28,12(%r16)"); /* pointer to overflow args */
1248 #else /* not __svr4__ */
1249 #if defined(__PARAGON__)
1251 * we'll use SVR4-ish varargs but need SVR3.2 assembler syntax,
1252 * and we stand a better chance of hooking into libraries
1253 * compiled by PGI. [andyp@ssd.intel.com]
1257 asm (".globl __builtin_saveregs");
1258 asm ("__builtin_saveregs:");
1259 asm (".globl ___builtin_saveregs");
1260 asm ("___builtin_saveregs:");
1262 asm (" andnot 0x0f,sp,sp"); /* round down to 16-byte boundary */
1263 asm (" adds -96,sp,sp"); /* allocate stack space for reg save
1264 area and also for a new va_list
1266 /* Save all argument registers in the arg reg save area. The
1267 arg reg save area must have the following layout (according
1279 asm (" fst.q f8, 0(sp)");
1280 asm (" fst.q f12,16(sp)");
1281 asm (" st.l r16,32(sp)");
1282 asm (" st.l r17,36(sp)");
1283 asm (" st.l r18,40(sp)");
1284 asm (" st.l r19,44(sp)");
1285 asm (" st.l r20,48(sp)");
1286 asm (" st.l r21,52(sp)");
1287 asm (" st.l r22,56(sp)");
1288 asm (" st.l r23,60(sp)");
1289 asm (" st.l r24,64(sp)");
1290 asm (" st.l r25,68(sp)");
1291 asm (" st.l r26,72(sp)");
1292 asm (" st.l r27,76(sp)");
1294 asm (" adds 80,sp,r16"); /* compute the address of the new
1295 va_list structure. Put in into
1296 r16 so that it will be returned
1299 /* Initialize all fields of the new va_list structure. This
1300 structure looks like:
1303 unsigned long ireg_used;
1304 unsigned long freg_used;
1310 asm (" st.l r0, 0(r16)"); /* nfixed */
1311 asm (" st.l r0, 4(r16)"); /* nfloating */
1312 asm (" st.l sp, 8(r16)"); /* __va_ctl points to __va_struct. */
1313 asm (" bri r1"); /* delayed return */
1314 asm (" st.l r28,12(r16)"); /* pointer to overflow args */
1315 #else /* not __PARAGON__ */
1319 asm (".globl ___builtin_saveregs");
1320 asm ("___builtin_saveregs:");
1321 asm (" mov sp,r30");
1322 asm (" andnot 0x0f,sp,sp");
1323 asm (" adds -96,sp,sp"); /* allocate sufficient space on the stack */
1325 /* Fill in the __va_struct. */
1326 asm (" st.l r16, 0(sp)"); /* save integer regs (r16-r27) */
1327 asm (" st.l r17, 4(sp)"); /* int fixed[12] */
1328 asm (" st.l r18, 8(sp)");
1329 asm (" st.l r19,12(sp)");
1330 asm (" st.l r20,16(sp)");
1331 asm (" st.l r21,20(sp)");
1332 asm (" st.l r22,24(sp)");
1333 asm (" st.l r23,28(sp)");
1334 asm (" st.l r24,32(sp)");
1335 asm (" st.l r25,36(sp)");
1336 asm (" st.l r26,40(sp)");
1337 asm (" st.l r27,44(sp)");
1339 asm (" fst.q f8, 48(sp)"); /* save floating regs (f8-f15) */
1340 asm (" fst.q f12,64(sp)"); /* int floating[8] */
1342 /* Fill in the __va_ctl. */
1343 asm (" st.l sp, 80(sp)"); /* __va_ctl points to __va_struct. */
1344 asm (" st.l r28,84(sp)"); /* pointer to more args */
1345 asm (" st.l r0, 88(sp)"); /* nfixed */
1346 asm (" st.l r0, 92(sp)"); /* nfloating */
1348 asm (" adds 80,sp,r16"); /* return address of the __va_ctl. */
1350 asm (" mov r30,sp");
1351 /* recover stack and pass address to start
1353 #endif /* not __PARAGON__ */
1354 #endif /* not __svr4__ */
1355 #else /* not __i860__ */
1357 asm (".global __builtin_saveregs");
1358 asm ("__builtin_saveregs:");
1359 asm (".global ___builtin_saveregs");
1360 asm ("___builtin_saveregs:");
1361 #ifdef NEED_PROC_COMMAND
1364 asm ("st %i0,[%fp+68]");
1365 asm ("st %i1,[%fp+72]");
1366 asm ("st %i2,[%fp+76]");
1367 asm ("st %i3,[%fp+80]");
1368 asm ("st %i4,[%fp+84]");
1370 asm ("st %i5,[%fp+88]");
1371 #ifdef NEED_TYPE_COMMAND
1372 asm (".type __builtin_saveregs,#function");
1373 asm (".size __builtin_saveregs,.-__builtin_saveregs");
1375 #else /* not __sparc__ */
1376 #if defined(__MIPSEL__) | defined(__R3000__) | defined(__R2000__) | defined(__mips__)
1380 asm (" .set nomips16");
1382 asm (" .ent __builtin_saveregs");
1383 asm (" .globl __builtin_saveregs");
1384 asm ("__builtin_saveregs:");
1385 asm (" sw $4,0($30)");
1386 asm (" sw $5,4($30)");
1387 asm (" sw $6,8($30)");
1388 asm (" sw $7,12($30)");
1390 asm (" .end __builtin_saveregs");
1391 #else /* not __mips__, etc. */
1394 __builtin_saveregs ()
1399 #endif /* not __mips__ */
1400 #endif /* not __sparc__ */
1401 #endif /* not __i860__ */
1405 #ifndef inhibit_libc
1407 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
/* This is used by the `assert' macro.  */
extern void __eprintf (const char *, const char *, unsigned int, const char *)
  __attribute__ ((__noreturn__));

/* Print an assertion-failure message (STRING is the format, taking the
   failed EXPRESSION, source LINE and FILENAME) and abort.  */
void
__eprintf (const char *string, const char *expression,
	   unsigned int line, const char *filename)
{
  fprintf (stderr, string, expression, line, filename);
  fflush (stderr);
  abort ();
}
/* Structure emitted by -a  */
struct bb
{
  long zero_word;		/* 0 until registered via __bb_init_func */
  const char *filename;
  long *counts;
  long ncounts;
  struct bb *next;
  const unsigned long *addresses;

  /* Older GCC's did not emit these fields.  */
  long nwords;
  const char **functions;
  const long *line_nums;
  const char **filenames;
  char *flags;
};
1445 #ifdef BLOCK_PROFILER_CODE
1448 #ifndef inhibit_libc
1450 /* Simple minded basic block profiling output dumper for
1451 systems that don't provide tcov support. At present,
1452 it requires atexit and stdio. */
1454 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1458 #include "gbl-ctors.h"
1459 #include "gcov-io.h"
1462 static struct bb
*bb_head
;
/* Return the number of digits needed to print a value */
/* __inline__ */ static int
num_digits (long value, int base)
{
  /* A leading minus sign counts as one character, except in base 16
     where values are printed as unsigned.  */
  int minus = (value < 0 && base != 16);
  unsigned long v = (minus) ? -value : value;
  int ret = minus;

  do
    {
      v /= base;
      ret++;
    }
  while (v);

  return ret;
}
1482 __bb_exit_func (void)
1484 FILE *da_file
, *file
;
1491 i
= strlen (bb_head
->filename
) - 3;
1493 if (!strcmp (bb_head
->filename
+i
, ".da"))
1495 /* Must be -fprofile-arcs not -a.
1496 Dump data in a form that gcov expects. */
1500 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1502 /* If the file exists, and the number of counts in it is the same,
1503 then merge them in. */
1505 if ((da_file
= fopen (ptr
->filename
, "r")) != 0)
1509 if (__read_long (&n_counts
, da_file
, 8) != 0)
1511 fprintf (stderr
, "arc profiling: Can't read output file %s.\n",
1516 if (n_counts
== ptr
->ncounts
)
1520 for (i
= 0; i
< n_counts
; i
++)
1524 if (__read_long (&v
, da_file
, 8) != 0)
1526 fprintf (stderr
, "arc profiling: Can't read output file %s.\n",
1530 ptr
->counts
[i
] += v
;
1534 if (fclose (da_file
) == EOF
)
1535 fprintf (stderr
, "arc profiling: Error closing output file %s.\n",
1538 if ((da_file
= fopen (ptr
->filename
, "w")) == 0)
1540 fprintf (stderr
, "arc profiling: Can't open output file %s.\n",
1545 /* ??? Should first write a header to the file. Preferably, a 4 byte
1546 magic number, 4 bytes containing the time the program was
1547 compiled, 4 bytes containing the last modification time of the
1548 source file, and 4 bytes indicating the compiler options used.
1550 That way we can easily verify that the proper source/executable/
1551 data file combination is being used from gcov. */
1553 if (__write_long (ptr
->ncounts
, da_file
, 8) != 0)
1556 fprintf (stderr
, "arc profiling: Error writing output file %s.\n",
1562 long *count_ptr
= ptr
->counts
;
1564 for (j
= ptr
->ncounts
; j
> 0; j
--)
1566 if (__write_long (*count_ptr
, da_file
, 8) != 0)
1574 fprintf (stderr
, "arc profiling: Error writing output file %s.\n",
1578 if (fclose (da_file
) == EOF
)
1579 fprintf (stderr
, "arc profiling: Error closing output file %s.\n",
1586 /* Must be basic block profiling. Emit a human readable output file. */
1588 file
= fopen ("bb.out", "a");
1597 /* This is somewhat type incorrect, but it avoids worrying about
1598 exactly where time.h is included from. It should be ok unless
1599 a void * differs from other pointer formats, or if sizeof (long)
1600 is < sizeof (time_t). It would be nice if we could assume the
1601 use of rationale standards here. */
1603 time ((void *) &time_value
);
1604 fprintf (file
, "Basic block profiling finished on %s\n", ctime ((void *) &time_value
));
1606 /* We check the length field explicitly in order to allow compatibility
1607 with older GCC's which did not provide it. */
1609 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1612 int func_p
= (ptr
->nwords
>= sizeof (struct bb
)
1613 && ptr
->nwords
<= 1000
1615 int line_p
= (func_p
&& ptr
->line_nums
);
1616 int file_p
= (func_p
&& ptr
->filenames
);
1617 int addr_p
= (ptr
->addresses
!= 0);
1618 long ncounts
= ptr
->ncounts
;
1624 int blk_len
= num_digits (ncounts
, 10);
1629 fprintf (file
, "File %s, %ld basic blocks \n\n",
1630 ptr
->filename
, ncounts
);
1632 /* Get max values for each field. */
1633 for (i
= 0; i
< ncounts
; i
++)
1638 if (cnt_max
< ptr
->counts
[i
])
1639 cnt_max
= ptr
->counts
[i
];
1641 if (addr_p
&& addr_max
< ptr
->addresses
[i
])
1642 addr_max
= ptr
->addresses
[i
];
1644 if (line_p
&& line_max
< ptr
->line_nums
[i
])
1645 line_max
= ptr
->line_nums
[i
];
1649 p
= (ptr
->functions
[i
]) ? (ptr
->functions
[i
]) : "<none>";
1657 p
= (ptr
->filenames
[i
]) ? (ptr
->filenames
[i
]) : "<none>";
1664 addr_len
= num_digits (addr_max
, 16);
1665 cnt_len
= num_digits (cnt_max
, 10);
1666 line_len
= num_digits (line_max
, 10);
1668 /* Now print out the basic block information. */
1669 for (i
= 0; i
< ncounts
; i
++)
1672 " Block #%*d: executed %*ld time(s)",
1674 cnt_len
, ptr
->counts
[i
]);
1677 fprintf (file
, " address= 0x%.*lx", addr_len
,
1681 fprintf (file
, " function= %-*s", func_len
,
1682 (ptr
->functions
[i
]) ? ptr
->functions
[i
] : "<none>");
1685 fprintf (file
, " line= %*ld", line_len
, ptr
->line_nums
[i
]);
1688 fprintf (file
, " file= %s",
1689 (ptr
->filenames
[i
]) ? ptr
->filenames
[i
] : "<none>");
1691 fprintf (file
, "\n");
1694 fprintf (file
, "\n");
1698 fprintf (file
, "\n\n");
1704 __bb_init_func (struct bb
*blocks
)
1706 /* User is supposed to check whether the first word is non-0,
1707 but just in case.... */
1709 if (blocks
->zero_word
)
1713 /* Initialize destructor. */
1715 ON_EXIT (__bb_exit_func
, 0);
1718 /* Set up linked list. */
1719 blocks
->zero_word
= 1;
1720 blocks
->next
= bb_head
;
1724 #ifndef MACHINE_STATE_SAVE
1725 #define MACHINE_STATE_SAVE(ID)
1727 #ifndef MACHINE_STATE_RESTORE
1728 #define MACHINE_STATE_RESTORE(ID)
1731 /* Number of buckets in hashtable of basic block addresses. */
1733 #define BB_BUCKETS 311
1735 /* Maximum length of string in file bb.in. */
1737 #define BBINBUFSIZE 500
1739 /* BBINBUFSIZE-1 with double quotes. We could use #BBINBUFSIZE or
1740 "BBINBUFSIZE" but want to avoid trouble with preprocessors. */
1742 #define BBINBUFSIZESTR "499"
1746 struct bb_edge
*next
;
1747 unsigned long src_addr
;
1748 unsigned long dst_addr
;
1749 unsigned long count
;
1754 TRACE_KEEP
= 0, TRACE_ON
= 1, TRACE_OFF
= 2
1759 struct bb_func
*next
;
1762 enum bb_func_mode mode
;
1765 /* This is the connection to the outside world.
1766 The BLOCK_PROFILER macro must set __bb.blocks
1767 and __bb.blockno. */
1770 unsigned long blockno
;
1774 /* Vars to store addrs of source and destination basic blocks
1777 static unsigned long bb_src
= 0;
1778 static unsigned long bb_dst
= 0;
1780 static FILE *bb_tracefile
= (FILE *) 0;
1781 static struct bb_edge
**bb_hashbuckets
= (struct bb_edge
**) 0;
1782 static struct bb_func
*bb_func_head
= (struct bb_func
*) 0;
1783 static unsigned long bb_callcount
= 0;
1784 static int bb_mode
= 0;
1786 static unsigned long *bb_stack
= (unsigned long *) 0;
1787 static size_t bb_stacksize
= 0;
1789 static int reported
= 0;
1792 Always : Print execution frequencies of basic blocks
1794 bb_mode & 1 != 0 : Dump trace of basic blocks to file bbtrace[.gz]
1795 bb_mode & 2 != 0 : Print jump frequencies to file bb.out.
1796 bb_mode & 4 != 0 : Cut call instructions from basic block flow.
1797 bb_mode & 8 != 0 : Insert return instructions in basic block flow.
1802 /*#include <sys/types.h>*/
1803 #include <sys/stat.h>
1804 /*#include <malloc.h>*/
1806 /* Commands executed by gopen. */
1808 #define GOPENDECOMPRESS "gzip -cd "
1809 #define GOPENCOMPRESS "gzip -c >"
1811 /* Like fopen but pipes through gzip. mode may only be "r" or "w".
1812 If it does not compile, simply replace gopen by fopen and delete
1813 '.gz' from any first parameter to gopen. */
1816 gopen (char *fn
, char *mode
)
1824 if (mode
[0] != 'r' && mode
[0] != 'w')
1827 p
= fn
+ strlen (fn
)-1;
1828 use_gzip
= ((p
[-1] == '.' && (p
[0] == 'Z' || p
[0] == 'z'))
1829 || (p
[-2] == '.' && p
[-1] == 'g' && p
[0] == 'z'));
1836 char *s
= (char *) malloc (sizeof (char) * strlen (fn
)
1837 + sizeof (GOPENDECOMPRESS
));
1838 strcpy (s
, GOPENDECOMPRESS
);
1839 strcpy (s
+ (sizeof (GOPENDECOMPRESS
)-1), fn
);
1840 f
= popen (s
, mode
);
1848 char *s
= (char *) malloc (sizeof (char) * strlen (fn
)
1849 + sizeof (GOPENCOMPRESS
));
1850 strcpy (s
, GOPENCOMPRESS
);
1851 strcpy (s
+ (sizeof (GOPENCOMPRESS
)-1), fn
);
1852 if (!(f
= popen (s
, mode
)))
1853 f
= fopen (s
, mode
);
1860 return fopen (fn
, mode
);
1870 if (!fstat (fileno (f
), &buf
) && S_ISFIFO (buf
.st_mode
))
1878 #endif /* HAVE_POPEN */
1880 /* Called once per program. */
1883 __bb_exit_trace_func ()
1885 FILE *file
= fopen ("bb.out", "a");
1898 gclose (bb_tracefile
);
1900 fclose (bb_tracefile
);
1901 #endif /* HAVE_POPEN */
1904 /* Check functions in `bb.in'. */
1909 const struct bb_func
*p
;
1910 int printed_something
= 0;
1914 /* This is somewhat type incorrect. */
1915 time ((void *) &time_value
);
1917 for (p
= bb_func_head
; p
!= (struct bb_func
*) 0; p
= p
->next
)
1919 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1921 if (!ptr
->filename
|| (p
->filename
!= (char *) 0 && strcmp (p
->filename
, ptr
->filename
)))
1923 for (blk
= 0; blk
< ptr
->ncounts
; blk
++)
1925 if (!strcmp (p
->funcname
, ptr
->functions
[blk
]))
1930 if (!printed_something
)
1932 fprintf (file
, "Functions in `bb.in' not executed during basic block profiling on %s\n", ctime ((void *) &time_value
));
1933 printed_something
= 1;
1936 fprintf (file
, "\tFunction %s", p
->funcname
);
1938 fprintf (file
, " of file %s", p
->filename
);
1939 fprintf (file
, "\n" );
1944 if (printed_something
)
1945 fprintf (file
, "\n");
1951 if (!bb_hashbuckets
)
1955 fprintf (stderr
, "Profiler: out of memory\n");
1965 unsigned long addr_max
= 0;
1966 unsigned long cnt_max
= 0;
1970 /* This is somewhat type incorrect, but it avoids worrying about
1971 exactly where time.h is included from. It should be ok unless
1972 a void * differs from other pointer formats, or if sizeof (long)
1973 is < sizeof (time_t). It would be nice if we could assume the
1974 use of rationale standards here. */
1976 time ((void *) &time_value
);
1977 fprintf (file
, "Basic block jump tracing");
1979 switch (bb_mode
& 12)
1982 fprintf (file
, " (with call)");
1986 /* Print nothing. */
1990 fprintf (file
, " (with call & ret)");
1994 fprintf (file
, " (with ret)");
1998 fprintf (file
, " finished on %s\n", ctime ((void *) &time_value
));
2000 for (i
= 0; i
< BB_BUCKETS
; i
++)
2002 struct bb_edge
*bucket
= bb_hashbuckets
[i
];
2003 for ( ; bucket
; bucket
= bucket
->next
)
2005 if (addr_max
< bucket
->src_addr
)
2006 addr_max
= bucket
->src_addr
;
2007 if (addr_max
< bucket
->dst_addr
)
2008 addr_max
= bucket
->dst_addr
;
2009 if (cnt_max
< bucket
->count
)
2010 cnt_max
= bucket
->count
;
2013 addr_len
= num_digits (addr_max
, 16);
2014 cnt_len
= num_digits (cnt_max
, 10);
2016 for ( i
= 0; i
< BB_BUCKETS
; i
++)
2018 struct bb_edge
*bucket
= bb_hashbuckets
[i
];
2019 for ( ; bucket
; bucket
= bucket
->next
)
2021 fprintf (file
, "Jump from block 0x%.*lx to "
2022 "block 0x%.*lx executed %*lu time(s)\n",
2023 addr_len
, bucket
->src_addr
,
2024 addr_len
, bucket
->dst_addr
,
2025 cnt_len
, bucket
->count
);
2029 fprintf (file
, "\n");
2037 /* Free allocated memory. */
2042 struct bb_func
*old
= f
;
2045 if (old
->funcname
) free (old
->funcname
);
2046 if (old
->filename
) free (old
->filename
);
2057 for (i
= 0; i
< BB_BUCKETS
; i
++)
2059 struct bb_edge
*old
, *bucket
= bb_hashbuckets
[i
];
2064 bucket
= bucket
->next
;
2068 free (bb_hashbuckets
);
2071 for (b
= bb_head
; b
; b
= b
->next
)
2072 if (b
->flags
) free (b
->flags
);
2075 /* Called once per program. */
2081 char buf
[BBINBUFSIZE
];
2084 enum bb_func_mode m
;
2088 /* Initialize destructor. */
2089 ON_EXIT (__bb_exit_func
, 0);
2092 if (!(file
= fopen ("bb.in", "r")))
2095 while(fscanf (file
, " %" BBINBUFSIZESTR
"s ", buf
) != EOF
)
2107 if (!strcmp (p
, "__bb_trace__"))
2109 else if (!strcmp (p
, "__bb_jumps__"))
2111 else if (!strcmp (p
, "__bb_hidecall__"))
2113 else if (!strcmp (p
, "__bb_showret__"))
2117 struct bb_func
*f
= (struct bb_func
*) malloc (sizeof (struct bb_func
));
2121 f
->next
= bb_func_head
;
2122 if ((pos
= strchr (p
, ':')))
2124 if (!(f
->funcname
= (char *) malloc (strlen (pos
+1)+1)))
2126 strcpy (f
->funcname
, pos
+1);
2128 if ((f
->filename
= (char *) malloc (l
+1)))
2130 strncpy (f
->filename
, p
, l
);
2131 f
->filename
[l
] = '\0';
2134 f
->filename
= (char *) 0;
2138 if (!(f
->funcname
= (char *) malloc (strlen (p
)+1)))
2140 strcpy (f
->funcname
, p
);
2141 f
->filename
= (char *) 0;
2153 bb_tracefile
= gopen ("bbtrace.gz", "w");
2158 bb_tracefile
= fopen ("bbtrace", "w");
2160 #endif /* HAVE_POPEN */
2164 bb_hashbuckets
= (struct bb_edge
**)
2165 malloc (BB_BUCKETS
* sizeof (struct bb_edge
*));
2167 /* Use a loop here rather than calling bzero to avoid having to
2168 conditionalize its existance. */
2169 for (i
= 0; i
< BB_BUCKETS
; i
++)
2170 bb_hashbuckets
[i
] = 0;
2176 bb_stack
= (unsigned long *) malloc (bb_stacksize
* sizeof (*bb_stack
));
2180 /* Initialize destructor. */
2181 ON_EXIT (__bb_exit_trace_func
, 0);
2186 /* Called upon entering a basic block. */
2191 struct bb_edge
*bucket
;
2193 MACHINE_STATE_SAVE("1")
2195 if (!bb_callcount
|| (__bb
.blocks
->flags
&& (__bb
.blocks
->flags
[__bb
.blockno
] & TRACE_OFF
)))
2198 bb_dst
= __bb
.blocks
->addresses
[__bb
.blockno
];
2199 __bb
.blocks
->counts
[__bb
.blockno
]++;
2203 fwrite (&bb_dst
, sizeof (unsigned long), 1, bb_tracefile
);
2208 struct bb_edge
**startbucket
, **oldnext
;
2210 oldnext
= startbucket
2211 = & bb_hashbuckets
[ (((int) bb_src
*8) ^ (int) bb_dst
) % BB_BUCKETS
];
2212 bucket
= *startbucket
;
2214 for (bucket
= *startbucket
; bucket
;
2215 oldnext
= &(bucket
->next
), bucket
= *oldnext
)
2217 if (bucket
->src_addr
== bb_src
2218 && bucket
->dst_addr
== bb_dst
)
2221 *oldnext
= bucket
->next
;
2222 bucket
->next
= *startbucket
;
2223 *startbucket
= bucket
;
2228 bucket
= (struct bb_edge
*) malloc (sizeof (struct bb_edge
));
2234 fprintf (stderr
, "Profiler: out of memory\n");
2241 bucket
->src_addr
= bb_src
;
2242 bucket
->dst_addr
= bb_dst
;
2243 bucket
->next
= *startbucket
;
2244 *startbucket
= bucket
;
2255 MACHINE_STATE_RESTORE("1")
2259 /* Called when returning from a function and `__bb_showret__' is set. */
2262 __bb_trace_func_ret ()
2264 struct bb_edge
*bucket
;
2266 if (!bb_callcount
|| (__bb
.blocks
->flags
&& (__bb
.blocks
->flags
[__bb
.blockno
] & TRACE_OFF
)))
2271 struct bb_edge
**startbucket
, **oldnext
;
2273 oldnext
= startbucket
2274 = & bb_hashbuckets
[ (((int) bb_dst
* 8) ^ (int) bb_src
) % BB_BUCKETS
];
2275 bucket
= *startbucket
;
2277 for (bucket
= *startbucket
; bucket
;
2278 oldnext
= &(bucket
->next
), bucket
= *oldnext
)
2280 if (bucket
->src_addr
== bb_dst
2281 && bucket
->dst_addr
== bb_src
)
2284 *oldnext
= bucket
->next
;
2285 bucket
->next
= *startbucket
;
2286 *startbucket
= bucket
;
2291 bucket
= (struct bb_edge
*) malloc (sizeof (struct bb_edge
));
2297 fprintf (stderr
, "Profiler: out of memory\n");
2304 bucket
->src_addr
= bb_dst
;
2305 bucket
->dst_addr
= bb_src
;
2306 bucket
->next
= *startbucket
;
2307 *startbucket
= bucket
;
2320 /* Called upon entering the first function of a file. */
2323 __bb_init_file (struct bb
*blocks
)
2326 const struct bb_func
*p
;
2327 long blk
, ncounts
= blocks
->ncounts
;
2328 const char **functions
= blocks
->functions
;
2330 /* Set up linked list. */
2331 blocks
->zero_word
= 1;
2332 blocks
->next
= bb_head
;
2337 || !(blocks
->flags
= (char *) malloc (sizeof (char) * blocks
->ncounts
)))
2340 for (blk
= 0; blk
< ncounts
; blk
++)
2341 blocks
->flags
[blk
] = 0;
2343 for (blk
= 0; blk
< ncounts
; blk
++)
2345 for (p
= bb_func_head
; p
; p
= p
->next
)
2347 if (!strcmp (p
->funcname
, functions
[blk
])
2348 && (!p
->filename
|| !strcmp (p
->filename
, blocks
->filename
)))
2350 blocks
->flags
[blk
] |= p
->mode
;
2357 /* Called when exiting from a function. */
2363 MACHINE_STATE_SAVE("2")
2367 if ((bb_mode
& 12) && bb_stacksize
> bb_callcount
)
2369 bb_src
= bb_stack
[bb_callcount
];
2371 __bb_trace_func_ret ();
2377 MACHINE_STATE_RESTORE("2")
2381 /* Called when entering a function. */
2384 __bb_init_trace_func (struct bb
*blocks
, unsigned long blockno
)
2386 static int trace_init
= 0;
2388 MACHINE_STATE_SAVE("3")
2390 if (!blocks
->zero_word
)
2397 __bb_init_file (blocks
);
2407 if (bb_callcount
>= bb_stacksize
)
2409 size_t newsize
= bb_callcount
+ 100;
2411 bb_stack
= (unsigned long *) realloc (bb_stack
, newsize
);
2416 fprintf (stderr
, "Profiler: out of memory\n");
2420 goto stack_overflow
;
2422 bb_stacksize
= newsize
;
2424 bb_stack
[bb_callcount
] = bb_src
;
2435 else if (blocks
->flags
&& (blocks
->flags
[blockno
] & TRACE_ON
))
2441 bb_stack
[bb_callcount
] = bb_src
;
2444 MACHINE_STATE_RESTORE("3")
2447 #endif /* not inhibit_libc */
2448 #endif /* not BLOCK_PROFILER_CODE */
2452 unsigned int __shtab
[] = {
2453 0x00000001, 0x00000002, 0x00000004, 0x00000008,
2454 0x00000010, 0x00000020, 0x00000040, 0x00000080,
2455 0x00000100, 0x00000200, 0x00000400, 0x00000800,
2456 0x00001000, 0x00002000, 0x00004000, 0x00008000,
2457 0x00010000, 0x00020000, 0x00040000, 0x00080000,
2458 0x00100000, 0x00200000, 0x00400000, 0x00800000,
2459 0x01000000, 0x02000000, 0x04000000, 0x08000000,
2460 0x10000000, 0x20000000, 0x40000000, 0x80000000
2464 #ifdef L_clear_cache
2465 /* Clear part of an instruction cache. */
2467 #define INSN_CACHE_PLANE_SIZE (INSN_CACHE_SIZE / INSN_CACHE_DEPTH)
2470 __clear_cache (char *beg
, char *end
)
2472 #ifdef CLEAR_INSN_CACHE
2473 CLEAR_INSN_CACHE (beg
, end
);
2475 #ifdef INSN_CACHE_SIZE
2476 static char array
[INSN_CACHE_SIZE
+ INSN_CACHE_PLANE_SIZE
+ INSN_CACHE_LINE_WIDTH
];
2477 static int initialized
;
2481 typedef (*function_ptr
) ();
2483 #if (INSN_CACHE_SIZE / INSN_CACHE_LINE_WIDTH) < 16
2484 /* It's cheaper to clear the whole cache.
2485 Put in a series of jump instructions so that calling the beginning
2486 of the cache will clear the whole thing. */
2490 int ptr
= (((int) array
+ INSN_CACHE_LINE_WIDTH
- 1)
2491 & -INSN_CACHE_LINE_WIDTH
);
2492 int end_ptr
= ptr
+ INSN_CACHE_SIZE
;
2494 while (ptr
< end_ptr
)
2496 *(INSTRUCTION_TYPE
*)ptr
2497 = JUMP_AHEAD_INSTRUCTION
+ INSN_CACHE_LINE_WIDTH
;
2498 ptr
+= INSN_CACHE_LINE_WIDTH
;
2500 *(INSTRUCTION_TYPE
*) (ptr
- INSN_CACHE_LINE_WIDTH
) = RETURN_INSTRUCTION
;
2505 /* Call the beginning of the sequence. */
2506 (((function_ptr
) (((int) array
+ INSN_CACHE_LINE_WIDTH
- 1)
2507 & -INSN_CACHE_LINE_WIDTH
))
2510 #else /* Cache is large. */
2514 int ptr
= (((int) array
+ INSN_CACHE_LINE_WIDTH
- 1)
2515 & -INSN_CACHE_LINE_WIDTH
);
2517 while (ptr
< (int) array
+ sizeof array
)
2519 *(INSTRUCTION_TYPE
*)ptr
= RETURN_INSTRUCTION
;
2520 ptr
+= INSN_CACHE_LINE_WIDTH
;
2526 /* Find the location in array that occupies the same cache line as BEG. */
2528 offset
= ((int) beg
& -INSN_CACHE_LINE_WIDTH
) & (INSN_CACHE_PLANE_SIZE
- 1);
2529 start_addr
= (((int) (array
+ INSN_CACHE_PLANE_SIZE
- 1)
2530 & -INSN_CACHE_PLANE_SIZE
)
2533 /* Compute the cache alignment of the place to stop clearing. */
2534 #if 0 /* This is not needed for gcc's purposes. */
2535 /* If the block to clear is bigger than a cache plane,
2536 we clear the entire cache, and OFFSET is already correct. */
2537 if (end
< beg
+ INSN_CACHE_PLANE_SIZE
)
2539 offset
= (((int) (end
+ INSN_CACHE_LINE_WIDTH
- 1)
2540 & -INSN_CACHE_LINE_WIDTH
)
2541 & (INSN_CACHE_PLANE_SIZE
- 1));
2543 #if INSN_CACHE_DEPTH > 1
2544 end_addr
= (start_addr
& -INSN_CACHE_PLANE_SIZE
) + offset
;
2545 if (end_addr
<= start_addr
)
2546 end_addr
+= INSN_CACHE_PLANE_SIZE
;
2548 for (plane
= 0; plane
< INSN_CACHE_DEPTH
; plane
++)
2550 int addr
= start_addr
+ plane
* INSN_CACHE_PLANE_SIZE
;
2551 int stop
= end_addr
+ plane
* INSN_CACHE_PLANE_SIZE
;
2553 while (addr
!= stop
)
2555 /* Call the return instruction at ADDR. */
2556 ((function_ptr
) addr
) ();
2558 addr
+= INSN_CACHE_LINE_WIDTH
;
2561 #else /* just one plane */
2564 /* Call the return instruction at START_ADDR. */
2565 ((function_ptr
) start_addr
) ();
2567 start_addr
+= INSN_CACHE_LINE_WIDTH
;
2569 while ((start_addr
% INSN_CACHE_SIZE
) != offset
);
2570 #endif /* just one plane */
2571 #endif /* Cache is large */
2572 #endif /* Cache exists */
2573 #endif /* CLEAR_INSN_CACHE */
2576 #endif /* L_clear_cache */
2580 /* Jump to a trampoline, loading the static chain address. */
2582 #if defined(WINNT) && ! defined(__CYGWIN__) && ! defined (_UWIN)
2594 extern int VirtualProtect (char *, int, int, int *) __attribute__((stdcall));
2598 mprotect (char *addr
, int len
, int prot
)
2615 if (VirtualProtect (addr
, len
, np
, &op
))
2621 #endif /* WINNT && ! __CYGWIN__ && ! _UWIN */
2623 #ifdef TRANSFER_FROM_TRAMPOLINE
2624 TRANSFER_FROM_TRAMPOLINE
2627 #if defined (NeXT) && defined (__MACH__)
2629 /* Make stack executable so we can call trampolines on stack.
2630 This is called from INITIALIZE_TRAMPOLINE in next.h. */
2634 #include <mach/mach.h>
2638 __enable_execute_stack (char *addr
)
2641 char *eaddr
= addr
+ TRAMPOLINE_SIZE
;
2642 vm_address_t a
= (vm_address_t
) addr
;
2644 /* turn on execute access on stack */
2645 r
= vm_protect (task_self (), a
, TRAMPOLINE_SIZE
, FALSE
, VM_PROT_ALL
);
2646 if (r
!= KERN_SUCCESS
)
2648 mach_error("vm_protect VM_PROT_ALL", r
);
2652 /* We inline the i-cache invalidation for speed */
2654 #ifdef CLEAR_INSN_CACHE
2655 CLEAR_INSN_CACHE (addr
, eaddr
);
2657 __clear_cache ((int) addr
, (int) eaddr
);
2661 #endif /* defined (NeXT) && defined (__MACH__) */
2665 /* Make stack executable so we can call trampolines on stack.
2666 This is called from INITIALIZE_TRAMPOLINE in convex.h. */
2668 #include <sys/mman.h>
2669 #include <sys/vmparam.h>
2670 #include <machine/machparam.h>
2673 __enable_execute_stack ()
2676 static unsigned lowest
= USRSTACK
;
2677 unsigned current
= (unsigned) &fp
& -NBPG
;
2679 if (lowest
> current
)
2681 unsigned len
= lowest
- current
;
2682 mremap (current
, &len
, PROT_READ
| PROT_WRITE
| PROT_EXEC
, MAP_PRIVATE
);
2686 /* Clear instruction cache in case an old trampoline is in it. */
2689 #endif /* __convex__ */
2693 /* Modified from the convex -code above. */
2695 #include <sys/param.h>
2697 #include <sys/m88kbcs.h>
2700 __enable_execute_stack ()
2703 static unsigned long lowest
= USRSTACK
;
2704 unsigned long current
= (unsigned long) &save_errno
& -NBPC
;
2706 /* Ignore errno being set. memctl sets errno to EINVAL whenever the
2707 address is seen as 'negative'. That is the case with the stack. */
2710 if (lowest
> current
)
2712 unsigned len
=lowest
-current
;
2713 memctl(current
,len
,MCT_TEXT
);
2717 memctl(current
,NBPC
,MCT_TEXT
);
2721 #endif /* __sysV88__ */
2725 #include <sys/signal.h>
2728 /* Motorola forgot to put memctl.o in the libp version of libc881.a,
2729 so define it here, because we need it in __clear_insn_cache below */
2730 /* On older versions of this OS, no memctl or MCT_TEXT are defined;
2731 hence we enable this stuff only if MCT_TEXT is #define'd. */
2746 /* Clear instruction cache so we can call trampolines on stack.
2747 This is called from FINALIZE_TRAMPOLINE in mot3300.h. */
2750 __clear_insn_cache ()
2755 /* Preserve errno, because users would be surprised to have
2756 errno changing without explicitly calling any system-call. */
2759 /* Keep it simple : memctl (MCT_TEXT) always fully clears the insn cache.
2760 No need to use an address derived from _start or %sp, as 0 works also. */
2761 memctl(0, 4096, MCT_TEXT
);
2766 #endif /* __sysV68__ */
2770 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
2772 #include <sys/mman.h>
2773 #include <sys/types.h>
2774 #include <sys/param.h>
2775 #include <sys/vmmac.h>
2777 /* Modified from the convex -code above.
2778 mremap promises to clear the i-cache. */
2781 __enable_execute_stack ()
2784 if (mprotect (((unsigned int)&fp
/PAGSIZ
)*PAGSIZ
, PAGSIZ
,
2785 PROT_READ
|PROT_WRITE
|PROT_EXEC
))
2787 perror ("mprotect in __enable_execute_stack");
2792 #endif /* __pyr__ */
2794 #if defined (sony_news) && defined (SYSTYPE_BSD)
2797 #include <sys/types.h>
2798 #include <sys/param.h>
2799 #include <syscall.h>
2800 #include <machine/sysnews.h>
2802 /* cacheflush function for NEWS-OS 4.2.
2803 This function is called from trampoline-initialize code
2804 defined in config/mips/mips.h. */
2807 cacheflush (char *beg
, int size
, int flag
)
2809 if (syscall (SYS_sysnews
, NEWS_CACHEFLUSH
, beg
, size
, FLUSH_BCACHE
))
2811 perror ("cache_flush");
2817 #endif /* sony_news */
2818 #endif /* L_trampoline */
2823 #include "gbl-ctors.h"
2824 /* Some systems use __main in a way incompatible with its use in gcc, in these
2825 cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
2826 give the same symbol without quotes for an alternative entry point. You
2827 must define both, or neither. */
2829 #define NAME__MAIN "__main"
2830 #define SYMBOL__MAIN __main
2833 #ifdef INIT_SECTION_ASM_OP
2834 #undef HAS_INIT_SECTION
2835 #define HAS_INIT_SECTION
2838 #if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
2839 /* Run all the global destructors on exit from the program. */
2842 __do_global_dtors ()
2844 #ifdef DO_GLOBAL_DTORS_BODY
2845 DO_GLOBAL_DTORS_BODY
;
2847 static func_ptr
*p
= __DTOR_LIST__
+ 1;
2857 #ifndef HAS_INIT_SECTION
2858 /* Run all the global constructors on entry to the program. */
2861 #define ON_EXIT(a, b)
2863 /* Make sure the exit routine is pulled in to define the globals as
2864 bss symbols, just in case the linker does not automatically pull
2865 bss definitions from the library. */
2867 extern int _exit_dummy_decl
;
2868 int *_exit_dummy_ref
= &_exit_dummy_decl
;
2869 #endif /* ON_EXIT */
2872 __do_global_ctors ()
2874 DO_GLOBAL_CTORS_BODY
;
2875 ON_EXIT (__do_global_dtors
, 0);
2877 #endif /* no HAS_INIT_SECTION */
2879 #if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
2880 /* Subroutine called automatically by `main'.
2881 Compiling a global function named `main'
2882 produces an automatic call to this function at the beginning.
2884 For many systems, this routine calls __do_global_ctors.
2885 For systems which support a .init section we use the .init section
2886 to run __do_global_ctors, so we need not do anything here. */
2891 /* Support recursive calls to `main': run initializers just once. */
2892 static int initialized
;
2896 __do_global_ctors ();
2899 #endif /* no HAS_INIT_SECTION or INVOKE__main */
2901 #endif /* L__main */
2902 #endif /* __CYGWIN__ */
2906 #include "gbl-ctors.h"
2908 /* Provide default definitions for the lists of constructors and
2909 destructors, so that we don't get linker errors. These symbols are
2910 intentionally bss symbols, so that gld and/or collect will provide
2911 the right values. */
2913 /* We declare the lists here with two elements each,
2914 so that they are valid empty lists if no other definition is loaded.
2916 If we are using the old "set" extensions to have the gnu linker
2917 collect ctors and dtors, then we __CTOR_LIST__ and __DTOR_LIST__
2918 must be in the bss/common section.
2920 Long term no port should use those extensions. But many still do. */
2921 #if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
2922 #if defined (ASM_OUTPUT_CONSTRUCTOR) || defined (USE_COLLECT2)
2923 func_ptr __CTOR_LIST__
[2] = {0, 0};
2924 func_ptr __DTOR_LIST__
[2] = {0, 0};
2926 func_ptr __CTOR_LIST__
[2];
2927 func_ptr __DTOR_LIST__
[2];
2929 #endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
2930 #endif /* L_ctors */
2934 #include "gbl-ctors.h"
2940 int _exit_dummy_decl
= 0; /* prevent compiler & linker warnings */
2948 static func_ptr
*atexit_chain
= 0;
2949 static long atexit_chain_length
= 0;
2950 static volatile long last_atexit_chain_slot
= -1;
2952 int atexit (func_ptr func
)
2954 if (++last_atexit_chain_slot
== atexit_chain_length
)
2956 atexit_chain_length
+= 32;
2958 atexit_chain
= (func_ptr
*) realloc (atexit_chain
, atexit_chain_length
2959 * sizeof (func_ptr
));
2961 atexit_chain
= (func_ptr
*) malloc (atexit_chain_length
2962 * sizeof (func_ptr
));
2965 atexit_chain_length
= 0;
2966 last_atexit_chain_slot
= -1;
2971 atexit_chain
[last_atexit_chain_slot
] = func
;
2974 #endif /* NEED_ATEXIT */
2976 /* If we have no known way of registering our own __do_global_dtors
2977 routine so that it will be invoked at program exit time, then we
2978 have to define our own exit routine which will get this to happen. */
2980 extern void __do_global_dtors ();
2981 extern void __bb_exit_func ();
2982 extern void _cleanup ();
2983 extern void _exit () __attribute__ ((noreturn
));
2988 #if !defined (INIT_SECTION_ASM_OP) || !defined (OBJECT_FORMAT_ELF)
2992 for ( ; last_atexit_chain_slot
-- >= 0; )
2994 (*atexit_chain
[last_atexit_chain_slot
+ 1]) ();
2995 atexit_chain
[last_atexit_chain_slot
+ 1] = 0;
2997 free (atexit_chain
);
3000 #else /* No NEED_ATEXIT */
3001 __do_global_dtors ();
3002 #endif /* No NEED_ATEXIT */
3003 #endif /* !defined (INIT_SECTION_ASM_OP) || !defined (OBJECT_FORMAT_ELF) */
3004 /* In gbl-ctors.h, ON_EXIT is defined if HAVE_ATEXIT is defined. In
3005 __bb_init_func and _bb_init_prg, __bb_exit_func is registered with
3006 ON_EXIT if ON_EXIT is defined. Thus we must not call __bb_exit_func here
3007 if HAVE_ATEXIT is defined. */
3009 #ifndef inhibit_libc
3012 #endif /* !HAVE_ATEXIT */
3021 #else /* ON_EXIT defined */
3022 int _exit_dummy_decl
= 0; /* prevent compiler & linker warnings */
3024 # ifndef HAVE_ATEXIT
3025 /* Provide a fake for atexit() using ON_EXIT. */
3026 int atexit (func_ptr func
)
3028 return ON_EXIT (func
, NULL
);
3030 # endif /* HAVE_ATEXIT */
3031 #endif /* ON_EXIT defined */
3039 /* Shared exception handling support routines. */
3041 extern void __default_terminate (void) __attribute__ ((__noreturn__
));
3044 __default_terminate ()
3049 void (*__terminate_func
)() = __default_terminate
;
3054 (*__terminate_func
)();
3058 __throw_type_match (void *catch_type
, void *throw_type
, void *obj
)
3061 printf ("__throw_type_match (): catch_type = %s, throw_type = %s\n",
3062 catch_type
, throw_type
);
3064 if (strcmp ((const char *)catch_type
, (const char *)throw_type
) == 0)
3075 /* Include definitions of EH context and table layout */
3077 #include "eh-common.h"
3078 #ifndef inhibit_libc
3082 /* Allocate and return a new EH context structure. */
3084 extern void __throw ();
3089 struct eh_full_context
{
3090 struct eh_context c
;
3092 } *ehfc
= (struct eh_full_context
*) malloc (sizeof *ehfc
);
3097 memset (ehfc
, 0, sizeof *ehfc
);
3099 ehfc
->c
.dynamic_handler_chain
= (void **) ehfc
->top_elt
;
3101 /* This should optimize out entirely. This should always be true,
3102 but just in case it ever isn't, don't allow bogus code to be
3105 if ((void*)(&ehfc
->c
) != (void*)ehfc
)
3112 static __gthread_key_t eh_context_key
;
3114 /* Destructor for struct eh_context. */
3116 eh_context_free (void *ptr
)
3118 __gthread_key_dtor (eh_context_key
, ptr
);
3124 /* Pointer to function to return EH context. */
3126 static struct eh_context
*eh_context_initialize ();
3127 static struct eh_context
*eh_context_static ();
3129 static struct eh_context
*eh_context_specific ();
3132 static struct eh_context
*(*get_eh_context
) () = &eh_context_initialize
;
3134 /* Routine to get EH context.
3135 This one will simply call the function pointer. */
3140 return (void *) (*get_eh_context
) ();
3143 /* Get and set the language specific info pointer. */
3148 struct eh_context
*eh
= (*get_eh_context
) ();
3154 eh_threads_initialize ()
3156 /* Try to create the key. If it fails, revert to static method,
3157 otherwise start using thread specific EH contexts. */
3158 if (__gthread_key_create (&eh_context_key
, &eh_context_free
) == 0)
3159 get_eh_context
= &eh_context_specific
;
3161 get_eh_context
= &eh_context_static
;
3163 #endif /* no __GTHREADS */
3165 /* Initialize EH context.
3166 This will be called only once, since we change GET_EH_CONTEXT
3167 pointer to another routine. */
3169 static struct eh_context
*
3170 eh_context_initialize ()
3174 static __gthread_once_t once
= __GTHREAD_ONCE_INIT
;
3175 /* Make sure that get_eh_context does not point to us anymore.
3176 Some systems have dummy thread routines in their libc that
3177 return a success (Solaris 2.6 for example). */
3178 if (__gthread_once (&once
, eh_threads_initialize
) != 0
3179 || get_eh_context
== &eh_context_initialize
)
3181 /* Use static version of EH context. */
3182 get_eh_context
= &eh_context_static
;
3185 #else /* no __GTHREADS */
3187 /* Use static version of EH context. */
3188 get_eh_context
= &eh_context_static
;
3190 #endif /* no __GTHREADS */
3192 return (*get_eh_context
) ();
3195 /* Return a static EH context. */
3197 static struct eh_context
*
3198 eh_context_static ()
3200 static struct eh_context eh
;
3201 static int initialized
;
3202 static void *top_elt
[2];
3207 memset (&eh
, 0, sizeof eh
);
3208 eh
.dynamic_handler_chain
= top_elt
;
3214 /* Return a thread specific EH context. */
3216 static struct eh_context
*
3217 eh_context_specific ()
3219 struct eh_context
*eh
;
3220 eh
= (struct eh_context
*) __gthread_getspecific (eh_context_key
);
3223 eh
= new_eh_context ();
3224 if (__gthread_setspecific (eh_context_key
, (void *) eh
) != 0)
3232 /* Support routines for setjmp/longjmp exception handling. */
3234 /* Calls to __sjthrow are generated by the compiler when an exception
3235 is raised when using the setjmp/longjmp exception handling codegen
3238 #ifdef DONT_USE_BUILTIN_SETJMP
3239 extern void longjmp (void *, int);
3242 /* Routine to get the head of the current thread's dynamic handler chain
3243 use for exception handling. */
3246 __get_dynamic_handler_chain ()
3248 struct eh_context
*eh
= (*get_eh_context
) ();
3249 return &eh
->dynamic_handler_chain
;
3252 /* This is used to throw an exception when the setjmp/longjmp codegen
3253 method is used for exception handling.
3255 We call __terminate if there are no handlers left. Otherwise we run the
3256 cleanup actions off the dynamic cleanup stack, and pop the top of the
3257 dynamic handler chain, and use longjmp to transfer back to the associated
3260 extern void __sjthrow (void) __attribute__ ((__noreturn__
));
3265 struct eh_context
*eh
= (*get_eh_context
) ();
3266 void ***dhc
= &eh
->dynamic_handler_chain
;
3268 void (*func
)(void *, int);
3272 /* The cleanup chain is one word into the buffer. Get the cleanup
3274 cleanup
= (void***)&(*dhc
)[1];
3276 /* If there are any cleanups in the chain, run them now. */
3280 void **buf
= (void**)store
;
3285 #ifdef DONT_USE_BUILTIN_SETJMP
3286 if (! setjmp (&buf
[2]))
3288 if (! __builtin_setjmp (&buf
[2]))
3294 func
= (void(*)(void*, int))cleanup
[0][1];
3295 arg
= (void*)cleanup
[0][2];
3297 /* Update this before running the cleanup. */
3298 cleanup
[0] = (void **)cleanup
[0][0];
3311 /* We must call terminate if we try and rethrow an exception, when
3312 there is no exception currently active and when there are no
3314 if (! eh
->info
|| (*dhc
)[0] == 0)
3317 /* Find the jmpbuf associated with the top element of the dynamic
3318 handler chain. The jumpbuf starts two words into the buffer. */
3319 jmpbuf
= &(*dhc
)[2];
3321 /* Then we pop the top element off the dynamic handler chain. */
3322 *dhc
= (void**)(*dhc
)[0];
3324 /* And then we jump to the handler. */
3326 #ifdef DONT_USE_BUILTIN_SETJMP
3327 longjmp (jmpbuf
, 1);
3329 __builtin_longjmp (jmpbuf
, 1);
3333 /* Run cleanups on the dynamic cleanup stack for the current dynamic
3334 handler, then pop the handler off the dynamic handler stack, and
3335 then throw. This is used to skip the first handler, and transfer
3336 control to the next handler in the dynamic handler stack. */
3338 extern void __sjpopnthrow (void) __attribute__ ((__noreturn__
));
3343 struct eh_context
*eh
= (*get_eh_context
) ();
3344 void ***dhc
= &eh
->dynamic_handler_chain
;
3345 void (*func
)(void *, int);
3349 /* The cleanup chain is one word into the buffer. Get the cleanup
3351 cleanup
= (void***)&(*dhc
)[1];
3353 /* If there are any cleanups in the chain, run them now. */
3357 void **buf
= (void**)store
;
3362 #ifdef DONT_USE_BUILTIN_SETJMP
3363 if (! setjmp (&buf
[2]))
3365 if (! __builtin_setjmp (&buf
[2]))
3371 func
= (void(*)(void*, int))cleanup
[0][1];
3372 arg
= (void*)cleanup
[0][2];
3374 /* Update this before running the cleanup. */
3375 cleanup
[0] = (void **)cleanup
[0][0];
3388 /* Then we pop the top element off the dynamic handler chain. */
3389 *dhc
= (void**)(*dhc
)[0];
3394 /* Support code for all exception region-based exception handling. */
3397 __eh_rtime_match (void *rtime
)
3400 __eh_matcher matcher
;
3403 info
= *(__get_eh_info ());
3404 matcher
= ((__eh_info
*)info
)->match_function
;
3407 #ifndef inhibit_libc
3408 fprintf (stderr
, "Internal Compiler Bug: No runtime type matcher.");
3412 ret
= (*matcher
) (info
, rtime
, (void *)0);
3413 return (ret
!= NULL
);
3416 /* This value identifies the place from which an exception is being
3419 #ifdef EH_TABLE_LOOKUP
3425 #ifdef DWARF2_UNWIND_INFO
3428 /* Return the table version of an exception descriptor */
3431 __get_eh_table_version (exception_descriptor
*table
)
3433 return table
->lang
.version
;
3436 /* Return the originating table language of an exception descriptor */
3439 __get_eh_table_language (exception_descriptor
*table
)
3441 return table
->lang
.language
;
3444 /* This routine takes a PC and a pointer to the exception region TABLE for
3445 its translation unit, and returns the address of the exception handler
3446 associated with the closest exception table handler entry associated
3447 with that PC, or 0 if there are no table entries the PC fits in.
3449 In the advent of a tie, we have to give the last entry, as it represents
3453 old_find_exception_handler (void *pc
, old_exception_table
*table
)
3460 /* We can't do a binary search because the table isn't guaranteed
3461 to be sorted from function to function. */
3462 for (pos
= 0; table
[pos
].start_region
!= (void *) -1; ++pos
)
3464 if (table
[pos
].start_region
<= pc
&& table
[pos
].end_region
> pc
)
3466 /* This can apply. Make sure it is at least as small as
3467 the previous best. */
3468 if (best
== -1 || (table
[pos
].end_region
<= table
[best
].end_region
3469 && table
[pos
].start_region
>= table
[best
].start_region
))
3472 /* But it is sorted by starting PC within a function. */
3473 else if (best
>= 0 && table
[pos
].start_region
> pc
)
3477 return table
[best
].exception_handler
;
3483 /* find_exception_handler finds the correct handler, if there is one, to
3484 handle an exception.
3485 returns a pointer to the handler which controlled should be transferred
3486 to, or NULL if there is nothing left.
3488 PC - pc where the exception originates. If this is a rethrow,
3489 then this starts out as a pointer to the exception table
3490 entry we wish to rethrow out of.
3491 TABLE - exception table for the current module.
3492 EH_INFO - eh info pointer for this exception.
3493 RETHROW - 1 if this is a rethrow. (see incoming value of PC).
3494 CLEANUP - returned flag indicating whether this is a cleanup handler.
/* NOTE(review): heavily elided listing -- the return type, braces, the
   declaration of "pos", the "if (rethrow)" guard around the rethrow
   setup, the *cleanup flag assignments, the no-match-info (cleanup)
   branch, the use of the matcher result, and the final return are all
   in missing lines.  Comments annotate visible fragments only.  */
3497 find_exception_handler (void *pc
, exception_descriptor
*table
,
3498 __eh_info
*eh_info
, int rethrow
, int *cleanup
)
3501 void *retval
= NULL
;
3506 /* The new model assumed the table is sorted inner-most out so the
3507 first region we find which matches is the correct one */
/* "tab" points at the first entry of the descriptor's region table.  */
3509 exception_table
*tab
= &(table
->table
[0]);
3511 /* Subtract 1 from the PC to avoid hitting the next region */
/* Rethrow path (guard elided): the incoming PC is really a pointer to
   the table entry to rethrow out of; recover its index and synthesize
   a PC just inside that entry's end_region.  */
3514 /* pc is actually the region table entry to rethrow out of */
3515 pos
= ((exception_table
*) pc
) - tab
;
3516 pc
= ((exception_table
*) pc
)->end_region
- 1;
3518 /* The label is always on the LAST handler entry for a region,
3519 so we know the next entry is a different region, even if the
3520 addresses are the same. Make sure its not end of table tho. */
3521 if (tab
[pos
].start_region
!= (void *) -1)
3527 /* We can't do a binary search because the table is in inner-most
3528 to outermost address ranges within functions */
/* Scan to the (void *) -1 end-of-table sentinel.  */
3529 for ( ; tab
[pos
].start_region
!= (void *) -1; pos
++)
3531 if (tab
[pos
].start_region
<= pc
&& tab
[pos
].end_region
> pc
)
/* Entry with match_info: a typed handler -- consult the matcher.  */
3533 if (tab
[pos
].match_info
)
3535 __eh_matcher matcher
= eh_info
->match_function
;
3536 /* match info but no matcher is NOT a match */
3539 void *ret
= (*matcher
)((void *) eh_info
,
3540 tab
[pos
].match_info
, table
);
/* Presumably taken when the matcher said yes (guard elided).  */
3544 retval
= tab
[pos
].exception_handler
;
/* Entry without match_info: a cleanup handler (branch guards and the
   *cleanup flag update fall in elided lines).  */
3553 retval
= tab
[pos
].exception_handler
;
3560 #endif /* DWARF2_UNWIND_INFO */
3561 #endif /* EH_TABLE_LOOKUP */
3563 #ifdef DWARF2_UNWIND_INFO
3564 /* Support code for exception handling using static unwind information. */
3568 /* This type is used in get_reg and put_reg to deal with ABIs where a void*
3569 is smaller than a word, such as the Irix 6 n32 ABI. We cast twice to
3570 avoid a warning about casting between int and pointer of different
/* An integer type forced to pointer width via the GCC "mode" attribute;
   used as the intermediate type in the double casts below.  */
3573 typedef int ptr_type
__attribute__ ((mode (pointer
)));
3575 #ifdef INCOMING_REGNO
3576 /* Is the saved value for register REG in frame UDATA stored in a register
3577 window in the previous frame? */
3579 /* ??? The Sparc INCOMING_REGNO references TARGET_FLAT. This allows us
3580 to use the macro here. One wonders, though, that perhaps TARGET_FLAT
3581 compiled functions won't work with the frame-unwind stuff here.
3582 Perhaps the entireity of in_reg_window should be conditional on having
3583 seen a DW_CFA_GNU_window_save? */
/* Dummy definition so INCOMING_REGNO (which may test TARGET_FLAT via
   target_flags) expands cleanly here in libgcc.  */
3584 #define target_flags 0
/* NOTE(review): return type, braces, a trailing "return 0;" for the
   non-offset case, and the #else/#endif around the stack-direction
   returns are in elided listing lines.
   in_reg_window: nonzero when the saved copy of REG in frame UDATA
   actually lives in a register window belonging to the previous
   frame (SPARC-style targets with INCOMING_REGNO defined).  */
3587 in_reg_window (int reg
, frame_state
*udata
)
/* Saved in another register: it is in a window iff INCOMING_REGNO maps
   the register onto itself.  */
3589 if (udata
->saved
[reg
] == REG_SAVED_REG
)
3590 return INCOMING_REGNO (reg
) == reg
;
/* Anything other than REG_SAVED_OFFSET cannot be in a window
   (the early return for this test is elided).  */
3591 if (udata
->saved
[reg
] != REG_SAVED_OFFSET
)
/* Saved at an offset from the CFA: in a window iff the offset is on the
   caller's side, whose sign depends on stack growth direction.  */
3594 #ifdef STACK_GROWS_DOWNWARD
3595 return udata
->reg_or_offset
[reg
] > 0;
3597 return udata
->reg_or_offset
[reg
] < 0;
3601 static inline int in_reg_window (int reg
, frame_state
*udata
) { return 0; }
3602 #endif /* INCOMING_REGNO */
3604 /* Get the address of register REG as saved in UDATA, where SUB_UDATA is a
3605 frame called by UDATA or 0. */
/* NOTE(review): return type, braces, the register-window body (which
   presumably switches to SUB_UDATA -- confirm against full source), and
   the fallthrough abort for unhandled save kinds are in elided lines.  */
3608 get_reg_addr (unsigned reg
, frame_state
*udata
, frame_state
*sub_udata
)
/* Chase chains of "saved in another register" until we reach a slot
   that is not itself register-saved.  */
3610 while (udata
->saved
[reg
] == REG_SAVED_REG
)
3612 reg
= udata
->reg_or_offset
[reg
];
/* Register-window case: the value lives in the called frame's window
   (handling elided in this listing).  */
3613 if (in_reg_window (reg
, udata
))
/* Saved at a CFA-relative offset: compute the slot's address.  */
3619 if (udata
->saved
[reg
] == REG_SAVED_OFFSET
)
3620 return (word_type
*)(udata
->cfa
+ udata
->reg_or_offset
[reg
]);
3625 /* Get the value of register REG as saved in UDATA, where SUB_UDATA is a
3626 frame called by UDATA or 0. */
/* NOTE(review): only the braces appear elided here.  Loads the saved
   word and double-casts through ptr_type (pointer-width int) to avoid
   int/pointer size-mismatch warnings on ABIs like Irix 6 n32.  */
3628 static inline void *
3629 get_reg (unsigned reg
, frame_state
*udata
, frame_state
*sub_udata
)
3631 return (void *)(ptr_type
) *get_reg_addr (reg
, udata
, sub_udata
);
3634 /* Overwrite the saved value for register REG in frame UDATA with VAL. */
/* NOTE(review): return-type line and braces elided.  Mirror of get_reg:
   stores VAL into the save slot, double-casting through ptr_type.
   Passes NULL for sub_udata, i.e. no called-frame context is needed
   for a store.  */
3637 put_reg (unsigned reg
, void *val
, frame_state
*udata
)
3639 *get_reg_addr (reg
, udata
, NULL
) = (word_type
)(ptr_type
) val
;
3642 /* Copy the saved value for register REG from frame UDATA to frame
3643 TARGET_UDATA. Unlike the previous two functions, this can handle
3644 registers that are not one word large. */
/* NOTE(review): return-type line and braces elided.  Uses memcpy with
   the register's DWARF size so multi-word registers copy correctly,
   unlike the word-sized get_reg/put_reg pair.  */
3647 copy_reg (unsigned reg
, frame_state
*udata
, frame_state
*target_udata
)
/* Address of REG's save slot in the source frame ...  */
3649 word_type
*preg
= get_reg_addr (reg
, udata
, NULL
);
/* ... and in the destination frame.  */
3650 word_type
*ptreg
= get_reg_addr (reg
, target_udata
, NULL
);
3652 memcpy (ptreg
, preg
, __builtin_dwarf_reg_size (reg
));
3655 /* Retrieve the return address for frame UDATA. */
/* NOTE(review): braces elided.  Reads the saved register in the
   frame's retaddr_column and undoes any target-specific encoding via
   __builtin_extract_return_addr.  */
3657 static inline void *
3658 get_return_addr (frame_state
*udata
, frame_state
*sub_udata
)
3660 return __builtin_extract_return_addr
3661 (get_reg (udata
->retaddr_column
, udata
, sub_udata
));
3664 /* Overwrite the return address for frame UDATA with VAL. */
/* NOTE(review): return-type line and braces elided.  Inverse of
   get_return_addr: re-applies the target encoding with
   __builtin_frob_return_addr, then stores into retaddr_column.  */
3667 put_return_addr (void *val
, frame_state
*udata
)
3669 val
= __builtin_frob_return_addr (val
);
3670 put_reg (udata
->retaddr_column
, val
, udata
);
3673 /* Given the current frame UDATA and its return address PC, return the
3674 information about the calling frame in CALLER_UDATA. */
/* NOTE(review): the return type, braces, the failure check after
   __frame_state_for (presumably returning 0 when no FDE covers PC --
   confirm against full source), and the "else" before the SP-fallback
   assignment are in elided listing lines.  */
3677 next_stack_level (void *pc
, frame_state
*udata
, frame_state
*caller_udata
)
/* Look up the caller's unwind state from the DWARF2 frame tables.  */
3679 caller_udata
= __frame_state_for (pc
, caller_udata
);
3683 /* Now go back to our caller's stack frame. If our caller's CFA register
3684 was saved in our stack frame, restore it; otherwise, assume the CFA
3685 register is SP and restore it to our CFA value. */
3686 if (udata
->saved
[caller_udata
->cfa_reg
])
3687 caller_udata
->cfa
= get_reg (caller_udata
->cfa_reg
, udata
, 0);
/* (else-branch; "else" keyword elided) CFA register was not saved:
   take our own CFA as the base.  */
3689 caller_udata
->cfa
= udata
->cfa
;
/* Apply the caller frame's CFA offset to finish the computation.  */
3690 caller_udata
->cfa
+= caller_udata
->cfa_offset
;
3692 return caller_udata
;
3695 /* Hook to call before __terminate if only cleanup handlers remain. */
/* NOTE(review): return type, braces and the (empty) body are elided.
   This exists only as a place for a debugger to set a breakpoint before
   the stack is unwound through nothing-but-cleanups; throw_helper calls
   it in that situation.  */
3697 __unwinding_cleanup ()
3701 /* throw_helper performs some of the common grunt work for a throw. This
3702 routine is called by throw and rethrows. This is pretty much split
3703 out from the old __throw routine. An addition has been added which allows
3704 for a dummy call to a routine __unwinding_cleanup() when there are nothing
3705 but cleanups remaining. This allows a debugger to examine the state
3706 at which the throw was executed, before any cleanups, rather than
3707 at the terminate point after the stack has been unwound.
3709 EH is the current eh_context structure.
3710 PC is the address of the call to __throw.
3711 MY_UDATA is the unwind information for __throw.
3712 OFFSET_P is where we return the SP adjustment offset. */
/* NOTE(review): this K&R-style definition is heavily elided in the
   listing -- the return type, the parameter declarations for "pc" and
   "offset_p", braces, the declarations of "handler", "handler_p",
   "cleanup", "args_size", "i", "new_eh_model", the search loop
   structure, the __terminate path for unhandled exceptions, and the
   final "return handler;" all fall in missing lines.  Comments below
   annotate the visible fragments only.  */
3715 throw_helper (eh
, pc
, my_udata
, offset_p
)
3716 struct eh_context
*eh
;
3718 frame_state
*my_udata
;
/* Two scratch frame_state buffers: "udata" walks up the stack,
   "sub_udata" holds the frame called by udata.  */
3721 frame_state ustruct2
, *udata
= &ustruct2
;
3722 frame_state ustruct
;
3723 frame_state
*sub_udata
= &ustruct
;
/* Remember the original throw point.  */
3724 void *saved_pc
= pc
;
/* Snapshot of the frame where a cleanup-only handler was found.  */
3728 frame_state saved_ustruct
;
3731 int only_cleanup
= 0;
3733 int saved_state
= 0;
3735 __eh_info
*eh_info
= (__eh_info
*)eh
->info
;
3737 /* Do we find a handler based on a re-throw PC? */
3738 if (eh
->table_index
!= (void *) 0)
/* Start the walk from __throw's own unwind state.  */
3741 memcpy (udata
, my_udata
, sizeof (*udata
));
3743 handler
= (void *) 0;
/* --- search loop body (loop header elided): step to the caller.  */
3746 frame_state
*p
= udata
;
3747 udata
= next_stack_level (pc
, udata
, sub_udata
);
3750 /* If we couldn't find the next frame, we lose. */
/* A frame with no eh_ptr has no exception table at all.  */
3754 if (udata
->eh_ptr
== NULL
)
/* Distinguish new-model descriptors by their runtime_id_field.  */
3757 new_eh_model
= (((exception_descriptor
*)(udata
->eh_ptr
))->
3758 runtime_id_field
== NEW_EH_RUNTIME
);
/* Rethrow: search starting from the stashed table entry ...  */
3763 handler
= find_exception_handler (eh
->table_index
, udata
->eh_ptr
,
3764 eh_info
, 1, &cleanup
);
3765 eh
->table_index
= (void *)0;
/* ... otherwise a normal new-model search by PC ...  */
3769 handler
= find_exception_handler (pc
, udata
->eh_ptr
, eh_info
,
/* ... or fall back to the old-model table lookup.  */
3772 handler
= old_find_exception_handler (pc
, udata
->eh_ptr
);
3774 /* If we found one, we can stop searching, if its not a cleanup.
3775 for cleanups, we save the state, and keep looking. This allows
3776 us to call a debug hook if there are nothing but cleanups left. */
/* First cleanup found: remember the frame and handler, keep walking.  */
3783 saved_ustruct
= *udata
;
3784 handler_p
= handler
;
3797 /* Otherwise, we continue searching. We subtract 1 from PC to avoid
3798 hitting the beginning of the next region. */
3799 pc
= get_return_addr (udata
, sub_udata
) - 1;
/* Only cleanups remained: restore the saved cleanup frame/handler and
   let the debugger hook fire before unwinding.  */
3804 udata
= &saved_ustruct
;
3805 handler
= handler_p
;
3808 __unwinding_cleanup ();
3811 /* If we haven't found a handler by now, this is an unhandled
/* Publish the chosen handler label for the landing pad.  */
3816 eh
->handler_label
= handler
;
3818 args_size
= udata
->args_size
;
3821 /* We found a handler in the throw context, no need to unwind. */
3827 /* Unwind all the frames between this one and the handler by copying
3828 their saved register values into our register save slots. */
3830 /* Remember the PC where we found the handler. */
3831 void *handler_pc
= pc
;
3833 /* Start from the throw context again. */
3835 memcpy (udata
, my_udata
, sizeof (*udata
));
/* Second walk: replay the unwind, harvesting register saves.  */
3837 while (pc
!= handler_pc
)
3839 frame_state
*p
= udata
;
3840 udata
= next_stack_level (pc
, udata
, sub_udata
);
/* Copy every saved register except the return-address column into
   __throw's own save slots.  */
3843 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; ++i
)
3844 if (i
!= udata
->retaddr_column
&& udata
->saved
[i
])
3846 /* If you modify the saved value of the return address
3847 register on the SPARC, you modify the return address for
3848 your caller's frame. Don't do that here, as it will
3849 confuse get_return_addr. */
3850 if (in_reg_window (i
, udata
)
3851 && udata
->saved
[udata
->retaddr_column
] == REG_SAVED_REG
3852 && udata
->reg_or_offset
[udata
->retaddr_column
] == i
)
3854 copy_reg (i
, udata
, my_udata
);
3857 pc
= get_return_addr (udata
, sub_udata
) - 1;
3860 /* But we do need to update the saved return address register from
3861 the last frame we unwind, or the handler frame will have the wrong
3863 if (udata
->saved
[udata
->retaddr_column
] == REG_SAVED_REG
)
3865 i
= udata
->reg_or_offset
[udata
->retaddr_column
];
3866 if (in_reg_window (i
, udata
))
3867 copy_reg (i
, udata
, my_udata
);
3870 /* udata now refers to the frame called by the handler frame. */
3872 /* We adjust SP by the difference between __throw's CFA and the CFA for
3873 the frame called by the handler frame, because those CFAs correspond
3874 to the SP values at the two call sites. We need to further adjust by
3875 the args_size of the handler frame itself to get the handler frame's
3876 SP from before the args were pushed for that call. */
3877 #ifdef STACK_GROWS_DOWNWARD
3878 *offset_p
= udata
->cfa
- my_udata
->cfa
+ args_size
;
/* (#else arm; directive elided) stack grows upward: mirror the sign.  */
3880 *offset_p
= my_udata
->cfa
- udata
->cfa
- args_size
;
3887 /* We first search for an exception handler, and if we don't find
3888 it, we call __terminate on the current stack frame so that we may
3889 use the debugger to walk the stack and understand why no handler
3892 If we find one, then we unwind the frames down to the one that
3893 has the handler and transfer control into the handler. */
3895 /*extern void __throw(void) __attribute__ ((__noreturn__));*/
/* NOTE(review): the definition header line ("void __throw (void)" or
   similar), braces, the declarations of "pc", "handler" and "offset",
   the no-current-exception terminate check, and the "label:" used with
   &&label are all elided in this listing.  */
/* Obtain the (possibly thread-local) eh context via its accessor.  */
3900 struct eh_context
*eh
= (*get_eh_context
) ();
3904 /* XXX maybe make my_ustruct static so we don't have to look it up for
/* Unwind state for __throw's own frame.  */
3906 frame_state my_ustruct
, *my_udata
= &my_ustruct
;
3908 /* This is required for C++ semantics. We must call terminate if we
3909 try and rethrow an exception, when there is no exception currently
3914 /* Start at our stack frame. */
/* &&label is a GCC computed-label address inside this function; the
   label itself is in an elided line.  */
3916 my_udata
= __frame_state_for (&&label
, my_udata
);
3920 /* We need to get the value from the CFA register. */
3921 my_udata
->cfa
= __builtin_dwarf_cfa ();
3923 /* Do any necessary initialization to access arbitrary stack frames.
3924 On the SPARC, this means flushing the register windows. */
3925 __builtin_unwind_init ();
3927 /* Now reset pc to the right throw point. */
/* Return address of the call to __throw, backed up by one byte so it
   lands inside the calling region.  */
3928 pc
= __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
/* Delegate the search-and-unwind to the shared helper.  */
3930 handler
= throw_helper (eh
, pc
, my_udata
, &offset
);
/* Transfer control: adjust SP by OFFSET and jump to HANDLER, passing
   the eh context.  */
3934 __builtin_eh_return ((void *)eh
, offset
, handler
);
3936 /* Epilogue: restore the handler frame's register values and return
3940 /*extern void __rethrow(void *) __attribute__ ((__noreturn__));*/
/* NOTE(review): as with __throw, the definition header ("__rethrow
   (index)" taking the table-entry pointer), braces, local declarations,
   the no-current-exception terminate check, and the "label:" target of
   &&label are elided.  Body parallels __throw except that it seeds
   eh->table_index so throw_helper performs a rethrow search.  */
3946 struct eh_context
*eh
= (*get_eh_context
) ();
3950 /* XXX maybe make my_ustruct static so we don't have to look it up for
3952 frame_state my_ustruct
, *my_udata
= &my_ustruct
;
3954 /* This is required for C++ semantics. We must call terminate if we
3955 try and rethrow an exception, when there is no exception currently
3960 /* This is the table index we want to rethrow from. The value of
3961 the END_REGION label is used for the PC of the throw, and the
3962 search begins with the next table entry. */
3963 eh
->table_index
= index
;
3965 /* Start at our stack frame. */
3967 my_udata
= __frame_state_for (&&label
, my_udata
);
3971 /* We need to get the value from the CFA register. */
3972 my_udata
->cfa
= __builtin_dwarf_cfa ();
3974 /* Do any necessary initialization to access arbitrary stack frames.
3975 On the SPARC, this means flushing the register windows. */
3976 __builtin_unwind_init ();
3978 /* Now reset pc to the right throw point. */
3979 pc
= __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
3981 handler
= throw_helper (eh
, pc
, my_udata
, &offset
);
/* Adjust SP and jump into the located handler; does not return.  */
3985 __builtin_eh_return ((void *)eh
, offset
, handler
);
3987 /* Epilogue: restore the handler frame's register values and return
3990 #endif /* DWARF2_UNWIND_INFO */
/* NOTE(review): support region for the pure-virtual-call trap.  The
   function definition header is elided here and its body runs past the
   end of this excerpt; only the setup and the write() call are visible.  */
3995 #ifndef inhibit_libc
3996 /* This gets us __GNU_LIBRARY__. */
3997 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
4000 #ifdef __GNU_LIBRARY__
4001 /* Avoid forcing the library's meaning of `write' on the user program
4002 by using the "internal" name (for use within the library) */
4003 #define write(fd, buf, n) __write((fd), (buf), (n))
4005 #endif /* inhibit_libc */
4007 #define MESSAGE "pure virtual method called\n"
4012 #ifndef inhibit_libc
/* Emit the diagnostic on fd 2 (stderr); sizeof-1 drops the NUL.  */
4013 write (2, MESSAGE
, sizeof (MESSAGE
) - 1);