1 /* More subroutines needed by GCC output code on some machines. */
2 /* Compile this one with gcc. */
3 /* Copyright (C) 1989, 92-97, 1998 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
22 /* As a special exception, if you link this library with other files,
23 some of which are compiled with GCC, to produce an executable,
24 this library does not by itself cause the resulting executable
25 to be covered by the GNU General Public License.
26 This exception does not however invalidate any other reasons why
27 the executable file might be covered by the GNU General Public License. */
29 /* It is incorrect to include config.h here, because this file is being
30 compiled for the target, and hence definitions concerning only the host
35 /* We disable this when inhibit_libc, so that gcc can still be built without
36 needing header files first. */
37 /* ??? This is not a good solution, since prototypes may be required in
38 some cases for correct code. See also frame.c. */
40 /* fixproto guarantees these system headers exist. */
51 /* Don't use `fancy_abort' here even if config.h says to use it. */
56 #if (SUPPORTS_WEAK == 1) && (defined (ASM_OUTPUT_DEF) || defined (ASM_OUTPUT_WEAK_ALIAS))
60 /* In a cross-compilation situation, default to inhibiting compilation
61 of routines that use libc. */
63 #if defined(CROSS_COMPILE) && !defined(inhibit_libc)
67 /* Permit the tm.h file to select the endianness to use just for this
68 file. This is used when the endianness is determined when the
71 #ifndef LIBGCC2_WORDS_BIG_ENDIAN
72 #define LIBGCC2_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
75 /* In the first part of this file, we are interfacing to calls generated
76 by the compiler itself. These calls pass values into these routines
77 which have very specific modes (rather than very specific types), and
78 these compiler-generated calls also expect any return values to have
79 very specific modes (rather than very specific types). Thus, we need
80 to avoid using regular C language type names in this part of the file
81 because the sizes for those types can be configured to be anything.
82 Instead we use the following special type names. */
84 typedef unsigned int UQItype
__attribute__ ((mode (QI
)));
85 typedef int SItype
__attribute__ ((mode (SI
)));
86 typedef unsigned int USItype
__attribute__ ((mode (SI
)));
87 typedef int DItype
__attribute__ ((mode (DI
)));
88 typedef unsigned int UDItype
__attribute__ ((mode (DI
)));
90 typedef float SFtype
__attribute__ ((mode (SF
)));
91 typedef float DFtype
__attribute__ ((mode (DF
)));
93 #if LONG_DOUBLE_TYPE_SIZE == 96
94 typedef float XFtype
__attribute__ ((mode (XF
)));
96 #if LONG_DOUBLE_TYPE_SIZE == 128
97 typedef float TFtype
__attribute__ ((mode (TF
)));
100 typedef int word_type
__attribute__ ((mode (__word__
)));
102 /* Make sure that we don't accidentally use any normal C language built-in
103 type names in the first part of this file. Instead we want to use *only*
104 the type names defined above. The following macro definitions insure
105 that if we *do* accidentally use some normal C language built-in type name,
106 we will get a syntax error. */
108 #define char bogus_type
109 #define short bogus_type
110 #define int bogus_type
111 #define long bogus_type
112 #define unsigned bogus_type
113 #define float bogus_type
114 #define double bogus_type
116 #define SI_TYPE_SIZE (sizeof (SItype) * BITS_PER_UNIT)
118 /* DIstructs are pairs of SItype values in the order determined by
119 LIBGCC2_WORDS_BIG_ENDIAN. */
121 #if LIBGCC2_WORDS_BIG_ENDIAN
122 struct DIstruct
{SItype high
, low
;};
124 struct DIstruct
{SItype low
, high
;};
127 /* We need this union to unpack/pack DImode values, since we don't have
128 any arithmetic yet. Incoming DImode parameters are stored into the
129 `ll' field, and the unpacked result is read from the struct `s'. */
137 #if (defined (L_udivmoddi4) || defined (L_muldi3) || defined (L_udiv_w_sdiv)\
138 || defined (L_divdi3) || defined (L_udivdi3) \
139 || defined (L_moddi3) || defined (L_umoddi3))
141 #include "longlong.h"
143 #endif /* udiv or mul */
145 extern DItype
__fixunssfdi (SFtype a
);
146 extern DItype
__fixunsdfdi (DFtype a
);
147 #if LONG_DOUBLE_TYPE_SIZE == 96
148 extern DItype
__fixunsxfdi (XFtype a
);
150 #if LONG_DOUBLE_TYPE_SIZE == 128
151 extern DItype
__fixunstfdi (TFtype a
);
154 #if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
155 #if defined (L_divdi3) || defined (L_moddi3)
167 w
.s
.high
= -uu
.s
.high
- ((USItype
) w
.s
.low
> 0);
173 /* Unless shift functions are defined whith full ANSI prototypes,
174 parameter b will be promoted to int if word_type is smaller than an int. */
177 __lshrdi3 (DItype u
, word_type b
)
188 bm
= (sizeof (SItype
) * BITS_PER_UNIT
) - b
;
192 w
.s
.low
= (USItype
)uu
.s
.high
>> -bm
;
196 USItype carries
= (USItype
)uu
.s
.high
<< bm
;
197 w
.s
.high
= (USItype
)uu
.s
.high
>> b
;
198 w
.s
.low
= ((USItype
)uu
.s
.low
>> b
) | carries
;
207 __ashldi3 (DItype u
, word_type b
)
218 bm
= (sizeof (SItype
) * BITS_PER_UNIT
) - b
;
222 w
.s
.high
= (USItype
)uu
.s
.low
<< -bm
;
226 USItype carries
= (USItype
)uu
.s
.low
>> bm
;
227 w
.s
.low
= (USItype
)uu
.s
.low
<< b
;
228 w
.s
.high
= ((USItype
)uu
.s
.high
<< b
) | carries
;
237 __ashrdi3 (DItype u
, word_type b
)
248 bm
= (sizeof (SItype
) * BITS_PER_UNIT
) - b
;
251 /* w.s.high = 1..1 or 0..0 */
252 w
.s
.high
= uu
.s
.high
>> (sizeof (SItype
) * BITS_PER_UNIT
- 1);
253 w
.s
.low
= uu
.s
.high
>> -bm
;
257 USItype carries
= (USItype
)uu
.s
.high
<< bm
;
258 w
.s
.high
= uu
.s
.high
>> b
;
259 w
.s
.low
= ((USItype
)uu
.s
.low
>> b
) | carries
;
273 w
.s
.low
= ffs (uu
.s
.low
);
276 w
.s
.low
= ffs (uu
.s
.high
);
279 w
.s
.low
+= BITS_PER_UNIT
* sizeof (SItype
);
288 __muldi3 (DItype u
, DItype v
)
296 w
.ll
= __umulsidi3 (uu
.s
.low
, vv
.s
.low
);
297 w
.s
.high
+= ((USItype
) uu
.s
.low
* (USItype
) vv
.s
.high
298 + (USItype
) uu
.s
.high
* (USItype
) vv
.s
.low
);
305 #if defined (sdiv_qrnnd)
307 __udiv_w_sdiv (USItype
*rp
, USItype a1
, USItype a0
, USItype d
)
314 if (a1
< d
- a1
- (a0
>> (SI_TYPE_SIZE
- 1)))
316 /* dividend, divisor, and quotient are nonnegative */
317 sdiv_qrnnd (q
, r
, a1
, a0
, d
);
321 /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
322 sub_ddmmss (c1
, c0
, a1
, a0
, d
>> 1, d
<< (SI_TYPE_SIZE
- 1));
323 /* Divide (c1*2^32 + c0) by d */
324 sdiv_qrnnd (q
, r
, c1
, c0
, d
);
325 /* Add 2^31 to quotient */
326 q
+= (USItype
) 1 << (SI_TYPE_SIZE
- 1);
331 b1
= d
>> 1; /* d/2, between 2^30 and 2^31 - 1 */
332 c1
= a1
>> 1; /* A/2 */
333 c0
= (a1
<< (SI_TYPE_SIZE
- 1)) + (a0
>> 1);
335 if (a1
< b1
) /* A < 2^32*b1, so A/2 < 2^31*b1 */
337 sdiv_qrnnd (q
, r
, c1
, c0
, b1
); /* (A/2) / (d/2) */
339 r
= 2*r
+ (a0
& 1); /* Remainder from A/(2*b1) */
356 else if (c1
< b1
) /* So 2^31 <= (A/2)/b1 < 2^32 */
359 c0
= ~c0
; /* logical NOT */
361 sdiv_qrnnd (q
, r
, c1
, c0
, b1
); /* (A/2) / (d/2) */
363 q
= ~q
; /* (A/2)/b1 */
366 r
= 2*r
+ (a0
& 1); /* A/(2*b1) */
384 else /* Implies c1 = b1 */
385 { /* Hence a1 = d - 1 = 2*b1 - 1 */
403 /* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */
405 __udiv_w_sdiv (USItype
*rp
__attribute__ ((__unused__
)),
406 USItype a1
__attribute__ ((__unused__
)),
407 USItype a0
__attribute__ ((__unused__
)),
408 USItype d
__attribute__ ((__unused__
)))
415 #if (defined (L_udivdi3) || defined (L_divdi3) || \
416 defined (L_umoddi3) || defined (L_moddi3))
421 static const UQItype __clz_tab
[] =
423 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
424 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
425 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
426 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
427 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
428 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
429 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
430 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
433 #if (defined (L_udivdi3) || defined (L_divdi3) || \
434 defined (L_umoddi3) || defined (L_moddi3))
438 __udivmoddi4 (UDItype n
, UDItype d
, UDItype
*rp
)
443 USItype d0
, d1
, n0
, n1
, n2
;
455 #if !UDIV_NEEDS_NORMALIZATION
462 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
465 /* Remainder in n0. */
472 d0
= 1 / d0
; /* Divide intentionally by zero. */
474 udiv_qrnnd (q1
, n1
, 0, n1
, d0
);
475 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
477 /* Remainder in n0. */
488 #else /* UDIV_NEEDS_NORMALIZATION */
496 count_leading_zeros (bm
, d0
);
500 /* Normalize, i.e. make the most significant bit of the
504 n1
= (n1
<< bm
) | (n0
>> (SI_TYPE_SIZE
- bm
));
508 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
511 /* Remainder in n0 >> bm. */
518 d0
= 1 / d0
; /* Divide intentionally by zero. */
520 count_leading_zeros (bm
, d0
);
524 /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
525 conclude (the most significant bit of n1 is set) /\ (the
526 leading quotient digit q1 = 1).
528 This special case is necessary, not an optimization.
529 (Shifts counts of SI_TYPE_SIZE are undefined.) */
538 b
= SI_TYPE_SIZE
- bm
;
542 n1
= (n1
<< bm
) | (n0
>> b
);
545 udiv_qrnnd (q1
, n1
, n2
, n1
, d0
);
550 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
552 /* Remainder in n0 >> bm. */
562 #endif /* UDIV_NEEDS_NORMALIZATION */
573 /* Remainder in n1n0. */
585 count_leading_zeros (bm
, d1
);
588 /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
589 conclude (the most significant bit of n1 is set) /\ (the
590 quotient digit q0 = 0 or 1).
592 This special case is necessary, not an optimization. */
594 /* The condition on the next line takes advantage of that
595 n1 >= d1 (true due to program flow). */
596 if (n1
> d1
|| n0
>= d0
)
599 sub_ddmmss (n1
, n0
, n1
, n0
, d1
, d0
);
618 b
= SI_TYPE_SIZE
- bm
;
620 d1
= (d1
<< bm
) | (d0
>> b
);
623 n1
= (n1
<< bm
) | (n0
>> b
);
626 udiv_qrnnd (q0
, n1
, n2
, n1
, d1
);
627 umul_ppmm (m1
, m0
, q0
, d0
);
629 if (m1
> n1
|| (m1
== n1
&& m0
> n0
))
632 sub_ddmmss (m1
, m0
, m1
, m0
, d1
, d0
);
637 /* Remainder in (n1n0 - m1m0) >> bm. */
640 sub_ddmmss (n1
, n0
, n1
, n0
, m1
, m0
);
641 rr
.s
.low
= (n1
<< b
) | (n0
>> bm
);
642 rr
.s
.high
= n1
>> bm
;
656 UDItype
__udivmoddi4 ();
659 __divdi3 (DItype u
, DItype v
)
670 uu
.ll
= __negdi2 (uu
.ll
);
673 vv
.ll
= __negdi2 (vv
.ll
);
675 w
= __udivmoddi4 (uu
.ll
, vv
.ll
, (UDItype
*) 0);
684 UDItype
__udivmoddi4 ();
686 __moddi3 (DItype u
, DItype v
)
697 uu
.ll
= __negdi2 (uu
.ll
);
699 vv
.ll
= __negdi2 (vv
.ll
);
701 (void) __udivmoddi4 (uu
.ll
, vv
.ll
, &w
);
710 UDItype
__udivmoddi4 ();
712 __umoddi3 (UDItype u
, UDItype v
)
716 (void) __udivmoddi4 (u
, v
, &w
);
723 UDItype
__udivmoddi4 ();
725 __udivdi3 (UDItype n
, UDItype d
)
727 return __udivmoddi4 (n
, d
, (UDItype
*) 0);
733 __cmpdi2 (DItype a
, DItype b
)
737 au
.ll
= a
, bu
.ll
= b
;
739 if (au
.s
.high
< bu
.s
.high
)
741 else if (au
.s
.high
> bu
.s
.high
)
743 if ((USItype
) au
.s
.low
< (USItype
) bu
.s
.low
)
745 else if ((USItype
) au
.s
.low
> (USItype
) bu
.s
.low
)
753 __ucmpdi2 (DItype a
, DItype b
)
757 au
.ll
= a
, bu
.ll
= b
;
759 if ((USItype
) au
.s
.high
< (USItype
) bu
.s
.high
)
761 else if ((USItype
) au
.s
.high
> (USItype
) bu
.s
.high
)
763 if ((USItype
) au
.s
.low
< (USItype
) bu
.s
.low
)
765 else if ((USItype
) au
.s
.low
> (USItype
) bu
.s
.low
)
771 #if defined(L_fixunstfdi) && (LONG_DOUBLE_TYPE_SIZE == 128)
772 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
773 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
776 __fixunstfdi (TFtype a
)
784 /* Compute high word of result, as a flonum. */
785 b
= (a
/ HIGH_WORD_COEFF
);
786 /* Convert that to fixed (but not to DItype!),
787 and shift it into the high word. */
790 /* Remove high part from the TFtype, leaving the low part as flonum. */
792 /* Convert that to fixed (but not to DItype!) and add it in.
793 Sometimes A comes out negative. This is significant, since
794 A has more bits than a long int does. */
796 v
-= (USItype
) (- a
);
803 #if defined(L_fixtfdi) && (LONG_DOUBLE_TYPE_SIZE == 128)
808 return - __fixunstfdi (-a
);
809 return __fixunstfdi (a
);
813 #if defined(L_fixunsxfdi) && (LONG_DOUBLE_TYPE_SIZE == 96)
814 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
815 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
818 __fixunsxfdi (XFtype a
)
826 /* Compute high word of result, as a flonum. */
827 b
= (a
/ HIGH_WORD_COEFF
);
828 /* Convert that to fixed (but not to DItype!),
829 and shift it into the high word. */
832 /* Remove high part from the XFtype, leaving the low part as flonum. */
834 /* Convert that to fixed (but not to DItype!) and add it in.
835 Sometimes A comes out negative. This is significant, since
836 A has more bits than a long int does. */
838 v
-= (USItype
) (- a
);
845 #if defined(L_fixxfdi) && (LONG_DOUBLE_TYPE_SIZE == 96)
850 return - __fixunsxfdi (-a
);
851 return __fixunsxfdi (a
);
856 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
857 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
860 __fixunsdfdi (DFtype a
)
868 /* Compute high word of result, as a flonum. */
869 b
= (a
/ HIGH_WORD_COEFF
);
870 /* Convert that to fixed (but not to DItype!),
871 and shift it into the high word. */
874 /* Remove high part from the DFtype, leaving the low part as flonum. */
876 /* Convert that to fixed (but not to DItype!) and add it in.
877 Sometimes A comes out negative. This is significant, since
878 A has more bits than a long int does. */
880 v
-= (USItype
) (- a
);
892 return - __fixunsdfdi (-a
);
893 return __fixunsdfdi (a
);
898 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
899 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
902 __fixunssfdi (SFtype original_a
)
904 /* Convert the SFtype to a DFtype, because that is surely not going
905 to lose any bits. Some day someone else can write a faster version
906 that avoids converting to DFtype, and verify it really works right. */
907 DFtype a
= original_a
;
914 /* Compute high word of result, as a flonum. */
915 b
= (a
/ HIGH_WORD_COEFF
);
916 /* Convert that to fixed (but not to DItype!),
917 and shift it into the high word. */
920 /* Remove high part from the DFtype, leaving the low part as flonum. */
922 /* Convert that to fixed (but not to DItype!) and add it in.
923 Sometimes A comes out negative. This is significant, since
924 A has more bits than a long int does. */
926 v
-= (USItype
) (- a
);
938 return - __fixunssfdi (-a
);
939 return __fixunssfdi (a
);
943 #if defined(L_floatdixf) && (LONG_DOUBLE_TYPE_SIZE == 96)
944 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
945 #define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
946 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
949 __floatdixf (DItype u
)
953 d
= (SItype
) (u
>> WORD_SIZE
);
954 d
*= HIGH_HALFWORD_COEFF
;
955 d
*= HIGH_HALFWORD_COEFF
;
956 d
+= (USItype
) (u
& (HIGH_WORD_COEFF
- 1));
962 #if defined(L_floatditf) && (LONG_DOUBLE_TYPE_SIZE == 128)
963 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
964 #define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
965 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
968 __floatditf (DItype u
)
972 d
= (SItype
) (u
>> WORD_SIZE
);
973 d
*= HIGH_HALFWORD_COEFF
;
974 d
*= HIGH_HALFWORD_COEFF
;
975 d
+= (USItype
) (u
& (HIGH_WORD_COEFF
- 1));
982 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
983 #define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
984 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
987 __floatdidf (DItype u
)
991 d
= (SItype
) (u
>> WORD_SIZE
);
992 d
*= HIGH_HALFWORD_COEFF
;
993 d
*= HIGH_HALFWORD_COEFF
;
994 d
+= (USItype
) (u
& (HIGH_WORD_COEFF
- 1));
1001 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
1002 #define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
1003 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
1004 #define DI_SIZE (sizeof (DItype) * BITS_PER_UNIT)
1006 /* Define codes for all the float formats that we know of. Note
1007 that this is copied from real.h. */
1009 #define UNKNOWN_FLOAT_FORMAT 0
1010 #define IEEE_FLOAT_FORMAT 1
1011 #define VAX_FLOAT_FORMAT 2
1012 #define IBM_FLOAT_FORMAT 3
1014 /* Default to IEEE float if not specified. Nearly all machines use it. */
1015 #ifndef HOST_FLOAT_FORMAT
1016 #define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
1019 #if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1024 #if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
1029 #if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
1035 __floatdisf (DItype u
)
1037 /* Do the calculation in DFmode
1038 so that we don't lose any of the precision of the high word
1039 while multiplying it. */
1042 /* Protect against double-rounding error.
1043 Represent any low-order bits, that might be truncated in DFmode,
1044 by a bit that won't be lost. The bit can go in anywhere below the
1045 rounding position of the SFmode. A fixed mask and bit position
1046 handles all usual configurations. It doesn't handle the case
1047 of 128-bit DImode, however. */
1048 if (DF_SIZE
< DI_SIZE
1049 && DF_SIZE
> (DI_SIZE
- DF_SIZE
+ SF_SIZE
))
1051 #define REP_BIT ((USItype) 1 << (DI_SIZE - DF_SIZE))
1052 if (! (- ((DItype
) 1 << DF_SIZE
) < u
1053 && u
< ((DItype
) 1 << DF_SIZE
)))
1055 if ((USItype
) u
& (REP_BIT
- 1))
1059 f
= (SItype
) (u
>> WORD_SIZE
);
1060 f
*= HIGH_HALFWORD_COEFF
;
1061 f
*= HIGH_HALFWORD_COEFF
;
1062 f
+= (USItype
) (u
& (HIGH_WORD_COEFF
- 1));
1068 #if defined(L_fixunsxfsi) && LONG_DOUBLE_TYPE_SIZE == 96
1069 /* Reenable the normal types, in case limits.h needs them. */
1082 __fixunsxfsi (XFtype a
)
1084 if (a
>= - (DFtype
) LONG_MIN
)
1085 return (SItype
) (a
+ LONG_MIN
) - LONG_MIN
;
1091 /* Reenable the normal types, in case limits.h needs them. */
1104 __fixunsdfsi (DFtype a
)
1106 if (a
>= - (DFtype
) LONG_MIN
)
1107 return (SItype
) (a
+ LONG_MIN
) - LONG_MIN
;
1113 /* Reenable the normal types, in case limits.h needs them. */
1126 __fixunssfsi (SFtype a
)
1128 if (a
>= - (SFtype
) LONG_MIN
)
1129 return (SItype
) (a
+ LONG_MIN
) - LONG_MIN
;
1134 /* From here on down, the routines use normal data types. */
1136 #define SItype bogus_type
1137 #define USItype bogus_type
1138 #define DItype bogus_type
1139 #define UDItype bogus_type
1140 #define SFtype bogus_type
1141 #define DFtype bogus_type
1153 /* Like bcmp except the sign is meaningful.
1154 Result is negative if S1 is less than S2,
1155 positive if S1 is greater, 0 if S1 and S2 are equal. */
1158 __gcc_bcmp (unsigned char *s1
, unsigned char *s2
, size_t size
)
1162 unsigned char c1
= *s1
++, c2
= *s2
++;
1179 #if defined(__svr4__) || defined(__alliant__)
1183 /* The Alliant needs the added underscore. */
1184 asm (".globl __builtin_saveregs");
1185 asm ("__builtin_saveregs:");
1186 asm (".globl ___builtin_saveregs");
1187 asm ("___builtin_saveregs:");
1189 asm (" andnot 0x0f,%sp,%sp"); /* round down to 16-byte boundary */
1190 asm (" adds -96,%sp,%sp"); /* allocate stack space for reg save
1191 area and also for a new va_list
1193 /* Save all argument registers in the arg reg save area. The
1194 arg reg save area must have the following layout (according
1206 asm (" fst.q %f8, 0(%sp)"); /* save floating regs (f8-f15) */
1207 asm (" fst.q %f12,16(%sp)");
1209 asm (" st.l %r16,32(%sp)"); /* save integer regs (r16-r27) */
1210 asm (" st.l %r17,36(%sp)");
1211 asm (" st.l %r18,40(%sp)");
1212 asm (" st.l %r19,44(%sp)");
1213 asm (" st.l %r20,48(%sp)");
1214 asm (" st.l %r21,52(%sp)");
1215 asm (" st.l %r22,56(%sp)");
1216 asm (" st.l %r23,60(%sp)");
1217 asm (" st.l %r24,64(%sp)");
1218 asm (" st.l %r25,68(%sp)");
1219 asm (" st.l %r26,72(%sp)");
1220 asm (" st.l %r27,76(%sp)");
1222 asm (" adds 80,%sp,%r16"); /* compute the address of the new
1223 va_list structure. Put in into
1224 r16 so that it will be returned
1227 /* Initialize all fields of the new va_list structure. This
1228 structure looks like:
1231 unsigned long ireg_used;
1232 unsigned long freg_used;
1238 asm (" st.l %r0, 0(%r16)"); /* nfixed */
1239 asm (" st.l %r0, 4(%r16)"); /* nfloating */
1240 asm (" st.l %sp, 8(%r16)"); /* __va_ctl points to __va_struct. */
1241 asm (" bri %r1"); /* delayed return */
1242 asm (" st.l %r28,12(%r16)"); /* pointer to overflow args */
1244 #else /* not __svr4__ */
1245 #if defined(__PARAGON__)
1247 * we'll use SVR4-ish varargs but need SVR3.2 assembler syntax,
1248 * and we stand a better chance of hooking into libraries
1249 * compiled by PGI. [andyp@ssd.intel.com]
1253 asm (".globl __builtin_saveregs");
1254 asm ("__builtin_saveregs:");
1255 asm (".globl ___builtin_saveregs");
1256 asm ("___builtin_saveregs:");
1258 asm (" andnot 0x0f,sp,sp"); /* round down to 16-byte boundary */
1259 asm (" adds -96,sp,sp"); /* allocate stack space for reg save
1260 area and also for a new va_list
1262 /* Save all argument registers in the arg reg save area. The
1263 arg reg save area must have the following layout (according
1275 asm (" fst.q f8, 0(sp)");
1276 asm (" fst.q f12,16(sp)");
1277 asm (" st.l r16,32(sp)");
1278 asm (" st.l r17,36(sp)");
1279 asm (" st.l r18,40(sp)");
1280 asm (" st.l r19,44(sp)");
1281 asm (" st.l r20,48(sp)");
1282 asm (" st.l r21,52(sp)");
1283 asm (" st.l r22,56(sp)");
1284 asm (" st.l r23,60(sp)");
1285 asm (" st.l r24,64(sp)");
1286 asm (" st.l r25,68(sp)");
1287 asm (" st.l r26,72(sp)");
1288 asm (" st.l r27,76(sp)");
1290 asm (" adds 80,sp,r16"); /* compute the address of the new
1291 va_list structure. Put in into
1292 r16 so that it will be returned
1295 /* Initialize all fields of the new va_list structure. This
1296 structure looks like:
1299 unsigned long ireg_used;
1300 unsigned long freg_used;
1306 asm (" st.l r0, 0(r16)"); /* nfixed */
1307 asm (" st.l r0, 4(r16)"); /* nfloating */
1308 asm (" st.l sp, 8(r16)"); /* __va_ctl points to __va_struct. */
1309 asm (" bri r1"); /* delayed return */
1310 asm (" st.l r28,12(r16)"); /* pointer to overflow args */
1311 #else /* not __PARAGON__ */
1315 asm (".globl ___builtin_saveregs");
1316 asm ("___builtin_saveregs:");
1317 asm (" mov sp,r30");
1318 asm (" andnot 0x0f,sp,sp");
1319 asm (" adds -96,sp,sp"); /* allocate sufficient space on the stack */
1321 /* Fill in the __va_struct. */
1322 asm (" st.l r16, 0(sp)"); /* save integer regs (r16-r27) */
1323 asm (" st.l r17, 4(sp)"); /* int fixed[12] */
1324 asm (" st.l r18, 8(sp)");
1325 asm (" st.l r19,12(sp)");
1326 asm (" st.l r20,16(sp)");
1327 asm (" st.l r21,20(sp)");
1328 asm (" st.l r22,24(sp)");
1329 asm (" st.l r23,28(sp)");
1330 asm (" st.l r24,32(sp)");
1331 asm (" st.l r25,36(sp)");
1332 asm (" st.l r26,40(sp)");
1333 asm (" st.l r27,44(sp)");
1335 asm (" fst.q f8, 48(sp)"); /* save floating regs (f8-f15) */
1336 asm (" fst.q f12,64(sp)"); /* int floating[8] */
1338 /* Fill in the __va_ctl. */
1339 asm (" st.l sp, 80(sp)"); /* __va_ctl points to __va_struct. */
1340 asm (" st.l r28,84(sp)"); /* pointer to more args */
1341 asm (" st.l r0, 88(sp)"); /* nfixed */
1342 asm (" st.l r0, 92(sp)"); /* nfloating */
1344 asm (" adds 80,sp,r16"); /* return address of the __va_ctl. */
1346 asm (" mov r30,sp");
1347 /* recover stack and pass address to start
1349 #endif /* not __PARAGON__ */
1350 #endif /* not __svr4__ */
1351 #else /* not __i860__ */
1353 asm (".global __builtin_saveregs");
1354 asm ("__builtin_saveregs:");
1355 asm (".global ___builtin_saveregs");
1356 asm ("___builtin_saveregs:");
1357 #ifdef NEED_PROC_COMMAND
1360 asm ("st %i0,[%fp+68]");
1361 asm ("st %i1,[%fp+72]");
1362 asm ("st %i2,[%fp+76]");
1363 asm ("st %i3,[%fp+80]");
1364 asm ("st %i4,[%fp+84]");
1366 asm ("st %i5,[%fp+88]");
1367 #ifdef NEED_TYPE_COMMAND
1368 asm (".type __builtin_saveregs,#function");
1369 asm (".size __builtin_saveregs,.-__builtin_saveregs");
1371 #else /* not __sparc__ */
1372 #if defined(__MIPSEL__) | defined(__R3000__) | defined(__R2000__) | defined(__mips__)
1376 asm (" .set nomips16");
1378 asm (" .ent __builtin_saveregs");
1379 asm (" .globl __builtin_saveregs");
1380 asm ("__builtin_saveregs:");
1381 asm (" sw $4,0($30)");
1382 asm (" sw $5,4($30)");
1383 asm (" sw $6,8($30)");
1384 asm (" sw $7,12($30)");
1386 asm (" .end __builtin_saveregs");
1387 #else /* not __mips__, etc. */
1390 __builtin_saveregs ()
1395 #endif /* not __mips__ */
1396 #endif /* not __sparc__ */
1397 #endif /* not __i860__ */
1401 #ifndef inhibit_libc
1403 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1405 /* This is used by the `assert' macro. */
1406 extern void __eprintf (const char *, const char *, unsigned int, const char *)
1407 __attribute__ ((__noreturn__
));
1410 __eprintf (const char *string
, const char *expression
,
1411 unsigned int line
, const char *filename
)
1413 fprintf (stderr
, string
, expression
, line
, filename
);
1423 /* Structure emitted by -a */
1427 const char *filename
;
1431 const unsigned long *addresses
;
1433 /* Older GCC's did not emit these fields. */
1435 const char **functions
;
1436 const long *line_nums
;
1437 const char **filenames
;
1441 #ifdef BLOCK_PROFILER_CODE
1444 #ifndef inhibit_libc
1446 /* Simple minded basic block profiling output dumper for
1447 systems that don't provide tcov support. At present,
1448 it requires atexit and stdio. */
1450 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1454 #include "gbl-ctors.h"
1455 #include "gcov-io.h"
1458 static struct bb
*bb_head
;
1460 /* Return the number of digits needed to print a value */
1461 /* __inline__ */ static int num_digits (long value
, int base
)
1463 int minus
= (value
< 0 && base
!= 16);
1464 unsigned long v
= (minus
) ? -value
: value
;
1478 __bb_exit_func (void)
1480 FILE *da_file
, *file
;
1487 i
= strlen (bb_head
->filename
) - 3;
1489 if (!strcmp (bb_head
->filename
+i
, ".da"))
1491 /* Must be -fprofile-arcs not -a.
1492 Dump data in a form that gcov expects. */
1496 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1498 /* If the file exists, and the number of counts in it is the same,
1499 then merge them in. */
1501 if ((da_file
= fopen (ptr
->filename
, "r")) != 0)
1505 if (__read_long (&n_counts
, da_file
, 8) != 0)
1507 fprintf (stderr
, "arc profiling: Can't read output file %s.\n",
1512 if (n_counts
== ptr
->ncounts
)
1516 for (i
= 0; i
< n_counts
; i
++)
1520 if (__read_long (&v
, da_file
, 8) != 0)
1522 fprintf (stderr
, "arc profiling: Can't read output file %s.\n",
1526 ptr
->counts
[i
] += v
;
1530 if (fclose (da_file
) == EOF
)
1531 fprintf (stderr
, "arc profiling: Error closing output file %s.\n",
1534 if ((da_file
= fopen (ptr
->filename
, "w")) == 0)
1536 fprintf (stderr
, "arc profiling: Can't open output file %s.\n",
1541 /* ??? Should first write a header to the file. Preferably, a 4 byte
1542 magic number, 4 bytes containing the time the program was
1543 compiled, 4 bytes containing the last modification time of the
1544 source file, and 4 bytes indicating the compiler options used.
1546 That way we can easily verify that the proper source/executable/
1547 data file combination is being used from gcov. */
1549 if (__write_long (ptr
->ncounts
, da_file
, 8) != 0)
1552 fprintf (stderr
, "arc profiling: Error writing output file %s.\n",
1558 long *count_ptr
= ptr
->counts
;
1560 for (j
= ptr
->ncounts
; j
> 0; j
--)
1562 if (__write_long (*count_ptr
, da_file
, 8) != 0)
1570 fprintf (stderr
, "arc profiling: Error writing output file %s.\n",
1574 if (fclose (da_file
) == EOF
)
1575 fprintf (stderr
, "arc profiling: Error closing output file %s.\n",
1582 /* Must be basic block profiling. Emit a human readable output file. */
1584 file
= fopen ("bb.out", "a");
1593 /* This is somewhat type incorrect, but it avoids worrying about
1594 exactly where time.h is included from. It should be ok unless
1595 a void * differs from other pointer formats, or if sizeof (long)
1596 is < sizeof (time_t). It would be nice if we could assume the
1597 use of rationale standards here. */
1599 time ((void *) &time_value
);
1600 fprintf (file
, "Basic block profiling finished on %s\n", ctime ((void *) &time_value
));
1602 /* We check the length field explicitly in order to allow compatibility
1603 with older GCC's which did not provide it. */
1605 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1608 int func_p
= (ptr
->nwords
>= sizeof (struct bb
)
1609 && ptr
->nwords
<= 1000
1611 int line_p
= (func_p
&& ptr
->line_nums
);
1612 int file_p
= (func_p
&& ptr
->filenames
);
1613 int addr_p
= (ptr
->addresses
!= 0);
1614 long ncounts
= ptr
->ncounts
;
1620 int blk_len
= num_digits (ncounts
, 10);
1625 fprintf (file
, "File %s, %ld basic blocks \n\n",
1626 ptr
->filename
, ncounts
);
1628 /* Get max values for each field. */
1629 for (i
= 0; i
< ncounts
; i
++)
1634 if (cnt_max
< ptr
->counts
[i
])
1635 cnt_max
= ptr
->counts
[i
];
1637 if (addr_p
&& addr_max
< ptr
->addresses
[i
])
1638 addr_max
= ptr
->addresses
[i
];
1640 if (line_p
&& line_max
< ptr
->line_nums
[i
])
1641 line_max
= ptr
->line_nums
[i
];
1645 p
= (ptr
->functions
[i
]) ? (ptr
->functions
[i
]) : "<none>";
1653 p
= (ptr
->filenames
[i
]) ? (ptr
->filenames
[i
]) : "<none>";
1660 addr_len
= num_digits (addr_max
, 16);
1661 cnt_len
= num_digits (cnt_max
, 10);
1662 line_len
= num_digits (line_max
, 10);
1664 /* Now print out the basic block information. */
1665 for (i
= 0; i
< ncounts
; i
++)
1668 " Block #%*d: executed %*ld time(s)",
1670 cnt_len
, ptr
->counts
[i
]);
1673 fprintf (file
, " address= 0x%.*lx", addr_len
,
1677 fprintf (file
, " function= %-*s", func_len
,
1678 (ptr
->functions
[i
]) ? ptr
->functions
[i
] : "<none>");
1681 fprintf (file
, " line= %*ld", line_len
, ptr
->line_nums
[i
]);
1684 fprintf (file
, " file= %s",
1685 (ptr
->filenames
[i
]) ? ptr
->filenames
[i
] : "<none>");
1687 fprintf (file
, "\n");
1690 fprintf (file
, "\n");
1694 fprintf (file
, "\n\n");
1700 __bb_init_func (struct bb
*blocks
)
1702 /* User is supposed to check whether the first word is non-0,
1703 but just in case.... */
1705 if (blocks
->zero_word
)
1709 /* Initialize destructor. */
1711 ON_EXIT (__bb_exit_func
, 0);
1714 /* Set up linked list. */
1715 blocks
->zero_word
= 1;
1716 blocks
->next
= bb_head
;
1720 #ifndef MACHINE_STATE_SAVE
1721 #define MACHINE_STATE_SAVE(ID)
1723 #ifndef MACHINE_STATE_RESTORE
1724 #define MACHINE_STATE_RESTORE(ID)
1727 /* Number of buckets in hashtable of basic block addresses. */
1729 #define BB_BUCKETS 311
1731 /* Maximum length of string in file bb.in. */
1733 #define BBINBUFSIZE 500
1735 /* BBINBUFSIZE-1 with double quotes. We could use #BBINBUFSIZE or
1736 "BBINBUFSIZE" but want to avoid trouble with preprocessors. */
1738 #define BBINBUFSIZESTR "499"
1742 struct bb_edge
*next
;
1743 unsigned long src_addr
;
1744 unsigned long dst_addr
;
1745 unsigned long count
;
1750 TRACE_KEEP
= 0, TRACE_ON
= 1, TRACE_OFF
= 2
1755 struct bb_func
*next
;
1758 enum bb_func_mode mode
;
1761 /* This is the connection to the outside world.
1762 The BLOCK_PROFILER macro must set __bb.blocks
1763 and __bb.blockno. */
1766 unsigned long blockno
;
1770 /* Vars to store addrs of source and destination basic blocks
1773 static unsigned long bb_src
= 0;
1774 static unsigned long bb_dst
= 0;
1776 static FILE *bb_tracefile
= (FILE *) 0;
1777 static struct bb_edge
**bb_hashbuckets
= (struct bb_edge
**) 0;
1778 static struct bb_func
*bb_func_head
= (struct bb_func
*) 0;
1779 static unsigned long bb_callcount
= 0;
1780 static int bb_mode
= 0;
1782 static unsigned long *bb_stack
= (unsigned long *) 0;
1783 static size_t bb_stacksize
= 0;
1785 static int reported
= 0;
1788 Always : Print execution frequencies of basic blocks
1790 bb_mode & 1 != 0 : Dump trace of basic blocks to file bbtrace[.gz]
1791 bb_mode & 2 != 0 : Print jump frequencies to file bb.out.
1792 bb_mode & 4 != 0 : Cut call instructions from basic block flow.
1793 bb_mode & 8 != 0 : Insert return instructions in basic block flow.
1798 /*#include <sys/types.h>*/
1799 #include <sys/stat.h>
1800 /*#include <malloc.h>*/
1802 /* Commands executed by gopen. */
1804 #define GOPENDECOMPRESS "gzip -cd "
1805 #define GOPENCOMPRESS "gzip -c >"
1807 /* Like fopen but pipes through gzip. mode may only be "r" or "w".
1808 If it does not compile, simply replace gopen by fopen and delete
1809 '.gz' from any first parameter to gopen. */
1812 gopen (char *fn
, char *mode
)
1820 if (mode
[0] != 'r' && mode
[0] != 'w')
1823 p
= fn
+ strlen (fn
)-1;
1824 use_gzip
= ((p
[-1] == '.' && (p
[0] == 'Z' || p
[0] == 'z'))
1825 || (p
[-2] == '.' && p
[-1] == 'g' && p
[0] == 'z'));
1832 char *s
= (char *) malloc (sizeof (char) * strlen (fn
)
1833 + sizeof (GOPENDECOMPRESS
));
1834 strcpy (s
, GOPENDECOMPRESS
);
1835 strcpy (s
+ (sizeof (GOPENDECOMPRESS
)-1), fn
);
1836 f
= popen (s
, mode
);
1844 char *s
= (char *) malloc (sizeof (char) * strlen (fn
)
1845 + sizeof (GOPENCOMPRESS
));
1846 strcpy (s
, GOPENCOMPRESS
);
1847 strcpy (s
+ (sizeof (GOPENCOMPRESS
)-1), fn
);
1848 if (!(f
= popen (s
, mode
)))
1849 f
= fopen (s
, mode
);
1856 return fopen (fn
, mode
);
1866 if (!fstat (fileno (f
), &buf
) && S_ISFIFO (buf
.st_mode
))
1874 #endif /* HAVE_POPEN */
1876 /* Called once per program. */
1879 __bb_exit_trace_func ()
1881 FILE *file
= fopen ("bb.out", "a");
1894 gclose (bb_tracefile
);
1896 fclose (bb_tracefile
);
1897 #endif /* HAVE_POPEN */
1900 /* Check functions in `bb.in'. */
1905 const struct bb_func
*p
;
1906 int printed_something
= 0;
1910 /* This is somewhat type incorrect. */
1911 time ((void *) &time_value
);
1913 for (p
= bb_func_head
; p
!= (struct bb_func
*) 0; p
= p
->next
)
1915 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1917 if (!ptr
->filename
|| (p
->filename
!= (char *) 0 && strcmp (p
->filename
, ptr
->filename
)))
1919 for (blk
= 0; blk
< ptr
->ncounts
; blk
++)
1921 if (!strcmp (p
->funcname
, ptr
->functions
[blk
]))
1926 if (!printed_something
)
1928 fprintf (file
, "Functions in `bb.in' not executed during basic block profiling on %s\n", ctime ((void *) &time_value
));
1929 printed_something
= 1;
1932 fprintf (file
, "\tFunction %s", p
->funcname
);
1934 fprintf (file
, " of file %s", p
->filename
);
1935 fprintf (file
, "\n" );
1940 if (printed_something
)
1941 fprintf (file
, "\n");
1947 if (!bb_hashbuckets
)
1951 fprintf (stderr
, "Profiler: out of memory\n");
1961 unsigned long addr_max
= 0;
1962 unsigned long cnt_max
= 0;
1966 /* This is somewhat type incorrect, but it avoids worrying about
1967 exactly where time.h is included from. It should be ok unless
1968 a void * differs from other pointer formats, or if sizeof (long)
1969 is < sizeof (time_t). It would be nice if we could assume the
1970 use of rational standards here. */
1972 time ((void *) &time_value
);
1973 fprintf (file
, "Basic block jump tracing");
1975 switch (bb_mode
& 12)
1978 fprintf (file
, " (with call)");
1982 /* Print nothing. */
1986 fprintf (file
, " (with call & ret)");
1990 fprintf (file
, " (with ret)");
1994 fprintf (file
, " finished on %s\n", ctime ((void *) &time_value
));
1996 for (i
= 0; i
< BB_BUCKETS
; i
++)
1998 struct bb_edge
*bucket
= bb_hashbuckets
[i
];
1999 for ( ; bucket
; bucket
= bucket
->next
)
2001 if (addr_max
< bucket
->src_addr
)
2002 addr_max
= bucket
->src_addr
;
2003 if (addr_max
< bucket
->dst_addr
)
2004 addr_max
= bucket
->dst_addr
;
2005 if (cnt_max
< bucket
->count
)
2006 cnt_max
= bucket
->count
;
2009 addr_len
= num_digits (addr_max
, 16);
2010 cnt_len
= num_digits (cnt_max
, 10);
2012 for ( i
= 0; i
< BB_BUCKETS
; i
++)
2014 struct bb_edge
*bucket
= bb_hashbuckets
[i
];
2015 for ( ; bucket
; bucket
= bucket
->next
)
2017 fprintf (file
, "Jump from block 0x%.*lx to "
2018 "block 0x%.*lx executed %*lu time(s)\n",
2019 addr_len
, bucket
->src_addr
,
2020 addr_len
, bucket
->dst_addr
,
2021 cnt_len
, bucket
->count
);
2025 fprintf (file
, "\n");
2033 /* Free allocated memory. */
2038 struct bb_func
*old
= f
;
2041 if (old
->funcname
) free (old
->funcname
);
2042 if (old
->filename
) free (old
->filename
);
2053 for (i
= 0; i
< BB_BUCKETS
; i
++)
2055 struct bb_edge
*old
, *bucket
= bb_hashbuckets
[i
];
2060 bucket
= bucket
->next
;
2064 free (bb_hashbuckets
);
2067 for (b
= bb_head
; b
; b
= b
->next
)
2068 if (b
->flags
) free (b
->flags
);
2071 /* Called once per program. */
2078 char buf
[BBINBUFSIZE
];
2081 enum bb_func_mode m
;
2084 /* Initialize destructor. */
2085 ON_EXIT (__bb_exit_func
, 0);
2088 if (!(file
= fopen ("bb.in", "r")))
2091 while(fscanf (file
, " %" BBINBUFSIZESTR
"s ", buf
) != EOF
)
2103 if (!strcmp (p
, "__bb_trace__"))
2105 else if (!strcmp (p
, "__bb_jumps__"))
2107 else if (!strcmp (p
, "__bb_hidecall__"))
2109 else if (!strcmp (p
, "__bb_showret__"))
2113 struct bb_func
*f
= (struct bb_func
*) malloc (sizeof (struct bb_func
));
2117 f
->next
= bb_func_head
;
2118 if ((pos
= strchr (p
, ':')))
2120 if (!(f
->funcname
= (char *) malloc (strlen (pos
+1)+1)))
2122 strcpy (f
->funcname
, pos
+1);
2124 if ((f
->filename
= (char *) malloc (l
+1)))
2126 strncpy (f
->filename
, p
, l
);
2127 f
->filename
[l
] = '\0';
2130 f
->filename
= (char *) 0;
2134 if (!(f
->funcname
= (char *) malloc (strlen (p
)+1)))
2136 strcpy (f
->funcname
, p
);
2137 f
->filename
= (char *) 0;
2149 bb_tracefile
= gopen ("bbtrace.gz", "w");
2154 bb_tracefile
= fopen ("bbtrace", "w");
2156 #endif /* HAVE_POPEN */
2160 bb_hashbuckets
= (struct bb_edge
**)
2161 malloc (BB_BUCKETS
* sizeof (struct bb_edge
*));
2163 memset (bb_hashbuckets
, 0, BB_BUCKETS
* sizeof (struct bb_edge
*));
2169 bb_stack
= (unsigned long *) malloc (bb_stacksize
* sizeof (*bb_stack
));
2173 /* Initialize destructor. */
2174 ON_EXIT (__bb_exit_trace_func
, 0);
2179 /* Called upon entering a basic block. */
2184 struct bb_edge
*bucket
;
2186 MACHINE_STATE_SAVE("1")
2188 if (!bb_callcount
|| (__bb
.blocks
->flags
&& (__bb
.blocks
->flags
[__bb
.blockno
] & TRACE_OFF
)))
2191 bb_dst
= __bb
.blocks
->addresses
[__bb
.blockno
];
2192 __bb
.blocks
->counts
[__bb
.blockno
]++;
2196 fwrite (&bb_dst
, sizeof (unsigned long), 1, bb_tracefile
);
2201 struct bb_edge
**startbucket
, **oldnext
;
2203 oldnext
= startbucket
2204 = & bb_hashbuckets
[ (((int) bb_src
*8) ^ (int) bb_dst
) % BB_BUCKETS
];
2205 bucket
= *startbucket
;
2207 for (bucket
= *startbucket
; bucket
;
2208 oldnext
= &(bucket
->next
), bucket
= *oldnext
)
2210 if (bucket
->src_addr
== bb_src
2211 && bucket
->dst_addr
== bb_dst
)
2214 *oldnext
= bucket
->next
;
2215 bucket
->next
= *startbucket
;
2216 *startbucket
= bucket
;
2221 bucket
= (struct bb_edge
*) malloc (sizeof (struct bb_edge
));
2227 fprintf (stderr
, "Profiler: out of memory\n");
2234 bucket
->src_addr
= bb_src
;
2235 bucket
->dst_addr
= bb_dst
;
2236 bucket
->next
= *startbucket
;
2237 *startbucket
= bucket
;
2248 MACHINE_STATE_RESTORE("1")
2252 /* Called when returning from a function and `__bb_showret__' is set. */
2255 __bb_trace_func_ret ()
2257 struct bb_edge
*bucket
;
2259 if (!bb_callcount
|| (__bb
.blocks
->flags
&& (__bb
.blocks
->flags
[__bb
.blockno
] & TRACE_OFF
)))
2264 struct bb_edge
**startbucket
, **oldnext
;
2266 oldnext
= startbucket
2267 = & bb_hashbuckets
[ (((int) bb_dst
* 8) ^ (int) bb_src
) % BB_BUCKETS
];
2268 bucket
= *startbucket
;
2270 for (bucket
= *startbucket
; bucket
;
2271 oldnext
= &(bucket
->next
), bucket
= *oldnext
)
2273 if (bucket
->src_addr
== bb_dst
2274 && bucket
->dst_addr
== bb_src
)
2277 *oldnext
= bucket
->next
;
2278 bucket
->next
= *startbucket
;
2279 *startbucket
= bucket
;
2284 bucket
= (struct bb_edge
*) malloc (sizeof (struct bb_edge
));
2290 fprintf (stderr
, "Profiler: out of memory\n");
2297 bucket
->src_addr
= bb_dst
;
2298 bucket
->dst_addr
= bb_src
;
2299 bucket
->next
= *startbucket
;
2300 *startbucket
= bucket
;
2313 /* Called upon entering the first function of a file. */
2316 __bb_init_file (struct bb
*blocks
)
2319 const struct bb_func
*p
;
2320 long blk
, ncounts
= blocks
->ncounts
;
2321 const char **functions
= blocks
->functions
;
2323 /* Set up linked list. */
2324 blocks
->zero_word
= 1;
2325 blocks
->next
= bb_head
;
2330 || !(blocks
->flags
= (char *) malloc (sizeof (char) * blocks
->ncounts
)))
2333 for (blk
= 0; blk
< ncounts
; blk
++)
2334 blocks
->flags
[blk
] = 0;
2336 for (blk
= 0; blk
< ncounts
; blk
++)
2338 for (p
= bb_func_head
; p
; p
= p
->next
)
2340 if (!strcmp (p
->funcname
, functions
[blk
])
2341 && (!p
->filename
|| !strcmp (p
->filename
, blocks
->filename
)))
2343 blocks
->flags
[blk
] |= p
->mode
;
2350 /* Called when exiting from a function. */
2356 MACHINE_STATE_SAVE("2")
2360 if ((bb_mode
& 12) && bb_stacksize
> bb_callcount
)
2362 bb_src
= bb_stack
[bb_callcount
];
2364 __bb_trace_func_ret ();
2370 MACHINE_STATE_RESTORE("2")
2374 /* Called when entering a function. */
2377 __bb_init_trace_func (struct bb
*blocks
, unsigned long blockno
)
2379 static int trace_init
= 0;
2381 MACHINE_STATE_SAVE("3")
2383 if (!blocks
->zero_word
)
2390 __bb_init_file (blocks
);
2400 if (bb_callcount
>= bb_stacksize
)
2402 size_t newsize
= bb_callcount
+ 100;
2404 bb_stack
= (unsigned long *) realloc (bb_stack
, newsize
);
2409 fprintf (stderr
, "Profiler: out of memory\n");
2413 goto stack_overflow
;
2415 bb_stacksize
= newsize
;
2417 bb_stack
[bb_callcount
] = bb_src
;
2428 else if (blocks
->flags
&& (blocks
->flags
[blockno
] & TRACE_ON
))
2434 bb_stack
[bb_callcount
] = bb_src
;
2437 MACHINE_STATE_RESTORE("3")
2440 #endif /* not inhibit_libc */
2441 #endif /* not BLOCK_PROFILER_CODE */
/* Lookup table of single-bit masks: entry N holds 1 << N for
   N = 0 .. 31.  Used by the shift-by-table helpers.  */
unsigned int __shtab[] = {
  0x00000001, 0x00000002, 0x00000004, 0x00000008,
  0x00000010, 0x00000020, 0x00000040, 0x00000080,
  0x00000100, 0x00000200, 0x00000400, 0x00000800,
  0x00001000, 0x00002000, 0x00004000, 0x00008000,
  0x00010000, 0x00020000, 0x00040000, 0x00080000,
  0x00100000, 0x00200000, 0x00400000, 0x00800000,
  0x01000000, 0x02000000, 0x04000000, 0x08000000,
  0x10000000, 0x20000000, 0x40000000, 0x80000000
};
2457 #ifdef L_clear_cache
2458 /* Clear part of an instruction cache. */
2460 #define INSN_CACHE_PLANE_SIZE (INSN_CACHE_SIZE / INSN_CACHE_DEPTH)
2463 __clear_cache (char *beg
, char *end
)
2465 #ifdef CLEAR_INSN_CACHE
2466 CLEAR_INSN_CACHE (beg
, end
);
2468 #ifdef INSN_CACHE_SIZE
2469 static char array
[INSN_CACHE_SIZE
+ INSN_CACHE_PLANE_SIZE
+ INSN_CACHE_LINE_WIDTH
];
2470 static int initialized
;
2474 typedef (*function_ptr
) ();
2476 #if (INSN_CACHE_SIZE / INSN_CACHE_LINE_WIDTH) < 16
2477 /* It's cheaper to clear the whole cache.
2478 Put in a series of jump instructions so that calling the beginning
2479 of the cache will clear the whole thing. */
2483 int ptr
= (((int) array
+ INSN_CACHE_LINE_WIDTH
- 1)
2484 & -INSN_CACHE_LINE_WIDTH
);
2485 int end_ptr
= ptr
+ INSN_CACHE_SIZE
;
2487 while (ptr
< end_ptr
)
2489 *(INSTRUCTION_TYPE
*)ptr
2490 = JUMP_AHEAD_INSTRUCTION
+ INSN_CACHE_LINE_WIDTH
;
2491 ptr
+= INSN_CACHE_LINE_WIDTH
;
2493 *(INSTRUCTION_TYPE
*) (ptr
- INSN_CACHE_LINE_WIDTH
) = RETURN_INSTRUCTION
;
2498 /* Call the beginning of the sequence. */
2499 (((function_ptr
) (((int) array
+ INSN_CACHE_LINE_WIDTH
- 1)
2500 & -INSN_CACHE_LINE_WIDTH
))
2503 #else /* Cache is large. */
2507 int ptr
= (((int) array
+ INSN_CACHE_LINE_WIDTH
- 1)
2508 & -INSN_CACHE_LINE_WIDTH
);
2510 while (ptr
< (int) array
+ sizeof array
)
2512 *(INSTRUCTION_TYPE
*)ptr
= RETURN_INSTRUCTION
;
2513 ptr
+= INSN_CACHE_LINE_WIDTH
;
2519 /* Find the location in array that occupies the same cache line as BEG. */
2521 offset
= ((int) beg
& -INSN_CACHE_LINE_WIDTH
) & (INSN_CACHE_PLANE_SIZE
- 1);
2522 start_addr
= (((int) (array
+ INSN_CACHE_PLANE_SIZE
- 1)
2523 & -INSN_CACHE_PLANE_SIZE
)
2526 /* Compute the cache alignment of the place to stop clearing. */
2527 #if 0 /* This is not needed for gcc's purposes. */
2528 /* If the block to clear is bigger than a cache plane,
2529 we clear the entire cache, and OFFSET is already correct. */
2530 if (end
< beg
+ INSN_CACHE_PLANE_SIZE
)
2532 offset
= (((int) (end
+ INSN_CACHE_LINE_WIDTH
- 1)
2533 & -INSN_CACHE_LINE_WIDTH
)
2534 & (INSN_CACHE_PLANE_SIZE
- 1));
2536 #if INSN_CACHE_DEPTH > 1
2537 end_addr
= (start_addr
& -INSN_CACHE_PLANE_SIZE
) + offset
;
2538 if (end_addr
<= start_addr
)
2539 end_addr
+= INSN_CACHE_PLANE_SIZE
;
2541 for (plane
= 0; plane
< INSN_CACHE_DEPTH
; plane
++)
2543 int addr
= start_addr
+ plane
* INSN_CACHE_PLANE_SIZE
;
2544 int stop
= end_addr
+ plane
* INSN_CACHE_PLANE_SIZE
;
2546 while (addr
!= stop
)
2548 /* Call the return instruction at ADDR. */
2549 ((function_ptr
) addr
) ();
2551 addr
+= INSN_CACHE_LINE_WIDTH
;
2554 #else /* just one plane */
2557 /* Call the return instruction at START_ADDR. */
2558 ((function_ptr
) start_addr
) ();
2560 start_addr
+= INSN_CACHE_LINE_WIDTH
;
2562 while ((start_addr
% INSN_CACHE_SIZE
) != offset
);
2563 #endif /* just one plane */
2564 #endif /* Cache is large */
2565 #endif /* Cache exists */
2566 #endif /* CLEAR_INSN_CACHE */
2569 #endif /* L_clear_cache */
2573 /* Jump to a trampoline, loading the static chain address. */
2575 #if defined(WINNT) && ! defined(__CYGWIN32__)
2587 extern int VirtualProtect (char *, int, int, int *) __attribute__((stdcall));
2591 mprotect (char *addr
, int len
, int prot
)
2608 if (VirtualProtect (addr
, len
, np
, &op
))
2616 #ifdef TRANSFER_FROM_TRAMPOLINE
2617 TRANSFER_FROM_TRAMPOLINE
2620 #if defined (NeXT) && defined (__MACH__)
2622 /* Make stack executable so we can call trampolines on stack.
2623 This is called from INITIALIZE_TRAMPOLINE in next.h. */
2627 #include <mach/mach.h>
2631 __enable_execute_stack (char *addr
)
2634 char *eaddr
= addr
+ TRAMPOLINE_SIZE
;
2635 vm_address_t a
= (vm_address_t
) addr
;
2637 /* turn on execute access on stack */
2638 r
= vm_protect (task_self (), a
, TRAMPOLINE_SIZE
, FALSE
, VM_PROT_ALL
);
2639 if (r
!= KERN_SUCCESS
)
2641 mach_error("vm_protect VM_PROT_ALL", r
);
2645 /* We inline the i-cache invalidation for speed */
2647 #ifdef CLEAR_INSN_CACHE
2648 CLEAR_INSN_CACHE (addr
, eaddr
);
2650 __clear_cache ((int) addr
, (int) eaddr
);
2654 #endif /* defined (NeXT) && defined (__MACH__) */
2658 /* Make stack executable so we can call trampolines on stack.
2659 This is called from INITIALIZE_TRAMPOLINE in convex.h. */
2661 #include <sys/mman.h>
2662 #include <sys/vmparam.h>
2663 #include <machine/machparam.h>
2666 __enable_execute_stack ()
2669 static unsigned lowest
= USRSTACK
;
2670 unsigned current
= (unsigned) &fp
& -NBPG
;
2672 if (lowest
> current
)
2674 unsigned len
= lowest
- current
;
2675 mremap (current
, &len
, PROT_READ
| PROT_WRITE
| PROT_EXEC
, MAP_PRIVATE
);
2679 /* Clear instruction cache in case an old trampoline is in it. */
2682 #endif /* __convex__ */
2686 /* Modified from the convex code above. */
2688 #include <sys/param.h>
2690 #include <sys/m88kbcs.h>
2693 __enable_execute_stack ()
2696 static unsigned long lowest
= USRSTACK
;
2697 unsigned long current
= (unsigned long) &save_errno
& -NBPC
;
2699 /* Ignore errno being set. memctl sets errno to EINVAL whenever the
2700 address is seen as 'negative'. That is the case with the stack. */
2703 if (lowest
> current
)
2705 unsigned len
=lowest
-current
;
2706 memctl(current
,len
,MCT_TEXT
);
2710 memctl(current
,NBPC
,MCT_TEXT
);
2714 #endif /* __sysV88__ */
2718 #include <sys/signal.h>
2721 /* Motorola forgot to put memctl.o in the libp version of libc881.a,
2722 so define it here, because we need it in __clear_insn_cache below */
2723 /* On older versions of this OS, no memctl or MCT_TEXT are defined;
2724 hence we enable this stuff only if MCT_TEXT is #define'd. */
2739 /* Clear instruction cache so we can call trampolines on stack.
2740 This is called from FINALIZE_TRAMPOLINE in mot3300.h. */
2743 __clear_insn_cache ()
2748 /* Preserve errno, because users would be surprised to have
2749 errno changing without explicitly calling any system-call. */
2752 /* Keep it simple : memctl (MCT_TEXT) always fully clears the insn cache.
2753 No need to use an address derived from _start or %sp, as 0 works also. */
2754 memctl(0, 4096, MCT_TEXT
);
2759 #endif /* __sysV68__ */
2763 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
2765 #include <sys/mman.h>
2766 #include <sys/types.h>
2767 #include <sys/param.h>
2768 #include <sys/vmmac.h>
2770 /* Modified from the convex code above.
2771 mremap promises to clear the i-cache. */
2774 __enable_execute_stack ()
2777 if (mprotect (((unsigned int)&fp
/PAGSIZ
)*PAGSIZ
, PAGSIZ
,
2778 PROT_READ
|PROT_WRITE
|PROT_EXEC
))
2780 perror ("mprotect in __enable_execute_stack");
2785 #endif /* __pyr__ */
2787 #if defined (sony_news) && defined (SYSTYPE_BSD)
2790 #include <sys/types.h>
2791 #include <sys/param.h>
2792 #include <syscall.h>
2793 #include <machine/sysnews.h>
2795 /* cacheflush function for NEWS-OS 4.2.
2796 This function is called from trampoline-initialize code
2797 defined in config/mips/mips.h. */
2800 cacheflush (char *beg
, int size
, int flag
)
2802 if (syscall (SYS_sysnews
, NEWS_CACHEFLUSH
, beg
, size
, FLUSH_BCACHE
))
2804 perror ("cache_flush");
2810 #endif /* sony_news */
2811 #endif /* L_trampoline */
2813 #ifndef __CYGWIN32__
2816 #include "gbl-ctors.h"
2817 /* Some systems use __main in a way incompatible with its use in gcc, in these
2818 cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
2819 give the same symbol without quotes for an alternative entry point. You
2820 must define both, or neither. */
2822 #define NAME__MAIN "__main"
2823 #define SYMBOL__MAIN __main
2826 #ifdef INIT_SECTION_ASM_OP
2827 #undef HAS_INIT_SECTION
2828 #define HAS_INIT_SECTION
2831 #if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
2832 /* Run all the global destructors on exit from the program. */
2835 __do_global_dtors ()
2837 #ifdef DO_GLOBAL_DTORS_BODY
2838 DO_GLOBAL_DTORS_BODY
;
2840 static func_ptr
*p
= __DTOR_LIST__
+ 1;
2850 #ifndef HAS_INIT_SECTION
2851 /* Run all the global constructors on entry to the program. */
2854 #define ON_EXIT(a, b)
2856 /* Make sure the exit routine is pulled in to define the globals as
2857 bss symbols, just in case the linker does not automatically pull
2858 bss definitions from the library. */
2860 extern int _exit_dummy_decl
;
2861 int *_exit_dummy_ref
= &_exit_dummy_decl
;
2862 #endif /* ON_EXIT */
2865 __do_global_ctors ()
2867 DO_GLOBAL_CTORS_BODY
;
2868 ON_EXIT (__do_global_dtors
, 0);
2870 #endif /* no HAS_INIT_SECTION */
2872 #if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
2873 /* Subroutine called automatically by `main'.
2874 Compiling a global function named `main'
2875 produces an automatic call to this function at the beginning.
2877 For many systems, this routine calls __do_global_ctors.
2878 For systems which support a .init section we use the .init section
2879 to run __do_global_ctors, so we need not do anything here. */
2884 /* Support recursive calls to `main': run initializers just once. */
2885 static int initialized
;
2889 __do_global_ctors ();
2892 #endif /* no HAS_INIT_SECTION or INVOKE__main */
2894 #endif /* L__main */
2895 #endif /* __CYGWIN32__ */
2899 #include "gbl-ctors.h"
2901 /* Provide default definitions for the lists of constructors and
2902 destructors, so that we don't get linker errors. These symbols are
2903 intentionally bss symbols, so that gld and/or collect will provide
2904 the right values. */
2906 /* We declare the lists here with two elements each,
2907 so that they are valid empty lists if no other definition is loaded. */
2908 #if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
2909 #if defined(__NeXT__) || defined(_AIX)
2910 /* After 2.3, try this definition on all systems. */
2911 func_ptr __CTOR_LIST__
[2] = {0, 0};
2912 func_ptr __DTOR_LIST__
[2] = {0, 0};
2914 func_ptr __CTOR_LIST__
[2];
2915 func_ptr __DTOR_LIST__
[2];
2917 #endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
2918 #endif /* L_ctors */
2922 #include "gbl-ctors.h"
2928 int _exit_dummy_decl
= 0; /* prevent compiler & linker warnings */
2936 static func_ptr
*atexit_chain
= 0;
2937 static long atexit_chain_length
= 0;
2938 static volatile long last_atexit_chain_slot
= -1;
2940 int atexit (func_ptr func
)
2942 if (++last_atexit_chain_slot
== atexit_chain_length
)
2944 atexit_chain_length
+= 32;
2946 atexit_chain
= (func_ptr
*) realloc (atexit_chain
, atexit_chain_length
2947 * sizeof (func_ptr
));
2949 atexit_chain
= (func_ptr
*) malloc (atexit_chain_length
2950 * sizeof (func_ptr
));
2953 atexit_chain_length
= 0;
2954 last_atexit_chain_slot
= -1;
2959 atexit_chain
[last_atexit_chain_slot
] = func
;
2962 #endif /* NEED_ATEXIT */
2964 /* If we have no known way of registering our own __do_global_dtors
2965 routine so that it will be invoked at program exit time, then we
2966 have to define our own exit routine which will get this to happen. */
2968 extern void __do_global_dtors ();
2969 extern void __bb_exit_func ();
2970 extern void _cleanup ();
2971 extern void _exit () __attribute__ ((noreturn
));
2976 #if !defined (INIT_SECTION_ASM_OP) || !defined (OBJECT_FORMAT_ELF)
2980 for ( ; last_atexit_chain_slot
-- >= 0; )
2982 (*atexit_chain
[last_atexit_chain_slot
+ 1]) ();
2983 atexit_chain
[last_atexit_chain_slot
+ 1] = 0;
2985 free (atexit_chain
);
2988 #else /* No NEED_ATEXIT */
2989 __do_global_dtors ();
2990 #endif /* No NEED_ATEXIT */
2991 #endif /* !defined (INIT_SECTION_ASM_OP) || !defined (OBJECT_FORMAT_ELF) */
2992 /* In gbl-ctors.h, ON_EXIT is defined if HAVE_ATEXIT is defined. In
2993 __bb_init_func and _bb_init_prg, __bb_exit_func is registered with
2994 ON_EXIT if ON_EXIT is defined. Thus we must not call __bb_exit_func here
2995 if HAVE_ATEXIT is defined. */
2997 #ifndef inhibit_libc
3000 #endif /* !HAVE_ATEXIT */
3009 #else /* ON_EXIT defined */
3010 int _exit_dummy_decl
= 0; /* prevent compiler & linker warnings */
3012 # ifndef HAVE_ATEXIT
3013 /* Provide a fake for atexit() using ON_EXIT. */
3014 int atexit (func_ptr func
)
3016 return ON_EXIT (func
, NULL
);
3018 # endif /* HAVE_ATEXIT */
3019 #endif /* ON_EXIT defined */
3027 /* Shared exception handling support routines. */
3029 extern void __default_terminate (void) __attribute__ ((__noreturn__
));
3032 __default_terminate ()
3037 void (*__terminate_func
)() = __default_terminate
;
3042 (*__terminate_func
)();
3046 __throw_type_match (void *catch_type
, void *throw_type
, void *obj
)
3049 printf ("__throw_type_match (): catch_type = %s, throw_type = %s\n",
3050 catch_type
, throw_type
);
3052 if (strcmp ((const char *)catch_type
, (const char *)throw_type
) == 0)
3063 /* Include definitions of EH context and table layout */
3065 #include "eh-common.h"
3066 #ifndef inhibit_libc
3070 /* This is a safeguard for dynamic handler chain. */
3072 static void *top_elt
[2];
3074 /* Allocate and return a new EH context structure. */
3076 extern void __throw ();
3081 struct eh_context
*eh
= (struct eh_context
*) malloc (sizeof *eh
);
3085 memset (eh
, 0, sizeof *eh
);
3087 eh
->dynamic_handler_chain
= top_elt
;
3093 static __gthread_key_t eh_context_key
;
3095 /* Destructor for struct eh_context. */
3097 eh_context_free (void *ptr
)
3099 __gthread_key_dtor (eh_context_key
, ptr
);
3105 /* Pointer to function to return EH context. */
3107 static struct eh_context
*eh_context_initialize ();
3108 static struct eh_context
*eh_context_static ();
3110 static struct eh_context
*eh_context_specific ();
3113 static struct eh_context
*(*get_eh_context
) () = &eh_context_initialize
;
3115 /* Routine to get EH context.
3116 This one will simply call the function pointer. */
3121 return (void *) (*get_eh_context
) ();
3124 /* Get and set the language specific info pointer. */
3129 struct eh_context
*eh
= (*get_eh_context
) ();
3135 eh_threads_initialize ()
3137 /* Try to create the key. If it fails, revert to static method,
3138 otherwise start using thread specific EH contexts. */
3139 if (__gthread_key_create (&eh_context_key
, &eh_context_free
) == 0)
3140 get_eh_context
= &eh_context_specific
;
3142 get_eh_context
= &eh_context_static
;
3144 #endif /* no __GTHREADS */
3146 /* Initialize EH context.
3147 This will be called only once, since we change GET_EH_CONTEXT
3148 pointer to another routine. */
3150 static struct eh_context
*
3151 eh_context_initialize ()
3155 static __gthread_once_t once
= __GTHREAD_ONCE_INIT
;
3156 /* Make sure that get_eh_context does not point to us anymore.
3157 Some systems have dummy thread routines in their libc that
3158 return a success (Solaris 2.6 for example). */
3159 if (__gthread_once (&once
, eh_threads_initialize
) != 0
3160 || get_eh_context
== &eh_context_initialize
)
3162 /* Use static version of EH context. */
3163 get_eh_context
= &eh_context_static
;
3166 #else /* no __GTHREADS */
3168 /* Use static version of EH context. */
3169 get_eh_context
= &eh_context_static
;
3171 #endif /* no __GTHREADS */
3173 return (*get_eh_context
) ();
3176 /* Return a static EH context. */
3178 static struct eh_context
*
3179 eh_context_static ()
3181 static struct eh_context
*eh
;
3183 eh
= new_eh_context ();
3188 /* Return a thread specific EH context. */
3190 static struct eh_context
*
3191 eh_context_specific ()
3193 struct eh_context
*eh
;
3194 eh
= (struct eh_context
*) __gthread_getspecific (eh_context_key
);
3197 eh
= new_eh_context ();
3198 if (__gthread_setspecific (eh_context_key
, (void *) eh
) != 0)
3206 /* Support routines for setjmp/longjmp exception handling. */
3208 /* Calls to __sjthrow are generated by the compiler when an exception
3209 is raised when using the setjmp/longjmp exception handling codegen
3212 #ifdef DONT_USE_BUILTIN_SETJMP
3213 extern void longjmp (void *, int);
3216 /* Routine to get the head of the current thread's dynamic handler chain
3217 use for exception handling. */
3220 __get_dynamic_handler_chain ()
3222 struct eh_context
*eh
= (*get_eh_context
) ();
3223 return &eh
->dynamic_handler_chain
;
3226 /* This is used to throw an exception when the setjmp/longjmp codegen
3227 method is used for exception handling.
3229 We call __terminate if there are no handlers left. Otherwise we run the
3230 cleanup actions off the dynamic cleanup stack, and pop the top of the
3231 dynamic handler chain, and use longjmp to transfer back to the associated
3234 extern void __sjthrow (void) __attribute__ ((__noreturn__
));
3239 struct eh_context
*eh
= (*get_eh_context
) ();
3240 void ***dhc
= &eh
->dynamic_handler_chain
;
3242 void (*func
)(void *, int);
3246 /* The cleanup chain is one word into the buffer. Get the cleanup
3248 cleanup
= (void***)&(*dhc
)[1];
3250 /* If there are any cleanups in the chain, run them now. */
3254 void **buf
= (void**)store
;
3259 #ifdef DONT_USE_BUILTIN_SETJMP
3260 if (! setjmp (&buf
[2]))
3262 if (! __builtin_setjmp (&buf
[2]))
3268 func
= (void(*)(void*, int))cleanup
[0][1];
3269 arg
= (void*)cleanup
[0][2];
3271 /* Update this before running the cleanup. */
3272 cleanup
[0] = (void **)cleanup
[0][0];
3285 /* We must call terminate if we try and rethrow an exception, when
3286 there is no exception currently active and when there are no
3288 if (! eh
->info
|| (*dhc
) == top_elt
)
3291 /* Find the jmpbuf associated with the top element of the dynamic
3292 handler chain. The jumpbuf starts two words into the buffer. */
3293 jmpbuf
= &(*dhc
)[2];
3295 /* Then we pop the top element off the dynamic handler chain. */
3296 *dhc
= (void**)(*dhc
)[0];
3298 /* And then we jump to the handler. */
3300 #ifdef DONT_USE_BUILTIN_SETJMP
3301 longjmp (jmpbuf
, 1);
3303 __builtin_longjmp (jmpbuf
, 1);
3307 /* Run cleanups on the dynamic cleanup stack for the current dynamic
3308 handler, then pop the handler off the dynamic handler stack, and
3309 then throw. This is used to skip the first handler, and transfer
3310 control to the next handler in the dynamic handler stack. */
3312 extern void __sjpopnthrow (void) __attribute__ ((__noreturn__
));
3317 struct eh_context
*eh
= (*get_eh_context
) ();
3318 void ***dhc
= &eh
->dynamic_handler_chain
;
3319 void (*func
)(void *, int);
3323 /* The cleanup chain is one word into the buffer. Get the cleanup
3325 cleanup
= (void***)&(*dhc
)[1];
3327 /* If there are any cleanups in the chain, run them now. */
3331 void **buf
= (void**)store
;
3336 #ifdef DONT_USE_BUILTIN_SETJMP
3337 if (! setjmp (&buf
[2]))
3339 if (! __builtin_setjmp (&buf
[2]))
3345 func
= (void(*)(void*, int))cleanup
[0][1];
3346 arg
= (void*)cleanup
[0][2];
3348 /* Update this before running the cleanup. */
3349 cleanup
[0] = (void **)cleanup
[0][0];
3362 /* Then we pop the top element off the dynamic handler chain. */
3363 *dhc
= (void**)(*dhc
)[0];
3368 /* Support code for all exception region-based exception handling. */
3371 __eh_rtime_match (void *rtime
)
3374 __eh_matcher matcher
;
3377 info
= *(__get_eh_info ());
3378 matcher
= ((__eh_info
*)info
)->match_function
;
3381 #ifndef inhibit_libc
3382 fprintf (stderr
, "Internal Compiler Bug: No runtime type matcher.");
3386 ret
= (*matcher
) (info
, rtime
, (void *)0);
3387 return (ret
!= NULL
);
3390 /* This value identifies the place from which an exception is being
3393 #ifdef EH_TABLE_LOOKUP
3399 #ifdef DWARF2_UNWIND_INFO
3402 /* Return the table version of an exception descriptor */
3405 __get_eh_table_version (exception_descriptor
*table
)
3407 return table
->lang
.version
;
3410 /* Return the originating table language of an exception descriptor */
3413 __get_eh_table_language (exception_descriptor
*table
)
3415 return table
->lang
.language
;
3418 /* This routine takes a PC and a pointer to the exception region TABLE for
3419 its translation unit, and returns the address of the exception handler
3420 associated with the closest exception table handler entry associated
3421 with that PC, or 0 if there are no table entries the PC fits in.
3423 In the event of a tie, we have to give the last entry, as it represents
3427 old_find_exception_handler (void *pc
, old_exception_table
*table
)
3434 /* We can't do a binary search because the table isn't guaranteed
3435 to be sorted from function to function. */
3436 for (pos
= 0; table
[pos
].start_region
!= (void *) -1; ++pos
)
3438 if (table
[pos
].start_region
<= pc
&& table
[pos
].end_region
> pc
)
3440 /* This can apply. Make sure it is at least as small as
3441 the previous best. */
3442 if (best
== -1 || (table
[pos
].end_region
<= table
[best
].end_region
3443 && table
[pos
].start_region
>= table
[best
].start_region
))
3446 /* But it is sorted by starting PC within a function. */
3447 else if (best
>= 0 && table
[pos
].start_region
> pc
)
3451 return table
[best
].exception_handler
;
3458 find_exception_handler (void *pc
, exception_descriptor
*table
, void *eh_info
)
3462 /* The new model assumes the table is sorted inner-most out so the
3463 first region we find which matches is the correct one */
3467 exception_table
*tab
= &(table
->table
[0]);
3469 /* Subtract 1 from the PC to avoid hitting the next region */
3472 /* We can't do a binary search because the table is in inner-most
3473 to outermost address ranges within functions */
3474 for (pos
= 0; tab
[pos
].start_region
!= (void *) -1; pos
++)
3476 if (tab
[pos
].start_region
<= pc
&& tab
[pos
].end_region
> pc
)
3478 if (tab
[pos
].match_info
)
3480 __eh_matcher matcher
= ((__eh_info
*)eh_info
)->match_function
;
3481 /* match info but no matcher is NOT a match */
3484 ret
= (*matcher
)(eh_info
, tab
[pos
].match_info
, table
);
3486 return tab
[pos
].exception_handler
;
3490 return tab
[pos
].exception_handler
;
3497 #endif /* DWARF2_UNWIND_INFO */
3498 #endif /* EH_TABLE_LOOKUP */
3500 #ifdef DWARF2_UNWIND_INFO
3501 /* Support code for exception handling using static unwind information. */
3505 /* This type is used in get_reg and put_reg to deal with ABIs where a void*
3506 is smaller than a word, such as the Irix 6 n32 ABI. We cast twice to
3507 avoid a warning about casting between int and pointer of different
/* An integer type exactly as wide as a pointer (see comment above about
   ABIs such as Irix 6 n32 where void* is narrower than a word).  */
typedef int ptr_type __attribute__ ((mode (pointer)));
3512 #ifdef INCOMING_REGNO
3513 /* Is the saved value for register REG in frame UDATA stored in a register
3514 window in the previous frame? */
3516 /* ??? The Sparc INCOMING_REGNO references TARGET_FLAT. This allows us
3517 to use the macro here. One wonders, though, that perhaps TARGET_FLAT
3518 compiled functions won't work with the frame-unwind stuff here.
3519 Perhaps the entirety of in_reg_window should be conditional on having
3520 seen a DW_CFA_GNU_window_save? */
3521 #define target_flags 0
3524 in_reg_window (int reg
, frame_state
*udata
)
3526 if (udata
->saved
[reg
] == REG_SAVED_REG
)
3527 return INCOMING_REGNO (reg
) == reg
;
3528 if (udata
->saved
[reg
] != REG_SAVED_OFFSET
)
3531 #ifdef STACK_GROWS_DOWNWARD
3532 return udata
->reg_or_offset
[reg
] > 0;
3534 return udata
->reg_or_offset
[reg
] < 0;
3538 static inline int in_reg_window (int reg
, frame_state
*udata
) { return 0; }
3539 #endif /* INCOMING_REGNO */
3541 /* Get the address of register REG as saved in UDATA, where SUB_UDATA is a
3542 frame called by UDATA or 0. */
3545 get_reg_addr (unsigned reg
, frame_state
*udata
, frame_state
*sub_udata
)
3547 while (udata
->saved
[reg
] == REG_SAVED_REG
)
3549 reg
= udata
->reg_or_offset
[reg
];
3550 if (in_reg_window (reg
, udata
))
3556 if (udata
->saved
[reg
] == REG_SAVED_OFFSET
)
3557 return (word_type
*)(udata
->cfa
+ udata
->reg_or_offset
[reg
]);
3562 /* Get the value of register REG as saved in UDATA, where SUB_UDATA is a
3563 frame called by UDATA or 0. */
3565 static inline void *
3566 get_reg (unsigned reg
, frame_state
*udata
, frame_state
*sub_udata
)
3568 return (void *)(ptr_type
) *get_reg_addr (reg
, udata
, sub_udata
);
3571 /* Overwrite the saved value for register REG in frame UDATA with VAL. */
3574 put_reg (unsigned reg
, void *val
, frame_state
*udata
)
3576 *get_reg_addr (reg
, udata
, NULL
) = (word_type
)(ptr_type
) val
;
3579 /* Copy the saved value for register REG from frame UDATA to frame
3580 TARGET_UDATA. Unlike the previous two functions, this can handle
3581 registers that are not one word large. */
3584 copy_reg (unsigned reg
, frame_state
*udata
, frame_state
*target_udata
)
3586 word_type
*preg
= get_reg_addr (reg
, udata
, NULL
);
3587 word_type
*ptreg
= get_reg_addr (reg
, target_udata
, NULL
);
3589 memcpy (ptreg
, preg
, __builtin_dwarf_reg_size (reg
));
3592 /* Retrieve the return address for frame UDATA. */
3594 static inline void *
3595 get_return_addr (frame_state
*udata
, frame_state
*sub_udata
)
3597 return __builtin_extract_return_addr
3598 (get_reg (udata
->retaddr_column
, udata
, sub_udata
));
3601 /* Overwrite the return address for frame UDATA with VAL. */
3604 put_return_addr (void *val
, frame_state
*udata
)
3606 val
= __builtin_frob_return_addr (val
);
3607 put_reg (udata
->retaddr_column
, val
, udata
);
3610 /* Given the current frame UDATA and its return address PC, return the
3611 information about the calling frame in CALLER_UDATA. */
3614 next_stack_level (void *pc
, frame_state
*udata
, frame_state
*caller_udata
)
3616 caller_udata
= __frame_state_for (pc
, caller_udata
);
3620 /* Now go back to our caller's stack frame. If our caller's CFA register
3621 was saved in our stack frame, restore it; otherwise, assume the CFA
3622 register is SP and restore it to our CFA value. */
3623 if (udata
->saved
[caller_udata
->cfa_reg
])
3624 caller_udata
->cfa
= get_reg (caller_udata
->cfa_reg
, udata
, 0);
3626 caller_udata
->cfa
= udata
->cfa
;
3627 caller_udata
->cfa
+= caller_udata
->cfa_offset
;
3629 return caller_udata
;
3632 /* We first search for an exception handler, and if we don't find
3633 it, we call __terminate on the current stack frame so that we may
3634 use the debugger to walk the stack and understand why no handler
3637 If we find one, then we unwind the frames down to the one that
3638 has the handler and transfer control into the handler. */
3640 /*extern void __throw(void) __attribute__ ((__noreturn__));*/
3645 struct eh_context
*eh
= (*get_eh_context
) ();
3646 void *saved_pc
, *pc
, *handler
;
3647 frame_state ustruct
, ustruct2
;
3648 frame_state
*udata
= &ustruct
;
3649 frame_state
*sub_udata
= &ustruct2
;
3650 frame_state my_ustruct
, *my_udata
= &my_ustruct
;
3652 int new_exception_model
;
3654 /* This is required for C++ semantics. We must call terminate if we
3655 try and rethrow an exception, when there is no exception currently
3660 /* Start at our stack frame. */
3662 udata
= __frame_state_for (&&label
, udata
);
3666 /* We need to get the value from the CFA register. */
3667 udata
->cfa
= __builtin_dwarf_cfa ();
3669 memcpy (my_udata
, udata
, sizeof (*udata
));
3671 /* Do any necessary initialization to access arbitrary stack frames.
3672 On the SPARC, this means flushing the register windows. */
3673 __builtin_unwind_init ();
3675 /* Now reset pc to the right throw point. */
3676 pc
= __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
3682 frame_state
*p
= udata
;
3683 udata
= next_stack_level (pc
, udata
, sub_udata
);
3686 /* If we couldn't find the next frame, we lose. */
3690 if (udata
->eh_ptr
== NULL
)
3691 new_exception_model
= 0;
3693 new_exception_model
= (((exception_descriptor
*)(udata
->eh_ptr
))->
3694 runtime_id_field
== NEW_EH_RUNTIME
);
3696 if (new_exception_model
)
3697 handler
= find_exception_handler (pc
, udata
->eh_ptr
, eh
->info
);
3699 handler
= old_find_exception_handler (pc
, udata
->eh_ptr
);
3701 /* If we found one, we can stop searching. */
3704 args_size
= udata
->args_size
;
3708 /* Otherwise, we continue searching. We subtract 1 from PC to avoid
3709 hitting the beginning of the next region. */
3710 pc
= get_return_addr (udata
, sub_udata
) - 1;
3713 /* If we haven't found a handler by now, this is an unhandled
3718 eh
->handler_label
= handler
;
3721 /* We found a handler in the throw context, no need to unwind. */
3727 /* Unwind all the frames between this one and the handler by copying
3728 their saved register values into our register save slots. */
3730 /* Remember the PC where we found the handler. */
3731 void *handler_pc
= pc
;
3733 /* Start from the throw context again. */
3735 memcpy (udata
, my_udata
, sizeof (*udata
));
3737 while (pc
!= handler_pc
)
3739 frame_state
*p
= udata
;
3740 udata
= next_stack_level (pc
, udata
, sub_udata
);
3743 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; ++i
)
3744 if (i
!= udata
->retaddr_column
&& udata
->saved
[i
])
3746 /* If you modify the saved value of the return address
3747 register on the SPARC, you modify the return address for
3748 your caller's frame. Don't do that here, as it will
3749 confuse get_return_addr. */
3750 if (in_reg_window (i
, udata
)
3751 && udata
->saved
[udata
->retaddr_column
] == REG_SAVED_REG
3752 && udata
->reg_or_offset
[udata
->retaddr_column
] == i
)
3754 copy_reg (i
, udata
, my_udata
);
3757 pc
= get_return_addr (udata
, sub_udata
) - 1;
3760 /* But we do need to update the saved return address register from
3761 the last frame we unwind, or the handler frame will have the wrong
3763 if (udata
->saved
[udata
->retaddr_column
] == REG_SAVED_REG
)
3765 i
= udata
->reg_or_offset
[udata
->retaddr_column
];
3766 if (in_reg_window (i
, udata
))
3767 copy_reg (i
, udata
, my_udata
);
3773 __builtin_eh_return ((void *)eh
,
3774 #ifdef STACK_GROWS_DOWNWARD
3775 udata
->cfa
- my_udata
->cfa
,
3777 my_udata
->cfa
- udata
->cfa
,
3781 /* Epilogue: restore the handler frame's register values and return
3784 #endif /* DWARF2_UNWIND_INFO */
3789 #ifndef inhibit_libc
3790 /* This gets us __GNU_LIBRARY__. */
3791 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
3794 #ifdef __GNU_LIBRARY__
3795 /* Avoid forcing the library's meaning of `write' on the user program
3796 by using the "internal" name (for use within the library) */
3797 #define write(fd, buf, n) __write((fd), (buf), (n))
3799 #endif /* inhibit_libc */
3801 #define MESSAGE "pure virtual method called\n"
3806 #ifndef inhibit_libc
3807 write (2, MESSAGE
, sizeof (MESSAGE
) - 1);