/* More subroutines needed by GCC output code on some machines.  */
/* Compile this one with gcc.  */
/* Copyright (C) 1989-2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "libgcc_tm.h"

#ifdef HAVE_GAS_HIDDEN
#define ATTRIBUTE_HIDDEN  __attribute__ ((__visibility__ ("hidden")))
#else
#define ATTRIBUTE_HIDDEN
#endif

/* Work out the largest "word" size that we can deal with on this target.  */
#if MIN_UNITS_PER_WORD > 4
# define LIBGCC2_MAX_UNITS_PER_WORD 8
#elif (MIN_UNITS_PER_WORD > 2 \
       || (MIN_UNITS_PER_WORD > 1 && __SIZEOF_LONG_LONG__ > 4))
# define LIBGCC2_MAX_UNITS_PER_WORD 4
#else
# define LIBGCC2_MAX_UNITS_PER_WORD MIN_UNITS_PER_WORD
#endif

/* Work out what word size we are using for this compilation.
   The value can be set on the command line.  */
#ifndef LIBGCC2_UNITS_PER_WORD
#define LIBGCC2_UNITS_PER_WORD LIBGCC2_MAX_UNITS_PER_WORD
#endif

#if LIBGCC2_UNITS_PER_WORD <= LIBGCC2_MAX_UNITS_PER_WORD

#include "libgcc2.h"

#ifdef DECLARE_LIBRARY_RENAMES
  DECLARE_LIBRARY_RENAMES
#endif

#if defined (L_negdi2)
DWtype
__negdi2 (DWtype u)
{
  const DWunion uu = {.ll = u};
  const DWunion w = { {.low = -uu.s.low,
		       .high = -uu.s.high - ((UWtype) -uu.s.low > 0) } };

  return w.ll;
}
#endif

#ifdef L_addvsi3
Wtype
__addvSI3 (Wtype a, Wtype b)
{
  Wtype w;

  if (__builtin_add_overflow (a, b, &w))
    abort ();

  return w;
}
#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
SItype
__addvsi3 (SItype a, SItype b)
{
  SItype w;

  if (__builtin_add_overflow (a, b, &w))
    abort ();

  return w;
}
#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
#endif
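/* Usage sketch (assuming a 32-bit Wtype): the __addv/__subv/__mulv/__negv/
   __absv entry points implement the checked (trapping) arithmetic used by
   -ftrapv, so __addvSI3 (0x7ffffffe, 1) returns 0x7fffffff while
   __addvSI3 (0x7fffffff, 1) overflows and calls abort () instead of
   wrapping around.  */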

#ifdef L_addvdi3
DWtype
__addvDI3 (DWtype a, DWtype b)
{
  DWtype w;

  if (__builtin_add_overflow (a, b, &w))
    abort ();

  return w;
}
#endif

#ifdef L_subvsi3
Wtype
__subvSI3 (Wtype a, Wtype b)
{
  Wtype w;

  if (__builtin_sub_overflow (a, b, &w))
    abort ();

  return w;
}
#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
SItype
__subvsi3 (SItype a, SItype b)
{
  SItype w;

  if (__builtin_sub_overflow (a, b, &w))
    abort ();

  return w;
}
#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
#endif

#ifdef L_subvdi3
DWtype
__subvDI3 (DWtype a, DWtype b)
{
  DWtype w;

  if (__builtin_sub_overflow (a, b, &w))
    abort ();

  return w;
}
#endif

#ifdef L_mulvsi3
Wtype
__mulvSI3 (Wtype a, Wtype b)
{
  Wtype w;

  if (__builtin_mul_overflow (a, b, &w))
    abort ();

  return w;
}
#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
SItype
__mulvsi3 (SItype a, SItype b)
{
  SItype w;

  if (__builtin_mul_overflow (a, b, &w))
    abort ();

  return w;
}
#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
#endif

#ifdef L_negvsi2
Wtype
__negvSI2 (Wtype a)
{
  Wtype w;

  if (__builtin_sub_overflow (0, a, &w))
    abort ();

  return w;
}
#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
SItype
__negvsi2 (SItype a)
{
  SItype w;

  if (__builtin_sub_overflow (0, a, &w))
    abort ();

  return w;
}
#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
#endif

#ifdef L_negvdi2
DWtype
__negvDI2 (DWtype a)
{
  DWtype w;

  if (__builtin_sub_overflow (0, a, &w))
    abort ();

  return w;
}
#endif

#ifdef L_absvsi2
Wtype
__absvSI2 (Wtype a)
{
  const Wtype v = 0 - (a < 0);
  Wtype w;

  if (__builtin_add_overflow (a, v, &w))
    abort ();

  return v ^ w;
}
#ifdef COMPAT_SIMODE_TRAPPING_ARITHMETIC
SItype
__absvsi2 (SItype a)
{
  const SItype v = 0 - (a < 0);
  SItype w;

  if (__builtin_add_overflow (a, v, &w))
    abort ();

  return v ^ w;
}
#endif /* COMPAT_SIMODE_TRAPPING_ARITHMETIC */
#endif
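/* Worked example of the branchless absolute value above: for a = -5 we get
   v = -1, the checked addition gives w = a + v = -6, and v ^ w = ~(-6) = 5;
   for a >= 0, v = 0 and the result is a itself.  The only input that
   overflows the addition is the most negative Wtype value, whose absolute
   value is not representable, so it aborts.  */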

#ifdef L_absvdi2
DWtype
__absvDI2 (DWtype a)
{
  const DWtype v = 0 - (a < 0);
  DWtype w;

  if (__builtin_add_overflow (a, v, &w))
    abort ();

  return v ^ w;
}
#endif
253\f
254#ifdef L_mulvdi3
66f77154 255DWtype
0aec6014 256__mulvDI3 (DWtype u, DWtype v)
91ce572a 257{
4c20b2e7
BH
258 /* The unchecked multiplication needs 3 Wtype x Wtype multiplications,
259 but the checked multiplication needs only two. */
b982024e
KG
260 const DWunion uu = {.ll = u};
261 const DWunion vv = {.ll = v};
91ce572a 262
4f2e0d5e 263 if (__builtin_expect (uu.s.high == uu.s.low >> (W_TYPE_SIZE - 1), 1))
4c20b2e7
BH
264 {
265 /* u fits in a single Wtype. */
4f2e0d5e 266 if (__builtin_expect (vv.s.high == vv.s.low >> (W_TYPE_SIZE - 1), 1))
4c20b2e7
BH
267 {
268 /* v fits in a single Wtype as well. */
269 /* A single multiplication. No overflow risk. */
270 return (DWtype) uu.s.low * (DWtype) vv.s.low;
271 }
272 else
273 {
274 /* Two multiplications. */
b982024e
KG
275 DWunion w0 = {.ll = (UDWtype) (UWtype) uu.s.low
276 * (UDWtype) (UWtype) vv.s.low};
277 DWunion w1 = {.ll = (UDWtype) (UWtype) uu.s.low
278 * (UDWtype) (UWtype) vv.s.high};
4c20b2e7 279
4c20b2e7
BH
280 if (vv.s.high < 0)
281 w1.s.high -= uu.s.low;
282 if (uu.s.low < 0)
283 w1.ll -= vv.ll;
284 w1.ll += (UWtype) w0.s.high;
4f2e0d5e 285 if (__builtin_expect (w1.s.high == w1.s.low >> (W_TYPE_SIZE - 1), 1))
4c20b2e7
BH
286 {
287 w0.s.high = w1.s.low;
288 return w0.ll;
289 }
290 }
291 }
292 else
293 {
4f2e0d5e 294 if (__builtin_expect (vv.s.high == vv.s.low >> (W_TYPE_SIZE - 1), 1))
4c20b2e7
BH
295 {
296 /* v fits into a single Wtype. */
297 /* Two multiplications. */
b982024e
KG
298 DWunion w0 = {.ll = (UDWtype) (UWtype) uu.s.low
299 * (UDWtype) (UWtype) vv.s.low};
300 DWunion w1 = {.ll = (UDWtype) (UWtype) uu.s.high
301 * (UDWtype) (UWtype) vv.s.low};
4c20b2e7 302
4c20b2e7
BH
303 if (uu.s.high < 0)
304 w1.s.high -= vv.s.low;
305 if (vv.s.low < 0)
306 w1.ll -= uu.ll;
307 w1.ll += (UWtype) w0.s.high;
4f2e0d5e 308 if (__builtin_expect (w1.s.high == w1.s.low >> (W_TYPE_SIZE - 1), 1))
4c20b2e7
BH
309 {
310 w0.s.high = w1.s.low;
311 return w0.ll;
312 }
313 }
314 else
315 {
316 /* A few sign checks and a single multiplication. */
317 if (uu.s.high >= 0)
318 {
319 if (vv.s.high >= 0)
320 {
321 if (uu.s.high == 0 && vv.s.high == 0)
322 {
b982024e
KG
323 const DWtype w = (UDWtype) (UWtype) uu.s.low
324 * (UDWtype) (UWtype) vv.s.low;
4c20b2e7
BH
325 if (__builtin_expect (w >= 0, 1))
326 return w;
327 }
328 }
329 else
330 {
331 if (uu.s.high == 0 && vv.s.high == (Wtype) -1)
332 {
b982024e
KG
333 DWunion ww = {.ll = (UDWtype) (UWtype) uu.s.low
334 * (UDWtype) (UWtype) vv.s.low};
4c20b2e7 335
4c20b2e7
BH
336 ww.s.high -= uu.s.low;
337 if (__builtin_expect (ww.s.high < 0, 1))
338 return ww.ll;
339 }
340 }
341 }
342 else
343 {
344 if (vv.s.high >= 0)
345 {
346 if (uu.s.high == (Wtype) -1 && vv.s.high == 0)
347 {
b982024e
KG
348 DWunion ww = {.ll = (UDWtype) (UWtype) uu.s.low
349 * (UDWtype) (UWtype) vv.s.low};
4c20b2e7 350
4c20b2e7
BH
351 ww.s.high -= vv.s.low;
352 if (__builtin_expect (ww.s.high < 0, 1))
353 return ww.ll;
354 }
355 }
356 else
357 {
e7176f75
JJ
358 if ((uu.s.high & vv.s.high) == (Wtype) -1
359 && (uu.s.low | vv.s.low) != 0)
4c20b2e7 360 {
b982024e
KG
361 DWunion ww = {.ll = (UDWtype) (UWtype) uu.s.low
362 * (UDWtype) (UWtype) vv.s.low};
4c20b2e7 363
4c20b2e7
BH
364 ww.s.high -= uu.s.low;
365 ww.s.high -= vv.s.low;
366 if (__builtin_expect (ww.s.high >= 0, 1))
367 return ww.ll;
368 }
369 }
370 }
371 }
372 }
91ce572a 373
4c20b2e7
BH
374 /* Overflow. */
375 abort ();
91ce572a
CC
376}
377#endif
378\f
203b91b9 379
3d042e77 380/* Unless shift functions are defined with full ANSI prototypes,
c7ff6e7a 381 parameter b will be promoted to int if shift_count_type is smaller than an int. */
203b91b9 382#ifdef L_lshrdi3
996ed075 383DWtype
c7ff6e7a 384__lshrdi3 (DWtype u, shift_count_type b)
203b91b9 385{
203b91b9
RS
386 if (b == 0)
387 return u;
388
b982024e 389 const DWunion uu = {.ll = u};
fdf3e18a 390 const shift_count_type bm = W_TYPE_SIZE - b;
b982024e 391 DWunion w;
203b91b9 392
203b91b9
RS
393 if (bm <= 0)
394 {
395 w.s.high = 0;
6da9c622 396 w.s.low = (UWtype) uu.s.high >> -bm;
203b91b9
RS
397 }
398 else
399 {
b982024e 400 const UWtype carries = (UWtype) uu.s.high << bm;
6da9c622
RK
401
402 w.s.high = (UWtype) uu.s.high >> b;
403 w.s.low = ((UWtype) uu.s.low >> b) | carries;
203b91b9
RS
404 }
405
406 return w.ll;
407}
408#endif
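/* Worked example (assuming a 32-bit UWtype, so W_TYPE_SIZE == 32):
   __lshrdi3 (0x1122334455667788, 8) splits the operand into
   high = 0x11223344 and low = 0x55667788; since bm = 24 > 0 the carries are
   0x11223344 << 24 = 0x44000000, giving low = 0x44556677, high = 0x00112233,
   i.e. the expected 0x0011223344556677.  */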
409
410#ifdef L_ashldi3
996ed075 411DWtype
c7ff6e7a 412__ashldi3 (DWtype u, shift_count_type b)
203b91b9 413{
203b91b9
RS
414 if (b == 0)
415 return u;
416
b982024e 417 const DWunion uu = {.ll = u};
fdf3e18a 418 const shift_count_type bm = W_TYPE_SIZE - b;
b982024e 419 DWunion w;
203b91b9 420
203b91b9
RS
421 if (bm <= 0)
422 {
423 w.s.low = 0;
6da9c622 424 w.s.high = (UWtype) uu.s.low << -bm;
203b91b9
RS
425 }
426 else
427 {
b982024e 428 const UWtype carries = (UWtype) uu.s.low >> bm;
6da9c622
RK
429
430 w.s.low = (UWtype) uu.s.low << b;
431 w.s.high = ((UWtype) uu.s.high << b) | carries;
203b91b9
RS
432 }
433
434 return w.ll;
435}
436#endif
437
438#ifdef L_ashrdi3
996ed075 439DWtype
c7ff6e7a 440__ashrdi3 (DWtype u, shift_count_type b)
203b91b9 441{
203b91b9
RS
442 if (b == 0)
443 return u;
444
b982024e 445 const DWunion uu = {.ll = u};
fdf3e18a 446 const shift_count_type bm = W_TYPE_SIZE - b;
b982024e 447 DWunion w;
203b91b9 448
203b91b9
RS
449 if (bm <= 0)
450 {
451 /* w.s.high = 1..1 or 0..0 */
fdf3e18a 452 w.s.high = uu.s.high >> (W_TYPE_SIZE - 1);
203b91b9
RS
453 w.s.low = uu.s.high >> -bm;
454 }
455 else
456 {
b982024e 457 const UWtype carries = (UWtype) uu.s.high << bm;
6da9c622 458
203b91b9 459 w.s.high = uu.s.high >> b;
6da9c622 460 w.s.low = ((UWtype) uu.s.low >> b) | carries;
203b91b9
RS
461 }
462
463 return w.ll;
464}
465#endif
466\f
167fa32c 467#ifdef L_bswapsi2
e4b6bec2
EC
468SItype
469__bswapsi2 (SItype u)
167fa32c 470{
a8ae2392
SK
471 return ((((u) & 0xff000000u) >> 24)
472 | (((u) & 0x00ff0000u) >> 8)
473 | (((u) & 0x0000ff00u) << 8)
474 | (((u) & 0x000000ffu) << 24));
167fa32c
EC
475}
476#endif
477#ifdef L_bswapdi2
e4b6bec2
EC
478DItype
479__bswapdi2 (DItype u)
167fa32c
EC
480{
481 return ((((u) & 0xff00000000000000ull) >> 56)
482 | (((u) & 0x00ff000000000000ull) >> 40)
483 | (((u) & 0x0000ff0000000000ull) >> 24)
484 | (((u) & 0x000000ff00000000ull) >> 8)
485 | (((u) & 0x00000000ff000000ull) << 8)
486 | (((u) & 0x0000000000ff0000ull) << 24)
487 | (((u) & 0x000000000000ff00ull) << 40)
488 | (((u) & 0x00000000000000ffull) << 56));
489}
490#endif
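/* Examples: __bswapsi2 (0x12345678) == 0x78563412, and likewise
   __bswapdi2 (0x0102030405060708) == 0x0807060504030201.  */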
dfff898c
RH
491#ifdef L_ffssi2
492#undef int
dfff898c
RH
493int
494__ffsSI2 (UWtype u)
495{
496 UWtype count;
497
498 if (u == 0)
499 return 0;
500
501 count_trailing_zeros (count, u);
502 return count + 1;
503}
504#endif
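/* __ffsSI2 returns the 1-based index of the least significant set bit,
   e.g. __ffsSI2 (0) == 0, __ffsSI2 (1) == 1 and __ffsSI2 (0x8) == 4.  */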
505\f
aa66bd06 506#ifdef L_ffsdi2
dabb3f04 507#undef int
dabb3f04 508int
dfff898c 509__ffsDI2 (DWtype u)
aa66bd06 510{
b982024e 511 const DWunion uu = {.ll = u};
d6eacd48
RH
512 UWtype word, count, add;
513
d6eacd48
RH
514 if (uu.s.low != 0)
515 word = uu.s.low, add = 0;
516 else if (uu.s.high != 0)
fdf3e18a 517 word = uu.s.high, add = W_TYPE_SIZE;
d6eacd48
RH
518 else
519 return 0;
520
521 count_trailing_zeros (count, word);
522 return count + add + 1;
aa66bd06
RS
523}
524#endif
525\f
203b91b9 526#ifdef L_muldi3
996ed075
JJ
527DWtype
528__muldi3 (DWtype u, DWtype v)
203b91b9 529{
b982024e
KG
530 const DWunion uu = {.ll = u};
531 const DWunion vv = {.ll = v};
532 DWunion w = {.ll = __umulsidi3 (uu.s.low, vv.s.low)};
203b91b9 533
996ed075
JJ
534 w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high
535 + (UWtype) uu.s.high * (UWtype) vv.s.low);
203b91b9
RS
536
537 return w.ll;
538}
539#endif
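/* The identity used above, writing B for 2**W_TYPE_SIZE:
   (u1*B + u0) * (v1*B + v0) == u0*v0 + (u0*v1 + u1*v0)*B  (mod B*B),
   so the full product of the low halves plus the two cross products added
   into the high word give the low 2*W_TYPE_SIZE bits of the result.  */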
540\f
59798a0c
UW
541#if (defined (L_udivdi3) || defined (L_divdi3) || \
542 defined (L_umoddi3) || defined (L_moddi3))
f8eef883 543#if defined (sdiv_qrnnd)
59798a0c
UW
544#define L_udiv_w_sdiv
545#endif
f8eef883 546#endif
59798a0c 547
3904131a 548#ifdef L_udiv_w_sdiv
ce13d15f 549#if defined (sdiv_qrnnd)
59798a0c
UW
550#if (defined (L_udivdi3) || defined (L_divdi3) || \
551 defined (L_umoddi3) || defined (L_moddi3))
1ab9ba62 552static inline __attribute__ ((__always_inline__))
59798a0c 553#endif
996ed075
JJ
554UWtype
555__udiv_w_sdiv (UWtype *rp, UWtype a1, UWtype a0, UWtype d)
431b1ee0 556{
996ed075
JJ
557 UWtype q, r;
558 UWtype c0, c1, b1;
431b1ee0 559
996ed075 560 if ((Wtype) d >= 0)
431b1ee0 561 {
996ed075 562 if (a1 < d - a1 - (a0 >> (W_TYPE_SIZE - 1)))
431b1ee0 563 {
ea4b7848 564 /* Dividend, divisor, and quotient are nonnegative. */
431b1ee0
TG
565 sdiv_qrnnd (q, r, a1, a0, d);
566 }
567 else
568 {
ea4b7848 569 /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d. */
996ed075 570 sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (W_TYPE_SIZE - 1));
ea4b7848 571 /* Divide (c1*2^32 + c0) by d. */
431b1ee0 572 sdiv_qrnnd (q, r, c1, c0, d);
ea4b7848 573 /* Add 2^31 to quotient. */
996ed075 574 q += (UWtype) 1 << (W_TYPE_SIZE - 1);
431b1ee0
TG
575 }
576 }
577 else
578 {
579 b1 = d >> 1; /* d/2, between 2^30 and 2^31 - 1 */
580 c1 = a1 >> 1; /* A/2 */
996ed075 581 c0 = (a1 << (W_TYPE_SIZE - 1)) + (a0 >> 1);
431b1ee0
TG
582
583 if (a1 < b1) /* A < 2^32*b1, so A/2 < 2^31*b1 */
584 {
585 sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
586
587 r = 2*r + (a0 & 1); /* Remainder from A/(2*b1) */
588 if ((d & 1) != 0)
589 {
590 if (r >= q)
591 r = r - q;
592 else if (q - r <= d)
593 {
594 r = r - q + d;
595 q--;
596 }
597 else
598 {
599 r = r - q + 2*d;
600 q -= 2;
601 }
602 }
603 }
604 else if (c1 < b1) /* So 2^31 <= (A/2)/b1 < 2^32 */
605 {
606 c1 = (b1 - 1) - c1;
607 c0 = ~c0; /* logical NOT */
608
609 sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
610
611 q = ~q; /* (A/2)/b1 */
612 r = (b1 - 1) - r;
613
614 r = 2*r + (a0 & 1); /* A/(2*b1) */
615
616 if ((d & 1) != 0)
617 {
618 if (r >= q)
619 r = r - q;
620 else if (q - r <= d)
621 {
622 r = r - q + d;
623 q--;
624 }
625 else
626 {
627 r = r - q + 2*d;
628 q -= 2;
629 }
630 }
631 }
632 else /* Implies c1 = b1 */
633 { /* Hence a1 = d - 1 = 2*b1 - 1 */
634 if (a0 >= -d)
635 {
636 q = -1;
637 r = a0 + d;
638 }
639 else
640 {
641 q = -2;
642 r = a0 + 2*d;
643 }
644 }
645 }
646
647 *rp = r;
648 return q;
649}
ce13d15f
RK
650#else
651/* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */
996ed075
JJ
652UWtype
653__udiv_w_sdiv (UWtype *rp __attribute__ ((__unused__)),
654 UWtype a1 __attribute__ ((__unused__)),
655 UWtype a0 __attribute__ ((__unused__)),
656 UWtype d __attribute__ ((__unused__)))
081f5e7e
KG
657{
658 return 0;
659}
ce13d15f 660#endif
431b1ee0
TG
661#endif
662\f
536bfcd0 663#if (defined (L_udivdi3) || defined (L_divdi3) || \
18362447
UB
664 defined (L_umoddi3) || defined (L_moddi3) || \
665 defined (L_divmoddi4))
536bfcd0
RK
666#define L_udivmoddi4
667#endif
668
d6eacd48 669#ifdef L_clz
dcfae47c 670const UQItype __clz_tab[256] =
203b91b9
RS
671{
672 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
673 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
674 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
675 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
676 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
677 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
678 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
dcfae47c 679 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
203b91b9 680};
d6eacd48 681#endif
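/* For nonzero i, __clz_tab[i] is the bit length of i, i.e. floor(log2(i)) + 1:
   __clz_tab[1] == 1, __clz_tab[0x40] == 7, __clz_tab[0xff] == 8.  The generic
   count_leading_zeros () fallback derives the leading-zero count by applying
   this table to the most significant non-zero byte.  */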
2928cd7a
RH
682\f
683#ifdef L_clzsi2
dabb3f04 684#undef int
dabb3f04 685int
8275b011 686__clzSI2 (UWtype x)
2928cd7a 687{
53585c36 688 Wtype ret;
2928cd7a 689
8275b011 690 count_leading_zeros (ret, x);
53585c36
RH
691
692 return ret;
2928cd7a
RH
693}
694#endif
695\f
696#ifdef L_clzdi2
dabb3f04 697#undef int
dabb3f04 698int
8275b011 699__clzDI2 (UDWtype x)
2928cd7a 700{
b982024e 701 const DWunion uu = {.ll = x};
53585c36
RH
702 UWtype word;
703 Wtype ret, add;
704
8275b011
RH
705 if (uu.s.high)
706 word = uu.s.high, add = 0;
53585c36 707 else
8275b011 708 word = uu.s.low, add = W_TYPE_SIZE;
2928cd7a 709
53585c36
RH
710 count_leading_zeros (ret, word);
711 return ret + add;
2928cd7a
RH
712}
713#endif
714\f
715#ifdef L_ctzsi2
dabb3f04 716#undef int
dabb3f04 717int
8275b011 718__ctzSI2 (UWtype x)
2928cd7a 719{
53585c36 720 Wtype ret;
2928cd7a 721
53585c36 722 count_trailing_zeros (ret, x);
2928cd7a 723
53585c36 724 return ret;
2928cd7a
RH
725}
726#endif
727\f
728#ifdef L_ctzdi2
dabb3f04 729#undef int
dabb3f04 730int
8275b011 731__ctzDI2 (UDWtype x)
2928cd7a 732{
b982024e 733 const DWunion uu = {.ll = x};
53585c36
RH
734 UWtype word;
735 Wtype ret, add;
736
8275b011
RH
737 if (uu.s.low)
738 word = uu.s.low, add = 0;
53585c36 739 else
8275b011 740 word = uu.s.high, add = W_TYPE_SIZE;
2928cd7a 741
53585c36
RH
742 count_trailing_zeros (ret, word);
743 return ret + add;
2928cd7a
RH
744}
745#endif
3801c801
BS
746\f
747#ifdef L_clrsbsi2
748#undef int
749int
750__clrsbSI2 (Wtype x)
751{
752 Wtype ret;
2928cd7a 753
3801c801
BS
754 if (x < 0)
755 x = ~x;
756 if (x == 0)
757 return W_TYPE_SIZE - 1;
758 count_leading_zeros (ret, x);
759 return ret - 1;
760}
761#endif
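/* __clrsbSI2 counts the leading redundant sign bits, matching
   __builtin_clrsb: with a 32-bit Wtype, __clrsbSI2 (0) == 31,
   __clrsbSI2 (-1) == 31 and __clrsbSI2 (1) == 30.  */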
762\f
763#ifdef L_clrsbdi2
764#undef int
765int
766__clrsbDI2 (DWtype x)
767{
768 const DWunion uu = {.ll = x};
769 UWtype word;
770 Wtype ret, add;
771
772 if (uu.s.high == 0)
773 word = uu.s.low, add = W_TYPE_SIZE;
774 else if (uu.s.high == -1)
775 word = ~uu.s.low, add = W_TYPE_SIZE;
776 else if (uu.s.high >= 0)
777 word = uu.s.high, add = 0;
778 else
779 word = ~uu.s.high, add = 0;
780
781 if (word == 0)
782 ret = W_TYPE_SIZE;
783 else
784 count_leading_zeros (ret, word);
785
786 return ret + add - 1;
787}
788#endif
789\f
2928cd7a 790#ifdef L_popcount_tab
dcfae47c 791const UQItype __popcount_tab[256] =
2928cd7a
RH
792{
793 0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,
794 1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
795 1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
796 2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
797 1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
798 2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
799 2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
dcfae47c 800 3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8
2928cd7a
RH
801};
802#endif
803\f
4ea3d774 804#if defined(L_popcountsi2) || defined(L_popcountdi2)
a153644f
TS
805#define POPCOUNTCST2(x) (((UWtype) x << __CHAR_BIT__) | x)
806#define POPCOUNTCST4(x) (((UWtype) x << (2 * __CHAR_BIT__)) | x)
807#define POPCOUNTCST8(x) (((UWtype) x << (4 * __CHAR_BIT__)) | x)
808#if W_TYPE_SIZE == __CHAR_BIT__
4ea3d774 809#define POPCOUNTCST(x) x
a153644f 810#elif W_TYPE_SIZE == 2 * __CHAR_BIT__
4ea3d774 811#define POPCOUNTCST(x) POPCOUNTCST2 (x)
a153644f 812#elif W_TYPE_SIZE == 4 * __CHAR_BIT__
4ea3d774 813#define POPCOUNTCST(x) POPCOUNTCST4 (POPCOUNTCST2 (x))
a153644f 814#elif W_TYPE_SIZE == 8 * __CHAR_BIT__
4ea3d774
JJ
815#define POPCOUNTCST(x) POPCOUNTCST8 (POPCOUNTCST4 (POPCOUNTCST2 (x)))
816#endif
817#endif
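/* POPCOUNTCST replicates a byte constant across a word, e.g. with a 32-bit
   UWtype POPCOUNTCST (0x55) expands to 0x55555555.  The functions below use
   it for the usual SWAR population count: fold pairs of bits, then nibbles,
   then sum the per-byte counts via the multiply by POPCOUNTCST (0x01).  */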
818\f
2928cd7a 819#ifdef L_popcountsi2
dabb3f04 820#undef int
dabb3f04 821int
8275b011 822__popcountSI2 (UWtype x)
2928cd7a 823{
4ea3d774
JJ
824 /* Force table lookup on targets like AVR and RL78 which only
825 pretend they have LIBGCC2_UNITS_PER_WORD 4, but actually
826 have 1, and other small word targets. */
a153644f 827#if __SIZEOF_INT__ > 2 && defined (POPCOUNTCST) && __CHAR_BIT__ == 8
4ea3d774
JJ
828 x = x - ((x >> 1) & POPCOUNTCST (0x55));
829 x = (x & POPCOUNTCST (0x33)) + ((x >> 2) & POPCOUNTCST (0x33));
830 x = (x + (x >> 4)) & POPCOUNTCST (0x0F);
a153644f 831 return (x * POPCOUNTCST (0x01)) >> (W_TYPE_SIZE - __CHAR_BIT__);
4ea3d774 832#else
4000debb 833 int i, ret = 0;
8275b011
RH
834
835 for (i = 0; i < W_TYPE_SIZE; i += 8)
836 ret += __popcount_tab[(x >> i) & 0xff];
837
838 return ret;
4ea3d774 839#endif
2928cd7a
RH
840}
841#endif
842\f
843#ifdef L_popcountdi2
dabb3f04 844#undef int
dabb3f04 845int
8275b011 846__popcountDI2 (UDWtype x)
2928cd7a 847{
4ea3d774
JJ
848 /* Force table lookup on targets like AVR and RL78 which only
849 pretend they have LIBGCC2_UNITS_PER_WORD 4, but actually
850 have 1, and other small word targets. */
a153644f 851#if __SIZEOF_INT__ > 2 && defined (POPCOUNTCST) && __CHAR_BIT__ == 8
4ea3d774
JJ
852 const DWunion uu = {.ll = x};
853 UWtype x1 = uu.s.low, x2 = uu.s.high;
854 x1 = x1 - ((x1 >> 1) & POPCOUNTCST (0x55));
855 x2 = x2 - ((x2 >> 1) & POPCOUNTCST (0x55));
856 x1 = (x1 & POPCOUNTCST (0x33)) + ((x1 >> 2) & POPCOUNTCST (0x33));
857 x2 = (x2 & POPCOUNTCST (0x33)) + ((x2 >> 2) & POPCOUNTCST (0x33));
858 x1 = (x1 + (x1 >> 4)) & POPCOUNTCST (0x0F);
859 x2 = (x2 + (x2 >> 4)) & POPCOUNTCST (0x0F);
860 x1 += x2;
a153644f 861 return (x1 * POPCOUNTCST (0x01)) >> (W_TYPE_SIZE - __CHAR_BIT__);
4ea3d774 862#else
4000debb 863 int i, ret = 0;
8275b011
RH
864
865 for (i = 0; i < 2*W_TYPE_SIZE; i += 8)
866 ret += __popcount_tab[(x >> i) & 0xff];
867
868 return ret;
4ea3d774 869#endif
2928cd7a
RH
870}
871#endif
872\f
873#ifdef L_paritysi2
dabb3f04 874#undef int
dabb3f04 875int
8275b011 876__paritySI2 (UWtype x)
2928cd7a 877{
8275b011
RH
878#if W_TYPE_SIZE > 64
879# error "fill out the table"
880#endif
881#if W_TYPE_SIZE > 32
882 x ^= x >> 32;
883#endif
884#if W_TYPE_SIZE > 16
885 x ^= x >> 16;
886#endif
887 x ^= x >> 8;
888 x ^= x >> 4;
889 x &= 0xf;
890 return (0x6996 >> x) & 1;
2928cd7a
RH
891}
892#endif
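/* The final step above uses 0x6996 as a 16-entry parity table: bit i of
   0x6996 (binary 0110 1001 1001 0110) is the parity of the nibble value i,
   so e.g. x == 7 (three bits set) selects bit 7, which is 1.  */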
893\f
894#ifdef L_paritydi2
dabb3f04 895#undef int
dabb3f04 896int
8275b011 897__parityDI2 (UDWtype x)
2928cd7a 898{
b982024e
KG
899 const DWunion uu = {.ll = x};
900 UWtype nx = uu.s.low ^ uu.s.high;
8275b011
RH
901
902#if W_TYPE_SIZE > 64
903# error "fill out the table"
904#endif
905#if W_TYPE_SIZE > 32
906 nx ^= nx >> 32;
907#endif
908#if W_TYPE_SIZE > 16
2928cd7a 909 nx ^= nx >> 16;
8275b011 910#endif
2928cd7a 911 nx ^= nx >> 8;
53585c36 912 nx ^= nx >> 4;
0c9ed856
RH
913 nx &= 0xf;
914 return (0x6996 >> nx) & 1;
2928cd7a
RH
915}
916#endif
d6eacd48
RH
917
918#ifdef L_udivmoddi4
30b8f78b
KV
919#ifdef TARGET_HAS_NO_HW_DIVIDE
920
921#if (defined (L_udivdi3) || defined (L_divdi3) || \
18362447
UB
922 defined (L_umoddi3) || defined (L_moddi3) || \
923 defined (L_divmoddi4))
30b8f78b
KV
924static inline __attribute__ ((__always_inline__))
925#endif
926UDWtype
927__udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
928{
929 UDWtype q = 0, r = n, y = d;
930 UWtype lz1, lz2, i, k;
931
  /* Implements the align-divisor, shift-dividend method.  This algorithm
     aligns the divisor under the dividend and then performs a number of
     test-subtract iterations which shift the dividend left.  The number of
     iterations is k + 1, where k is the number of bit positions the
     divisor must be shifted left to align it under the dividend.
     Quotient bits can be saved in the rightmost positions of the dividend
     as it shifts left on each test-subtract iteration.  */
939
940 if (y <= r)
941 {
942 lz1 = __builtin_clzll (d);
943 lz2 = __builtin_clzll (n);
944
945 k = lz1 - lz2;
946 y = (y << k);
947
      /* The dividend can exceed 2 ^ (width - 1) - 1 but still be less than
	 the aligned divisor.  A normal iteration can drop the high order bit
	 of the dividend.  Therefore, the first test-subtract iteration is a
	 special case, saving its quotient bit in a separate location and
	 not shifting the dividend.  */
953 if (r >= y)
954 {
955 r = r - y;
956 q = (1ULL << k);
957 }
958
959 if (k > 0)
960 {
961 y = y >> 1;
962
963 /* k additional iterations where k regular test subtract shift
964 dividend iterations are done. */
965 i = k;
966 do
967 {
968 if (r >= y)
969 r = ((r - y) << 1) + 1;
970 else
971 r = (r << 1);
972 i = i - 1;
973 } while (i != 0);
974
975 /* First quotient bit is combined with the quotient bits resulting
976 from the k regular iterations. */
977 q = q + r;
978 r = r >> k;
979 q = q - (r << k);
980 }
981 }
982
983 if (rp)
984 *rp = r;
985 return q;
986}
987#else
203b91b9 988
536bfcd0 989#if (defined (L_udivdi3) || defined (L_divdi3) || \
18362447
UB
990 defined (L_umoddi3) || defined (L_moddi3) || \
991 defined (L_divmoddi4))
1ab9ba62 992static inline __attribute__ ((__always_inline__))
536bfcd0 993#endif
996ed075
JJ
994UDWtype
995__udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
203b91b9 996{
b982024e
KG
997 const DWunion nn = {.ll = n};
998 const DWunion dd = {.ll = d};
996ed075
JJ
999 DWunion rr;
1000 UWtype d0, d1, n0, n1, n2;
1001 UWtype q0, q1;
1002 UWtype b, bm;
203b91b9 1003
203b91b9
RS
1004 d0 = dd.s.low;
1005 d1 = dd.s.high;
1006 n0 = nn.s.low;
1007 n1 = nn.s.high;
1008
1009#if !UDIV_NEEDS_NORMALIZATION
1010 if (d1 == 0)
1011 {
1012 if (d0 > n1)
1013 {
1014 /* 0q = nn / 0D */
1015
1016 udiv_qrnnd (q0, n0, n1, n0, d0);
1017 q1 = 0;
1018
1019 /* Remainder in n0. */
1020 }
1021 else
1022 {
1023 /* qq = NN / 0d */
1024
1025 if (d0 == 0)
1026 d0 = 1 / d0; /* Divide intentionally by zero. */
1027
1028 udiv_qrnnd (q1, n1, 0, n1, d0);
1029 udiv_qrnnd (q0, n0, n1, n0, d0);
1030
1031 /* Remainder in n0. */
1032 }
1033
1034 if (rp != 0)
1035 {
1036 rr.s.low = n0;
1037 rr.s.high = 0;
1038 *rp = rr.ll;
1039 }
1040 }
1041
1042#else /* UDIV_NEEDS_NORMALIZATION */
1043
1044 if (d1 == 0)
1045 {
1046 if (d0 > n1)
1047 {
1048 /* 0q = nn / 0D */
1049
1050 count_leading_zeros (bm, d0);
1051
1052 if (bm != 0)
1053 {
1054 /* Normalize, i.e. make the most significant bit of the
1055 denominator set. */
1056
1057 d0 = d0 << bm;
996ed075 1058 n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
203b91b9
RS
1059 n0 = n0 << bm;
1060 }
1061
1062 udiv_qrnnd (q0, n0, n1, n0, d0);
1063 q1 = 0;
1064
1065 /* Remainder in n0 >> bm. */
1066 }
1067 else
1068 {
1069 /* qq = NN / 0d */
1070
1071 if (d0 == 0)
1072 d0 = 1 / d0; /* Divide intentionally by zero. */
1073
1074 count_leading_zeros (bm, d0);
1075
1076 if (bm == 0)
1077 {
1078 /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
1079 conclude (the most significant bit of n1 is set) /\ (the
1080 leading quotient digit q1 = 1).
1081
1082 This special case is necessary, not an optimization.
	     (Shift counts of W_TYPE_SIZE are undefined.)  */
203b91b9
RS
1084
1085 n1 -= d0;
1086 q1 = 1;
1087 }
1088 else
1089 {
1090 /* Normalize. */
1091
996ed075 1092 b = W_TYPE_SIZE - bm;
203b91b9
RS
1093
1094 d0 = d0 << bm;
1095 n2 = n1 >> b;
1096 n1 = (n1 << bm) | (n0 >> b);
1097 n0 = n0 << bm;
1098
1099 udiv_qrnnd (q1, n1, n2, n1, d0);
1100 }
1101
0f41302f 1102 /* n1 != d0... */
203b91b9
RS
1103
1104 udiv_qrnnd (q0, n0, n1, n0, d0);
1105
1106 /* Remainder in n0 >> bm. */
1107 }
1108
1109 if (rp != 0)
1110 {
1111 rr.s.low = n0 >> bm;
1112 rr.s.high = 0;
1113 *rp = rr.ll;
1114 }
1115 }
1116#endif /* UDIV_NEEDS_NORMALIZATION */
1117
1118 else
1119 {
1120 if (d1 > n1)
1121 {
1122 /* 00 = nn / DD */
1123
1124 q0 = 0;
1125 q1 = 0;
1126
1127 /* Remainder in n1n0. */
1128 if (rp != 0)
1129 {
1130 rr.s.low = n0;
1131 rr.s.high = n1;
1132 *rp = rr.ll;
1133 }
1134 }
1135 else
1136 {
1137 /* 0q = NN / dd */
1138
1139 count_leading_zeros (bm, d1);
1140 if (bm == 0)
1141 {
1142 /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
1143 conclude (the most significant bit of n1 is set) /\ (the
1144 quotient digit q0 = 0 or 1).
1145
1146 This special case is necessary, not an optimization. */
1147
	  /* The condition on the next line takes advantage of the fact
	     that n1 >= d1 (true due to program flow).  */
1150 if (n1 > d1 || n0 >= d0)
1151 {
1152 q0 = 1;
1153 sub_ddmmss (n1, n0, n1, n0, d1, d0);
1154 }
1155 else
1156 q0 = 0;
1157
1158 q1 = 0;
1159
1160 if (rp != 0)
1161 {
1162 rr.s.low = n0;
1163 rr.s.high = n1;
1164 *rp = rr.ll;
1165 }
1166 }
1167 else
1168 {
996ed075 1169 UWtype m1, m0;
203b91b9
RS
1170 /* Normalize. */
1171
996ed075 1172 b = W_TYPE_SIZE - bm;
203b91b9
RS
1173
1174 d1 = (d1 << bm) | (d0 >> b);
1175 d0 = d0 << bm;
1176 n2 = n1 >> b;
1177 n1 = (n1 << bm) | (n0 >> b);
1178 n0 = n0 << bm;
1179
1180 udiv_qrnnd (q0, n1, n2, n1, d1);
1181 umul_ppmm (m1, m0, q0, d0);
1182
1183 if (m1 > n1 || (m1 == n1 && m0 > n0))
1184 {
1185 q0--;
1186 sub_ddmmss (m1, m0, m1, m0, d1, d0);
1187 }
1188
1189 q1 = 0;
1190
1191 /* Remainder in (n1n0 - m1m0) >> bm. */
1192 if (rp != 0)
1193 {
1194 sub_ddmmss (n1, n0, n1, n0, m1, m0);
1195 rr.s.low = (n1 << b) | (n0 >> bm);
1196 rr.s.high = n1 >> bm;
1197 *rp = rr.ll;
1198 }
1199 }
1200 }
1201 }
1202
b982024e 1203 const DWunion ww = {{.low = q0, .high = q1}};
203b91b9
RS
1204 return ww.ll;
1205}
1206#endif
30b8f78b 1207#endif
203b91b9
RS
1208
1209#ifdef L_divdi3
996ed075
JJ
1210DWtype
1211__divdi3 (DWtype u, DWtype v)
203b91b9 1212{
c7ff6e7a 1213 Wtype c = 0;
b982024e
KG
1214 DWunion uu = {.ll = u};
1215 DWunion vv = {.ll = v};
996ed075 1216 DWtype w;
203b91b9 1217
203b91b9
RS
1218 if (uu.s.high < 0)
1219 c = ~c,
b68daef4 1220 uu.ll = -uu.ll;
203b91b9
RS
1221 if (vv.s.high < 0)
1222 c = ~c,
b68daef4 1223 vv.ll = -vv.ll;
203b91b9 1224
996ed075 1225 w = __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) 0);
203b91b9 1226 if (c)
b68daef4 1227 w = -w;
203b91b9
RS
1228
1229 return w;
1230}
1231#endif
1232
1233#ifdef L_moddi3
996ed075
JJ
1234DWtype
1235__moddi3 (DWtype u, DWtype v)
203b91b9 1236{
c7ff6e7a 1237 Wtype c = 0;
b982024e
KG
1238 DWunion uu = {.ll = u};
1239 DWunion vv = {.ll = v};
996ed075 1240 DWtype w;
203b91b9 1241
203b91b9
RS
1242 if (uu.s.high < 0)
1243 c = ~c,
b68daef4 1244 uu.ll = -uu.ll;
203b91b9 1245 if (vv.s.high < 0)
b68daef4 1246 vv.ll = -vv.ll;
203b91b9 1247
9c859be1 1248 (void) __udivmoddi4 (uu.ll, vv.ll, (UDWtype*)&w);
203b91b9 1249 if (c)
b68daef4 1250 w = -w;
203b91b9
RS
1251
1252 return w;
1253}
1254#endif
1255
18362447
UB
1256#ifdef L_divmoddi4
1257DWtype
1258__divmoddi4 (DWtype u, DWtype v, DWtype *rp)
1259{
1260 Wtype c1 = 0, c2 = 0;
1261 DWunion uu = {.ll = u};
1262 DWunion vv = {.ll = v};
1263 DWtype w;
1264 DWtype r;
1265
1266 if (uu.s.high < 0)
1267 c1 = ~c1, c2 = ~c2,
1268 uu.ll = -uu.ll;
1269 if (vv.s.high < 0)
1270 c1 = ~c1,
1271 vv.ll = -vv.ll;
1272
1273 w = __udivmoddi4 (uu.ll, vv.ll, (UDWtype*)&r);
1274 if (c1)
1275 w = -w;
1276 if (c2)
1277 r = -r;
1278
1279 *rp = r;
1280 return w;
1281}
1282#endif
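/* __divmoddi4 follows C's truncating division: the quotient is negated when
   exactly one operand is negative (c1) and the remainder takes the sign of
   the dividend (c2), e.g. -7 / 2 gives quotient -3 and remainder -1.  */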
1283
203b91b9 1284#ifdef L_umoddi3
996ed075
JJ
1285UDWtype
1286__umoddi3 (UDWtype u, UDWtype v)
203b91b9 1287{
996ed075 1288 UDWtype w;
203b91b9
RS
1289
1290 (void) __udivmoddi4 (u, v, &w);
1291
1292 return w;
1293}
1294#endif
1295
1296#ifdef L_udivdi3
996ed075
JJ
1297UDWtype
1298__udivdi3 (UDWtype n, UDWtype d)
203b91b9 1299{
996ed075 1300 return __udivmoddi4 (n, d, (UDWtype *) 0);
203b91b9
RS
1301}
1302#endif
1303\f
2ce182e2
JJ
1304#if (defined(__BITINT_MAXWIDTH__) \
1305 && (defined(L_mulbitint3) || defined(L_divmodbitint4)))
1306/* _BitInt support. */
1307
1308/* If *P is zero or sign extended (the latter only for PREC < 0) from
1309 some narrower _BitInt value, reduce precision. */
1310
1311static inline __attribute__((__always_inline__)) SItype
1312bitint_reduce_prec (const UWtype **p, SItype prec)
1313{
1314 UWtype mslimb;
1315 SItype i;
1316 if (prec < 0)
1317 {
1318#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1319 i = 0;
1320#else
1321 i = ((USItype) -1 - prec) / W_TYPE_SIZE;
1322#endif
1323 mslimb = (*p)[i];
1324 if (mslimb & ((UWtype) 1 << (((USItype) -1 - prec) % W_TYPE_SIZE)))
1325 {
1326 SItype n = ((USItype) -prec) % W_TYPE_SIZE;
1327 if (n)
1328 {
1329 mslimb |= ((UWtype) -1 << (((USItype) -1 - prec) % W_TYPE_SIZE));
1330 if (mslimb == (UWtype) -1)
1331 {
1332 prec += n;
1333 if (prec >= -1)
1334 return -2;
1335#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1336 ++p;
1337#else
1338 --i;
1339#endif
1340 mslimb = (*p)[i];
1341 n = 0;
1342 }
1343 }
1344 while (mslimb == (UWtype) -1)
1345 {
1346 prec += W_TYPE_SIZE;
1347 if (prec >= -1)
1348 return -2;
1349#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1350 ++p;
1351#else
1352 --i;
1353#endif
1354 mslimb = (*p)[i];
1355 }
1356 if (n == 0)
1357 {
1358 if ((Wtype) mslimb >= 0)
1359 {
1360#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1361 --p;
1362#endif
1363 return prec - 1;
1364 }
1365 }
1366 return prec;
1367 }
1368 else
1369 prec = -prec;
1370 }
1371 else
1372 {
1373#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1374 i = 0;
1375#else
1376 i = ((USItype) prec - 1) / W_TYPE_SIZE;
1377#endif
1378 mslimb = (*p)[i];
1379 }
1380 SItype n = ((USItype) prec) % W_TYPE_SIZE;
1381 if (n)
1382 {
1383 mslimb &= ((UWtype) 1 << (((USItype) prec) % W_TYPE_SIZE)) - 1;
1384 if (mslimb == 0)
1385 {
1386 prec -= n;
1387 if (prec == 0)
1388 return 1;
1389#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1390 ++p;
1391#else
1392 --i;
1393#endif
1394 mslimb = (*p)[i];
1395 }
1396 }
1397 while (mslimb == 0)
1398 {
1399 prec -= W_TYPE_SIZE;
1400 if (prec == 0)
1401 return 1;
1402#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1403 ++p;
1404#else
1405 --i;
1406#endif
1407 mslimb = (*p)[i];
1408 }
1409 return prec;
1410}
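/* Example of the reduction above (assuming 64-bit limbs, little-endian limb
   order): an unsigned 256-bit operand whose only nonzero limb is the least
   significant one is reduced to prec 64, while a signed operand that is zero
   or -1 in every limb reduces to the minimum return values 1 or -2.  */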
1411
1412#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1413# define BITINT_INC -1
1414# define BITINT_END(be, le) (be)
1415#else
1416# define BITINT_INC 1
1417# define BITINT_END(be, le) (le)
1418#endif
1419
1420#ifdef L_mulbitint3
1421/* D = S * L. */
1422
1423static UWtype
1424bitint_mul_1 (UWtype *d, const UWtype *s, UWtype l, SItype n)
1425{
1426 UWtype sv, hi, lo, c = 0;
1427 do
1428 {
1429 sv = *s;
1430 s += BITINT_INC;
1431 umul_ppmm (hi, lo, sv, l);
1432 c = __builtin_add_overflow (lo, c, &lo) + hi;
1433 *d = lo;
1434 d += BITINT_INC;
1435 }
1436 while (--n);
1437 return c;
1438}
1439
1440/* D += S * L. */
1441
1442static UWtype
1443bitint_addmul_1 (UWtype *d, const UWtype *s, UWtype l, SItype n)
1444{
1445 UWtype sv, hi, lo, c = 0;
1446 do
1447 {
1448 sv = *s;
1449 s += BITINT_INC;
1450 umul_ppmm (hi, lo, sv, l);
1451 hi += __builtin_add_overflow (lo, *d, &lo);
1452 c = __builtin_add_overflow (lo, c, &lo) + hi;
1453 *d = lo;
1454 d += BITINT_INC;
1455 }
1456 while (--n);
1457 return c;
1458}
1459
/* If XPREC is positive, it is the precision in bits of an unsigned _BitInt
   operand (which has XPREC / W_TYPE_SIZE full limbs and, if
   XPREC % W_TYPE_SIZE is non-zero, one partial limb).
   If XPREC is negative, -XPREC is the precision in bits of a signed
   _BitInt operand.  RETPREC should always be positive.  */
1466
1467void
1468__mulbitint3 (UWtype *ret, SItype retprec,
1469 const UWtype *u, SItype uprec,
1470 const UWtype *v, SItype vprec)
1471{
1472 uprec = bitint_reduce_prec (&u, uprec);
1473 vprec = bitint_reduce_prec (&v, vprec);
1474 USItype auprec = uprec < 0 ? -uprec : uprec;
1475 USItype avprec = vprec < 0 ? -vprec : vprec;
1476
1477 /* Prefer non-negative U.
1478 Otherwise make sure V doesn't have higher precision than U. */
1479 if ((uprec < 0 && vprec >= 0)
1480 || (avprec > auprec && !(uprec >= 0 && vprec < 0)))
1481 {
1482 SItype p;
1483 const UWtype *t;
1484 p = uprec; uprec = vprec; vprec = p;
1485 p = auprec; auprec = avprec; avprec = p;
1486 t = u; u = v; v = t;
1487 }
1488
1489 USItype un = auprec / W_TYPE_SIZE;
1490 USItype un2 = (auprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
1491 USItype vn = avprec / W_TYPE_SIZE;
1492 USItype vn2 = (avprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
1493 USItype retn = ((USItype) retprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
1494 USItype retidx, uidx, vidx;
1495 UWtype vv;
1496 /* Indexes of least significant limb. */
1497#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1498 retidx = retn - 1;
1499 uidx = un2 - 1;
1500 vidx = vn2 - 1;
1501#else
1502 retidx = 0;
1503 uidx = 0;
1504 vidx = 0;
1505#endif
1506 if (__builtin_expect (auprec <= W_TYPE_SIZE, 0) && vprec < 0)
1507 {
1508 UWtype uu = u[uidx];
1509 if (__builtin_expect (auprec < W_TYPE_SIZE, 0))
1510 uu &= ((UWtype) 1 << (auprec % W_TYPE_SIZE)) - 1;
1511 if (uu == 0)
1512 {
1513 /* 0 * negative would be otherwise mishandled below, so
1514 handle it specially. */
1515 __builtin_memset (ret, 0, retn * sizeof (UWtype));
1516 return;
1517 }
1518 }
1519 vv = v[vidx];
1520 if (__builtin_expect (avprec < W_TYPE_SIZE, 0))
1521 {
1522 if (vprec > 0)
1523 vv &= ((UWtype) 1 << (avprec % W_TYPE_SIZE)) - 1;
1524 else
1525 vv |= (UWtype) -1 << (avprec % W_TYPE_SIZE);
1526 }
1527
1528 USItype n = un > retn ? retn : un;
1529 USItype n2 = n;
1530 USItype retidx2 = retidx + n * BITINT_INC;
1531 UWtype c = 0, uv = 0;
1532 if (n)
1533 c = bitint_mul_1 (ret + retidx, u + uidx, vv, n);
1534 if (retn > un && un2 != un)
1535 {
1536 UWtype hi, lo;
1537 uv = u[uidx + n * BITINT_INC];
1538 if (uprec > 0)
1539 uv &= ((UWtype) 1 << (auprec % W_TYPE_SIZE)) - 1;
1540 else
1541 uv |= (UWtype) -1 << (auprec % W_TYPE_SIZE);
1542 umul_ppmm (hi, lo, uv, vv);
1543 c = __builtin_add_overflow (lo, c, &lo) + hi;
1544 ret[retidx2] = lo;
1545 retidx2 += BITINT_INC;
1546 ++n2;
1547 }
1548 if (retn > un2)
1549 {
1550 if (uprec < 0)
1551 {
1552 while (n2 < retn)
1553 {
1554 if (n2 >= un2 + vn2)
1555 break;
1556 UWtype hi, lo;
1557 umul_ppmm (hi, lo, (UWtype) -1, vv);
1558 c = __builtin_add_overflow (lo, c, &lo) + hi;
1559 ret[retidx2] = lo;
1560 retidx2 += BITINT_INC;
1561 ++n2;
1562 }
1563 }
1564 else
1565 {
1566 ret[retidx2] = c;
1567 retidx2 += BITINT_INC;
1568 ++n2;
1569 }
1570 /* If RET has more limbs than U after precision reduction,
1571 fill in the remaining limbs. */
1572 while (n2 < retn)
1573 {
1574 if (n2 < un2 + vn2 || (uprec ^ vprec) >= 0)
1575 c = 0;
1576 else
1577 c = (UWtype) -1;
1578 ret[retidx2] = c;
1579 retidx2 += BITINT_INC;
1580 ++n2;
1581 }
1582 }
1583 /* N is now number of possibly non-zero limbs in RET (ignoring
1584 limbs above UN2 + VN2 which if any have been finalized already). */
1585 USItype end = vprec < 0 ? un2 + vn2 : vn2;
1586 if (retn > un2 + vn2) retn = un2 + vn2;
1587 if (end > retn) end = retn;
1588 for (USItype m = 1; m < end; ++m)
1589 {
1590 retidx += BITINT_INC;
1591 vidx += BITINT_INC;
1592 if (m < vn2)
1593 {
1594 vv = v[vidx];
1595 if (__builtin_expect (m == vn, 0))
1596 {
1597 if (vprec > 0)
1598 vv &= ((UWtype) 1 << (avprec % W_TYPE_SIZE)) - 1;
1599 else
1600 vv |= (UWtype) -1 << (avprec % W_TYPE_SIZE);
1601 }
1602 }
1603 else
1604 vv = (UWtype) -1;
1605 if (m + n > retn)
1606 n = retn - m;
1607 c = 0;
1608 if (n)
1609 c = bitint_addmul_1 (ret + retidx, u + uidx, vv, n);
1610 n2 = m + n;
1611 retidx2 = retidx + n * BITINT_INC;
1612 if (n2 < retn && un2 != un)
1613 {
1614 UWtype hi, lo;
1615 umul_ppmm (hi, lo, uv, vv);
1616 hi += __builtin_add_overflow (lo, ret[retidx2], &lo);
1617 c = __builtin_add_overflow (lo, c, &lo) + hi;
1618 ret[retidx2] = lo;
1619 retidx2 += BITINT_INC;
1620 ++n2;
1621 }
1622 if (uprec < 0)
1623 while (n2 < retn)
1624 {
1625 UWtype hi, lo;
1626 umul_ppmm (hi, lo, (UWtype) -1, vv);
1627 hi += __builtin_add_overflow (lo, ret[retidx2], &lo);
1628 c = __builtin_add_overflow (lo, c, &lo) + hi;
1629 ret[retidx2] = lo;
1630 retidx2 += BITINT_INC;
1631 ++n2;
1632 }
1633 else if (n2 < retn)
1634 {
1635 ret[retidx2] = c;
1636 retidx2 += BITINT_INC;
1637 }
1638 }
1639}
1640#endif
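/* Usage sketch (assuming 64-bit limbs): to multiply two unsigned
   _BitInt(128) values into an unsigned _BitInt(256) result, compiler-generated
   code could emit a call such as
     __mulbitint3 (ret, 256, u, 128, v, 128);
   where ret points to 4 limbs and u/v each point to 2 limbs; a signed
   operand instead passes its precision negated, e.g. -128.  */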
1641
1642#ifdef L_divmodbitint4
1643static void
1644bitint_negate (UWtype *d, const UWtype *s, SItype n)
1645{
1646 UWtype c = 1;
1647 do
1648 {
1649 UWtype sv = *s, lo;
1650 s += BITINT_INC;
1651 c = __builtin_add_overflow (~sv, c, &lo);
1652 *d = lo;
1653 d += BITINT_INC;
1654 }
1655 while (--n);
1656}
1657
1658/* D -= S * L. */
1659
1660static UWtype
1661bitint_submul_1 (UWtype *d, const UWtype *s, UWtype l, SItype n)
1662{
1663 UWtype sv, hi, lo, c = 0;
1664 do
1665 {
1666 sv = *s;
1667 s += BITINT_INC;
1668 umul_ppmm (hi, lo, sv, l);
1669 hi += __builtin_sub_overflow (*d, lo, &lo);
1670 c = __builtin_sub_overflow (lo, c, &lo) + hi;
1671 *d = lo;
1672 d += BITINT_INC;
1673 }
1674 while (--n);
1675 return c;
1676}
1677
/* If XPREC is positive, it is the precision in bits of an unsigned _BitInt
   operand (which has XPREC / W_TYPE_SIZE full limbs and, if
   XPREC % W_TYPE_SIZE is non-zero, one partial limb).
   If XPREC is negative, -XPREC is the precision in bits of a signed
   _BitInt operand.  QPREC and RPREC should always be non-negative.
   If either Q or R is NULL (at least one should be non-NULL), then
   the corresponding QPREC or RPREC should be 0.  */
1686
1687void
1688__divmodbitint4 (UWtype *q, SItype qprec,
1689 UWtype *r, SItype rprec,
1690 const UWtype *u, SItype uprec,
1691 const UWtype *v, SItype vprec)
1692{
1693 uprec = bitint_reduce_prec (&u, uprec);
1694 vprec = bitint_reduce_prec (&v, vprec);
1695 USItype auprec = uprec < 0 ? -uprec : uprec;
1696 USItype avprec = vprec < 0 ? -vprec : vprec;
1697 USItype un = (auprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
1698 USItype vn = (avprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
1699 USItype qn = ((USItype) qprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
1700 USItype rn = ((USItype) rprec + W_TYPE_SIZE - 1) / W_TYPE_SIZE;
1701 USItype up = auprec % W_TYPE_SIZE;
1702 USItype vp = avprec % W_TYPE_SIZE;
1703 if (__builtin_expect (un < vn, 0))
1704 {
1705 /* If abs(v) > abs(u), then q is 0 and r is u. */
1706 if (q)
1707 __builtin_memset (q, 0, qn * sizeof (UWtype));
1708 if (r == NULL)
1709 return;
1710#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1711 r += rn - 1;
1712 u += un - 1;
1713#endif
1714 if (up)
1715 --un;
1716 if (rn < un)
1717 un = rn;
1718 for (rn -= un; un; --un)
1719 {
1720 *r = *u;
1721 r += BITINT_INC;
1722 u += BITINT_INC;
1723 }
1724 if (!rn)
1725 return;
1726 if (up)
1727 {
1728 if (uprec > 0)
1729 *r = *u & (((UWtype) 1 << up) - 1);
1730 else
1731 *r = *u | ((UWtype) -1 << up);
1732 r += BITINT_INC;
1733 if (!--rn)
1734 return;
1735 }
1736 UWtype c = uprec < 0 ? (UWtype) -1 : (UWtype) 0;
1737 for (; rn; --rn)
1738 {
1739 *r = c;
1740 r += BITINT_INC;
1741 }
1742 return;
1743 }
1744 USItype qn2 = un - vn + 1;
1745 if (qn >= qn2)
1746 qn2 = 0;
1747 USItype sz = un + 1 + vn + qn2;
1748 UWtype *buf = __builtin_alloca (sz * sizeof (UWtype));
1749 USItype uidx, vidx;
1750#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1751 uidx = un - 1;
1752 vidx = vn - 1;
1753#else
1754 uidx = 0;
1755 vidx = 0;
1756#endif
1757 if (uprec < 0)
1758 bitint_negate (buf + BITINT_END (uidx + 1, 0), u + uidx, un);
1759 else
1760 __builtin_memcpy (buf + BITINT_END (1, 0), u, un * sizeof (UWtype));
1761 if (up)
1762 buf[BITINT_END (1, un - 1)] &= (((UWtype) 1 << up) - 1);
1763 if (vprec < 0)
1764 bitint_negate (buf + un + 1 + vidx, v + vidx, vn);
1765 else
1766 __builtin_memcpy (buf + un + 1, v, vn * sizeof (UWtype));
1767 if (vp)
1768 buf[un + 1 + BITINT_END (0, vn - 1)] &= (((UWtype) 1 << vp) - 1);
1769 UWtype *u2 = buf;
1770 UWtype *v2 = u2 + un + 1;
1771 UWtype *q2 = v2 + vn;
1772 if (!qn2)
1773 q2 = q + BITINT_END (qn - (un - vn + 1), 0);
1774
1775 /* Knuth's algorithm. See also ../gcc/wide-int.cc (divmod_internal_2). */
1776
1777#ifndef UDIV_NEEDS_NORMALIZATION
1778 /* Handle single limb divisor first. */
1779 if (vn == 1)
1780 {
1781 UWtype vv = v2[0];
1782 if (vv == 0)
1783 vv = 1 / vv; /* Divide intentionally by zero. */
1784 UWtype k = 0;
1785#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1786 for (SItype i = 0; i <= un - 1; ++i)
1787#else
1788 for (SItype i = un - 1; i >= 0; --i)
1789#endif
1790 udiv_qrnnd (q2[i], k, k, u2[BITINT_END (i + 1, i)], vv);
1791 if (r != NULL)
1792 r[BITINT_END (rn - 1, 0)] = k;
1793 }
1794 else
1795#endif
1796 {
1797 SItype s;
1798#ifdef UDIV_NEEDS_NORMALIZATION
1799 if (vn == 1 && v2[0] == 0)
1800 s = 0;
1801 else
1802#endif
1803 if (sizeof (0U) == sizeof (UWtype))
1804 s = __builtin_clz (v2[BITINT_END (0, vn - 1)]);
1805 else if (sizeof (0UL) == sizeof (UWtype))
1806 s = __builtin_clzl (v2[BITINT_END (0, vn - 1)]);
1807 else
1808 s = __builtin_clzll (v2[BITINT_END (0, vn - 1)]);
1809 if (s)
1810 {
1811 /* Normalize by shifting v2 left so that it has msb set. */
1812 const SItype n = sizeof (UWtype) * __CHAR_BIT__;
1813#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1814 for (SItype i = 0; i < vn - 1; ++i)
1815#else
1816 for (SItype i = vn - 1; i > 0; --i)
1817#endif
1818 v2[i] = (v2[i] << s) | (v2[i - BITINT_INC] >> (n - s));
1819 v2[vidx] = v2[vidx] << s;
1820 /* And shift u2 left by the same amount. */
1821 u2[BITINT_END (0, un)] = u2[BITINT_END (1, un - 1)] >> (n - s);
1822#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1823 for (SItype i = 1; i < un; ++i)
1824#else
1825 for (SItype i = un - 1; i > 0; --i)
1826#endif
1827 u2[i] = (u2[i] << s) | (u2[i - BITINT_INC] >> (n - s));
1828 u2[BITINT_END (un, 0)] = u2[BITINT_END (un, 0)] << s;
1829 }
1830 else
1831 u2[BITINT_END (0, un)] = 0;
1832#ifdef UDIV_NEEDS_NORMALIZATION
1833 /* Handle single limb divisor first. */
1834 if (vn == 1)
1835 {
1836 UWtype vv = v2[0];
1837 if (vv == 0)
1838 vv = 1 / vv; /* Divide intentionally by zero. */
1839 UWtype k = u2[BITINT_END (0, un)];
1840#if __LIBGCC_BITINT_ORDER__ == __ORDER_BIG_ENDIAN__
1841 for (SItype i = 0; i <= un - 1; ++i)
1842#else
1843 for (SItype i = un - 1; i >= 0; --i)
1844#endif
1845 udiv_qrnnd (q2[i], k, k, u2[BITINT_END (i + 1, i)], vv);
1846 if (r != NULL)
1847 r[BITINT_END (rn - 1, 0)] = k >> s;
1848 }
1849 else
1850#endif
1851 {
1852 UWtype vv1 = v2[BITINT_END (0, vn - 1)];
1853 UWtype vv0 = v2[BITINT_END (1, vn - 2)];
1854 /* Main loop. */
1855 for (SItype j = un - vn; j >= 0; --j)
1856 {
1857 /* Compute estimate in qhat. */
1858 UWtype uv1 = u2[BITINT_END (un - j - vn, j + vn)];
1859 UWtype uv0 = u2[BITINT_END (un - j - vn + 1, j + vn - 1)];
1860 UWtype qhat, rhat, hi, lo, c;
1861 if (uv1 >= vv1)
1862 {
1863 /* udiv_qrnnd doesn't support quotients which don't
1864 fit into UWtype, so subtract from uv1:uv0 vv1
1865 first. */
1866 uv1 -= vv1 + __builtin_sub_overflow (uv0, vv1, &uv0);
1867 udiv_qrnnd (qhat, rhat, uv1, uv0, vv1);
1868 if (!__builtin_add_overflow (rhat, vv1, &rhat))
1869 goto again;
1870 }
1871 else
1872 {
1873 udiv_qrnnd (qhat, rhat, uv1, uv0, vv1);
1874 again:
1875 umul_ppmm (hi, lo, qhat, vv0);
1876 if (hi > rhat
1877 || (hi == rhat
1878 && lo > u2[BITINT_END (un - j - vn + 2,
1879 j + vn - 2)]))
1880 {
1881 --qhat;
1882 if (!__builtin_add_overflow (rhat, vv1, &rhat))
1883 goto again;
1884 }
1885 }
1886
1887 c = bitint_submul_1 (u2 + BITINT_END (un - j, j),
1888 v2 + BITINT_END (vn - 1, 0), qhat, vn);
1889 u2[BITINT_END (un - j - vn, j + vn)] -= c;
	      /* If we've subtracted too much, decrease qhat
		 and add back.  */
1892 if ((Wtype) u2[BITINT_END (un - j - vn, j + vn)] < 0)
1893 {
1894 --qhat;
1895 c = 0;
1896 for (USItype i = 0; i < vn; ++i)
1897 {
1898 UWtype s = v2[BITINT_END (vn - 1 - i, i)];
1899 UWtype d = u2[BITINT_END (un - i - j, i + j)];
1900 UWtype c1 = __builtin_add_overflow (d, s, &d);
1901 UWtype c2 = __builtin_add_overflow (d, c, &d);
1902 c = c1 + c2;
1903 u2[BITINT_END (un - i - j, i + j)] = d;
1904 }
1905 u2[BITINT_END (un - j - vn, j + vn)] += c;
1906 }
1907 q2[BITINT_END (un - vn - j, j)] = qhat;
1908 }
1909 if (r != NULL)
1910 {
1911 if (s)
1912 {
1913 const SItype n = sizeof (UWtype) * __CHAR_BIT__;
1914 /* Unnormalize remainder. */
1915 USItype i;
1916 for (i = 0; i < vn && i < rn; ++i)
1917 r[BITINT_END (rn - 1 - i, i)]
1918 = ((u2[BITINT_END (un - i, i)] >> s)
1919 | (u2[BITINT_END (un - i - 1, i + 1)] << (n - s)));
1920 if (i < rn)
1921 r[BITINT_END (rn - vn, vn - 1)]
1922 = u2[BITINT_END (un - vn + 1, vn - 1)] >> s;
1923 }
1924 else if (rn > vn)
1925 __builtin_memcpy (&r[BITINT_END (rn - vn, 0)],
1926 &u2[BITINT_END (un + 1 - vn, 0)],
1927 vn * sizeof (UWtype));
1928 else
1929 __builtin_memcpy (&r[0], &u2[BITINT_END (un + 1 - rn, 0)],
1930 rn * sizeof (UWtype));
1931 }
1932 }
1933 }
1934 if (q != NULL)
1935 {
1936 if ((uprec < 0) ^ (vprec < 0))
1937 {
1938 /* Negative quotient. */
1939 USItype n;
1940 if (un - vn + 1 > qn)
1941 n = qn;
1942 else
1943 n = un - vn + 1;
1944 bitint_negate (q + BITINT_END (qn - 1, 0),
1945 q2 + BITINT_END (un - vn, 0), n);
1946 if (qn > n)
1947 __builtin_memset (q + BITINT_END (0, n), -1,
1948 (qn - n) * sizeof (UWtype));
1949 }
1950 else
1951 {
1952 /* Positive quotient. */
1953 if (qn2)
1954 __builtin_memcpy (q, q2 + BITINT_END (un - vn + 1 - qn, 0),
1955 qn * sizeof (UWtype));
1956 else if (qn > un - vn + 1)
1957 __builtin_memset (q + BITINT_END (0, un - vn + 1), 0,
1958 (qn - (un - vn + 1)) * sizeof (UWtype));
1959 }
1960 }
1961 if (r != NULL)
1962 {
1963 if (uprec < 0)
1964 {
1965 /* Negative remainder. */
1966 bitint_negate (r + BITINT_END (rn - 1, 0),
1967 r + BITINT_END (rn - 1, 0),
1968 rn > vn ? vn : rn);
1969 if (rn > vn)
1970 __builtin_memset (r + BITINT_END (0, vn), -1,
1971 (rn - vn) * sizeof (UWtype));
1972 }
1973 else
1974 {
1975 /* Positive remainder. */
1976 if (rn > vn)
1977 __builtin_memset (r + BITINT_END (0, vn), 0,
1978 (rn - vn) * sizeof (UWtype));
1979 }
1980 }
1981}
1982#endif
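/* Usage sketch (assuming 64-bit limbs): dividing an unsigned _BitInt(192)
   by an unsigned _BitInt(128) with both quotient and remainder wanted could
   be done with
     __divmodbitint4 (q, 192, r, 128, u, 192, v, 128);
   a signed operand passes its precision negated, and passing a NULL q or r
   (with the matching precision 0) skips that result.  */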
1983#endif
1984\f
203b91b9 1985#ifdef L_cmpdi2
c7ff6e7a 1986cmp_return_type
996ed075 1987__cmpdi2 (DWtype a, DWtype b)
203b91b9 1988{
ebc4cd54 1989 return (a > b) - (a < b) + 1;
203b91b9
RS
1990}
1991#endif
1992
1993#ifdef L_ucmpdi2
c7ff6e7a 1994cmp_return_type
ebc4cd54 1995__ucmpdi2 (UDWtype a, UDWtype b)
203b91b9 1996{
ebc4cd54 1997 return (a > b) - (a < b) + 1;
203b91b9
RS
1998}
1999#endif
2000\f
4e9db8b2 2001#if defined(L_fixunstfdi) && LIBGCC2_HAS_TF_MODE
f139f5fa 2002UDWtype
6da9c622 2003__fixunstfDI (TFtype a)
ab495388 2004{
ab495388
RS
2005 if (a < 0)
2006 return 0;
2007
2008 /* Compute high word of result, as a flonum. */
4f2e0d5e 2009 const TFtype b = (a / Wtype_MAXp1_F);
996ed075 2010 /* Convert that to fixed (but not to DWtype!),
ab495388 2011 and shift it into the high word. */
b982024e 2012 UDWtype v = (UWtype) b;
4f2e0d5e 2013 v <<= W_TYPE_SIZE;
ab495388
RS
2014 /* Remove high part from the TFtype, leaving the low part as flonum. */
2015 a -= (TFtype)v;
996ed075 2016 /* Convert that to fixed (but not to DWtype!) and add it in.
ab495388
RS
2017 Sometimes A comes out negative. This is significant, since
2018 A has more bits than a long int does. */
2019 if (a < 0)
996ed075 2020 v -= (UWtype) (- a);
ab495388 2021 else
996ed075 2022 v += (UWtype) a;
ab495388
RS
2023 return v;
2024}
2025#endif
2026
4e9db8b2 2027#if defined(L_fixtfdi) && LIBGCC2_HAS_TF_MODE
996ed075 2028DWtype
37ef1054 2029__fixtfdi (TFtype a)
ab495388
RS
2030{
2031 if (a < 0)
6da9c622
RK
2032 return - __fixunstfDI (-a);
2033 return __fixunstfDI (a);
ab495388
RS
2034}
2035#endif
2036
4e9db8b2 2037#if defined(L_fixunsxfdi) && LIBGCC2_HAS_XF_MODE
f139f5fa 2038UDWtype
6da9c622 2039__fixunsxfDI (XFtype a)
e0799b34 2040{
e0799b34
RS
2041 if (a < 0)
2042 return 0;
2043
2044 /* Compute high word of result, as a flonum. */
4f2e0d5e 2045 const XFtype b = (a / Wtype_MAXp1_F);
996ed075 2046 /* Convert that to fixed (but not to DWtype!),
e0799b34 2047 and shift it into the high word. */
b982024e 2048 UDWtype v = (UWtype) b;
4f2e0d5e 2049 v <<= W_TYPE_SIZE;
e0799b34
RS
2050 /* Remove high part from the XFtype, leaving the low part as flonum. */
2051 a -= (XFtype)v;
996ed075 2052 /* Convert that to fixed (but not to DWtype!) and add it in.
e0799b34
RS
2053 Sometimes A comes out negative. This is significant, since
2054 A has more bits than a long int does. */
2055 if (a < 0)
996ed075 2056 v -= (UWtype) (- a);
e0799b34 2057 else
996ed075 2058 v += (UWtype) a;
e0799b34
RS
2059 return v;
2060}
2061#endif
2062
4e9db8b2 2063#if defined(L_fixxfdi) && LIBGCC2_HAS_XF_MODE
996ed075 2064DWtype
37ef1054 2065__fixxfdi (XFtype a)
e0799b34
RS
2066{
2067 if (a < 0)
6da9c622
RK
2068 return - __fixunsxfDI (-a);
2069 return __fixunsxfDI (a);
e0799b34
RS
2070}
2071#endif
2072
4e9db8b2 2073#if defined(L_fixunsdfdi) && LIBGCC2_HAS_DF_MODE
f139f5fa 2074UDWtype
6da9c622 2075__fixunsdfDI (DFtype a)
203b91b9 2076{
  /* Get the high part of the result.  The division here just moves the radix
     point and does not cause any rounding.  The subsequent conversion to an
     integral type chops the result as desired.  */
4f2e0d5e 2080 const UWtype hi = a / Wtype_MAXp1_F;
203b91b9 2081
4977bab6
ZW
2082 /* Get low part of result. Convert `hi' to floating type and scale it back,
2083 then subtract this from the number being converted. This leaves the low
2084 part. Convert that to integral type. */
4f2e0d5e 2085 const UWtype lo = a - (DFtype) hi * Wtype_MAXp1_F;
4977bab6
ZW
2086
2087 /* Assemble result from the two parts. */
4f2e0d5e 2088 return ((UDWtype) hi << W_TYPE_SIZE) | lo;
203b91b9
RS
2089}
2090#endif
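/* Worked example of the split above (assuming W_TYPE_SIZE == 32, so
   Wtype_MAXp1_F == 2**32): for a == 8.0 * 2**32 + 7.0 the division yields
   hi == 8 after truncation, lo == a - 8.0 * 2**32 == 7, and the result is
   ((UDWtype) 8 << 32) | 7 == 0x800000007.  */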
2091
4e9db8b2 2092#if defined(L_fixdfdi) && LIBGCC2_HAS_DF_MODE
996ed075 2093DWtype
37ef1054 2094__fixdfdi (DFtype a)
203b91b9
RS
2095{
2096 if (a < 0)
6da9c622
RK
2097 return - __fixunsdfDI (-a);
2098 return __fixunsdfDI (a);
203b91b9
RS
2099}
2100#endif
2101
cfa7bd9c 2102#if defined(L_fixunssfdi) && LIBGCC2_HAS_SF_MODE
f139f5fa 2103UDWtype
4f2e0d5e 2104__fixunssfDI (SFtype a)
203b91b9 2105{
4e9db8b2 2106#if LIBGCC2_HAS_DF_MODE
ab495388 2107 /* Convert the SFtype to a DFtype, because that is surely not going
203b91b9 2108 to lose any bits. Some day someone else can write a faster version
ab495388 2109 that avoids converting to DFtype, and verify it really works right. */
4f2e0d5e 2110 const DFtype dfa = a;
203b91b9 2111
  /* Get the high part of the result.  The division here just moves the radix
     point and does not cause any rounding.  The subsequent conversion to an
     integral type chops the result as desired.  */
4f2e0d5e 2115 const UWtype hi = dfa / Wtype_MAXp1_F;
203b91b9 2116
4977bab6
ZW
2117 /* Get low part of result. Convert `hi' to floating type and scale it back,
2118 then subtract this from the number being converted. This leaves the low
2119 part. Convert that to integral type. */
4f2e0d5e 2120 const UWtype lo = dfa - (DFtype) hi * Wtype_MAXp1_F;
4977bab6
ZW
2121
2122 /* Assemble result from the two parts. */
4f2e0d5e
RH
2123 return ((UDWtype) hi << W_TYPE_SIZE) | lo;
2124#elif FLT_MANT_DIG < W_TYPE_SIZE
2125 if (a < 1)
2126 return 0;
2127 if (a < Wtype_MAXp1_F)
2128 return (UWtype)a;
2129 if (a < Wtype_MAXp1_F * Wtype_MAXp1_F)
2130 {
2131 /* Since we know that there are fewer significant bits in the SFmode
2132 quantity than in a word, we know that we can convert out all the
2e681715 2133 significant bits in one step, and thus avoid losing bits. */
4f2e0d5e
RH
2134
2135 /* ??? This following loop essentially performs frexpf. If we could
2136 use the real libm function, or poke at the actual bits of the fp
2137 format, it would be significantly faster. */
2138
2139 UWtype shift = 0, counter;
2140 SFtype msb;
2141
2142 a /= Wtype_MAXp1_F;
2143 for (counter = W_TYPE_SIZE / 2; counter != 0; counter >>= 1)
2144 {
2145 SFtype counterf = (UWtype)1 << counter;
2146 if (a >= counterf)
2147 {
2148 shift |= counter;
2149 a /= counterf;
2150 }
2151 }
2152
2153 /* Rescale into the range of one word, extract the bits of that
2154 one word, and shift the result into position. */
2155 a *= Wtype_MAXp1_F;
2156 counter = a;
2157 return (DWtype)counter << shift;
2158 }
2159 return -1;
2160#else
2161# error
2162#endif
203b91b9
RS
2163}
2164#endif
2165
cfa7bd9c 2166#if defined(L_fixsfdi) && LIBGCC2_HAS_SF_MODE
996ed075 2167DWtype
ab495388 2168__fixsfdi (SFtype a)
203b91b9
RS
2169{
2170 if (a < 0)
6da9c622
RK
2171 return - __fixunssfDI (-a);
2172 return __fixunssfDI (a);
203b91b9
RS
2173}
2174#endif
2175
4e9db8b2 2176#if defined(L_floatdixf) && LIBGCC2_HAS_XF_MODE
e0799b34 2177XFtype
996ed075 2178__floatdixf (DWtype u)
e0799b34 2179{
66bb34c0 2180#if W_TYPE_SIZE > __LIBGCC_XF_MANT_DIG__
4a73d865
JM
2181# error
2182#endif
4f2e0d5e
RH
2183 XFtype d = (Wtype) (u >> W_TYPE_SIZE);
2184 d *= Wtype_MAXp1_F;
2185 d += (UWtype)u;
e5e809f4 2186 return d;
e0799b34
RS
2187}
2188#endif
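/* Editorial sketch (not part of libgcc): the word-by-word reconstruction in
   __floatdixf above, assuming W_TYPE_SIZE == 32.  For u = 0x0000000500000007
   (5 * 2^32 + 7):

       XFtype d = (Wtype) (u >> 32);    5.0
       d *= 4294967296.0;               21474836480.0
       d += (UWtype) u;                 21474836487.0

   The #error above guarantees the XF mantissa has at least W_TYPE_SIZE bits,
   so each word converts exactly and any rounding can happen at most once,
   in the final addition.  */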
2189
d7735880
JM
2190#if defined(L_floatundixf) && LIBGCC2_HAS_XF_MODE
2191XFtype
2192__floatundixf (UDWtype u)
2193{
66bb34c0 2194#if W_TYPE_SIZE > __LIBGCC_XF_MANT_DIG__
4a73d865
JM
2195# error
2196#endif
d7735880
JM
2197 XFtype d = (UWtype) (u >> W_TYPE_SIZE);
2198 d *= Wtype_MAXp1_F;
2199 d += (UWtype)u;
2200 return d;
2201}
2202#endif
2203
4e9db8b2 2204#if defined(L_floatditf) && LIBGCC2_HAS_TF_MODE
ab495388 2205TFtype
996ed075 2206__floatditf (DWtype u)
ab495388 2207{
66bb34c0 2208#if W_TYPE_SIZE > __LIBGCC_TF_MANT_DIG__
4a73d865
JM
2209# error
2210#endif
4f2e0d5e
RH
2211 TFtype d = (Wtype) (u >> W_TYPE_SIZE);
2212 d *= Wtype_MAXp1_F;
2213 d += (UWtype)u;
e5e809f4 2214 return d;
ab495388
RS
2215}
2216#endif
2217
d7735880
JM
2218#if defined(L_floatunditf) && LIBGCC2_HAS_TF_MODE
2219TFtype
2220__floatunditf (UDWtype u)
2221{
66bb34c0 2222#if W_TYPE_SIZE > __LIBGCC_TF_MANT_DIG__
4a73d865 2223# error
203b91b9 2224#endif
4a73d865 2225 TFtype d = (UWtype) (u >> W_TYPE_SIZE);
d7735880
JM
2226 d *= Wtype_MAXp1_F;
2227 d += (UWtype)u;
2228 return d;
2229}
2230#endif
2231
4a73d865
JM
2232#if (defined(L_floatdisf) && LIBGCC2_HAS_SF_MODE) \
2233 || (defined(L_floatdidf) && LIBGCC2_HAS_DF_MODE)
4f2e0d5e 2234#define DI_SIZE (W_TYPE_SIZE * 2)
b04c9063
AM
2235#define F_MODE_OK(SIZE) \
2236 (SIZE < DI_SIZE \
2237 && SIZE > (DI_SIZE - SIZE + FSSIZE) \
5fb54b91 2238 && !AVOID_FP_TYPE_CONVERSION(SIZE))
4a73d865
JM
2239#if defined(L_floatdisf)
2240#define FUNC __floatdisf
2241#define FSTYPE SFtype
66bb34c0 2242#define FSSIZE __LIBGCC_SF_MANT_DIG__
4a73d865
JM
2243#else
2244#define FUNC __floatdidf
2245#define FSTYPE DFtype
66bb34c0 2246#define FSSIZE __LIBGCC_DF_MANT_DIG__
4a73d865 2247#endif
203b91b9 2248
4a73d865
JM
2249FSTYPE
2250FUNC (DWtype u)
203b91b9 2251{
4a73d865 2252#if FSSIZE >= W_TYPE_SIZE
4f2e0d5e 2253 /* When the word size is small, we never get any rounding error. */
4a73d865 2254 FSTYPE f = (Wtype) (u >> W_TYPE_SIZE);
4f2e0d5e
RH
2255 f *= Wtype_MAXp1_F;
2256 f += (UWtype)u;
2257 return f;
66bb34c0
JM
2258#elif (LIBGCC2_HAS_DF_MODE && F_MODE_OK (__LIBGCC_DF_MANT_DIG__)) \
2259 || (LIBGCC2_HAS_XF_MODE && F_MODE_OK (__LIBGCC_XF_MANT_DIG__)) \
2260 || (LIBGCC2_HAS_TF_MODE && F_MODE_OK (__LIBGCC_TF_MANT_DIG__))
4a73d865 2261
66bb34c0
JM
2262#if (LIBGCC2_HAS_DF_MODE && F_MODE_OK (__LIBGCC_DF_MANT_DIG__))
2263# define FSIZE __LIBGCC_DF_MANT_DIG__
4a73d865 2264# define FTYPE DFtype
66bb34c0
JM
2265#elif (LIBGCC2_HAS_XF_MODE && F_MODE_OK (__LIBGCC_XF_MANT_DIG__))
2266# define FSIZE __LIBGCC_XF_MANT_DIG__
4a73d865 2267# define FTYPE XFtype
66bb34c0
JM
2268#elif (LIBGCC2_HAS_TF_MODE && F_MODE_OK (__LIBGCC_TF_MANT_DIG__))
2269# define FSIZE __LIBGCC_TF_MANT_DIG__
4a73d865 2270# define FTYPE TFtype
4f2e0d5e
RH
2271#else
2272# error
2273#endif
2274
4a73d865 2275#define REP_BIT ((UDWtype) 1 << (DI_SIZE - FSIZE))
4f2e0d5e 2276
d9e1ab8d 2277 /* Protect against double-rounding error.
4f2e0d5e
RH
2278 Represent any low-order bits that might be truncated, by a bit that
2279 won't be lost. The bit can go anywhere below the rounding position
4a73d865
JM
2280 of the FSTYPE. A fixed mask and bit position handles all usual
2281 configurations. */
2282 if (! (- ((DWtype) 1 << FSIZE) < u
2283 && u < ((DWtype) 1 << FSIZE)))
d9e1ab8d 2284 {
4a73d865 2285 if ((UDWtype) u & (REP_BIT - 1))
d9e1ab8d 2286 {
4a73d865
JM
2287 u &= ~ (REP_BIT - 1);
2288 u |= REP_BIT;
d9e1ab8d
RK
2289 }
2290 }
203b91b9 2291
4a73d865
JM
2292 /* Do the calculation in a wider type so that we don't lose any of
2293 the precision of the high word while multiplying it. */
2294 FTYPE f = (Wtype) (u >> W_TYPE_SIZE);
4f2e0d5e
RH
2295 f *= Wtype_MAXp1_F;
2296 f += (UWtype)u;
4a73d865 2297 return (FSTYPE) f;
4f2e0d5e 2298#else
4a73d865
JM
2299#if FSSIZE >= W_TYPE_SIZE - 2
2300# error
2301#endif
2302 /* Finally, the word size is larger than the number of bits in the
2303 required FSTYPE, and we've got no suitable wider type. The only
2304 way to avoid double rounding is to special case the
2305 extraction. */
4f2e0d5e
RH
2306
2307 /* If there are no high bits set, fall back to one conversion. */
2308 if ((Wtype)u == u)
4a73d865 2309 return (FSTYPE)(Wtype)u;
4f2e0d5e
RH
2310
2311 /* Otherwise, find the power of two. */
2312 Wtype hi = u >> W_TYPE_SIZE;
2313 if (hi < 0)
1f6eac90 2314 hi = -(UWtype) hi;
4f2e0d5e
RH
2315
2316 UWtype count, shift;
5de3e2d8
BE
2317#if !defined (COUNT_LEADING_ZEROS_0) || COUNT_LEADING_ZEROS_0 != W_TYPE_SIZE
2318 if (hi == 0)
2319 count = W_TYPE_SIZE;
2320 else
2321#endif
4f2e0d5e
RH
2322 count_leading_zeros (count, hi);
2323
2324 /* No leading bits means u == minimum. */
2325 if (count == 0)
6395ba73 2326 return Wtype_MAXp1_F * (FSTYPE) (hi | ((UWtype) u != 0));
4f2e0d5e 2327
4a73d865 2328 shift = 1 + W_TYPE_SIZE - count;
4f2e0d5e
RH
2329
2330 /* Shift down the most significant bits. */
2331 hi = u >> shift;
2332
2333 /* If we lost any nonzero bits, set the lsb to ensure correct rounding. */
5fb54b91 2334 if ((UWtype)u << (W_TYPE_SIZE - shift))
4f2e0d5e
RH
2335 hi |= 1;
2336
2337 /* Convert the one word of data, and rescale. */
5fb54b91
RH
2338 FSTYPE f = hi, e;
2339 if (shift == W_TYPE_SIZE)
2340 e = Wtype_MAXp1_F;
2341 /* The following two cases could be merged if we knew that the target
2342 supported a native unsigned->float conversion. More often, we only
2343 have a signed conversion, and have to add extra fixup code. */
2344 else if (shift == W_TYPE_SIZE - 1)
2345 e = Wtype_MAXp1_F / 2;
2346 else
2347 e = (Wtype)1 << shift;
2348 return f * e;
4f2e0d5e 2349#endif
203b91b9
RS
2350}
2351#endif
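/* Editorial sketch (not part of libgcc): the sticky-bit adjustment in the
   wider-type path above, assuming FSTYPE is SFtype (FSSIZE 24), the wider
   FTYPE is DFtype (FSIZE 53) and DI_SIZE is 64, so REP_BIT == (UDWtype) 1 << 11:

       u = 0x1000000000000005;          bits below REP_BIT are nonzero
       u &= ~(REP_BIT - 1);             0x1000000000000000
       u |= REP_BIT;                    0x1000000000000800

   The adjusted value still converts to DFtype exactly (its set bits span
   fewer than 53 positions), so the fact that low-order bits were present
   survives the intermediate conversion and the final DF -> SF rounding is
   not rounded twice.  */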
2352
4a73d865
JM
2353#if (defined(L_floatundisf) && LIBGCC2_HAS_SF_MODE) \
2354 || (defined(L_floatundidf) && LIBGCC2_HAS_DF_MODE)
d7735880 2355#define DI_SIZE (W_TYPE_SIZE * 2)
b04c9063
AM
2356#define F_MODE_OK(SIZE) \
2357 (SIZE < DI_SIZE \
2358 && SIZE > (DI_SIZE - SIZE + FSSIZE) \
5fb54b91 2359 && !AVOID_FP_TYPE_CONVERSION(SIZE))
4a73d865
JM
2360#if defined(L_floatundisf)
2361#define FUNC __floatundisf
2362#define FSTYPE SFtype
66bb34c0 2363#define FSSIZE __LIBGCC_SF_MANT_DIG__
4a73d865
JM
2364#else
2365#define FUNC __floatundidf
2366#define FSTYPE DFtype
66bb34c0 2367#define FSSIZE __LIBGCC_DF_MANT_DIG__
4a73d865 2368#endif
d7735880 2369
4a73d865
JM
2370FSTYPE
2371FUNC (UDWtype u)
d7735880 2372{
4a73d865 2373#if FSSIZE >= W_TYPE_SIZE
d7735880 2374 /* When the word size is small, we never get any rounding error. */
4a73d865 2375 FSTYPE f = (UWtype) (u >> W_TYPE_SIZE);
d7735880
JM
2376 f *= Wtype_MAXp1_F;
2377 f += (UWtype)u;
2378 return f;
66bb34c0
JM
2379#elif (LIBGCC2_HAS_DF_MODE && F_MODE_OK (__LIBGCC_DF_MANT_DIG__)) \
2380 || (LIBGCC2_HAS_XF_MODE && F_MODE_OK (__LIBGCC_XF_MANT_DIG__)) \
2381 || (LIBGCC2_HAS_TF_MODE && F_MODE_OK (__LIBGCC_TF_MANT_DIG__))
4a73d865 2382
66bb34c0
JM
2383#if (LIBGCC2_HAS_DF_MODE && F_MODE_OK (__LIBGCC_DF_MANT_DIG__))
2384# define FSIZE __LIBGCC_DF_MANT_DIG__
4a73d865 2385# define FTYPE DFtype
66bb34c0
JM
2386#elif (LIBGCC2_HAS_XF_MODE && F_MODE_OK (__LIBGCC_XF_MANT_DIG__))
2387# define FSIZE __LIBGCC_XF_MANT_DIG__
4a73d865 2388# define FTYPE XFtype
66bb34c0
JM
2389#elif (LIBGCC2_HAS_TF_MODE && F_MODE_OK (__LIBGCC_TF_MANT_DIG__))
2390# define FSIZE __LIBGCC_TF_MANT_DIG__
4a73d865 2391# define FTYPE TFtype
d7735880
JM
2392#else
2393# error
2394#endif
2395
4a73d865 2396#define REP_BIT ((UDWtype) 1 << (DI_SIZE - FSIZE))
d7735880
JM
2397
2398 /* Protect against double-rounding error.
2399 Represent any low-order bits that might be truncated, by a bit that
2400 won't be lost. The bit can go anywhere below the rounding position
4a73d865
JM
2401 of the FSTYPE. A fixed mask and bit position handles all usual
2402 configurations. */
2403 if (u >= ((UDWtype) 1 << FSIZE))
d7735880 2404 {
4a73d865 2405 if ((UDWtype) u & (REP_BIT - 1))
d7735880 2406 {
4a73d865
JM
2407 u &= ~ (REP_BIT - 1);
2408 u |= REP_BIT;
d7735880
JM
2409 }
2410 }
2411
4a73d865
JM
2412 /* Do the calculation in a wider type so that we don't lose any of
2413 the precision of the high word while multiplying it. */
2414 FTYPE f = (UWtype) (u >> W_TYPE_SIZE);
d7735880
JM
2415 f *= Wtype_MAXp1_F;
2416 f += (UWtype)u;
4a73d865 2417 return (FSTYPE) f;
d7735880 2418#else
4a73d865
JM
2419#if FSSIZE == W_TYPE_SIZE - 1
2420# error
2421#endif
2422 /* Finally, the word size is larger than the number of bits in the
2423 required FSTYPE, and we've got no suitable wider type. The only
2424 way to avoid double rounding is to special case the
2425 extraction. */
d7735880
JM
2426
2427 /* If there are no high bits set, fall back to one conversion. */
2428 if ((UWtype)u == u)
4a73d865 2429 return (FSTYPE)(UWtype)u;
d7735880
JM
2430
2431 /* Otherwise, find the power of two. */
2432 UWtype hi = u >> W_TYPE_SIZE;
2433
2434 UWtype count, shift;
2435 count_leading_zeros (count, hi);
2436
2437 shift = W_TYPE_SIZE - count;
2438
2439 /* Shift down the most significant bits. */
2440 hi = u >> shift;
2441
2442 /* If we lost any nonzero bits, set the lsb to ensure correct rounding. */
5fb54b91 2443 if ((UWtype)u << (W_TYPE_SIZE - shift))
d7735880
JM
2444 hi |= 1;
2445
2446 /* Convert the one word of data, and rescale. */
5fb54b91
RH
2447 FSTYPE f = hi, e;
2448 if (shift == W_TYPE_SIZE)
2449 e = Wtype_MAXp1_F;
2450 /* The following two cases could be merged if we knew that the target
2451 supported a native unsigned->float conversion. More often, we only
2452 have a signed conversion, and have to add extra fixup code. */
2453 else if (shift == W_TYPE_SIZE - 1)
2454 e = Wtype_MAXp1_F / 2;
2455 else
2456 e = (Wtype)1 << shift;
2457 return f * e;
d7735880
JM
2458#endif
2459}
2460#endif
2461
4e9db8b2 2462#if defined(L_fixunsxfsi) && LIBGCC2_HAS_XF_MODE
996ed075 2463UWtype
6da9c622 2464__fixunsxfSI (XFtype a)
e0799b34 2465{
5d0e6486
AO
2466 if (a >= - (XFtype) Wtype_MIN)
2467 return (Wtype) (a + Wtype_MIN) - Wtype_MIN;
996ed075 2468 return (Wtype) a;
e0799b34
RS
2469}
2470#endif
2471
4e9db8b2 2472#if defined(L_fixunsdfsi) && LIBGCC2_HAS_DF_MODE
996ed075 2473UWtype
6da9c622 2474__fixunsdfSI (DFtype a)
203b91b9 2475{
5d0e6486
AO
2476 if (a >= - (DFtype) Wtype_MIN)
2477 return (Wtype) (a + Wtype_MIN) - Wtype_MIN;
996ed075 2478 return (Wtype) a;
203b91b9
RS
2479}
2480#endif
2481
cfa7bd9c 2482#if defined(L_fixunssfsi) && LIBGCC2_HAS_SF_MODE
996ed075 2483UWtype
6da9c622 2484__fixunssfSI (SFtype a)
203b91b9 2485{
5d0e6486
AO
2486 if (a >= - (SFtype) Wtype_MIN)
2487 return (Wtype) (a + Wtype_MIN) - Wtype_MIN;
996ed075 2488 return (Wtype) a;
203b91b9 2489}
17684d46
RG
2490#endif
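/* Editorial sketch (not part of libgcc): the biasing trick used by the three
   __fixuns*SI routines above, assuming W_TYPE_SIZE == 32 so Wtype_MIN is
   -2147483648.  A value such as a = 3000000000.0f is too large for a direct
   (Wtype) conversion, so the routine computes:

       a + Wtype_MIN                    852516352.0   (now in signed range)
       (Wtype) (a + Wtype_MIN)          852516352
       ... - Wtype_MIN                  adds 2^31 back, so the UWtype
                                        return value is 3000000000

   Values below 2^31 simply take the plain (Wtype) a path.  */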
2491\f
2492/* Integer power helper used from __builtin_powi for non-constant
2493 exponents. */
2494
cfa7bd9c 2495#if (defined(L_powisf2) && LIBGCC2_HAS_SF_MODE) \
4e9db8b2
SE
2496 || (defined(L_powidf2) && LIBGCC2_HAS_DF_MODE) \
2497 || (defined(L_powixf2) && LIBGCC2_HAS_XF_MODE) \
2498 || (defined(L_powitf2) && LIBGCC2_HAS_TF_MODE)
17684d46
RG
2499# if defined(L_powisf2)
2500# define TYPE SFtype
2501# define NAME __powisf2
2502# elif defined(L_powidf2)
2503# define TYPE DFtype
2504# define NAME __powidf2
2505# elif defined(L_powixf2)
2506# define TYPE XFtype
2507# define NAME __powixf2
2508# elif defined(L_powitf2)
2509# define TYPE TFtype
2510# define NAME __powitf2
2511# endif
2512
0b8495ae
FJ
2513#undef int
2514#undef unsigned
17684d46 2515TYPE
0b8495ae 2516NAME (TYPE x, int m)
17684d46 2517{
35da095d 2518 unsigned int n = m < 0 ? -(unsigned int) m : (unsigned int) m;
17684d46
RG
2519 TYPE y = n % 2 ? x : 1;
2520 while (n >>= 1)
2521 {
2522 x = x * x;
2523 if (n % 2)
2524 y = y * x;
2525 }
2526 return m < 0 ? 1/y : y;
2527}
2528
203b91b9
RS
2529#endif
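/* Editorial sketch (not part of libgcc): the loop above is binary
   exponentiation.  For m = 13 (binary 1101) it performs:

       start     n = 13   y = x                      (13 is odd)
       n -> 6    x = x^2
       n -> 3    x = x^4   y = x * x^4   = x^5
       n -> 1    x = x^8   y = x^5 * x^8 = x^13

   i.e. three squarings plus two extra multiplies instead of twelve
   multiplies; a negative exponent returns the reciprocal, 1/y.  */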
2530\f
0abcd6cc
JG
2531#if((defined(L_mulhc3) || defined(L_divhc3)) && LIBGCC2_HAS_HF_MODE) \
2532 || ((defined(L_mulsc3) || defined(L_divsc3)) && LIBGCC2_HAS_SF_MODE) \
4e9db8b2
SE
2533 || ((defined(L_muldc3) || defined(L_divdc3)) && LIBGCC2_HAS_DF_MODE) \
2534 || ((defined(L_mulxc3) || defined(L_divxc3)) && LIBGCC2_HAS_XF_MODE) \
2535 || ((defined(L_multc3) || defined(L_divtc3)) && LIBGCC2_HAS_TF_MODE)
7e7e470f
RH
2536
2537#undef float
2538#undef double
2539#undef long
2540
0abcd6cc
JG
2541#if defined(L_mulhc3) || defined(L_divhc3)
2542# define MTYPE HFtype
2543# define CTYPE HCtype
54f0224d 2544# define AMTYPE SFtype
0abcd6cc
JG
2545# define MODE hc
2546# define CEXT __LIBGCC_HF_FUNC_EXT__
2547# define NOTRUNC (!__LIBGCC_HF_EXCESS_PRECISION__)
2548#elif defined(L_mulsc3) || defined(L_divsc3)
7e7e470f
RH
2549# define MTYPE SFtype
2550# define CTYPE SCtype
54f0224d 2551# define AMTYPE DFtype
7e7e470f 2552# define MODE sc
dd69f047 2553# define CEXT __LIBGCC_SF_FUNC_EXT__
d758aeb5 2554# define NOTRUNC (!__LIBGCC_SF_EXCESS_PRECISION__)
54f0224d
PM
2555# define RBIG (__LIBGCC_SF_MAX__ / 2)
2556# define RMIN (__LIBGCC_SF_MIN__)
2557# define RMIN2 (__LIBGCC_SF_EPSILON__)
2558# define RMINSCAL (1 / __LIBGCC_SF_EPSILON__)
2559# define RMAX2 (RBIG * RMIN2)
7e7e470f
RH
2560#elif defined(L_muldc3) || defined(L_divdc3)
2561# define MTYPE DFtype
2562# define CTYPE DCtype
2563# define MODE dc
dd69f047 2564# define CEXT __LIBGCC_DF_FUNC_EXT__
d758aeb5 2565# define NOTRUNC (!__LIBGCC_DF_EXCESS_PRECISION__)
54f0224d
PM
2566# define RBIG (__LIBGCC_DF_MAX__ / 2)
2567# define RMIN (__LIBGCC_DF_MIN__)
2568# define RMIN2 (__LIBGCC_DF_EPSILON__)
2569# define RMINSCAL (1 / __LIBGCC_DF_EPSILON__)
2570# define RMAX2 (RBIG * RMIN2)
7e7e470f
RH
2571#elif defined(L_mulxc3) || defined(L_divxc3)
2572# define MTYPE XFtype
2573# define CTYPE XCtype
2574# define MODE xc
dd69f047 2575# define CEXT __LIBGCC_XF_FUNC_EXT__
d758aeb5 2576# define NOTRUNC (!__LIBGCC_XF_EXCESS_PRECISION__)
54f0224d
PM
2577# define RBIG (__LIBGCC_XF_MAX__ / 2)
2578# define RMIN (__LIBGCC_XF_MIN__)
2579# define RMIN2 (__LIBGCC_XF_EPSILON__)
2580# define RMINSCAL (1 / __LIBGCC_XF_EPSILON__)
2581# define RMAX2 (RBIG * RMIN2)
7e7e470f
RH
2582#elif defined(L_multc3) || defined(L_divtc3)
2583# define MTYPE TFtype
2584# define CTYPE TCtype
2585# define MODE tc
dd69f047 2586# define CEXT __LIBGCC_TF_FUNC_EXT__
d758aeb5 2587# define NOTRUNC (!__LIBGCC_TF_EXCESS_PRECISION__)
d9105685
PM
2588# if __LIBGCC_TF_MANT_DIG__ == 106
2589# define RBIG (__LIBGCC_DF_MAX__ / 2)
2590# define RMIN (__LIBGCC_DF_MIN__)
2591# define RMIN2 (__LIBGCC_DF_EPSILON__)
2592# define RMINSCAL (1 / __LIBGCC_DF_EPSILON__)
2593# else
2594# define RBIG (__LIBGCC_TF_MAX__ / 2)
2595# define RMIN (__LIBGCC_TF_MIN__)
2596# define RMIN2 (__LIBGCC_TF_EPSILON__)
2597# define RMINSCAL (1 / __LIBGCC_TF_EPSILON__)
2598# endif
54f0224d 2599# define RMAX2 (RBIG * RMIN2)
7e7e470f
RH
2600#else
2601# error
2602#endif
2603
2604#define CONCAT3(A,B,C) _CONCAT3(A,B,C)
2605#define _CONCAT3(A,B,C) A##B##C
2606
2607#define CONCAT2(A,B) _CONCAT2(A,B)
2608#define _CONCAT2(A,B) A##B
2609
af8096fc
UB
2610#define isnan(x) __builtin_isnan (x)
2611#define isfinite(x) __builtin_isfinite (x)
2612#define isinf(x) __builtin_isinf (x)
7e7e470f 2613
ca22d882 2614#define INFINITY CONCAT2(__builtin_huge_val, CEXT) ()
7e7e470f
RH
2615#define I 1i
2616
2617/* Helpers to make the following code slightly less gross. */
2618#define COPYSIGN CONCAT2(__builtin_copysign, CEXT)
2619#define FABS CONCAT2(__builtin_fabs, CEXT)
2620
2621/* Verify that MTYPE matches up with CEXT. */
2622extern void *compile_type_assert[sizeof(INFINITY) == sizeof(MTYPE) ? 1 : -1];
2623
2624/* Ensure that we've lost any extra precision. */
2625#if NOTRUNC
2626# define TRUNC(x)
2627#else
2628# define TRUNC(x) __asm__ ("" : "=m"(x) : "m"(x))
2629#endif
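/* Editorial note (not part of libgcc): on targets that evaluate SFtype or
   DFtype expressions in wider registers (e.g. the x87 keeps float and double
   in 80-bit precision), a product such as a * c may carry excess precision.
   The empty asm above forces the value through memory, e.g.

       MTYPE ac = a * c;
       TRUNC (ac);        now ac has been rounded to MTYPE precision

   so the later NaN/infinity tests and cancellations see properly rounded
   values.  */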
2630
0abcd6cc 2631#if defined(L_mulhc3) || defined(L_mulsc3) || defined(L_muldc3) \
7e7e470f
RH
2632 || defined(L_mulxc3) || defined(L_multc3)
2633
2634CTYPE
2635CONCAT3(__mul,MODE,3) (MTYPE a, MTYPE b, MTYPE c, MTYPE d)
2636{
2637 MTYPE ac, bd, ad, bc, x, y;
ddef83d2 2638 CTYPE res;
7e7e470f
RH
2639
2640 ac = a * c;
2641 bd = b * d;
2642 ad = a * d;
2643 bc = b * c;
2644
2645 TRUNC (ac);
2646 TRUNC (bd);
2647 TRUNC (ad);
2648 TRUNC (bc);
2649
2650 x = ac - bd;
2651 y = ad + bc;
2652
2653 if (isnan (x) && isnan (y))
2654 {
2655 /* Recover infinities that were computed as NaN + iNaN. */
2656 _Bool recalc = 0;
2657 if (isinf (a) || isinf (b))
2658 {
2659 /* z is infinite. "Box" the infinity and change NaNs in
2660 the other factor to 0. */
2661 a = COPYSIGN (isinf (a) ? 1 : 0, a);
2662 b = COPYSIGN (isinf (b) ? 1 : 0, b);
2663 if (isnan (c)) c = COPYSIGN (0, c);
2664 if (isnan (d)) d = COPYSIGN (0, d);
2665 recalc = 1;
2666 }
2667 if (isinf (c) || isinf (d))
2668 {
2669 /* w is infinite. "Box" the infinity and change NaNs in
2670 the other factor to 0. */
2671 c = COPYSIGN (isinf (c) ? 1 : 0, c);
2672 d = COPYSIGN (isinf (d) ? 1 : 0, d);
2673 if (isnan (a)) a = COPYSIGN (0, a);
2674 if (isnan (b)) b = COPYSIGN (0, b);
2675 recalc = 1;
2676 }
2677 if (!recalc
2678 && (isinf (ac) || isinf (bd)
2679 || isinf (ad) || isinf (bc)))
2680 {
2681 /* Recover infinities from overflow by changing NaNs to 0. */
2682 if (isnan (a)) a = COPYSIGN (0, a);
2683 if (isnan (b)) b = COPYSIGN (0, b);
2684 if (isnan (c)) c = COPYSIGN (0, c);
2685 if (isnan (d)) d = COPYSIGN (0, d);
2686 recalc = 1;
2687 }
2688 if (recalc)
2689 {
2690 x = INFINITY * (a * c - b * d);
2691 y = INFINITY * (a * d + b * c);
2692 }
2693 }
2694
ddef83d2
RG
2695 __real__ res = x;
2696 __imag__ res = y;
2697 return res;
7e7e470f
RH
2698}
2699#endif /* complex multiply */
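/* Editorial sketch (not part of libgcc): CONCAT3(__mul,MODE,3) takes the real
   and imaginary parts of both operands separately and returns the complex
   product.  For the SC variant, for example:

       SCtype z = __mulsc3 (1.0f, 2.0f, 3.0f, 4.0f);
       (1 + 2i)(3 + 4i) = (1*3 - 2*4) + (1*4 + 2*3)i = -5 + 10i

   The NaN/infinity fix-ups above implement the recovery rules of C Annex G,
   so an infinite operand times a finite nonzero one still yields an infinite
   result rather than NaN + iNaN.  */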
2700
0abcd6cc 2701#if defined(L_divhc3) || defined(L_divsc3) || defined(L_divdc3) \
7e7e470f
RH
2702 || defined(L_divxc3) || defined(L_divtc3)
2703
2704CTYPE
2705CONCAT3(__div,MODE,3) (MTYPE a, MTYPE b, MTYPE c, MTYPE d)
2706{
54f0224d
PM
2707#if defined(L_divhc3) \
2708 || (defined(L_divsc3) && defined(__LIBGCC_HAVE_HWDBL__) )
2709
2710 /* Half precision is handled with float precision.
2711 float is handled with double precision when double precision
2712 hardware is available.
2713 Due to the additional precision, the simple complex divide
2714 method (without Smith's method) is sufficient to get accurate
2715 answers and runs slightly faster than Smith's method. */
2716
2717 AMTYPE aa, bb, cc, dd;
2718 AMTYPE denom;
2719 MTYPE x, y;
2720 CTYPE res;
2721 aa = a;
2722 bb = b;
2723 cc = c;
2724 dd = d;
2725
2726 denom = (cc * cc) + (dd * dd);
2727 x = ((aa * cc) + (bb * dd)) / denom;
2728 y = ((bb * cc) - (aa * dd)) / denom;
2729
2730#else
7e7e470f 2731 MTYPE denom, ratio, x, y;
ddef83d2 2732 CTYPE res;
7e7e470f 2733
54f0224d
PM
2734 /* double, extended, long double have significant potential
2735 underflow/overflow errors that can be greatly reduced with
2736 a limited number of tests and adjustments. float is handled
2737 the same way when no HW double is available.
2738 */
2739
2740 /* Scale by max(c,d) to reduce chances of denominator overflowing. */
7e7e470f
RH
2741 if (FABS (c) < FABS (d))
2742 {
54f0224d
PM
2743 /* Prevent underflow when denominator is near max representable. */
2744 if (FABS (d) >= RBIG)
2745 {
2746 a = a / 2;
2747 b = b / 2;
2748 c = c / 2;
2749 d = d / 2;
2750 }
2751 /* Avoid overflow/underflow issues when c and d are small.
2752 Scaling up helps avoid some underflows.
2753 No new overflow possible since both c and d are less than RMIN2. */
2754 if (FABS (d) < RMIN2)
2755 {
2756 a = a * RMINSCAL;
2757 b = b * RMINSCAL;
2758 c = c * RMINSCAL;
2759 d = d * RMINSCAL;
2760 }
2761 else
2762 {
2763 if (((FABS (a) < RMIN) && (FABS (b) < RMAX2) && (FABS (d) < RMAX2))
2764 || ((FABS (b) < RMIN) && (FABS (a) < RMAX2)
2765 && (FABS (d) < RMAX2)))
2766 {
2767 a = a * RMINSCAL;
2768 b = b * RMINSCAL;
2769 c = c * RMINSCAL;
2770 d = d * RMINSCAL;
2771 }
2772 }
7e7e470f
RH
2773 ratio = c / d;
2774 denom = (c * ratio) + d;
54f0224d
PM
2775 /* Choose alternate order of computation if ratio is subnormal. */
2776 if (FABS (ratio) > RMIN)
2777 {
2778 x = ((a * ratio) + b) / denom;
2779 y = ((b * ratio) - a) / denom;
2780 }
2781 else
2782 {
2783 x = ((c * (a / d)) + b) / denom;
2784 y = ((c * (b / d)) - a) / denom;
2785 }
7e7e470f
RH
2786 }
2787 else
2788 {
54f0224d
PM
2789 /* Prevent underflow when denominator is near max representable. */
2790 if (FABS (c) >= RBIG)
2791 {
2792 a = a / 2;
2793 b = b / 2;
2794 c = c / 2;
2795 d = d / 2;
2796 }
2797 /* Avoid overflow/underflow issues when both c and d are small.
2798 Scaling up helps avoid some underflows.
2799 No new overflow possible since both c and d are less than RMIN2. */
2800 if (FABS (c) < RMIN2)
2801 {
2802 a = a * RMINSCAL;
2803 b = b * RMINSCAL;
2804 c = c * RMINSCAL;
2805 d = d * RMINSCAL;
2806 }
2807 else
2808 {
2809 if (((FABS (a) < RMIN) && (FABS (b) < RMAX2) && (FABS (c) < RMAX2))
2810 || ((FABS (b) < RMIN) && (FABS (a) < RMAX2)
2811 && (FABS (c) < RMAX2)))
2812 {
2813 a = a * RMINSCAL;
2814 b = b * RMINSCAL;
2815 c = c * RMINSCAL;
2816 d = d * RMINSCAL;
2817 }
2818 }
7e7e470f
RH
2819 ratio = d / c;
2820 denom = (d * ratio) + c;
54f0224d
PM
2821 /* Choose alternate order of computation if ratio is subnormal. */
2822 if (FABS (ratio) > RMIN)
2823 {
2824 x = ((b * ratio) + a) / denom;
2825 y = (b - (a * ratio)) / denom;
2826 }
2827 else
2828 {
2829 x = (a + (d * (b / c))) / denom;
2830 y = (b - (d * (a / c))) / denom;
2831 }
7e7e470f 2832 }
54f0224d 2833#endif
7e7e470f 2834
54f0224d
PM
2835 /* Recover infinities and zeros that were computed as NaN+iNaN; the only
2836 cases are nonzero/zero, infinite/finite, and finite/infinite. */
7e7e470f
RH
2837 if (isnan (x) && isnan (y))
2838 {
698ac934 2839 if (c == 0.0 && d == 0.0 && (!isnan (a) || !isnan (b)))
7e7e470f
RH
2840 {
2841 x = COPYSIGN (INFINITY, c) * a;
2842 y = COPYSIGN (INFINITY, c) * b;
2843 }
2844 else if ((isinf (a) || isinf (b)) && isfinite (c) && isfinite (d))
2845 {
2846 a = COPYSIGN (isinf (a) ? 1 : 0, a);
2847 b = COPYSIGN (isinf (b) ? 1 : 0, b);
2848 x = INFINITY * (a * c + b * d);
2849 y = INFINITY * (b * c - a * d);
2850 }
2851 else if ((isinf (c) || isinf (d)) && isfinite (a) && isfinite (b))
2852 {
2853 c = COPYSIGN (isinf (c) ? 1 : 0, c);
2854 d = COPYSIGN (isinf (d) ? 1 : 0, d);
2855 x = 0.0 * (a * c + b * d);
2856 y = 0.0 * (b * c - a * d);
2857 }
2858 }
2859
ddef83d2
RG
2860 __real__ res = x;
2861 __imag__ res = y;
2862 return res;
7e7e470f
RH
2863}
2864#endif /* complex divide */
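/* Editorial sketch (not part of libgcc): the |c| >= |d| branch above is
   Smith's method for (a + bi) / (c + di).  With ratio = d / c it computes

       denom = c + d * ratio            ( = (c^2 + d^2) / c )
       x     = (a + b * ratio) / denom
       y     = (b - a * ratio) / denom

   which equals ((ac + bd) + (bc - ad)i) / (c^2 + d^2) but never forms
   c^2 + d^2 directly, so the denominator overflows or underflows far less
   often; the RBIG/RMIN2/RMINSCAL rescaling above handles the remaining
   extreme cases.  */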
2865
2866#endif /* all complex float routines */
2867\f
ab495388
RS
2868/* From here on down, the routines use normal data types. */
2869
2870#define SItype bogus_type
2871#define USItype bogus_type
2872#define DItype bogus_type
2873#define UDItype bogus_type
2874#define SFtype bogus_type
2875#define DFtype bogus_type
996ed075
JJ
2876#undef Wtype
2877#undef UWtype
2878#undef HWtype
2879#undef UHWtype
2880#undef DWtype
2881#undef UDWtype
ab495388
RS
2882
2883#undef char
2884#undef short
2885#undef int
2886#undef long
2887#undef unsigned
2888#undef float
2889#undef double
9bd23d2c
RS
2890\f
2891#ifdef L__gcc_bcmp
2892
2893/* Like bcmp except the sign is meaningful.
9faa82d8 2894 Result is negative if S1 is less than S2,
9bd23d2c
RS
2895 positive if S1 is greater, 0 if S1 and S2 are equal. */
2896
2897int
299b83b7 2898__gcc_bcmp (const unsigned char *s1, const unsigned char *s2, size_t size)
9bd23d2c
RS
2899{
2900 while (size > 0)
2901 {
b982024e 2902 const unsigned char c1 = *s1++, c2 = *s2++;
9bd23d2c
RS
2903 if (c1 != c2)
2904 return c1 - c2;
2905 size--;
2906 }
2907 return 0;
2908}
ab495388 2909
3fe68d0a
ZW
2910#endif
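/* Editorial sketch (not part of libgcc): unlike bcmp, the sign of the result
   is meaningful, e.g.

       __gcc_bcmp ((const unsigned char *) "abc",
                   (const unsigned char *) "abd", 3)

   returns 'c' - 'd' (negative), while equal buffers return 0.  */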
2911\f
2912/* __eprintf used to be used by GCC's private version of <assert.h>.
2913 We no longer provide that header, but this routine remains in libgcc.a
2914 for binary backward compatibility. Note that it is not included in
2915 the shared version of libgcc. */
2916#ifdef L_eprintf
2917#ifndef inhibit_libc
2918
2919#undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
2920#include <stdio.h>
2921
2922void
2923__eprintf (const char *string, const char *expression,
2924 unsigned int line, const char *filename)
2925{
2926 fprintf (stderr, string, expression, line, filename);
2927 fflush (stderr);
2928 abort ();
2929}
2930
2931#endif
203b91b9
RS
2932#endif
2933
203b91b9 2934\f
203b91b9
RS
2935#ifdef L_clear_cache
2936/* Clear part of an instruction cache. */
2937
203b91b9 2938void
a90b0cdd
MS
2939__clear_cache (void *beg __attribute__((__unused__)),
2940 void *end __attribute__((__unused__)))
203b91b9 2941{
23190837 2942#ifdef CLEAR_INSN_CACHE
a90b0cdd
MS
2943 /* Cast the void* pointers to char* as some implementations
2944 of the macro assume the pointers can be subtracted from
2945 one another. */
2946 CLEAR_INSN_CACHE ((char *) beg, (char *) end);
e1178973 2947#endif /* CLEAR_INSN_CACHE */
203b91b9
RS
2948}
2949
2950#endif /* L_clear_cache */
2951\f
2952#ifdef L_trampoline
2953
2954/* Jump to a trampoline, loading the static chain address. */
2955
cd985f66 2956#if defined(WINNT) && ! defined(__CYGWIN__)
902c7559 2957#define WIN32_LEAN_AND_MEAN
bf806a90 2958#include <windows.h>
0a38153f
KT
2959int getpagesize (void);
2960int mprotect (char *,int, int);
e3367a77 2961
94c1e7ac 2962int
3e7d8ef1 2963getpagesize (void)
f5ea9817
RK
2964{
2965#ifdef _ALPHA_
2966 return 8192;
2967#else
2968 return 4096;
2969#endif
2970}
2971
272e2587
RK
2972int
2973mprotect (char *addr, int len, int prot)
f5ea9817 2974{
234952b3 2975 DWORD np, op;
f5ea9817 2976
272e2587
RK
2977 if (prot == 7)
2978 np = 0x40;
2979 else if (prot == 5)
2980 np = 0x20;
2981 else if (prot == 4)
2982 np = 0x10;
2983 else if (prot == 3)
2984 np = 0x04;
2985 else if (prot == 1)
2986 np = 0x02;
2987 else if (prot == 0)
2988 np = 0x01;
234952b3
OS
2989 else
2990 return -1;
f5ea9817
RK
2991
2992 if (VirtualProtect (addr, len, np, &op))
2993 return 0;
2994 else
2995 return -1;
f5ea9817
RK
2996}
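/* Editorial note (not part of libgcc): the numeric literals above map the
   POSIX PROT_READ(1)/PROT_WRITE(2)/PROT_EXEC(4) combinations onto the
   corresponding <winnt.h> page-protection constants:

       prot 7 (RWX)  -> 0x40  PAGE_EXECUTE_READWRITE
       prot 5 (R+X)  -> 0x20  PAGE_EXECUTE_READ
       prot 4 (X)    -> 0x10  PAGE_EXECUTE
       prot 3 (R+W)  -> 0x04  PAGE_READWRITE
       prot 1 (R)    -> 0x02  PAGE_READONLY
       prot 0 (none) -> 0x01  PAGE_NOACCESS

   Any other combination is rejected with -1.  */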
2997
cd985f66 2998#endif /* WINNT && ! __CYGWIN__ */
f5ea9817 2999
23190837
AJ
3000#ifdef TRANSFER_FROM_TRAMPOLINE
3001TRANSFER_FROM_TRAMPOLINE
203b91b9 3002#endif
203b91b9
RS
3003#endif /* L_trampoline */
3004\f
cae21ae8 3005#ifndef __CYGWIN__
203b91b9
RS
3006#ifdef L__main
3007
3008#include "gbl-ctors.h"
7abc66b1 3009
c06cff95
RS
3010/* Some systems use __main in a way incompatible with its use in gcc; in these
3011 cases, use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
3012 give the same symbol without quotes for an alternative entry point. You
0f41302f 3013 must define both, or neither. */
c06cff95
RS
3014#ifndef NAME__MAIN
3015#define NAME__MAIN "__main"
3016#define SYMBOL__MAIN __main
3017#endif
203b91b9 3018
53d68b9f
JM
3019#if defined (__LIBGCC_INIT_SECTION_ASM_OP__) \
3020 || defined (__LIBGCC_INIT_ARRAY_SECTION_ASM_OP__)
fe1fd353
JM
3021#undef HAS_INIT_SECTION
3022#define HAS_INIT_SECTION
3023#endif
3024
3025#if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
31cf0144
JM
3026
3027/* Some ELF crosses use crtstuff.c to provide __CTOR_LIST__, but use this
72d1a48d
EB
3028 code to run constructors. In that case, we need to handle EH here, too.
3029 But MINGW32 is special because it handles CRTSTUFF and EH on its own. */
3030
3031#ifdef __MINGW32__
3032#undef __LIBGCC_EH_FRAME_SECTION_NAME__
3033#endif
31cf0144 3034
53d68b9f 3035#ifdef __LIBGCC_EH_FRAME_SECTION_NAME__
e4b776a6 3036#include "unwind-dw2-fde.h"
31cf0144
JM
3037extern unsigned char __EH_FRAME_BEGIN__[];
3038#endif
3039
203b91b9
RS
3040/* Run all the global destructors on exit from the program. */
3041
3042void
3e7d8ef1 3043__do_global_dtors (void)
203b91b9 3044{
89cf554b
RS
3045#ifdef DO_GLOBAL_DTORS_BODY
3046 DO_GLOBAL_DTORS_BODY;
3047#else
b40b9d93
MS
3048 static func_ptr *p = __DTOR_LIST__ + 1;
3049 while (*p)
3050 {
3051 p++;
3052 (*(p-1)) ();
3053 }
89cf554b 3054#endif
53d68b9f 3055#if defined (__LIBGCC_EH_FRAME_SECTION_NAME__) && !defined (HAS_INIT_SECTION)
a4ebb0e6
GRK
3056 {
3057 static int completed = 0;
3058 if (! completed)
3059 {
3060 completed = 1;
3061 __deregister_frame_info (__EH_FRAME_BEGIN__);
3062 }
3063 }
31cf0144 3064#endif
203b91b9 3065}
68d69835 3066#endif
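/* Editorial sketch (not part of libgcc): the fallback walk above assumes the
   list layout that collect2/crtstuff set up for these symbols, roughly

       func_ptr __DTOR_LIST__[] = { (func_ptr) -1,   count or -1 sentinel
                                    dtor_a,
                                    dtor_b,
                                    0 };              terminating null

   so starting at __DTOR_LIST__ + 1 calls dtor_a, then dtor_b, and stops at
   the null entry.  */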
203b91b9 3067
fe1fd353 3068#ifndef HAS_INIT_SECTION
203b91b9
RS
3069/* Run all the global constructors on entry to the program. */
3070
203b91b9 3071void
3e7d8ef1 3072__do_global_ctors (void)
203b91b9 3073{
53d68b9f 3074#ifdef __LIBGCC_EH_FRAME_SECTION_NAME__
31cf0144
JM
3075 {
3076 static struct object object;
3077 __register_frame_info (__EH_FRAME_BEGIN__, &object);
3078 }
3079#endif
203b91b9 3080 DO_GLOBAL_CTORS_BODY;
a218d5ba 3081 atexit (__do_global_dtors);
203b91b9 3082}
fe1fd353 3083#endif /* no HAS_INIT_SECTION */
203b91b9 3084
fe1fd353 3085#if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
203b91b9
RS
3086/* Subroutine called automatically by `main'.
3087 Compiling a global function named `main'
3088 produces an automatic call to this function at the beginning.
3089
3090 For many systems, this routine calls __do_global_ctors.
3091 For systems which support a .init section we use the .init section
3092 to run __do_global_ctors, so we need not do anything here. */
3093
4043d9c1 3094extern void SYMBOL__MAIN (void);
203b91b9 3095void
4043d9c1 3096SYMBOL__MAIN (void)
203b91b9
RS
3097{
3098 /* Support recursive calls to `main': run initializers just once. */
7e6f1890 3099 static int initialized;
203b91b9
RS
3100 if (! initialized)
3101 {
3102 initialized = 1;
3103 __do_global_ctors ();
3104 }
3105}
fe1fd353 3106#endif /* no HAS_INIT_SECTION or INVOKE__main */
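/* Editorial sketch (not part of libgcc): on targets that lack an init section
   (or that define INVOKE__main), compiling a global `main' produces a call to
   this routine at its start, roughly as if the user had written

       int main (void)
       {
         __main ();        runs __do_global_ctors exactly once
         ... user code ...
       }

   which is why SYMBOL__MAIN guards itself against recursive calls to main. */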
203b91b9
RS
3107
3108#endif /* L__main */
cae21ae8 3109#endif /* __CYGWIN__ */
203b91b9 3110\f
ad38743d 3111#ifdef L_ctors
203b91b9
RS
3112
3113#include "gbl-ctors.h"
3114
3115/* Provide default definitions for the lists of constructors and
657be7af
JL
3116 destructors, so that we don't get linker errors. These symbols are
3117 intentionally bss symbols, so that gld and/or collect will provide
3118 the right values. */
203b91b9
RS
3119
3120/* We declare the lists here with two elements each,
657be7af
JL
3121 so that they are valid empty lists if no other definition is loaded.
3122
3123 If we are using the old "set" extensions to have the gnu linker
3124 collect ctors and dtors, then __CTOR_LIST__ and __DTOR_LIST__
3125 must be in the bss/common section.
3126
3127 Long term no port should use those extensions. But many still do. */
1770511a 3128#if !defined(__LIBGCC_INIT_SECTION_ASM_OP__)
aa6ad1a6 3129#if defined (TARGET_ASM_CONSTRUCTOR) || defined (USE_COLLECT2)
d15d0264
RS
3130func_ptr __CTOR_LIST__[2] = {0, 0};
3131func_ptr __DTOR_LIST__[2] = {0, 0};
657be7af
JL
3132#else
3133func_ptr __CTOR_LIST__[2];
3134func_ptr __DTOR_LIST__[2];
3135#endif
1770511a 3136#endif /* no __LIBGCC_INIT_SECTION_ASM_OP__ */
ad38743d 3137#endif /* L_ctors */
baffad1f 3138#endif /* LIBGCC2_UNITS_PER_WORD <= MIN_UNITS_PER_WORD */