/* Operations with very long integers.
   Copyright (C) 2012-2015 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#define HOST_BITS_PER_HALF_WIDE_INT 32
#if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
# define HOST_HALF_WIDE_INT long
#elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
# define HOST_HALF_WIDE_INT int
#else
#error Please add support for HOST_HALF_WIDE_INT
#endif
#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
/* Do not include longlong.h when compiler is clang-based.  See PR61146.  */
#if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
typedef unsigned HOST_HALF_WIDE_INT UHWtype;
typedef unsigned HOST_WIDE_INT UWtype;
typedef unsigned int UQItype __attribute__ ((mode (QI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef unsigned int UDItype __attribute__ ((mode (DI)));
#if W_TYPE_SIZE == 32
typedef unsigned int UDWtype __attribute__ ((mode (DI)));
#else
typedef unsigned int UDWtype __attribute__ ((mode (TI)));
#endif
#include "longlong.h"
#endif
static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
#define BLOCKS_NEEDED(PREC) \
  (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
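
/* For example, assuming a 64-bit HOST_WIDE_INT host:

     BLOCKS_NEEDED (128) == 2     BLOCKS_NEEDED (70) == 2
     BLOCKS_NEEDED (64) == 1      BLOCKS_NEEDED (0) == 1
     SIGN_MASK (HOST_WIDE_INT_MIN) == -1
     SIGN_MASK (1) == 0  */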
/* Return the value of VAL[I] if I < LEN, otherwise, return 0 or -1
   based on the top existing bit of VAL.  */

static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
  return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
}
/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  if (len > blocks_needed)
    len = blocks_needed;

  if (len == 1)
    return len;

  top = val[len - 1];
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  if (top != 0 && top != (HOST_WIDE_INT)-1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
	{
	  if (SIGN_MASK (x) == top)
	    return i + 1;

	  /* We need an extra block because the top bit block i does
	     not match the extension.  */
	  return i + 2;
	}
    }

  /* The number is 0 or -1.  */
  return 1;
}
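
/* For example, with 64-bit HOST_WIDE_INT blocks and PRECISION 128,
   canonize compresses redundant high blocks:

     HOST_WIDE_INT v1[2] = { 5, 0 };    // canonize (v1, 2, 128) == 1
     HOST_WIDE_INT v2[2] = { -1, -1 };  // canonize (v2, 2, 128) == 1
     HOST_WIDE_INT v3[2] = { -1, 0 };   // canonize (v3, 2, 128) == 2

   In V3 both blocks are needed, because block 0 does not match a sign
   extension of itself.  */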
/*
 * Conversion routines in and out of wide_int.
 */
/* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
   result for an integer with precision PRECISION.  Return the length
   of VAL (after any canonization).  */
unsigned int
wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, bool need_canon)
{
  for (unsigned i = 0; i < xlen; i++)
    val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
}
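
/* For example, reading the two-block array {1, 0} as a 128-bit value
   (assuming a 64-bit HOST_WIDE_INT):

     HOST_WIDE_INT blocks[2] = { 1, 0 };
     HOST_WIDE_INT v[2];
     unsigned int len = wi::from_array (v, blocks, 2, 128, true);
     // len == 1: the redundant zero high block is dropped.  */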
/* Construct a wide int from a buffer of length LEN.  BUFFER will be
   read according to byte endianness and word endianness of the target.
   Only the lower BUFFER_LEN bytes of the result are set; the remaining
   high bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourselves, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val ();
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
	{
	  unsigned int word = byte / UNITS_PER_WORD;

	  if (WORDS_BIG_ENDIAN)
	    word = (words - 1) - word;

	  offset = word * UNITS_PER_WORD;

	  if (BYTES_BIG_ENDIAN)
	    offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
	  else
	    offset += byte % UNITS_PER_WORD;
	}
      else
	offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}
/* Sets RESULT from X, the sign is taken according to SGN.  */
void
wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
{
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();

  if (wi::neg_p (x, sgn))
    {
      /* We use ones complement to avoid -x80..0 edge case that -
	 won't work on.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len; i++)
	t[i] = ~v[i];
      if (excess > 0)
	t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
      mpz_com (result, result);
    }
  else if (excess > 0)
    {
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len - 1; i++)
	t[i] = v[i];
      t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
}
/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of VAL will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  unsigned int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (x, min) < 0)
	mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
	mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the absolute value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
  count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
  HOST_WIDE_INT *val = res.write_val ();
  /* Read the absolute value.

     Write directly to the wide_int storage if possible, otherwise leave
     GMP to allocate the memory for us.  It might be slightly more efficient
     to use mpz_tdiv_r_2exp for the latter case, but the situation is
     pathological and it seems safer to operate on the original mpz value
     in all cases.  */
  void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
			     &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  if (count < 1)
    {
      val[0] = 0;
      count = 1;
    }
  count = MIN (count, BLOCKS_NEEDED (prec));
  if (valres != val)
    {
      memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
      free (valres);
    }
  /* Zero-extend the absolute value to PREC bits.  */
  if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
    val[count++] = 0;
  else
    count = canonize (val, count, prec);
  res.set_len (count);

  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}
/*
 * Largest and smallest values in a mode.
 */
/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double_ints could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
wide_int
wi::max_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
}
/* Return the smallest SGNed number that is representable in PRECISION bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}
/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	  else if (val[len - 1] < 0)
	    {
	      while (len < BLOCKS_NEEDED (xprecision))
		val[len++] = -1;
	      if (small_xprecision)
		val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	      else
		val[len++] = 0;
	    }
	}
      else
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
	}
    }
  len = canonize (val, len, precision);

  return len;
}
/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
static inline HOST_WIDE_INT
selt (const HOST_WIDE_INT *a, unsigned int len,
      unsigned int blocks_needed, unsigned int small_prec,
      unsigned int index, signop sgn)
{
  HOST_WIDE_INT val;
  if (index < len)
    val = a[index];
  else if (index < blocks_needed || sgn == SIGNED)
    /* Signed or within the precision.  */
    val = SIGN_MASK (a[len - 1]);
  else
    /* Unsigned extension beyond the precision.  */
    val = 0;

  if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
	    ? sext_hwi (val, small_prec)
	    : zext_hwi (val, small_prec));
  else
    return val;
}
/* Find the highest bit represented in a wide int.  This will in
   general have the same value as the sign bit.  */
static inline HOST_WIDE_INT
top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
{
  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
  unsigned HOST_WIDE_INT val = a[len - 1];
  if (excess > 0)
    val <<= excess;
  return val >> (HOST_BITS_PER_WIDE_INT - 1);
}
/*
 * Comparisons, note that only equality is an operator.  The other
 * comparisons cannot be operators since they are inherently signed or
 * unsigned and C++ has no such operators.
 */
/* Return true if OP0 == OP1.  */
bool
wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		const HOST_WIDE_INT *op1, unsigned int op1len,
		unsigned int prec)
{
  int l0 = op0len - 1;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);

  if (op0len != op1len)
    return false;

  if (op0len == BLOCKS_NEEDED (prec) && small_prec)
    {
      /* It does not matter if we zext or sext here, we just have to
	 do both the same way.  */
      if (zext_hwi (op0[l0], small_prec) != zext_hwi (op1[l0], small_prec))
	return false;
      l0--;
    }

  while (l0 >= 0)
    if (op0[l0] != op1[l0])
      return false;
    else
      l0--;

  return true;
}
/* Return true if OP0 < OP1 using signed comparisons.  */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int precision,
		 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return true;
  if (s0 > s1)
    return false;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return true;
      if (u0 > u1)
	return false;
      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   signed compares.  */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int precision,
		const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return -1;
  if (s0 > s1)
    return 1;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return -1;
      if (u0 > u1)
	return 1;
      l--;
    }

  return 0;
}
/* Return true if OP0 < OP1 using unsigned comparisons.  */
bool
wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int precision,
		 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
	return true;
      if (x0 > x1)
	return false;
      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   unsigned compares.  */
int
wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int precision,
		const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
	return -1;
      if (x0 > x1)
	return 1;
      l--;
    }

  return 0;
}
/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already signs.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}
/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}
/*
 * Masking, inserting, shifting, rotating.
 */
/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
	    unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  if (start + width >= precision)
    width = precision - start;

  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}
/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
	 a new block.  */
      unsigned int len = block + 1;
      for (unsigned int i = 0; i < len; i++)
	val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;

      /* If the bit we just set is at the msb of the block, make sure
	 that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
	val[len++] = 0;
      return len;
    }
  else
    {
      for (unsigned int i = 0; i < xlen; i++)
	val[i] = xval[i];
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
      return canonize (val, xlen, precision);
    }
}
/* Byte-swap the value.  */
wide_int
wide_int_storage::bswap () const
{
  wide_int result = wide_int::create (precision);
  unsigned int i, s;
  unsigned int len = BLOCKS_NEEDED (precision);
  unsigned int xlen = get_len ();
  const HOST_WIDE_INT *xval = get_val ();
  HOST_WIDE_INT *val = result.write_val ();

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  for (i = 0; i < len; i++)
    val[i] = 0;

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  result.set_len (canonize (val, len, precision));
  return result;
}
/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
	  unsigned int prec)
{
  if (width >= prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}
/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
		  bool negate, unsigned int prec)
{
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      shift += width;
      if (shift < HOST_BITS_PER_WIDE_INT)
	{
	  /* case 000111000 */
	  block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
	  val[i++] = negate ? ~block : block;
	  return i;
	}
      else
	/* ...111000 */
	val[i++] = negate ? block : ~block;
    }

  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111 */
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else if (end < prec)
    val[i++] = negate ? -1 : 0;

  return i;
}
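
/* For example, a mask of 8 ones starting at bit 4 in a 32-bit value
   (assuming a 64-bit HOST_WIDE_INT):

     HOST_WIDE_INT v[1];
     wi::shifted_mask (v, 4, 8, false, 32);  // v[0] == 0xff0
     wi::shifted_mask (v, 4, 8, true, 32);   // v[0] == ~(HOST_WIDE_INT) 0xff0  */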
/*
 * Logical operations.
 */
/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 & ~OP1.  Return the number of blocks used.  */
unsigned int
wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
		   unsigned int op0len, const HOST_WIDE_INT *op1,
		   unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = ~op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
unsigned int
wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	      unsigned int op0len, const HOST_WIDE_INT *op1,
	      unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
unsigned int
wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
		  unsigned int op0len, const HOST_WIDE_INT *op1,
		  unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = ~op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
unsigned int
wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      while (l0 > l1)
	{
	  val[l0] = op0[l0] ^ op1mask;
	  l0--;
	}
    }

  if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      while (l1 > l0)
	{
	  val[l1] = op0mask ^ op1[l1];
	  l1--;
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] ^ op1[l0];
      l0--;
    }

  return canonize (val, len, prec);
}
/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);
  /* Add all of the explicitly defined elements.  */

  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
	*overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
	  *overflow = (HOST_WIDE_INT) (x << shift) < 0;
	}
      else
	{
	  /* Put the MSB of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_carry)
	    *overflow = (x <= o0);
	  else
	    *overflow = (x < o0);
	}
    }

  return canonize (val, len, prec);
}
/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
	   unsigned int in_len, unsigned int out_len,
	   unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  for (i = 0; i < blocks_needed - 1; i++)
    {
      HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
  if (small_prec)
    {
      if (sgn == SIGNED)
	x = sext_hwi (x, small_prec);
      else
	x = zext_hwi (x, small_prec);
    }
  result[j++] = x;
  result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}
/* The inverse of wi_unpack.  IN_LEN is the number of input
   blocks.  The number of output blocks will be half this amount.  */
static void
wi_pack (unsigned HOST_WIDE_INT *result,
	 const unsigned HOST_HALF_WIDE_INT *input,
	 unsigned int in_len)
{
  unsigned int i = 0;
  unsigned int j = 0;

  while (i + 2 < in_len)
    {
      result[j++] = (unsigned HOST_WIDE_INT)input[i]
	| ((unsigned HOST_WIDE_INT)input[i + 1]
	   << HOST_BITS_PER_HALF_WIDE_INT);
      i += 2;
    }

  /* Handle the case where in_len is odd.  For this we zero extend.  */
  if (in_len & 1)
    result[j++] = (unsigned HOST_WIDE_INT)input[i];
  else
    result[j++] = (unsigned HOST_WIDE_INT)input[i]
      | ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
}
/* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
		  unsigned int op1len, const HOST_WIDE_INT *op2val,
		  unsigned int op2len, unsigned int prec, signop sgn,
		  bool *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  /* The sizes here are scaled to support a 2x largest mode by 2x
     largest mode yielding a 4x largest mode result.  This is what is
     needed by vpn.  */

  unsigned HOST_HALF_WIDE_INT
    u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = false;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }

#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
	 least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
	  && wi::fits_uhwi_p (op1)
	  && wi::fits_uhwi_p (op2))
	{
	  /* This case never overflows.  */
	  if (high)
	    {
	      val[0] = 0;
	      return 1;
	    }
	  umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
	  if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
	    {
	      val[2] = 0;
	      return 3;
	    }
	  return 1 + (val[1] != 0 || val[0] < 0);
	}
      /* Likewise if the output is a full single HWI, except that the
	 upper HWI of the result is only used for determining overflow.
	 (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT upper;
	  umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
	  if (needs_overflow)
	    *overflow = (upper != 0);
	  if (high)
	    val[0] = upper;
	  return 1;
	}
    }
#endif

  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      if (high)
	{
	  val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op2len; i++)
	val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      if (high)
	{
	  val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op1len; i++)
	val[i] = op1val[i];
      return op1len;
    }

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
	{
	  o0 = op1.to_shwi ();
	  o1 = op2.to_shwi ();
	}
      else
	{
	  o0 = op1.to_uhwi ();
	  o1 = op2.to_uhwi ();
	}

      r = o0 * o1;
      if (needs_overflow)
	{
	  if (sgn == SIGNED)
	    {
	      if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
		*overflow = true;
	    }
	  else
	    {
	      if ((r >> prec) != 0)
		*overflow = true;
	    }
	}
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
  wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
	  * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
	{
	  t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
	       + r[i + j] + k);
	  r[i + j] = t & HALF_INT_MASK;
	  k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	}
      r[j + half_blocks_needed] = k;
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      unsigned HOST_WIDE_INT b;
      if (wi::neg_p (op1))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)v[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
      if (wi::neg_p (op2))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)u[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
	 For signed, overflow is true if any of the top bits are not equal
	 to the sign bit.  */
      if (sgn == UNSIGNED)
	top = 0;
      else
	{
	  top = r[(half_blocks_needed) - 1];
	  top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
	  top &= mask;
	}

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
	if (((HOST_WIDE_INT)(r[i] & mask)) != top)
	  *overflow = true;
    }

  if (high)
    {
      /* compute [prec] <- ([prec] * [prec]) >> [prec] */
      wi_pack ((unsigned HOST_WIDE_INT *) val,
	       &r[half_blocks_needed], half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
  else
    {
      /* compute [prec] <- ([prec] * [prec]) && ((1 << [prec]) - 1) */
      wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
}
/* Compute the population count of X.  */
int
wi::popcount (const wide_int_ref &x)
{
  unsigned int i;
  int count;

  /* The high order block is special if it is the last block and the
     precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
     have to clear out any ones above the precision before doing
     popcount on this block.  */
  count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
  unsigned int stop = x.len;
  if (count < 0)
    {
      count = popcount_hwi (x.uhigh () << -count);
      stop -= 1;
    }
  else
    {
      if (x.sign_mask () >= 0)
	count = 0;
    }

  for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);

  return count;
}
/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
	*overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
	  *overflow = (HOST_WIDE_INT) (x << shift) < 0;
	}
      else
	{
	  /* Put the MSB of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_borrow)
	    *overflow = (x >= o0);
	  else
	    *overflow = (x > o0);
	}
    }

  return canonize (val, len, prec);
}
/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U however
   there needs to be at least one extra element of B_DIVIDEND
   allocated, N is the number of elements of B_DIVISOR.  */
static void
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
		   unsigned HOST_HALF_WIDE_INT *b_remainder,
		   unsigned HOST_HALF_WIDE_INT *b_dividend,
		   unsigned HOST_HALF_WIDE_INT *b_divisor,
		   int m, int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT which is the size of half of a
     HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
  unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
  HOST_WIDE_INT t, k;
  int i, j, s;

  /* Single digit divisor.  */
  if (n == 1)
    {
      k = 0;
      for (j = m - 1; j >= 0; j--)
	{
	  b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
	  k = ((k * b + b_dividend[j])
	       - ((unsigned HOST_WIDE_INT)b_quotient[j]
		  * (unsigned HOST_WIDE_INT)b_divisor[0]));
	}
      b_remainder[0] = k;
      return;
    }

  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
	 algorithm, we can overwrite b_dividend and b_divisor, so we do
	 that.  */
      for (i = n - 1; i > 0; i--)
	b_divisor[i] = (b_divisor[i] << s)
	  | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
	b_dividend[i] = (b_dividend[i] << s)
	  | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
	{
	  qhat -= 1;
	  rhat += b_divisor[n-1];
	  if (rhat < b)
	    goto again;
	}

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
	{
	  p = qhat * b_divisor[i];
	  t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
	  b_dividend[i + j] = t;
	  k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
	       - (t >> HOST_BITS_PER_HALF_WIDE_INT));
	}
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
	{
	  b_quotient[j]--;
	  k = 0;
	  for (i = 0; i < n; i++)
	    {
	      t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
	      b_dividend[i+j] = t;
	      k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	    }
	  b_dividend[j+n] += k;
	}
    }
  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
	| (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
}
/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.  */
unsigned int
wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
		     HOST_WIDE_INT *remainder,
		     const HOST_WIDE_INT *dividend_val,
		     unsigned int dividend_len, unsigned int dividend_prec,
		     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
		     unsigned int divisor_prec, signop sgn,
		     bool *oflow)
{
  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
  unsigned HOST_HALF_WIDE_INT
    b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
  unsigned HOST_HALF_WIDE_INT
    b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned int m, n;
  bool dividend_neg = false;
  bool divisor_neg = false;
  bool overflow = false;
  wide_int neg_dividend, neg_divisor;

  wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
					   dividend_prec);
  wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
					  divisor_prec);
  if (divisor == 0)
    overflow = true;

  /* The smallest signed number / -1 causes overflow.  The dividend_len
     check is for speed rather than correctness.  */
  if (sgn == SIGNED
      && dividend_len == BLOCKS_NEEDED (dividend_prec)
      && divisor == -1
      && wi::only_sign_bit_p (dividend))
    overflow = true;

  /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
     (signed min / -1) has the same representation as the original dividend.
     We have traditionally made division by zero act as division by one,
     so there too we use the original dividend.  */
  if (overflow)
    {
      if (remainder)
	{
	  *remainder_len = 1;
	  remainder[0] = 0;
	}
      if (oflow != 0)
	*oflow = true;
      if (quotient)
	for (unsigned int i = 0; i < dividend_len; ++i)
	  quotient[i] = dividend_val[i];
      return dividend_len;
    }

  if (oflow)
    *oflow = false;

  /* Do it on the host if you can.  */
  if (sgn == SIGNED
      && wi::fits_shwi_p (dividend)
      && wi::fits_shwi_p (divisor))
    {
      HOST_WIDE_INT o0 = dividend.to_shwi ();
      HOST_WIDE_INT o1 = divisor.to_shwi ();

      if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
	{
	  gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
	  if (quotient)
	    {
	      quotient[0] = HOST_WIDE_INT_MIN;
	      quotient[1] = 0;
	    }
	  if (remainder)
	    {
	      remainder[0] = 0;
	      *remainder_len = 1;
	    }
	  return 2;
	}
      else
	{
	  if (quotient)
	    quotient[0] = o0 / o1;
	  if (remainder)
	    {
	      remainder[0] = o0 % o1;
	      *remainder_len = 1;
	    }
	  return 1;
	}
    }

  if (sgn == UNSIGNED
      && wi::fits_uhwi_p (dividend)
      && wi::fits_uhwi_p (divisor))
    {
      unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
      unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();

      if (quotient)
	quotient[0] = o0 / o1;
      if (remainder)
	{
	  remainder[0] = o0 % o1;
	  *remainder_len = 1;
	}
      return 1;
    }

  /* Make the divisor and dividend positive and remember what we
     did.  */
  if (sgn == SIGNED)
    {
      if (wi::neg_p (dividend))
	{
	  neg_dividend = -dividend;
	  dividend = neg_dividend;
	  dividend_neg = true;
	}
      if (wi::neg_p (divisor))
	{
	  neg_divisor = -divisor;
	  divisor = neg_divisor;
	  divisor_neg = true;
	}
    }

  wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
	     dividend_blocks_needed, dividend_prec, sgn);
  wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
	     divisor_blocks_needed, divisor_prec, sgn);

  m = dividend_blocks_needed;
  b_dividend[m] = 0;
  while (m > 1 && b_dividend[m - 1] == 0)
    m--;

  n = divisor_blocks_needed;
  while (n > 1 && b_divisor[n - 1] == 0)
    n--;

  memset (b_quotient, 0, sizeof (b_quotient));

  divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);

  unsigned int quotient_len = 0;
  if (quotient)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
      quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
      /* The quotient is neg if exactly one of the divisor or dividend is
	 neg.  */
      if (dividend_neg != divisor_neg)
	quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
				      quotient_len, dividend_prec,
				      UNSIGNED, 0);
    }

  if (remainder)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
      *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
      /* The remainder is always the same sign as the dividend.  */
      if (dividend_neg)
	*remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
					*remainder_len, dividend_prec,
					UNSIGNED, 0);
    }

  return quotient_len;
}
/*
 * Shifting, rotating and extraction.
 */
/* Left shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		  unsigned int xlen, unsigned int precision,
		  unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* The whole-block shift fills with zeros.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  for (unsigned int i = 0; i < skip; ++i)
    val[i] = 0;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i - skip);
  else
    {
      /* The first unfilled output block is a left shift of the first
	 block in XVAL.  The other output blocks contain bits from two
	 consecutive input blocks.  */
      unsigned HOST_WIDE_INT carry = 0;
      for (unsigned int i = skip; i < len; ++i)
	{
	  unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
	  val[i] = (x << small_shift) | carry;
	  carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
  return canonize (val, len, precision);
}
/* Right shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  The input has XPRECISION bits and the
   output has XPRECISION - SHIFT bits.  */
static unsigned int
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		     unsigned int xlen, unsigned int xprecision,
		     unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int len = BLOCKS_NEEDED (xprecision - shift);

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = 0; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i + skip);
  else
    {
      /* Each output block but the last is a combination of two input blocks.
	 The last block is a right shift of the last block in XVAL.  */
      unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
      for (unsigned int i = 0; i < len; ++i)
	{
	  val[i] = curr >> small_shift;
	  curr = safe_uhwi (xval, xlen, i + skip + 1);
	  val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
  return len;
}
/* Logically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Zero-extend it to wider precisions.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	val[len - 1] = zext_hwi (val[len - 1], small_prec);
      else if (val[len - 1] < 0)
	{
	  /* Add a new block with a zero.  */
	  val[len++] = 0;
	  return len;
	}
    }
  return canonize (val, len, precision);
}
/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Sign-extend it to wider types.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	val[len - 1] = sext_hwi (val[len - 1], small_prec);
    }
  return canonize (val, len, precision);
}
/* Return the number of leading (upper) zeros in X.  */
int
wi::clz (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  if (count < 0)
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out.  */
    high = (high << -count) >> -count;
  else if (x.sign_mask () < 0)
    /* The upper bit is set, so there are no leading zeros.  */
    return 0;

  /* We don't need to look below HIGH.  Either HIGH is nonzero,
     or the top bit of the block below is nonzero; clz_hwi is
     HOST_BITS_PER_WIDE_INT in the latter case.  */
  return count + clz_hwi (high);
}
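
/* For example, the 128-bit value 1 is stored as the single block {1}
   (assuming a 64-bit HOST_WIDE_INT), so COUNT is 128 - 64 == 64
   implicit leading bits, and clz_hwi (1) == 63 gives a total of
   wi::clz == 127.  */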
/* Return the number of redundant sign bits in X.  (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.)  */
int
wi::clrsb (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  unsigned HOST_WIDE_INT mask = -1;
  if (count < 0)
    {
      /* The upper -COUNT bits of HIGH are not part of the value.
	 Clear them from both MASK and HIGH.  */
      mask >>= -count;
      high &= mask;
    }

  /* If the top bit is 1, count the number of leading 1s.  If the top
     bit is zero, count the number of leading zeros.  */
  if (high > mask / 2)
    high ^= mask;

  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is 0.  */
  return count + clz_hwi (high) - 1;
}
/* Return the number of trailing (lower) zeros in X.  */
int
wi::ctz (const wide_int_ref &x)
{
  if (x.len == 1 && x.ulow () == 0)
    return x.precision;

  /* Having dealt with the zero case, there must be a block with a
     nonzero bit.  We don't care about the bits above the first 1.  */
  unsigned int i = 0;
  while (x.val[i] == 0)
    ++i;
  return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
}
/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1.  */
int
wi::exact_log2 (const wide_int_ref &x)
{
  /* Reject cases where there are implicit -1 blocks above HIGH.  */
  if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
    return -1;

  /* Set CRUX to the index of the entry that should be nonzero.
     If the top block is zero then the next lowest block (if any)
     must have the high bit set.  */
  unsigned int crux = x.len - 1;
  if (crux > 0 && x.val[crux] == 0)
    crux -= 1;

  /* Check that all lower blocks are zero.  */
  for (unsigned int i = 0; i < crux; ++i)
    if (x.val[i] != 0)
      return -1;

  /* Get a zero-extended form of block CRUX.  */
  unsigned HOST_WIDE_INT hwi = x.val[crux];
  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
    hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);

  /* Now it's down to whether HWI is a power of 2.  */
  int res = ::exact_log2 (hwi);
  if (res >= 0)
    res += crux * HOST_BITS_PER_WIDE_INT;
  return res;
}
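
/* For example, the 128-bit value 1 << 70 is stored as the two blocks
   { 0, 1 << 6 } (assuming a 64-bit HOST_WIDE_INT): CRUX is 1, block 0
   is zero, and ::exact_log2 (1 << 6) == 6 gives 6 + 64 == 70.  */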
/* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
int
wi::floor_log2 (const wide_int_ref &x)
{
  return x.precision - 1 - clz (x);
}
/* Return the index of the first (lowest) set bit in X, counting from 1.
   Return 0 if X is 0.  */
int
wi::ffs (const wide_int_ref &x)
{
  return eq_p (x, 0) ? 0 : ctz (x) + 1;
}
/* Return true if sign-extending X to have precision PRECISION would give
   the minimum signed value at that precision.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
{
  return ctz (x) + 1 == int (precision);
}
/* Return true if X represents the minimum signed value.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x)
{
  return only_sign_bit_p (x, x.precision);
}
/*
 * Private utilities.
 */

void gt_ggc_mx (widest_int *) { }
void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
void gt_pch_nx (widest_int *) { }

template void wide_int::dump () const;
template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
template void offset_int::dump () const;
template void widest_int::dump () const;