1/* Operations with very long integers.
2 Copyright (C) 2012-2024 Free Software Foundation, Inc.
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify it
8under the terms of the GNU General Public License as published by the
9Free Software Foundation; either version 3, or (at your option) any
10later version.
11
12GCC is distributed in the hope that it will be useful, but WITHOUT
13ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15for more details.
16
17You should have received a copy of the GNU General Public License
18along with GCC; see the file COPYING3. If not see
19<http://www.gnu.org/licenses/>. */
20
21#include "config.h"
22#include "system.h"
23#include "coretypes.h"
24#include "tm.h"
25#include "tree.h"
26#include "selftest.h"
27
28
29#define HOST_BITS_PER_HALF_WIDE_INT 32
30#if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
31# define HOST_HALF_WIDE_INT long
32#elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
33# define HOST_HALF_WIDE_INT int
34#else
35#error Please add support for HOST_HALF_WIDE_INT
36#endif
37
38#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
39/* Do not include longlong.h when the compiler is clang-based. See PR61146. */
40#if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
41typedef unsigned HOST_HALF_WIDE_INT UHWtype;
42typedef unsigned HOST_WIDE_INT UWtype;
43typedef unsigned int UQItype __attribute__ ((mode (QI)));
44typedef unsigned int USItype __attribute__ ((mode (SI)));
45typedef unsigned int UDItype __attribute__ ((mode (DI)));
46#if W_TYPE_SIZE == 32
47typedef unsigned int UDWtype __attribute__ ((mode (DI)));
48#else
49typedef unsigned int UDWtype __attribute__ ((mode (TI)));
50#endif
51#include "longlong.h"
52#endif
53
54static const HOST_WIDE_INT zeros[1] = {};
55
56/*
57 * Internal utilities.
58 */
59
60/* Quantities to deal with values that hold half of a wide int. Used
61 in multiply and divide. */
62#define HALF_INT_MASK ((HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
63
64#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
65#define BLOCKS_NEEDED(PREC) (PREC ? CEIL (PREC, HOST_BITS_PER_WIDE_INT) : 1)
66#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
67
68/* Return the value VAL[I] if I < LEN; otherwise, return 0 or -1
69 based on the top existing bit of VAL. */
70
71static unsigned HOST_WIDE_INT
72safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
73{
74 return i < len ? val[i] : val[len - 1] < 0 ? HOST_WIDE_INT_M1 : 0;
75}
76
77/* Convert the integer in VAL to canonical form, returning its new length.
78 LEN is the number of blocks currently in VAL and PRECISION is the number
79 of bits in the integer it represents.
80
81 This function only changes the representation, not the value. */
82static unsigned int
83canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
84{
85 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
86 HOST_WIDE_INT top;
87 int i;
88
89 if (len > blocks_needed)
90 len = blocks_needed;
91
92 if (len == 1)
93 return len;
94
95 top = val[len - 1];
96 if (len * HOST_BITS_PER_WIDE_INT > precision)
97 val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
98 if (top != 0 && top != HOST_WIDE_INT_M1)
99 return len;
100
101 /* At this point we know that the top is either 0 or -1. Find the
102 first block that is not a copy of this. */
103 for (i = len - 2; i >= 0; i--)
104 {
105 HOST_WIDE_INT x = val[i];
106 if (x != top)
107 {
108 if (SIGN_MASK (x) == top)
109 return i + 1;
110
111 /* We need an extra block because the top bit of block I does
112 not match the extension. */
113 return i + 2;
114 }
115 }
116
117 /* The number is 0 or -1. */
118 return 1;
119}
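/* Editorial sketch (not part of GCC): a minimal illustration of the
   canonical form that canonize produces, using only the definitions
   above.  Kept under "#if 0" so it is never compiled.  */
#if 0
static void
canonize_example (void)
{
  /* -1 at 128-bit precision: the second block merely repeats the sign,
     so the canonical length collapses to a single block.  */
  HOST_WIDE_INT a[2] = { -1, -1 };
  gcc_assert (canonize (a, 2, 128) == 1);

  /* 1 followed by a redundant zero block also canonizes to length 1.  */
  HOST_WIDE_INT b[2] = { 1, 0 };
  gcc_assert (canonize (b, 2, 128) == 1);

  /* A block whose sign bit does not match the (zero) extension keeps
     the extra zero block, so the length stays 2.  */
  HOST_WIDE_INT c[2] = { -1, 0 };
  gcc_assert (canonize (c, 2, 128) == 2);
}
#endif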
120
121/* VAL[0] is the unsigned result of an operation. Canonize it by adding
122 another 0 block if needed, and return number of blocks needed. */
123
124static inline unsigned int
125canonize_uhwi (HOST_WIDE_INT *val, unsigned int precision)
126{
127 if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT)
128 {
129 val[1] = 0;
130 return 2;
131 }
132 return 1;
133}
134
135/*
136 * Conversion routines in and out of wide_int.
137 */
138
139/* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
140 result for an integer with precision PRECISION. Return the length
141 of VAL (after any canonization). */
142unsigned int
143wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
144 unsigned int xlen, unsigned int precision, bool need_canon)
145{
146 for (unsigned i = 0; i < xlen; i++)
147 val[i] = xval[i];
148 return need_canon ? canonize (val, xlen, precision) : xlen;
149}
150
151/* Construct a wide int from a buffer of length LEN. BUFFER will be
152 read according to byte endianness and word endianness of the target.
153 Only the lower BUFFER_LEN bytes of the result are set; the remaining
154 high bytes are cleared. */
155wide_int
156wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
157{
158 unsigned int precision = buffer_len * BITS_PER_UNIT;
159 wide_int result = wide_int::create (precision);
160 unsigned int words = buffer_len / UNITS_PER_WORD;
161
162 /* We have to clear all the bits ourselves, as we merely OR in values
163 below. */
164 unsigned int len = BLOCKS_NEEDED (precision);
165 HOST_WIDE_INT *val = result.write_val (0);
166 for (unsigned int i = 0; i < len; ++i)
167 val[i] = 0;
168
169 for (unsigned int byte = 0; byte < buffer_len; byte++)
170 {
171 unsigned int offset;
172 unsigned int index;
173 unsigned int bitpos = byte * BITS_PER_UNIT;
174 unsigned HOST_WIDE_INT value;
175
176 if (buffer_len > UNITS_PER_WORD)
177 {
178 unsigned int word = byte / UNITS_PER_WORD;
179
180 if (WORDS_BIG_ENDIAN)
181 word = (words - 1) - word;
182
183 offset = word * UNITS_PER_WORD;
184
185 if (BYTES_BIG_ENDIAN)
186 offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
187 else
188 offset += byte % UNITS_PER_WORD;
189 }
190 else
191 offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
192
193 value = (unsigned HOST_WIDE_INT) buffer[offset];
194
195 index = bitpos / HOST_BITS_PER_WIDE_INT;
196 val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
197 }
198
199 result.set_len (canonize (val, len, precision));
200
201 return result;
202}
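/* Editorial sketch (not part of GCC): how wi::from_buffer assembles a
   value, assuming a little-endian target with 8-byte words; other
   targets follow the byte/word order handling above.  Kept under
   "#if 0".  */
#if 0
static void
from_buffer_example (void)
{
  /* Four bytes, least significant first on a little-endian target.  */
  const unsigned char buf[4] = { 0x78, 0x56, 0x34, 0x12 };
  wide_int x = wi::from_buffer (buf, 4);	/* 32-bit precision.  */
  gcc_assert (x.get_precision () == 32 && x.to_uhwi () == 0x12345678);
}
#endif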
203
204/* Set RESULT from X; the sign is taken according to SGN. */
205void
206wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
207{
208 int len = x.get_len ();
209 const HOST_WIDE_INT *v = x.get_val ();
210 int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
211
212 if (wi::neg_p (x, sgn))
213 {
214 /* We use ones' complement to avoid the 0x80..0 edge case that
215 unary minus won't work on. */
216 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
217 for (int i = 0; i < len; i++)
218 t[i] = ~v[i];
219 if (excess > 0)
220 t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
221 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
222 mpz_com (result, result);
223 }
224 else if (excess > 0)
225 {
226 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
227 for (int i = 0; i < len - 1; i++)
228 t[i] = v[i];
229 t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
230 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
231 }
232 else if (excess < 0 && wi::neg_p (x))
233 {
234 int extra = CEIL (-excess, HOST_BITS_PER_WIDE_INT);
235 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len + extra);
236 for (int i = 0; i < len; i++)
237 t[i] = v[i];
238 for (int i = 0; i < extra; i++)
239 t[len + i] = -1;
240 excess = (-excess) % HOST_BITS_PER_WIDE_INT;
241 if (excess)
242 t[len + extra - 1] = (HOST_WIDE_INT_1U << excess) - 1;
243 mpz_import (result, len + extra, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
244 }
245 else
246 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
247}
248
249/* Returns X converted to TYPE. If WRAP is true, then out-of-range
250 values of VAL will be wrapped; otherwise, they will be set to the
251 appropriate minimum or maximum TYPE bound. */
252wide_int
253wi::from_mpz (const_tree type, mpz_t x, bool wrap)
254{
255 size_t count, numb;
256 unsigned int prec = TYPE_PRECISION (type);
257 wide_int res = wide_int::create (prec);
258
259 if (!wrap)
260 {
261 mpz_t min, max;
262
263 mpz_init (min);
264 mpz_init (max);
265 get_type_static_bounds (type, min, max);
266
267 if (mpz_cmp (x, min) < 0)
268 mpz_set (x, min);
269 else if (mpz_cmp (x, max) > 0)
270 mpz_set (x, max);
271
272 mpz_clear (min);
273 mpz_clear (max);
274 }
275
276 /* Determine the number of unsigned HOST_WIDE_INTs that are required
277 for representing the absolute value. The code to calculate count is
278 extracted from the GMP manual, section "Integer Import and Export":
279 http://gmplib.org/manual/Integer-Import-and-Export.html */
280 numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
281 count = CEIL (mpz_sizeinbase (x, 2), numb);
282 HOST_WIDE_INT *val = res.write_val (0);
283 /* Read the absolute value.
284
285 Write directly to the wide_int storage if possible, otherwise leave
286 GMP to allocate the memory for us. It might be slightly more efficient
287 to use mpz_tdiv_r_2exp for the latter case, but the situation is
288 pathological and it seems safer to operate on the original mpz value
289 in all cases. */
290 void *valres = mpz_export (count <= WIDE_INT_MAX_INL_ELTS ? val : 0,
291 &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
292 if (count < 1)
293 {
294 val[0] = 0;
295 count = 1;
296 }
297 count = MIN (count, BLOCKS_NEEDED (prec));
298 if (valres != val)
299 {
300 memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
301 free (valres);
302 }
303 /* Zero-extend the absolute value to PREC bits. */
304 if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
305 val[count++] = 0;
306 else
307 count = canonize (val, count, prec);
308 res.set_len (count);
309
310 if (mpz_sgn (x) < 0)
311 res = -res;
312
313 return res;
314}
315
316/*
317 * Largest and smallest values in a mode.
318 */
319
320/* Return the largest SGNed number that is representable in PRECISION bits.
321
322 TODO: There is still code from the double_int era that tries to
323 make up for the fact that double int's could not represent the
324 min and max values of all types. This code should be removed
325 because the min and max values can always be represented in
326 wide_ints and int-csts. */
327wide_int
328wi::max_value (unsigned int precision, signop sgn)
329{
330 gcc_checking_assert (precision != 0);
331 if (sgn == UNSIGNED)
332 /* The unsigned max is just all ones. */
333 return shwi (-1, precision);
334 else
335 /* The signed max is all ones except the top bit. This must be
336 explicitly represented. */
337 return mask (precision - 1, false, precision);
338}
339
340/* Return the smallest SGNed number that is representable in PRECISION bits. */
341wide_int
342wi::min_value (unsigned int precision, signop sgn)
343{
344 gcc_checking_assert (precision != 0);
345 if (sgn == UNSIGNED)
346 return uhwi (0, precision);
347 else
348 /* The signed min is all zeros except the top bit. This must be
349 explicitly represented. */
350 return wi::set_bit_in_zero (precision - 1, precision);
351}
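/* Editorial sketch (not part of GCC): the extreme values at 8-bit
   precision, as produced by the two functions above.  Kept under
   "#if 0".  */
#if 0
static void
extreme_values_example (void)
{
  gcc_assert (wi::max_value (8, UNSIGNED).to_uhwi () == 255);
  gcc_assert (wi::max_value (8, SIGNED).to_shwi () == 127);
  gcc_assert (wi::min_value (8, UNSIGNED).to_uhwi () == 0);
  gcc_assert (wi::min_value (8, SIGNED).to_shwi () == -128);
}
#endif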
352
353/*
354 * Public utilities.
355 */
356
357/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
358 signedness SGN, to an integer that has PRECISION bits. Store the blocks
359 in VAL and return the number of blocks used.
360
361 This function can handle both extension (PRECISION > XPRECISION)
362 and truncation (PRECISION < XPRECISION). */
363unsigned int
364wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
365 unsigned int xlen, unsigned int xprecision,
366 unsigned int precision, signop sgn)
367{
368 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
369 unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
370 for (unsigned i = 0; i < len; i++)
371 val[i] = xval[i];
372
373 if (precision > xprecision)
374 {
375 unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
376
377 /* Expanding. */
378 if (sgn == UNSIGNED)
379 {
380 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
381 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
382 else if (val[len - 1] < 0)
383 {
384 while (len < BLOCKS_NEEDED (xprecision))
385 val[len++] = -1;
386 if (small_xprecision)
387 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
388 else
389 val[len++] = 0;
390 }
391 }
392 else
393 {
394 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
395 val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
396 }
397 }
398 len = canonize (val, len, precision);
399
400 return len;
401}
402
403/* This function hides the fact that we cannot rely on the bits beyond
404 the precision. This issue comes up in the relational comparisons
405 where we do allow comparisons of values of different precisions. */
406static inline HOST_WIDE_INT
407selt (const HOST_WIDE_INT *a, unsigned int len,
408 unsigned int blocks_needed, unsigned int small_prec,
409 unsigned int index, signop sgn)
410{
411 HOST_WIDE_INT val;
412 if (index < len)
413 val = a[index];
414 else if (index < blocks_needed || sgn == SIGNED)
415 /* Signed or within the precision. */
416 val = SIGN_MASK (a[len - 1]);
417 else
418 /* Unsigned extension beyond the precision. */
419 val = 0;
420
421 if (small_prec && index == blocks_needed - 1)
422 return (sgn == SIGNED
423 ? sext_hwi (val, small_prec)
424 : zext_hwi (val, small_prec));
425 else
426 return val;
427}
428
429/* Find the highest bit represented in a wide int. This will in
430 general have the same value as the sign bit. */
431static inline HOST_WIDE_INT
432top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
433{
434 int excess = len * HOST_BITS_PER_WIDE_INT - prec;
435 unsigned HOST_WIDE_INT val = a[len - 1];
436 if (excess > 0)
437 val <<= excess;
438 return val >> (HOST_BITS_PER_WIDE_INT - 1);
439}
440
441/*
442 * Comparisons, note that only equality is an operator. The other
443 * comparisons cannot be operators since they are inherently signed or
444 * unsigned and C++ has no such operators.
445 */
446
447/* Return true if OP0 == OP1. */
448bool
449wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
450 const HOST_WIDE_INT *op1, unsigned int op1len,
451 unsigned int prec)
452{
453 int l0 = op0len - 1;
454 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
455
456 if (op0len != op1len)
457 return false;
458
459 if (op0len == BLOCKS_NEEDED (prec) && small_prec)
460 {
461 /* It does not matter if we zext or sext here, we just have to
462 do both the same way. */
463 if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
464 return false;
465 l0--;
466 }
467
468 while (l0 >= 0)
469 if (op0[l0] != op1[l0])
470 return false;
471 else
472 l0--;
473
474 return true;
475}
476
477/* Return true if OP0 < OP1 using signed comparisons. */
478bool
479wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
480 unsigned int precision,
481 const HOST_WIDE_INT *op1, unsigned int op1len)
482{
483 HOST_WIDE_INT s0, s1;
484 unsigned HOST_WIDE_INT u0, u1;
485 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
486 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
487 int l = MAX (op0len - 1, op1len - 1);
488
489 /* Only the top block is compared as signed. The rest are unsigned
490 comparisons. */
491 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
492 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
493 if (s0 < s1)
494 return true;
495 if (s0 > s1)
496 return false;
497
498 l--;
499 while (l >= 0)
500 {
501 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
502 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
503
504 if (u0 < u1)
505 return true;
506 if (u0 > u1)
507 return false;
508 l--;
509 }
510
511 return false;
512}
513
514/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
515 signed compares. */
516int
517wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
518 unsigned int precision,
519 const HOST_WIDE_INT *op1, unsigned int op1len)
520{
521 HOST_WIDE_INT s0, s1;
522 unsigned HOST_WIDE_INT u0, u1;
523 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
524 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
525 int l = MAX (op0len - 1, op1len - 1);
526
527 /* Only the top block is compared as signed. The rest are unsigned
528 comparisons. */
529 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
530 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
531 if (s0 < s1)
532 return -1;
533 if (s0 > s1)
534 return 1;
535
536 l--;
537 while (l >= 0)
538 {
539 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
540 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
541
542 if (u0 < u1)
543 return -1;
544 if (u0 > u1)
545 return 1;
546 l--;
547 }
548
549 return 0;
550}
551
552/* Return true if OP0 < OP1 using unsigned comparisons. */
553bool
554wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
555 unsigned int precision,
556 const HOST_WIDE_INT *op1, unsigned int op1len)
557{
558 unsigned HOST_WIDE_INT x0;
559 unsigned HOST_WIDE_INT x1;
560 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
561 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
562 int l = MAX (op0len - 1, op1len - 1);
563
564 while (l >= 0)
565 {
566 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
567 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
568 if (x0 < x1)
569 return true;
570 if (x0 > x1)
571 return false;
572 l--;
573 }
574
575 return false;
576}
577
578/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
579 unsigned compares. */
580int
581wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
582 unsigned int precision,
583 const HOST_WIDE_INT *op1, unsigned int op1len)
584{
585 unsigned HOST_WIDE_INT x0;
586 unsigned HOST_WIDE_INT x1;
587 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
588 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
589 int l = MAX (op0len - 1, op1len - 1);
590
591 while (l >= 0)
592 {
593 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
594 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
595 if (x0 < x1)
596 return -1;
597 if (x0 > x1)
598 return 1;
599 l--;
600 }
601
602 return 0;
603}
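/* Editorial sketch (not part of GCC): why the comparison routines come
   in signed and unsigned flavours.  The same 8-bit pattern 0xff orders
   differently under the two interpretations.  Kept under "#if 0".  */
#if 0
static void
compare_example (void)
{
  wide_int a = wi::uhwi (0xff, 8);	/* -1 signed, 255 unsigned.  */
  wide_int b = wi::uhwi (1, 8);
  gcc_assert (wi::lts_p (a, b));	/* -1 < 1 as signed values.  */
  gcc_assert (wi::ltu_p (b, a));	/* 1 < 255 as unsigned values.  */
}
#endif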
604
605/*
606 * Extension.
607 */
608
609/* Sign-extend the number represented by XVAL and XLEN into VAL,
610 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
611 and VAL have PRECISION bits. */
612unsigned int
613wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
614 unsigned int xlen, unsigned int precision, unsigned int offset)
615{
616 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
617 /* Extending beyond the precision is a no-op. If we have only stored
618 OFFSET bits or fewer, the rest are already signs. */
619 if (offset >= precision || len >= xlen)
620 {
621 for (unsigned i = 0; i < xlen; ++i)
622 val[i] = xval[i];
623 return xlen;
624 }
625 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
626 for (unsigned int i = 0; i < len; i++)
627 val[i] = xval[i];
628 if (suboffset > 0)
629 {
630 val[len] = sext_hwi (xval[len], suboffset);
631 len += 1;
632 }
633 return canonize (val, len, precision);
634}
635
636/* Zero-extend the number represented by XVAL and XLEN into VAL,
637 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
638 and VAL have PRECISION bits. */
639unsigned int
640wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
641 unsigned int xlen, unsigned int precision, unsigned int offset)
642{
643 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
644 /* Extending beyond the precision is a no-op. If we have only stored
645 OFFSET bits or fewer, and the upper stored bit is zero, then there
646 is nothing to do. */
647 if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
648 {
649 for (unsigned i = 0; i < xlen; ++i)
650 val[i] = xval[i];
651 return xlen;
652 }
653 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
654 for (unsigned int i = 0; i < len; i++)
655 val[i] = i < xlen ? xval[i] : -1;
656 if (suboffset > 0)
657 val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
658 else
659 val[len] = 0;
660 return canonize (val, len + 1, precision);
661}
662
663/*
664 * Masking, inserting, shifting, rotating.
665 */
666
667/* Insert WIDTH bits from Y into X starting at START. */
668wide_int
669wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
670 unsigned int width)
671{
672 wide_int result;
673 wide_int mask;
674 wide_int tmp;
675
676 unsigned int precision = x.get_precision ();
677 if (start >= precision)
678 return x;
679
680 gcc_checking_assert (precision >= width);
681
682 if (start + width >= precision)
683 width = precision - start;
684
685 mask = wi::shifted_mask (start, width, false, precision);
686 tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
687 result = tmp & mask;
688
689 tmp = wi::bit_and_not (x, mask);
690 result = result | tmp;
691
692 return result;
693}
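/* Editorial sketch (not part of GCC): inserting a 4-bit field with
   wi::insert, using only the API above.  Kept under "#if 0".  */
#if 0
static void
insert_example (void)
{
  wide_int x = wi::uhwi (0xabcd, 32);
  wide_int y = wi::uhwi (0xf, 32);
  /* Replace bits [4,8) of X with the low 4 bits of Y.  */
  wide_int r = wi::insert (x, y, 4, 4);
  gcc_assert (r.to_uhwi () == 0xabfd);
}
#endif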
694
695/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
696 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
697 bits. */
698unsigned int
699wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
700 unsigned int xlen, unsigned int precision, unsigned int bit)
701{
702 unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
703 unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
704
705 if (block + 1 >= xlen)
706 {
707 /* The operation either affects the last current block or needs
708 a new block. */
709 unsigned int len = block + 1;
710 for (unsigned int i = 0; i < len; i++)
711 val[i] = safe_uhwi (xval, xlen, i);
712 val[block] |= HOST_WIDE_INT_1U << subbit;
713
714 /* If the bit we just set is at the msb of the block, make sure
715 that any higher bits are zeros. */
716 if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
717 {
718 val[len++] = 0;
719 return len;
720 }
721 return canonize (val, len, precision);
722 }
723 else
724 {
725 for (unsigned int i = 0; i < xlen; i++)
726 val[i] = xval[i];
727 val[block] |= HOST_WIDE_INT_1U << subbit;
728 return canonize (val, xlen, precision);
729 }
730}
731
732/* Byte swap the integer represented by XVAL and LEN into VAL. Return
733 the number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
734unsigned int
735wi::bswap_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
736 unsigned int len, unsigned int precision)
737{
738 unsigned int i, s;
739
740 /* This is not a well defined operation if the precision is not a
741 multiple of 8. */
742 gcc_assert ((precision & 0x7) == 0);
743
744 for (i = 0; i < len; i++)
745 val[i] = 0;
746
747 /* Only swap the bytes that are not the padding. */
748 for (s = 0; s < precision; s += 8)
749 {
750 unsigned int d = precision - s - 8;
751 unsigned HOST_WIDE_INT byte;
752
753 unsigned int block = s / HOST_BITS_PER_WIDE_INT;
754 unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
755
756 byte = (safe_uhwi (xval, len, block) >> offset) & 0xff;
757
758 block = d / HOST_BITS_PER_WIDE_INT;
759 offset = d & (HOST_BITS_PER_WIDE_INT - 1);
760
761 val[block] |= byte << offset;
762 }
763
764 return canonize (val, len, precision);
765}
766
767/* Bitreverse the integer represented by XVAL and LEN into VAL. Return
768 the number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
769unsigned int
770wi::bitreverse_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
771 unsigned int len, unsigned int precision)
772{
773 unsigned int i, s;
774
775 for (i = 0; i < len; i++)
776 val[i] = 0;
777
778 for (s = 0; s < precision; s++)
779 {
780 unsigned int block = s / HOST_BITS_PER_WIDE_INT;
781 unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
782 if (((safe_uhwi (xval, len, block) >> offset) & 1) != 0)
783 {
784 unsigned int d = (precision - 1) - s;
785 block = d / HOST_BITS_PER_WIDE_INT;
786 offset = d & (HOST_BITS_PER_WIDE_INT - 1);
787 val[block] |= HOST_WIDE_INT_1U << offset;
788 }
789 }
790
791 return canonize (val, len, precision);
792}
793
794/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
795 above that up to PREC are zeros. The result is inverted if NEGATE
796 is true. Return the number of blocks in VAL. */
797unsigned int
798wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
799 unsigned int prec)
800{
801 if (width >= prec)
802 {
803 val[0] = negate ? 0 : -1;
804 return 1;
805 }
806 else if (width == 0)
807 {
808 val[0] = negate ? -1 : 0;
809 return 1;
810 }
811
812 unsigned int i = 0;
813 while (i < width / HOST_BITS_PER_WIDE_INT)
814 val[i++] = negate ? 0 : -1;
815
816 unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
817 if (shift != 0)
818 {
819 HOST_WIDE_INT last = (HOST_WIDE_INT_1U << shift) - 1;
820 val[i++] = negate ? ~last : last;
821 }
822 else
823 val[i++] = negate ? -1 : 0;
824
825 return i;
826}
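/* Editorial sketch (not part of GCC): the two flavours of mask that the
   function above produces, via the wi::mask wrapper.  Kept under
   "#if 0".  */
#if 0
static void
mask_example (void)
{
  /* Low 5 bits set in a 32-bit value: 0x1f.  */
  gcc_assert (wi::mask (5, false, 32).to_uhwi () == 0x1f);
  /* The inverted form: everything except the low 5 bits.  */
  gcc_assert (wi::mask (5, true, 32).to_uhwi () == 0xffffffe0);
}
#endif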
827
828/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
829 bits are ones, and the bits above that up to PREC are zeros. The result
830 is inverted if NEGATE is true. Return the number of blocks in VAL. */
831unsigned int
832wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
833 bool negate, unsigned int prec)
834{
835 if (start >= prec || width == 0)
836 {
837 val[0] = negate ? -1 : 0;
838 return 1;
839 }
840
841 if (width > prec - start)
842 width = prec - start;
843 unsigned int end = start + width;
844
845 unsigned int i = 0;
846 while (i < start / HOST_BITS_PER_WIDE_INT)
847 val[i++] = negate ? -1 : 0;
848
849 unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
850 if (shift)
851 {
852 HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
853 shift += width;
854 if (shift < HOST_BITS_PER_WIDE_INT)
855 {
856 /* case 000111000 */
857 block = (HOST_WIDE_INT_1U << shift) - block - 1;
858 val[i++] = negate ? ~block : block;
859 return i;
860 }
861 else
862 /* ...111000 */
863 val[i++] = negate ? block : ~block;
864 }
865
866 if (end >= prec)
867 {
868 if (!shift)
869 val[i++] = negate ? 0 : -1;
870 return i;
871 }
872
873 while (i < end / HOST_BITS_PER_WIDE_INT)
874 /* 1111111 */
875 val[i++] = negate ? 0 : -1;
876
877 shift = end & (HOST_BITS_PER_WIDE_INT - 1);
878 if (shift != 0)
879 {
880 /* 000011111 */
881 HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
882 val[i++] = negate ? ~block : block;
883 }
884 else
885 val[i++] = negate ? -1 : 0;
886
887 return i;
888}
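/* Editorial sketch (not part of GCC): a shifted mask with START == 4
   and WIDTH == 8 in a 32-bit value, via the wi::shifted_mask wrapper.
   Kept under "#if 0".  */
#if 0
static void
shifted_mask_example (void)
{
  /* Bits [4,12) set: 0xff0.  */
  gcc_assert (wi::shifted_mask (4, 8, false, 32).to_uhwi () == 0xff0);
  /* The inverted form clears exactly those bits.  */
  gcc_assert (wi::shifted_mask (4, 8, true, 32).to_uhwi () == 0xfffff00f);
}
#endif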
889
890/*
891 * logical operations.
892 */
893
894/* Set VAL to OP0 & OP1. Return the number of blocks used. */
895unsigned int
896wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
897 unsigned int op0len, const HOST_WIDE_INT *op1,
898 unsigned int op1len, unsigned int prec)
899{
900 int l0 = op0len - 1;
901 int l1 = op1len - 1;
902 bool need_canon = true;
903
904 unsigned int len = MAX (op0len, op1len);
905 if (l0 > l1)
906 {
907 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
908 if (op1mask == 0)
909 {
910 l0 = l1;
911 len = l1 + 1;
912 }
913 else
914 {
915 need_canon = false;
916 while (l0 > l1)
917 {
918 val[l0] = op0[l0];
919 l0--;
920 }
921 }
922 }
923 else if (l1 > l0)
924 {
925 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
926 if (op0mask == 0)
927 len = l0 + 1;
928 else
929 {
930 need_canon = false;
931 while (l1 > l0)
932 {
933 val[l1] = op1[l1];
934 l1--;
935 }
936 }
937 }
938
939 while (l0 >= 0)
940 {
941 val[l0] = op0[l0] & op1[l0];
942 l0--;
943 }
944
945 if (need_canon)
946 len = canonize (val, len, prec);
947
948 return len;
949}
950
951/* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
952unsigned int
953wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
954 unsigned int op0len, const HOST_WIDE_INT *op1,
955 unsigned int op1len, unsigned int prec)
956{
957 wide_int result;
958 int l0 = op0len - 1;
959 int l1 = op1len - 1;
960 bool need_canon = true;
961
962 unsigned int len = MAX (op0len, op1len);
963 if (l0 > l1)
964 {
965 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
966 if (op1mask != 0)
967 {
968 l0 = l1;
969 len = l1 + 1;
970 }
971 else
972 {
973 need_canon = false;
974 while (l0 > l1)
975 {
976 val[l0] = op0[l0];
977 l0--;
978 }
979 }
980 }
981 else if (l1 > l0)
982 {
983 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
984 if (op0mask == 0)
985 len = l0 + 1;
986 else
987 {
988 need_canon = false;
989 while (l1 > l0)
990 {
991 val[l1] = ~op1[l1];
992 l1--;
993 }
994 }
995 }
996
997 while (l0 >= 0)
998 {
999 val[l0] = op0[l0] & ~op1[l0];
1000 l0--;
1001 }
1002
1003 if (need_canon)
1004 len = canonize (val, len, prec);
1005
1006 return len;
1007}
1008
1009/* Set VAL to OP0 | OP1. Return the number of blocks used. */
1010unsigned int
1011wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1012 unsigned int op0len, const HOST_WIDE_INT *op1,
1013 unsigned int op1len, unsigned int prec)
1014{
1015 wide_int result;
1016 int l0 = op0len - 1;
1017 int l1 = op1len - 1;
1018 bool need_canon = true;
1019
1020 unsigned int len = MAX (op0len, op1len);
1021 if (l0 > l1)
1022 {
1023 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1024 if (op1mask != 0)
1025 {
1026 l0 = l1;
1027 len = l1 + 1;
1028 }
1029 else
1030 {
1031 need_canon = false;
1032 while (l0 > l1)
1033 {
1034 val[l0] = op0[l0];
1035 l0--;
1036 }
1037 }
1038 }
1039 else if (l1 > l0)
1040 {
1041 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1042 if (op0mask != 0)
1043 len = l0 + 1;
1044 else
1045 {
1046 need_canon = false;
1047 while (l1 > l0)
1048 {
1049 val[l1] = op1[l1];
1050 l1--;
1051 }
1052 }
1053 }
1054
1055 while (l0 >= 0)
1056 {
1057 val[l0] = op0[l0] | op1[l0];
1058 l0--;
1059 }
1060
1061 if (need_canon)
1062 len = canonize (val, len, prec);
1063
1064 return len;
1065}
1066
1067/* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
1068unsigned int
1069wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1070 unsigned int op0len, const HOST_WIDE_INT *op1,
1071 unsigned int op1len, unsigned int prec)
1072{
1073 wide_int result;
1074 int l0 = op0len - 1;
1075 int l1 = op1len - 1;
1076 bool need_canon = true;
1077
1078 unsigned int len = MAX (op0len, op1len);
1079 if (l0 > l1)
1080 {
1081 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1082 if (op1mask == 0)
1083 {
1084 l0 = l1;
1085 len = l1 + 1;
1086 }
1087 else
1088 {
1089 need_canon = false;
1090 while (l0 > l1)
1091 {
1092 val[l0] = op0[l0];
1093 l0--;
1094 }
1095 }
1096 }
1097 else if (l1 > l0)
1098 {
1099 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1100 if (op0mask != 0)
1101 len = l0 + 1;
1102 else
1103 {
1104 need_canon = false;
1105 while (l1 > l0)
1106 {
1107 val[l1] = ~op1[l1];
1108 l1--;
1109 }
1110 }
1111 }
1112
1113 while (l0 >= 0)
1114 {
1115 val[l0] = op0[l0] | ~op1[l0];
1116 l0--;
1117 }
1118
1119 if (need_canon)
1120 len = canonize (val, len, prec);
1121
1122 return len;
1123}
1124
1125/* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1126unsigned int
1127wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1128 unsigned int op0len, const HOST_WIDE_INT *op1,
1129 unsigned int op1len, unsigned int prec)
1130{
1131 wide_int result;
1132 int l0 = op0len - 1;
1133 int l1 = op1len - 1;
1134
1135 unsigned int len = MAX (op0len, op1len);
1136 if (l0 > l1)
1137 {
1138 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1139 while (l0 > l1)
1140 {
1141 val[l0] = op0[l0] ^ op1mask;
1142 l0--;
1143 }
1144 }
1145
1146 if (l1 > l0)
1147 {
1148 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1149 while (l1 > l0)
1150 {
1151 val[l1] = op0mask ^ op1[l1];
1152 l1--;
1153 }
1154 }
1155
1156 while (l0 >= 0)
1157 {
1158 val[l0] = op0[l0] ^ op1[l0];
1159 l0--;
1160 }
1161
1162 return canonize (val, len, prec);
1163}
1164
1165/*
1166 * math
1167 */
1168
1169/* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1170 whether the result overflows when OP0 and OP1 are treated as having
1171 signedness SGN. Return the number of blocks in VAL. */
1172unsigned int
1173wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1174 unsigned int op0len, const HOST_WIDE_INT *op1,
1175 unsigned int op1len, unsigned int prec,
1176 signop sgn, wi::overflow_type *overflow)
1177{
1178 unsigned HOST_WIDE_INT o0 = 0;
1179 unsigned HOST_WIDE_INT o1 = 0;
1180 unsigned HOST_WIDE_INT x = 0;
1181 unsigned HOST_WIDE_INT carry = 0;
1182 unsigned HOST_WIDE_INT old_carry = 0;
1183 unsigned HOST_WIDE_INT mask0, mask1;
1184 unsigned int i;
1185
1186 unsigned int len = MAX (op0len, op1len);
1187 mask0 = -top_bit_of (op0, op0len, prec);
1188 mask1 = -top_bit_of (op1, op1len, prec);
1189 /* Add all of the explicitly defined elements. */
1190
1191 for (i = 0; i < len; i++)
1192 {
1193 o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
1194 o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
1195 x = o0 + o1 + carry;
1196 val[i] = x;
1197 old_carry = carry;
1198 carry = carry == 0 ? x < o0 : x <= o0;
1199 }
1200
1201 if (len * HOST_BITS_PER_WIDE_INT < prec)
1202 {
1203 val[len] = mask0 + mask1 + carry;
1204 len++;
1205 if (overflow)
1206 *overflow
1207 = (sgn == UNSIGNED && carry) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1208 }
1209 else if (overflow)
1210 {
1211 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1212 if (sgn == SIGNED)
1213 {
1214 unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
1215 if ((HOST_WIDE_INT) (x << shift) < 0)
1216 {
1217 if (o0 > (unsigned HOST_WIDE_INT) val[len - 1])
1218 *overflow = wi::OVF_UNDERFLOW;
1219 else if (o0 < (unsigned HOST_WIDE_INT) val[len - 1])
1220 *overflow = wi::OVF_OVERFLOW;
1221 else
1222 *overflow = wi::OVF_NONE;
1223 }
1224 else
1225 *overflow = wi::OVF_NONE;
1226 }
1227 else
1228 {
1229 /* Put the MSBs of X and O0 in the top of the HWI. */
1230 x <<= shift;
1231 o0 <<= shift;
1232 if (old_carry)
1233 *overflow = (x <= o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1234 else
1235 *overflow = (x < o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1236 }
1237 }
1238
1239 return canonize (val, len, prec);
1240}
1241
1242/* Subroutines of the multiplication and division operations. Unpack
1243 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1244 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1245 uncompressing the top bit of INPUT[IN_LEN - 1]. */
1246static void
1247wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
1248 unsigned int in_len, unsigned int out_len,
1249 unsigned int prec, signop sgn)
1250{
1251 unsigned int i;
1252 unsigned int j = 0;
1253 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
1254 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1255 HOST_WIDE_INT mask;
1256
1257 if (sgn == SIGNED)
1258 {
1259 mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
1260 mask &= HALF_INT_MASK;
1261 }
1262 else
1263 mask = 0;
1264
1265 for (i = 0; i < blocks_needed - 1; i++)
1266 {
1267 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1268 result[j++] = x;
1269 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1270 }
1271
1272 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1273 if (small_prec)
1274 {
1275 if (sgn == SIGNED)
1276 x = sext_hwi (x, small_prec);
1277 else
1278 x = zext_hwi (x, small_prec);
1279 }
1280 result[j++] = x;
1281 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1282
1283 /* Smear the sign bit. */
1284 while (j < out_len)
1285 result[j++] = mask;
1286}
1287
1288/* The inverse of wi_unpack. IN_LEN is the number of input
1289 blocks and PRECISION is the precision of the result. Return the
1290 number of blocks in the canonicalized result. */
1291static unsigned int
1292wi_pack (HOST_WIDE_INT *result,
1293 const unsigned HOST_HALF_WIDE_INT *input,
1294 unsigned int in_len, unsigned int precision)
1295{
1296 unsigned int i = 0;
1297 unsigned int j = 0;
1298 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
1299
1300 while (i + 1 < in_len)
1301 {
1302 result[j++] = ((unsigned HOST_WIDE_INT) input[i]
1303 | ((unsigned HOST_WIDE_INT) input[i + 1]
1304 << HOST_BITS_PER_HALF_WIDE_INT));
1305 i += 2;
1306 }
1307
1308 /* Handle the case where in_len is odd. For this we zero extend. */
1309 if (in_len & 1)
1310 result[j++] = (unsigned HOST_WIDE_INT) input[i];
1311 else if (j < blocks_needed)
1312 result[j++] = 0;
1313 return canonize (result, j, precision);
1314}
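/* Editorial sketch (not part of GCC): wi_unpack splits HOST_WIDE_INT
   blocks into half-width digits and wi_pack reassembles them, so a
   round trip reproduces the original block.  This assumes a 64-bit
   HOST_WIDE_INT (32-bit half blocks).  Kept under "#if 0".  */
#if 0
static void
pack_unpack_example (void)
{
  HOST_WIDE_INT x[1] = { 0x123456789abcdef0 };
  unsigned HOST_HALF_WIDE_INT halves[2];
  wi_unpack (halves, x, 1, 2, 64, UNSIGNED);
  /* halves[0] == 0x9abcdef0, halves[1] == 0x12345678.  */
  HOST_WIDE_INT y[1];
  gcc_assert (wi_pack (y, halves, 2, 64) == 1 && y[0] == x[0]);
}
#endif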
1315
1316/* Multiply Op1 by Op2. If HIGH is set, only the upper half of the
1317 result is returned.
1318
1319 If HIGH is not set, throw away the upper half after the check is
1320 made to see if it overflows. Unfortunately there is no better way
1321 to check for overflow than to do this. If OVERFLOW is nonnull,
1322 record in *OVERFLOW whether the result overflowed. SGN controls
1323 the signedness and is used to check overflow or if HIGH is set.
1324
1325 NOTE: Overflow type for signed overflow is not yet implemented. */
1326unsigned int
1327wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1328 unsigned int op1len, const HOST_WIDE_INT *op2val,
1329 unsigned int op2len, unsigned int prec, signop sgn,
1330 wi::overflow_type *overflow, bool high)
1331{
1332 unsigned HOST_WIDE_INT o0, o1, k, t;
1333 unsigned int i;
1334 unsigned int j;
1335
1336 /* If the top level routine did not really pass in an overflow, then
1337 just make sure that we never attempt to set it. */
1338 bool needs_overflow = (overflow != 0);
1339 if (needs_overflow)
1340 *overflow = wi::OVF_NONE;
1341
1342 wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1343 wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1344
1345 /* This is a surprisingly common case, so do it first. */
1346 if (op1 == 0 || op2 == 0)
1347 {
1348 val[0] = 0;
1349 return 1;
1350 }
1351
1352#ifdef umul_ppmm
1353 if (sgn == UNSIGNED)
1354 {
1355 /* If the inputs are single HWIs and the output has room for at
1356 least two HWIs, we can use umul_ppmm directly. */
1357 if (prec >= HOST_BITS_PER_WIDE_INT * 2
1358 && wi::fits_uhwi_p (op1)
1359 && wi::fits_uhwi_p (op2))
1360 {
1361 /* This case never overflows. */
1362 if (high)
1363 {
1364 val[0] = 0;
1365 return 1;
1366 }
1367 umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
1368 if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
1369 {
1370 val[2] = 0;
1371 return 3;
1372 }
1373 return 1 + (val[1] != 0 || val[0] < 0);
1374 }
1375 /* Likewise if the output is a full single HWI, except that the
1376 upper HWI of the result is only used for determining overflow.
1377 (We handle this case inline when overflow isn't needed.) */
1378 else if (prec == HOST_BITS_PER_WIDE_INT)
1379 {
1380 unsigned HOST_WIDE_INT upper;
1381 umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1382 if (needs_overflow)
1383 /* Unsigned overflow can only be +OVERFLOW. */
1384 *overflow = (upper != 0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1385 if (high)
1386 val[0] = upper;
1387 return 1;
1388 }
1389 }
1390#endif
1391
1392 /* Handle multiplications by 1. */
1393 if (op1 == 1)
1394 {
1395 if (high)
1396 {
1397 val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
1398 return 1;
1399 }
1400 for (i = 0; i < op2len; i++)
1401 val[i] = op2val[i];
1402 return op2len;
1403 }
1404 if (op2 == 1)
1405 {
1406 if (high)
1407 {
1408 val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
1409 return 1;
1410 }
1411 for (i = 0; i < op1len; i++)
1412 val[i] = op1val[i];
1413 return op1len;
1414 }
1415
1416 /* If we need to check for overflow, we can only do half wide
1417 multiplies quickly because we need to look at the top bits to
1418 check for the overflow. */
1419 if ((high || needs_overflow)
1420 && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1421 {
1422 unsigned HOST_WIDE_INT r;
1423
1424 if (sgn == SIGNED)
1425 {
1426 o0 = op1.to_shwi ();
1427 o1 = op2.to_shwi ();
1428 }
1429 else
1430 {
1431 o0 = op1.to_uhwi ();
1432 o1 = op2.to_uhwi ();
1433 }
1434
1435 r = o0 * o1;
1436 if (needs_overflow)
1437 {
1438 if (sgn == SIGNED)
1439 {
1440 if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
1441 /* FIXME: Signed overflow type is not implemented yet. */
1442 *overflow = OVF_UNKNOWN;
1443 }
1444 else
1445 {
1446 if ((r >> prec) != 0)
1447 /* Unsigned overflow can only be +OVERFLOW. */
1448 *overflow = OVF_OVERFLOW;
1449 }
1450 }
1451 val[0] = high ? r >> prec : r;
1452 return 1;
1453 }
1454
1455 /* The sizes here are scaled to support a 2x WIDE_INT_MAX_INL_PRECISION by 2x
1456 WIDE_INT_MAX_INL_PRECISION yielding a 4x WIDE_INT_MAX_INL_PRECISION
1457 result. */
1458
1459 unsigned HOST_HALF_WIDE_INT
1460 ubuf[4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
1461 unsigned HOST_HALF_WIDE_INT
1462 vbuf[4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
1463 /* The '2' in 'R' is because we are internally doing a full
1464 multiply. */
1465 unsigned HOST_HALF_WIDE_INT
1466 rbuf[2 * 4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
1467 const HOST_WIDE_INT mask
1468 = (HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1469 unsigned HOST_HALF_WIDE_INT *u = ubuf;
1470 unsigned HOST_HALF_WIDE_INT *v = vbuf;
1471 unsigned HOST_HALF_WIDE_INT *r = rbuf;
1472
1473 if (!high)
1474 prec = MIN ((op1len + op2len + 1) * HOST_BITS_PER_WIDE_INT, prec);
1475 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1476 unsigned int half_blocks_needed = blocks_needed * 2;
1477 if (UNLIKELY (prec > WIDE_INT_MAX_INL_PRECISION))
1478 {
1479 unsigned HOST_HALF_WIDE_INT *buf
1480 = XALLOCAVEC (unsigned HOST_HALF_WIDE_INT, 4 * half_blocks_needed);
1481 u = buf;
1482 v = u + half_blocks_needed;
1483 r = v + half_blocks_needed;
1484 }
1485
1486 /* We do unsigned mul and then correct it. */
1487 wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
1488 wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
1489
1490 /* The 2 is for a full mult. */
1491 memset (r, 0, half_blocks_needed * 2
1492 * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
1493
1494 for (j = 0; j < half_blocks_needed; j++)
1495 {
1496 k = 0;
1497 for (i = 0; i < half_blocks_needed; i++)
1498 {
1499 t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1500 + r[i + j] + k);
1501 r[i + j] = t & HALF_INT_MASK;
1502 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1503 }
1504 r[j + half_blocks_needed] = k;
1505 }
1506
1507 /* We did unsigned math above. For signed we must adjust the
1508 product (assuming we need to see that). */
1509 if (sgn == SIGNED && (high || needs_overflow))
1510 {
1511 unsigned HOST_WIDE_INT b;
1512 if (wi::neg_p (op1))
1513 {
1514 b = 0;
1515 for (i = 0; i < half_blocks_needed; i++)
1516 {
1517 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1518 - (unsigned HOST_WIDE_INT)v[i] - b;
1519 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1520 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1521 }
1522 }
1523 if (wi::neg_p (op2))
1524 {
1525 b = 0;
1526 for (i = 0; i < half_blocks_needed; i++)
1527 {
1528 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1529 - (unsigned HOST_WIDE_INT)u[i] - b;
1530 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1531 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1532 }
1533 }
1534 }
1535
1536 if (needs_overflow)
1537 {
1538 HOST_WIDE_INT top;
1539
1540 /* For unsigned, overflow is true if any of the top bits are set.
1541 For signed, overflow is true if any of the top bits are not equal
1542 to the sign bit. */
1543 if (sgn == UNSIGNED)
1544 top = 0;
1545 else
1546 {
1547 top = r[(half_blocks_needed) - 1];
1548 top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
1549 top &= mask;
1550 }
1551
1552 for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1553 if (((HOST_WIDE_INT)(r[i] & mask)) != top)
1554 /* FIXME: Signed overflow type is not implemented yet. */
1555 *overflow = (sgn == UNSIGNED) ? wi::OVF_OVERFLOW : wi::OVF_UNKNOWN;
1556 }
1557
1558 int r_offset = high ? half_blocks_needed : 0;
1559 return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
1560}
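/* Editorial sketch (not part of GCC): overflow detection through the
   public wi::mul wrapper, which funnels into mul_internal above.  Kept
   under "#if 0".  */
#if 0
static void
mul_overflow_example (void)
{
  wi::overflow_type ovf;
  /* 200 * 2 does not fit in 8 unsigned bits...  */
  wide_int a = wi::uhwi (200, 8);
  wide_int b = wi::uhwi (2, 8);
  wide_int r = wi::mul (a, b, UNSIGNED, &ovf);
  gcc_assert (ovf == wi::OVF_OVERFLOW);
  /* ...and only the low 8 bits of the product survive: 400 & 0xff.  */
  gcc_assert (r.to_uhwi () == 144);
}
#endif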
1561
1562/* Compute the population count of X. */
1563int
1564wi::popcount (const wide_int_ref &x)
1565{
1566 unsigned int i;
1567 int count;
1568
1569 /* The high order block is special if it is the last block and the
1570 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1571 have to clear out any ones above the precision before doing
1572 popcount on this block. */
1573 count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1574 unsigned int stop = x.len;
1575 if (count < 0)
1576 {
1577 count = popcount_hwi (x.uhigh () << -count);
1578 stop -= 1;
1579 }
1580 else
1581 {
1582 if (x.sign_mask () >= 0)
1583 count = 0;
1584 }
1585
1586 for (i = 0; i < stop; ++i)
1587 count += popcount_hwi (x.val[i]);
1588
1589 return count;
1590}
1591
1592/* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1593 whether the result overflows when OP0 and OP1 are treated as having
1594 signedness SGN. Return the number of blocks in VAL. */
1595unsigned int
1596wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1597 unsigned int op0len, const HOST_WIDE_INT *op1,
1598 unsigned int op1len, unsigned int prec,
1599 signop sgn, wi::overflow_type *overflow)
1600{
1601 unsigned HOST_WIDE_INT o0 = 0;
1602 unsigned HOST_WIDE_INT o1 = 0;
1603 unsigned HOST_WIDE_INT x = 0;
1604 /* We implement subtraction as an in place negate and add. Negation
1605 is just inversion and add 1, so we can do the add of 1 by just
1606 starting the borrow in of the first element at 1. */
1607 unsigned HOST_WIDE_INT borrow = 0;
1608 unsigned HOST_WIDE_INT old_borrow = 0;
1609
1610 unsigned HOST_WIDE_INT mask0, mask1;
1611 unsigned int i;
1612
1613 unsigned int len = MAX (op0len, op1len);
1614 mask0 = -top_bit_of (op0, op0len, prec);
1615 mask1 = -top_bit_of (op1, op1len, prec);
1616
1617 /* Subtract all of the explicitly defined elements. */
1618 for (i = 0; i < len; i++)
1619 {
1620 o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1621 o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1622 x = o0 - o1 - borrow;
1623 val[i] = x;
1624 old_borrow = borrow;
1625 borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1626 }
1627
1628 if (len * HOST_BITS_PER_WIDE_INT < prec)
1629 {
1630 val[len] = mask0 - mask1 - borrow;
1631 len++;
1632 if (overflow)
1633 *overflow = (sgn == UNSIGNED && borrow) ? OVF_UNDERFLOW : OVF_NONE;
1634 }
1635 else if (overflow)
1636 {
1637 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1638 if (sgn == SIGNED)
1639 {
1640 unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
1641 if ((HOST_WIDE_INT) (x << shift) < 0)
1642 {
1643 if (o0 > o1)
1644 *overflow = OVF_UNDERFLOW;
1645 else if (o0 < o1)
1646 *overflow = OVF_OVERFLOW;
1647 else
1648 *overflow = OVF_NONE;
1649 }
1650 else
1651 *overflow = OVF_NONE;
1652 }
1653 else
1654 {
1655 /* Put the MSBs of X and O0 in the top of the HWI. */
1656 x <<= shift;
1657 o0 <<= shift;
1658 if (old_borrow)
1659 *overflow = (x >= o0) ? OVF_UNDERFLOW : OVF_NONE;
1660 else
1661 *overflow = (x > o0) ? OVF_UNDERFLOW : OVF_NONE;
1662 }
1663 }
1664
1665 return canonize (val, len, prec);
1666}
1667
1668
1669/*
1670 * Division and Mod
1671 */
1672
1673/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1674 algorithm is a small modification of the algorithm in Hacker's
1675 Delight by Warren, which itself is a small modification of Knuth's
1676 algorithm. M is the number of significant elements of U; however,
1677 there needs to be at least one extra element of B_DIVIDEND
1678 allocated. N is the number of elements of B_DIVISOR.
1679 Return the new value of N. */
1680static int
1681divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1682 unsigned HOST_HALF_WIDE_INT *b_remainder,
1683 unsigned HOST_HALF_WIDE_INT *b_dividend,
1684 unsigned HOST_HALF_WIDE_INT *b_divisor,
1685 int m, int n)
1686{
1687 /* The "digits" are a HOST_HALF_WIDE_INT, which is half the size of a
1688 HOST_WIDE_INT, stored in the lower bits of each word. This
1689 algorithm should work properly on both 32 and 64 bit
1690 machines. */
1691 unsigned HOST_WIDE_INT b
1692 = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
1693 unsigned HOST_WIDE_INT qhat; /* Estimate of quotient digit. */
1694 unsigned HOST_WIDE_INT rhat; /* A remainder. */
1695 unsigned HOST_WIDE_INT p; /* Product of two digits. */
1696 HOST_WIDE_INT t, k;
1697 int i, j, s;
1698
1699 /* Single digit divisor. */
1700 if (n == 1)
1701 {
1702 k = 0;
1703 for (j = m - 1; j >= 0; j--)
1704 {
1705 b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1706 k = ((k * b + b_dividend[j])
1707 - ((unsigned HOST_WIDE_INT)b_quotient[j]
1708 * (unsigned HOST_WIDE_INT)b_divisor[0]));
1709 }
1710 b_remainder[0] = k;
1711 return 1;
1712 }
1713
1714 s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1715
1716 if (s)
1717 {
1718 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1719 algorithm, we can overwrite b_dividend and b_divisor, so we do
1720 that. */
1721 for (i = n - 1; i > 0; i--)
1722 b_divisor[i] = (b_divisor[i] << s)
1723 | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1724 b_divisor[0] = b_divisor[0] << s;
1725
1726 b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1727 for (i = m - 1; i > 0; i--)
1728 b_dividend[i] = (b_dividend[i] << s)
1729 | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1730 b_dividend[0] = b_dividend[0] << s;
1731 }
1732
1733 /* Main loop. */
1734 for (j = m - n; j >= 0; j--)
1735 {
1736 qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1737 rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1738 again:
1739 if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1740 {
1741 qhat -= 1;
1742 rhat += b_divisor[n-1];
1743 if (rhat < b)
1744 goto again;
1745 }
1746
1747 /* Multiply and subtract. */
1748 k = 0;
1749 for (i = 0; i < n; i++)
1750 {
1751 p = qhat * b_divisor[i];
1752 t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1753 b_dividend[i + j] = t;
1754 k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1755 - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1756 }
1757 t = b_dividend[j+n] - k;
1758 b_dividend[j+n] = t;
1759
1760 b_quotient[j] = qhat;
1761 if (t < 0)
1762 {
1763 b_quotient[j] -= 1;
1764 k = 0;
1765 for (i = 0; i < n; i++)
1766 {
1767 t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1768 b_dividend[i+j] = t;
1769 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1770 }
1771 b_dividend[j+n] += k;
1772 }
1773 }
1774 /* If N > M, the main loop was skipped, quotient will be 0 and
1775 we can't copy more than M half-limbs into the remainder, as they
1776 aren't present in b_dividend (which has only M + 1 half-limbs). */
1777 n = MIN (n, m);
1778 if (s)
1779 for (i = 0; i < n; i++)
1780 b_remainder[i] = (b_dividend[i] >> s)
1781 | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1782 else
1783 for (i = 0; i < n; i++)
1784 b_remainder[i] = b_dividend[i];
1785 return n;
1786}
1787
1788
1789/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1790 the result. If QUOTIENT is nonnull, store the value of the quotient
1791 there and return the number of blocks in it. The return value is
1792 not defined otherwise. If REMAINDER is nonnull, store the value
1793 of the remainder there and store the number of blocks in
1794 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1795 the division overflowed. */
1796unsigned int
1797wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1798 HOST_WIDE_INT *remainder,
1799 const HOST_WIDE_INT *dividend_val,
1800 unsigned int dividend_len, unsigned int dividend_prec,
1801 const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1802 unsigned int divisor_prec, signop sgn,
1803 wi::overflow_type *oflow)
1804{
1805 unsigned int m, n;
1806 bool dividend_neg = false;
1807 bool divisor_neg = false;
1808 bool overflow = false;
1809 wide_int neg_dividend, neg_divisor;
1810
1811 wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1812 dividend_prec);
1813 wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1814 divisor_prec);
1815 if (divisor == 0)
1816 overflow = true;
1817
1818 /* The smallest signed number / -1 causes overflow. The dividend_len
1819 check is for speed rather than correctness. */
1820 if (sgn == SIGNED
1821 && dividend_len == BLOCKS_NEEDED (dividend_prec)
1822 && divisor == -1
1823 && wi::only_sign_bit_p (dividend))
1824 overflow = true;
1825
1826 /* Handle the overflow cases. Viewed as unsigned value, the quotient of
1827 (signed min / -1) has the same representation as the original dividend.
1828 We have traditionally made division by zero act as division by one,
1829 so there too we use the original dividend. */
1830 if (overflow)
1831 {
1832 if (remainder)
1833 {
1834 *remainder_len = 1;
1835 remainder[0] = 0;
1836 }
1837 if (oflow)
1838 *oflow = OVF_OVERFLOW;
1839 if (quotient)
1840 for (unsigned int i = 0; i < dividend_len; ++i)
1841 quotient[i] = dividend_val[i];
1842 return dividend_len;
1843 }
1844
1845 if (oflow)
1846 *oflow = OVF_NONE;
1847
1848 /* Do it on the host if you can. */
1849 if (sgn == SIGNED
1850 && wi::fits_shwi_p (dividend)
1851 && wi::fits_shwi_p (divisor))
1852 {
1853 HOST_WIDE_INT o0 = dividend.to_shwi ();
1854 HOST_WIDE_INT o1 = divisor.to_shwi ();
1855
1856 if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1857 {
1858 gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1859 if (quotient)
1860 {
1861 quotient[0] = HOST_WIDE_INT_MIN;
1862 quotient[1] = 0;
1863 }
1864 if (remainder)
1865 {
1866 remainder[0] = 0;
1867 *remainder_len = 1;
1868 }
1869 return 2;
1870 }
1871 else
1872 {
1873 if (quotient)
1874 quotient[0] = o0 / o1;
1875 if (remainder)
1876 {
1877 remainder[0] = o0 % o1;
1878 *remainder_len = 1;
1879 }
1880 return 1;
1881 }
1882 }
1883
1884 if (sgn == UNSIGNED
1885 && wi::fits_uhwi_p (dividend)
1886 && wi::fits_uhwi_p (divisor))
1887 {
1888 unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1889 unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1890 unsigned int quotient_len = 1;
1891
1892 if (quotient)
1893 {
1894 quotient[0] = o0 / o1;
1895 quotient_len = canonize_uhwi (quotient, dividend_prec);
1896 }
1897 if (remainder)
1898 {
1899 remainder[0] = o0 % o1;
1900 *remainder_len = canonize_uhwi (remainder, dividend_prec);
1901 }
1902 return quotient_len;
1903 }
1904
1905 /* Make the divisor and dividend positive and remember what we
1906 did. */
1907 if (sgn == SIGNED)
1908 {
1909 if (wi::neg_p (dividend))
1910 {
1911 neg_dividend = -dividend;
1912 dividend = neg_dividend;
1913 dividend_neg = true;
1914 }
1915 if (wi::neg_p (divisor))
1916 {
1917 neg_divisor = -divisor;
1918 divisor = neg_divisor;
1919 divisor_neg = true;
1920 }
1921 }
1922
0d00385e
JJ
1923 unsigned HOST_HALF_WIDE_INT
1924 b_quotient_buf[4 * WIDE_INT_MAX_INL_PRECISION
1925 / HOST_BITS_PER_HALF_WIDE_INT];
1926 unsigned HOST_HALF_WIDE_INT
1927 b_remainder_buf[4 * WIDE_INT_MAX_INL_PRECISION
1928 / HOST_BITS_PER_HALF_WIDE_INT];
1929 unsigned HOST_HALF_WIDE_INT
1930 b_dividend_buf[(4 * WIDE_INT_MAX_INL_PRECISION
1931 / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1932 unsigned HOST_HALF_WIDE_INT
1933 b_divisor_buf[4 * WIDE_INT_MAX_INL_PRECISION
1934 / HOST_BITS_PER_HALF_WIDE_INT];
1935 unsigned HOST_HALF_WIDE_INT *b_quotient = b_quotient_buf;
1936 unsigned HOST_HALF_WIDE_INT *b_remainder = b_remainder_buf;
1937 unsigned HOST_HALF_WIDE_INT *b_dividend = b_dividend_buf;
1938 unsigned HOST_HALF_WIDE_INT *b_divisor = b_divisor_buf;
1939
1940 if (sgn == SIGNED || dividend_val[dividend_len - 1] >= 0)
1941 dividend_prec = MIN ((dividend_len + 1) * HOST_BITS_PER_WIDE_INT,
1942 dividend_prec);
1943 if (sgn == SIGNED || divisor_val[divisor_len - 1] >= 0)
1944 divisor_prec = MIN (divisor_len * HOST_BITS_PER_WIDE_INT, divisor_prec);
1945 unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1946 unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1947 if (UNLIKELY (dividend_prec > WIDE_INT_MAX_INL_PRECISION)
1948 || UNLIKELY (divisor_prec > WIDE_INT_MAX_INL_PRECISION))
1949 {
1950 unsigned HOST_HALF_WIDE_INT *buf
1951 = XALLOCAVEC (unsigned HOST_HALF_WIDE_INT,
248bf197
JJ
1952 3 * dividend_blocks_needed + 1
1953 + divisor_blocks_needed);
0d00385e 1954 b_quotient = buf;
248bf197
JJ
1955 b_remainder = b_quotient + dividend_blocks_needed;
1956 b_dividend = b_remainder + dividend_blocks_needed;
1957 b_divisor = b_dividend + dividend_blocks_needed + 1;
0d00385e 1958 memset (b_quotient, 0,
248bf197 1959 dividend_blocks_needed * sizeof (HOST_HALF_WIDE_INT));
0d00385e 1960 }
807e902e 1961 wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
ece79960 1962 dividend_blocks_needed, dividend_prec, UNSIGNED);
807e902e 1963 wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
ece79960 1964 divisor_blocks_needed, divisor_prec, UNSIGNED);
807e902e
KZ
1965
1966 m = dividend_blocks_needed;
b30ea138 1967 b_dividend[m] = 0;
807e902e
KZ
1968 while (m > 1 && b_dividend[m - 1] == 0)
1969 m--;
1970
1971 n = divisor_blocks_needed;
1972 while (n > 1 && b_divisor[n - 1] == 0)
1973 n--;
1974
0d00385e
JJ
1975 if (b_quotient == b_quotient_buf)
1976 memset (b_quotient_buf, 0, sizeof (b_quotient_buf));
807e902e 1977
248bf197 1978 n = divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
807e902e
KZ
1979
1980 unsigned int quotient_len = 0;
1981 if (quotient)
1982 {
5ee31e57 1983 quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
807e902e
KZ
1984 /* The quotient is negative if exactly one of the divisor or dividend is
1985 negative. */
1986 if (dividend_neg != divisor_neg)
1987 quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1988 quotient_len, dividend_prec,
1989 UNSIGNED, 0);
1990 }
1991
1992 if (remainder)
1993 {
5ee31e57 1994 *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
807e902e
KZ
1995 /* The remainder always has the same sign as the dividend. */
1996 if (dividend_neg)
1997 *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1998 *remainder_len, dividend_prec,
1999 UNSIGNED, 0);
2000 }
2001
2002 return quotient_len;
2003}
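/* As an illustration of the sign handling above: dividing -7 by 2
   with SGN == SIGNED truncates toward zero, giving quotient -3 and
   remainder -1.  The magnitudes divide as 7 / 2 = 3 remainder 1; the
   quotient is negated because exactly one operand is negative, and
   the remainder is negated to keep the sign of the dividend.  */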
2004
2005/*
2006 * Shifting, rotating and extraction.
2007 */
2008
2009/* Left shift XVAL by SHIFT and store the result in VAL. Return the
2010 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
2011unsigned int
2012wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
2013 unsigned int xlen, unsigned int precision,
2014 unsigned int shift)
2015{
2016 /* Split the shift into a whole-block shift and a subblock shift. */
2017 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
2018 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
2019
2020 /* The whole-block shift fills with zeros. */
2021 unsigned int len = BLOCKS_NEEDED (precision);
0d00385e 2022 len = MIN (xlen + skip + 1, len);
807e902e
KZ
2023 for (unsigned int i = 0; i < skip; ++i)
2024 val[i] = 0;
2025
2026 /* It's easier to handle the simple block case specially. */
2027 if (small_shift == 0)
2028 for (unsigned int i = skip; i < len; ++i)
2029 val[i] = safe_uhwi (xval, xlen, i - skip);
2030 else
2031 {
2032 /* The first unfilled output block is a left shift of the first
2033 block in XVAL. The other output blocks contain bits from two
2034 consecutive input blocks. */
2035 unsigned HOST_WIDE_INT carry = 0;
2036 for (unsigned int i = skip; i < len; ++i)
2037 {
2038 unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
2039 val[i] = (x << small_shift) | carry;
2040 carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
2041 }
2042 }
2043 return canonize (val, len, precision);
2044}
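/* For example, with 64-bit blocks a shift of 68 splits into SKIP = 1
   and SMALL_SHIFT = 4: output block 0 is zero, and each output block
   above it holds the low 60 bits of the input block one position
   down, shifted up by 4, with the top 4 bits of the input block two
   positions down (implicitly zero for the first such block) in its
   low bits.  */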
2045
0d00385e 2046/* Right shift XVAL by SHIFT and store the result in VAL. LEN is the
807e902e
KZ
2047 number of blocks in VAL. The input has XPRECISION bits and the
2048 output has XPRECISION - SHIFT bits. */
0d00385e 2049static void
807e902e 2050rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
0d00385e 2051 unsigned int xlen, unsigned int shift, unsigned int len)
807e902e
KZ
2052{
2053 /* Split the shift into a whole-block shift and a subblock shift. */
2054 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
2055 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
2056
807e902e
KZ
2057 /* It's easier to handle the simple block case specially. */
2058 if (small_shift == 0)
2059 for (unsigned int i = 0; i < len; ++i)
2060 val[i] = safe_uhwi (xval, xlen, i + skip);
2061 else
2062 {
2063 /* Each output block but the last is a combination of two input blocks.
2064 The last block is a right shift of the last block in XVAL. */
2065 unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
2066 for (unsigned int i = 0; i < len; ++i)
2067 {
2068 val[i] = curr >> small_shift;
2069 curr = safe_uhwi (xval, xlen, i + skip + 1);
2070 val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
2071 }
2072 }
807e902e
KZ
2073}
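/* For example, a shift of 70 with 64-bit blocks gives SKIP = 1 and
   SMALL_SHIFT = 6: output block I is input block I + 1 shifted right
   by 6, with the low 6 bits of input block I + 2 moved into its top
   bits.  */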
2074
2075/* Logically right shift XVAL by SHIFT and store the result in VAL.
2076 Return the number of blocks in VAL. XVAL has XPRECISION bits and
2077 VAL has PRECISION bits. */
2078unsigned int
2079wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
2080 unsigned int xlen, unsigned int xprecision,
2081 unsigned int precision, unsigned int shift)
2082{
0d00385e
JJ
2083 /* Work out how many blocks are needed to store the significant bits
2084 (excluding the upper zeros or signs). */
2085 unsigned int blocks_needed = BLOCKS_NEEDED (xprecision - shift);
2086 unsigned int len = blocks_needed;
2087 if (len > xlen && xval[xlen - 1] >= 0)
2088 len = xlen;
2089
2090 rshift_large_common (val, xval, xlen, shift, len);
807e902e
KZ
2091
2092 /* The value we just created has precision XPRECISION - SHIFT.
2093 Zero-extend it to wider precisions. */
0d00385e 2094 if (precision > xprecision - shift && len == blocks_needed)
807e902e
KZ
2095 {
2096 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
2097 if (small_prec)
2098 val[len - 1] = zext_hwi (val[len - 1], small_prec);
2099 else if (val[len - 1] < 0)
2100 {
2101 /* Add a new block with a zero. */
2102 val[len++] = 0;
2103 return len;
2104 }
2105 }
2106 return canonize (val, len, precision);
2107}
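/* For example, logically shifting the 16-bit value 0x8000 right by 4
   gives 0x0800: the vacated upper bits are filled with zeros, and the
   zero-extension step above keeps them clear when the result
   precision is wider than XPRECISION - SHIFT.  */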
2108
2109/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
2110 Return the number of blocks in VAL. XVAL has XPRECISION bits and
2111 VAL has PRECISION bits. */
2112unsigned int
2113wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
2114 unsigned int xlen, unsigned int xprecision,
2115 unsigned int precision, unsigned int shift)
2116{
0d00385e
JJ
2117 /* Work out how many blocks are needed to store the significant bits
2118 (excluding the upper zeros or signs). */
2119 unsigned int blocks_needed = BLOCKS_NEEDED (xprecision - shift);
2120 unsigned int len = MIN (xlen, blocks_needed);
2121
2122 rshift_large_common (val, xval, xlen, shift, len);
807e902e
KZ
2123
2124 /* The value we just created has precision XPRECISION - SHIFT.
2125 Sign-extend it to wider precisions. */
0d00385e 2126 if (precision > xprecision - shift && len == blocks_needed)
807e902e
KZ
2127 {
2128 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
2129 if (small_prec)
2130 val[len - 1] = sext_hwi (val[len - 1], small_prec);
2131 }
2132 return canonize (val, len, precision);
2133}
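/* For example, arithmetically shifting the 16-bit value 0x8000
   (i.e. -32768) right by 4 gives 0xf800 (-2048): the sign bit is
   replicated into the vacated upper bits by the sign-extension step
   above.  */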
2134
2135/* Return the number of leading (upper) zeros in X. */
2136int
2137wi::clz (const wide_int_ref &x)
2138{
74cb45e6
RS
2139 if (x.sign_mask () < 0)
2140 /* The upper bit is set, so there are no leading zeros. */
2141 return 0;
2142
807e902e
KZ
2143 /* Calculate how many bits there are above the highest represented block. */
2144 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2145
2146 unsigned HOST_WIDE_INT high = x.uhigh ();
2147 if (count < 0)
2148 /* The upper -COUNT bits of HIGH are not part of the value.
2149 Clear them out. */
2150 high = (high << -count) >> -count;
807e902e
KZ
2151
2152 /* We don't need to look below HIGH. Either HIGH is nonzero,
2153 or the top bit of the block below is nonzero; clz_hwi is
2154 HOST_BITS_PER_WIDE_INT in the latter case. */
2155 return count + clz_hwi (high);
2156}
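/* For example, the value 1 at precision 128 is stored as a single
   block, so COUNT above is 128 - 64 = 64; clz_hwi (1) is 63, giving
   the expected 127 leading zeros.  */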
2157
2158/* Return the number of redundant sign bits in X. (That is, the number
2159 of bits immediately below the sign bit that have the same value as
2160 the sign bit.) */
2161int
2162wi::clrsb (const wide_int_ref &x)
2163{
2164 /* Calculate how many bits there are above the highest represented block. */
2165 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2166
2167 unsigned HOST_WIDE_INT high = x.uhigh ();
2168 unsigned HOST_WIDE_INT mask = -1;
2169 if (count < 0)
2170 {
2171 /* The upper -COUNT bits of HIGH are not part of the value.
2172 Clear them from both MASK and HIGH. */
2173 mask >>= -count;
2174 high &= mask;
2175 }
2176
2177 /* If the top bit is 1, count the number of leading 1s. If the top
2178 bit is zero, count the number of leading zeros. */
2179 if (high > mask / 2)
2180 high ^= mask;
2181
2182 /* There are no sign bits below the top block, so we don't need to look
2183 beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2184 HIGH is 0. */
2185 return count + clz_hwi (high) - 1;
2186}
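/* For example, -2 at precision 64 is the single block
   0xfffffffffffffffe: COUNT is 0, flipping HIGH against MASK leaves 1,
   clz_hwi (1) is 63, and the result is 62, the number of bits below
   the sign bit that repeat it.  */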
2187
2188/* Return the number of trailing (lower) zeros in X. */
2189int
2190wi::ctz (const wide_int_ref &x)
2191{
2192 if (x.len == 1 && x.ulow () == 0)
2193 return x.precision;
2194
2195 /* Having dealt with the zero case, there must be a block with a
2196 nonzero bit. We don't care about the bits above the first 1. */
2197 unsigned int i = 0;
2198 while (x.val[i] == 0)
2199 ++i;
2200 return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2201}
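/* For example, if the lowest block is 0 and the next block is 8, the
   loop above skips one block and ctz_hwi (8) is 3, giving
   64 + 3 = 67 trailing zeros.  */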
2202
2203/* If X is an exact power of 2, return the base-2 logarithm, otherwise
2204 return -1. */
2205int
2206wi::exact_log2 (const wide_int_ref &x)
2207{
2208 /* Reject cases where there are implicit -1 blocks above HIGH. */
2209 if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2210 return -1;
2211
2212 /* Set CRUX to the index of the entry that should be nonzero.
2213 If the top block is zero then the next lowest block (if any)
2214 must have the high bit set. */
2215 unsigned int crux = x.len - 1;
2216 if (crux > 0 && x.val[crux] == 0)
2217 crux -= 1;
2218
2219 /* Check that all lower blocks are zero. */
2220 for (unsigned int i = 0; i < crux; ++i)
2221 if (x.val[i] != 0)
2222 return -1;
2223
2224 /* Get a zero-extended form of block CRUX. */
2225 unsigned HOST_WIDE_INT hwi = x.val[crux];
2226 if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2227 hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2228
2229 /* Now it's down to whether HWI is a power of 2. */
2230 int res = ::exact_log2 (hwi);
2231 if (res >= 0)
2232 res += crux * HOST_BITS_PER_WIDE_INT;
2233 return res;
2234}
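/* For example, 1 << 100 at precision 128 is stored as the two blocks
   { 0, 1 << 36 }: CRUX is 1, the lower block is zero, and
   ::exact_log2 (1 << 36) is 36, so the result is 36 + 64 = 100.  */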
2235
2236/* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2237int
2238wi::floor_log2 (const wide_int_ref &x)
2239{
2240 return x.precision - 1 - clz (x);
2241}
2242
2243/* Return the index of the first (lowest) set bit in X, counting from 1.
2244 Return 0 if X is 0. */
2245int
2246wi::ffs (const wide_int_ref &x)
2247{
2248 return eq_p (x, 0) ? 0 : ctz (x) + 1;
2249}
2250
2251/* Return true if sign-extending X to have precision PRECISION would give
2252 the minimum signed value at that precision. */
2253bool
2254wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2255{
2256 return ctz (x) + 1 == int (precision);
2257}
2258
2259/* Return true if X represents the minimum signed value. */
2260bool
2261wi::only_sign_bit_p (const wide_int_ref &x)
2262{
2263 return only_sign_bit_p (x, x.precision);
2264}
2265
fff22900
RS
2266/* Return VAL if VAL has no bits set outside MASK. Otherwise round VAL
2267 down to the previous value that has no bits set outside MASK.
2268 This rounding wraps for signed values if VAL is negative and
2269 the top bit of MASK is clear.
2270
2271 For example, round_down_for_mask (6, 0xf1) would give 1 and
2272 round_down_for_mask (24, 0xf1) would give 17. */
2273
2274wide_int
2275wi::round_down_for_mask (const wide_int &val, const wide_int &mask)
2276{
2277 /* Get the bits in VAL that are outside the mask. */
2278 wide_int extra_bits = wi::bit_and_not (val, mask);
2279 if (extra_bits == 0)
2280 return val;
2281
2282 /* Get a mask that includes the top bit in EXTRA_BITS and is all 1s
2283 below that bit. */
2284 unsigned int precision = val.get_precision ();
2285 wide_int lower_mask = wi::mask (precision - wi::clz (extra_bits),
2286 false, precision);
2287
2288 /* Clear the bits that aren't in MASK, but ensure that all bits
2289 in MASK below the top cleared bit are set. */
2290 return (val & mask) | (mask & lower_mask);
2291}
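/* Tracing the second example above, round_down_for_mask (24, 0xf1):
   EXTRA_BITS = 24 & ~0xf1 = 8, LOWER_MASK covers bits 3 and below
   (0xf), and the result is (24 & 0xf1) | (0xf1 & 0xf) = 16 | 1 = 17.  */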
2292
2293/* Return VAL if VAL has no bits set outside MASK. Otherwise round VAL
2294 up to the next value that has no bits set outside MASK. The rounding
2295 wraps if there are no suitable values greater than VAL.
2296
2297 For example, round_up_for_mask (6, 0xf1) would give 16 and
2298 round_up_for_mask (24, 0xf1) would give 32. */
2299
2300wide_int
2301wi::round_up_for_mask (const wide_int &val, const wide_int &mask)
2302{
2303 /* Get the bits in VAL that are outside the mask. */
2304 wide_int extra_bits = wi::bit_and_not (val, mask);
2305 if (extra_bits == 0)
2306 return val;
2307
2308 /* Get a mask that is all 1s above the top bit in EXTRA_BITS. */
2309 unsigned int precision = val.get_precision ();
2310 wide_int upper_mask = wi::mask (precision - wi::clz (extra_bits),
2311 true, precision);
2312
2313 /* Get the bits of the mask that are above the top bit in EXTRA_BITS. */
2314 upper_mask &= mask;
2315
2316 /* Conceptually we need to:
2317
2318 - clear bits of VAL outside UPPER_MASK
2319 - add the lowest bit in UPPER_MASK to VAL (or add 0 if UPPER_MASK is 0)
2320 - propagate the carry through the bits of VAL in UPPER_MASK
2321
2322 If (~VAL & UPPER_MASK) is nonzero, the carry eventually
2323 reaches that bit and the process leaves all lower bits clear.
2324 If (~VAL & UPPER_MASK) is zero then the result is also zero. */
2325 wide_int tmp = wi::bit_and_not (upper_mask, val);
2326
2327 return (val | tmp) & -tmp;
2328}
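/* Tracing round_up_for_mask (24, 0xf1) from the comment above:
   EXTRA_BITS = 8, UPPER_MASK = 0xf0, TMP = 0xf0 & ~24 = 0xe0, and
   (24 | 0xe0) & -0xe0 = 32, matching the documented result.  */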
2329
28752261
MG
2330/* Compute the modular multiplicative inverse of A modulo B
2331 using extended Euclid's algorithm. Assumes A and B are coprime,
2332 and that A and B have the same precision. */
2333wide_int
2334wi::mod_inv (const wide_int &a, const wide_int &b)
2335{
2336 /* Verify the assumption. */
2337 gcc_checking_assert (wi::eq_p (wi::gcd (a, b), 1));
2338
2339 unsigned int p = a.get_precision () + 1;
2340 gcc_checking_assert (b.get_precision () + 1 == p);
2341 wide_int c = wide_int::from (a, p, UNSIGNED);
2342 wide_int d = wide_int::from (b, p, UNSIGNED);
2343 wide_int x0 = wide_int::from (0, p, UNSIGNED);
2344 wide_int x1 = wide_int::from (1, p, UNSIGNED);
2345
2346 if (wi::eq_p (b, 1))
2347 return wide_int::from (1, p, UNSIGNED);
2348
2349 while (wi::gt_p (c, 1, UNSIGNED))
2350 {
2351 wide_int t = d;
2352 wide_int q = wi::divmod_trunc (c, d, UNSIGNED, &d);
2353 c = t;
2354 wide_int s = x0;
2355 x0 = wi::sub (x1, wi::mul (q, x0));
2356 x1 = s;
2357 }
2358 if (wi::lt_p (x1, 0, SIGNED))
2359 x1 += d;
2360 return x1;
2361}
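/* A minimal usage sketch of the routine above, assuming two 32-bit
   coprime operands:

     wide_int a = wi::shwi (3, 32);
     wide_int b = wi::shwi (11, 32);
     wide_int inv = wi::mod_inv (a, b);

   INV is 4, since 3 * 4 = 12 == 1 (mod 11).  */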
2362
807e902e
KZ
2363/*
2364 * Private utilities.
2365 */
2366
2367void gt_ggc_mx (widest_int *) { }
2368void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2369void gt_pch_nx (widest_int *) { }
2370
2371template void wide_int::dump () const;
2372template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2373template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2374template void offset_int::dump () const;
2375template void widest_int::dump () const;
d9b950dd 2376
e61a4f52
AH
2377/* We could add all the above ::dump variants here, but wide_int and
2378 widest_int should handle the common cases. Besides, you can always
2379 call the dump method directly. */
2380
2381DEBUG_FUNCTION void
2382debug (const wide_int &ref)
2383{
2384 ref.dump ();
2385}
2386
2387DEBUG_FUNCTION void
2388debug (const wide_int *ptr)
2389{
2390 if (ptr)
2391 debug (*ptr);
2392 else
2393 fprintf (stderr, "<nil>\n");
2394}
2395
2396DEBUG_FUNCTION void
2397debug (const widest_int &ref)
2398{
2399 ref.dump ();
2400}
2401
2402DEBUG_FUNCTION void
2403debug (const widest_int *ptr)
2404{
2405 if (ptr)
2406 debug (*ptr);
2407 else
2408 fprintf (stderr, "<nil>\n");
2409}
d9b950dd
DM
2410
2411#if CHECKING_P
2412
2413namespace selftest {
2414
2415/* Selftests for wide ints. We run these multiple times, once per type. */
2416
2417/* Helper function for building a test value. */
2418
2419template <class VALUE_TYPE>
2420static VALUE_TYPE
2421from_int (int i);
2422
2423/* Specializations of the from_int helper for each wide-int type. */
2424
2425/* Specialization for VALUE_TYPE == wide_int. */
2426
2427template <>
2428wide_int
2429from_int (int i)
2430{
2431 return wi::shwi (i, 32);
2432}
2433
2434/* Specialization for VALUE_TYPE == offset_int. */
2435
2436template <>
2437offset_int
2438from_int (int i)
2439{
2440 return offset_int (i);
2441}
2442
2443/* Specialization for VALUE_TYPE == widest_int. */
2444
2445template <>
2446widest_int
2447from_int (int i)
2448{
2449 return widest_int (i);
2450}
2451
2452/* Verify that print_dec (WI, ..., SGN) gives the expected string
2453 representation (using base 10). */
2454
2455static void
2456assert_deceq (const char *expected, const wide_int_ref &wi, signop sgn)
2457{
0d00385e 2458 char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p = buf;
3bcc10b9
JJ
2459 unsigned len;
2460 if (print_dec_buf_size (wi, sgn, &len))
2461 p = XALLOCAVEC (char, len);
0d00385e
JJ
2462 print_dec (wi, p, sgn);
2463 ASSERT_STREQ (expected, p);
d9b950dd
DM
2464}
2465
2466/* Likewise for base 16. */
2467
2468static void
2469assert_hexeq (const char *expected, const wide_int_ref &wi)
2470{
0d00385e 2471 char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p = buf;
3bcc10b9
JJ
2472 unsigned len;
2473 if (print_hex_buf_size (wi, &len))
2474 p = XALLOCAVEC (char, len);
0d00385e
JJ
2475 print_hex (wi, p);
2476 ASSERT_STREQ (expected, p);
d9b950dd
DM
2477}
2478
2479/* Test cases. */
2480
2481/* Verify that print_dec and print_hex work for VALUE_TYPE. */
2482
2483template <class VALUE_TYPE>
2484static void
2485test_printing ()
2486{
2487 VALUE_TYPE a = from_int<VALUE_TYPE> (42);
2488 assert_deceq ("42", a, SIGNED);
2489 assert_hexeq ("0x2a", a);
7984457f
RS
2490 assert_hexeq ("0x1fffffffffffffffff", wi::shwi (-1, 69));
2491 assert_hexeq ("0xffffffffffffffff", wi::mask (64, false, 69));
2492 assert_hexeq ("0xffffffffffffffff", wi::mask <widest_int> (64, false));
0d00385e 2493 if (WIDE_INT_MAX_INL_PRECISION > 128)
7984457f
RS
2494 {
2495 assert_hexeq ("0x20000000000000000fffffffffffffffe",
2496 wi::lshift (1, 129) + wi::lshift (1, 64) - 2);
2497 assert_hexeq ("0x200000000000004000123456789abcdef",
2498 wi::lshift (1, 129) + wi::lshift (1, 74)
2499 + wi::lshift (0x1234567, 32) + 0x89abcdef);
2500 }
d9b950dd
DM
2501}
2502
2503/* Verify that various operations work correctly for VALUE_TYPE,
2504 unary and binary, using both function syntax and
2505 overloaded operators. */
2506
2507template <class VALUE_TYPE>
2508static void
2509test_ops ()
2510{
2511 VALUE_TYPE a = from_int<VALUE_TYPE> (7);
2512 VALUE_TYPE b = from_int<VALUE_TYPE> (3);
2513
2514 /* Using functions. */
2515 assert_deceq ("-7", wi::neg (a), SIGNED);
2516 assert_deceq ("10", wi::add (a, b), SIGNED);
2517 assert_deceq ("4", wi::sub (a, b), SIGNED);
2518 assert_deceq ("-4", wi::sub (b, a), SIGNED);
2519 assert_deceq ("21", wi::mul (a, b), SIGNED);
2520
2521 /* Using operators. */
2522 assert_deceq ("-7", -a, SIGNED);
2523 assert_deceq ("10", a + b, SIGNED);
2524 assert_deceq ("4", a - b, SIGNED);
2525 assert_deceq ("-4", b - a, SIGNED);
2526 assert_deceq ("21", a * b, SIGNED);
2527}
2528
2529/* Verify that various comparisons work correctly for VALUE_TYPE. */
2530
2531template <class VALUE_TYPE>
2532static void
2533test_comparisons ()
2534{
2535 VALUE_TYPE a = from_int<VALUE_TYPE> (7);
2536 VALUE_TYPE b = from_int<VALUE_TYPE> (3);
2537
2538 /* == */
2539 ASSERT_TRUE (wi::eq_p (a, a));
2540 ASSERT_FALSE (wi::eq_p (a, b));
2541
2542 /* != */
2543 ASSERT_TRUE (wi::ne_p (a, b));
2544 ASSERT_FALSE (wi::ne_p (a, a));
2545
2546 /* < */
2547 ASSERT_FALSE (wi::lts_p (a, a));
2548 ASSERT_FALSE (wi::lts_p (a, b));
2549 ASSERT_TRUE (wi::lts_p (b, a));
2550
2551 /* <= */
2552 ASSERT_TRUE (wi::les_p (a, a));
2553 ASSERT_FALSE (wi::les_p (a, b));
2554 ASSERT_TRUE (wi::les_p (b, a));
2555
2556 /* > */
2557 ASSERT_FALSE (wi::gts_p (a, a));
2558 ASSERT_TRUE (wi::gts_p (a, b));
2559 ASSERT_FALSE (wi::gts_p (b, a));
2560
2561 /* >= */
2562 ASSERT_TRUE (wi::ges_p (a, a));
2563 ASSERT_TRUE (wi::ges_p (a, b));
2564 ASSERT_FALSE (wi::ges_p (b, a));
2565
2566 /* comparison */
2567 ASSERT_EQ (-1, wi::cmps (b, a));
2568 ASSERT_EQ (0, wi::cmps (a, a));
2569 ASSERT_EQ (1, wi::cmps (a, b));
2570}
2571
2572/* Run all of the selftests, using the given VALUE_TYPE. */
2573
2574template <class VALUE_TYPE>
2575static void run_all_wide_int_tests ()
2576{
2577 test_printing <VALUE_TYPE> ();
2578 test_ops <VALUE_TYPE> ();
2579 test_comparisons <VALUE_TYPE> ();
2580}
2581
2131f7f5
RS
2582/* Test overflow conditions. */
2583
2584static void
2585test_overflow ()
2586{
2587 static int precs[] = { 31, 32, 33, 63, 64, 65, 127, 128 };
2588 static int offsets[] = { 16, 1, 0 };
2589 for (unsigned int i = 0; i < ARRAY_SIZE (precs); ++i)
2590 for (unsigned int j = 0; j < ARRAY_SIZE (offsets); ++j)
2591 {
2592 int prec = precs[i];
2593 int offset = offsets[j];
4a669ac3 2594 wi::overflow_type overflow;
2131f7f5
RS
2595 wide_int sum, diff;
2596
2597 sum = wi::add (wi::max_value (prec, UNSIGNED) - offset, 1,
2598 UNSIGNED, &overflow);
2599 ASSERT_EQ (sum, -offset);
4a669ac3 2600 ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);
2131f7f5
RS
2601
2602 sum = wi::add (1, wi::max_value (prec, UNSIGNED) - offset,
2603 UNSIGNED, &overflow);
2604 ASSERT_EQ (sum, -offset);
4a669ac3 2605 ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);
2131f7f5
RS
2606
2607 diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
2608 wi::max_value (prec, UNSIGNED),
2609 UNSIGNED, &overflow);
2610 ASSERT_EQ (diff, -offset);
4a669ac3 2611 ASSERT_EQ (overflow != wi::OVF_NONE, offset != 0);
2131f7f5
RS
2612
2613 diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
2614 wi::max_value (prec, UNSIGNED) - 1,
2615 UNSIGNED, &overflow);
2616 ASSERT_EQ (diff, 1 - offset);
4a669ac3 2617 ASSERT_EQ (overflow != wi::OVF_NONE, offset > 1);
2131f7f5
RS
2618 }
2619}
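/* For instance, with PREC == 32 and OFFSET == 0 above,
   wi::max_value (32, UNSIGNED) is 0xffffffff; adding 1 wraps the sum
   to 0 (which equals -OFFSET), and overflow is reported, matching the
   OFFSET == 0 expectation.  */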
2620
fff22900
RS
2621/* Test the round_{down,up}_for_mask functions. */
2622
2623static void
2624test_round_for_mask ()
2625{
2626 unsigned int prec = 18;
2627 ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (17, prec),
2628 wi::shwi (0xf1, prec)));
2629 ASSERT_EQ (17, wi::round_up_for_mask (wi::shwi (17, prec),
2630 wi::shwi (0xf1, prec)));
2631
2632 ASSERT_EQ (1, wi::round_down_for_mask (wi::shwi (6, prec),
2633 wi::shwi (0xf1, prec)));
2634 ASSERT_EQ (16, wi::round_up_for_mask (wi::shwi (6, prec),
2635 wi::shwi (0xf1, prec)));
2636
2637 ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (24, prec),
2638 wi::shwi (0xf1, prec)));
2639 ASSERT_EQ (32, wi::round_up_for_mask (wi::shwi (24, prec),
2640 wi::shwi (0xf1, prec)));
2641
2642 ASSERT_EQ (0x011, wi::round_down_for_mask (wi::shwi (0x22, prec),
2643 wi::shwi (0x111, prec)));
2644 ASSERT_EQ (0x100, wi::round_up_for_mask (wi::shwi (0x22, prec),
2645 wi::shwi (0x111, prec)));
2646
2647 ASSERT_EQ (100, wi::round_down_for_mask (wi::shwi (101, prec),
2648 wi::shwi (0xfc, prec)));
2649 ASSERT_EQ (104, wi::round_up_for_mask (wi::shwi (101, prec),
2650 wi::shwi (0xfc, prec)));
2651
2652 ASSERT_EQ (0x2bc, wi::round_down_for_mask (wi::shwi (0x2c2, prec),
2653 wi::shwi (0xabc, prec)));
2654 ASSERT_EQ (0x800, wi::round_up_for_mask (wi::shwi (0x2c2, prec),
2655 wi::shwi (0xabc, prec)));
2656
2657 ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0xabd, prec),
2658 wi::shwi (0xabc, prec)));
2659 ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0xabd, prec),
2660 wi::shwi (0xabc, prec)));
2661
2662 ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0x1000, prec),
2663 wi::shwi (0xabc, prec)));
2664 ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0x1000, prec),
2665 wi::shwi (0xabc, prec)));
2666}
2667
d9b950dd
DM
2668/* Run all of the selftests within this file, for all value types. */
2669
2670void
2671wide_int_cc_tests ()
2672{
2131f7f5
RS
2673 run_all_wide_int_tests <wide_int> ();
2674 run_all_wide_int_tests <offset_int> ();
2675 run_all_wide_int_tests <widest_int> ();
2676 test_overflow ();
fff22900 2677 test_round_for_mask ();
e5259207
JJ
2678 ASSERT_EQ (wi::mask (128, false, 128),
2679 wi::shifted_mask (0, 128, false, 128));
2680 ASSERT_EQ (wi::mask (128, true, 128),
2681 wi::shifted_mask (0, 128, true, 128));
248bf197
JJ
2682 ASSERT_EQ (wi::multiple_of_p (from_int <widest_int> (1),
2683 from_int <widest_int> (-128), UNSIGNED),
2684 false);
d9b950dd
DM
2685}
2686
2687} // namespace selftest
2688#endif /* CHECKING_P */