e913b5cd 1/* Operations with very long integers.
fbd26352 2 Copyright (C) 2012-2019 Free Software Foundation, Inc.
e913b5cd 3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify it
8under the terms of the GNU General Public License as published by the
9Free Software Foundation; either version 3, or (at your option) any
10later version.
11
12GCC is distributed in the hope that it will be useful, but WITHOUT
13ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15for more details.
16
17You should have received a copy of the GNU General Public License
18along with GCC; see the file COPYING3. If not see
19<http://www.gnu.org/licenses/>. */
20
21#include "config.h"
22#include "system.h"
23#include "coretypes.h"
24#include "tm.h"
e913b5cd 25#include "tree.h"
99b4f3a2 26#include "selftest.h"
e913b5cd 27
f1714fa0 28
29#define HOST_BITS_PER_HALF_WIDE_INT 32
30#if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
31# define HOST_HALF_WIDE_INT long
32#elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
33# define HOST_HALF_WIDE_INT int
34#else
35#error Please add support for HOST_HALF_WIDE_INT
36#endif
37
25b6c9eb 38#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
fdfe65e2 39/* Do not include longlong.h when compiler is clang-based. See PR61146. */
40#if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
25b6c9eb 41typedef unsigned HOST_HALF_WIDE_INT UHWtype;
42typedef unsigned HOST_WIDE_INT UWtype;
43typedef unsigned int UQItype __attribute__ ((mode (QI)));
44typedef unsigned int USItype __attribute__ ((mode (SI)));
45typedef unsigned int UDItype __attribute__ ((mode (DI)));
f7bd62a6 46#if W_TYPE_SIZE == 32
47typedef unsigned int UDWtype __attribute__ ((mode (DI)));
48#else
49typedef unsigned int UDWtype __attribute__ ((mode (TI)));
50#endif
25b6c9eb 51#include "longlong.h"
52#endif
53
796b6678 54static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
e913b5cd 55
56/*
57 * Internal utilities.
58 */
59
60/* Quantities to deal with values that hold half of a wide int. Used
61 in multiply and divide. */
edc19fd0 62#define HALF_INT_MASK ((HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
e913b5cd 63
64#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
65#define BLOCKS_NEEDED(PREC) \
66 (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
5b2cae25 67#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
e913b5cd 68
05363b4a 69/* Return the value of VAL[I] if I < LEN, otherwise return 0 or -1
 70 based on the top bit of the highest existing block of VAL. */
71
796b6678 72static unsigned HOST_WIDE_INT
73safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
e913b5cd 74{
edc19fd0 75 return i < len ? val[i] : val[len - 1] < 0 ? HOST_WIDE_INT_M1 : 0;
e913b5cd 76}
77
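/* For instance (a sketch, assuming 64-bit HOST_WIDE_INT blocks): for
   the canonical array val = { -2 } with len 1, safe_uhwi (val, 1, 3)
   yields HOST_WIDE_INT_M1 (all ones), while for val = { 7 } it yields
   0, mirroring the implicit sign extension of the compressed
   representation.  */
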
796b6678 78/* Convert the integer in VAL to canonical form, returning its new length.
79 LEN is the number of blocks currently in VAL and PRECISION is the number
80 of bits in the integer it represents.
e913b5cd 81
796b6678 82 This function only changes the representation, not the value. */
83static unsigned int
84canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
e913b5cd 85{
796b6678 86 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
87 HOST_WIDE_INT top;
88 int i;
e913b5cd 89
796b6678 90 if (len > blocks_needed)
91 len = blocks_needed;
e913b5cd 92
796b6678 93 if (len == 1)
94 return len;
e913b5cd 95
796b6678 96 top = val[len - 1];
5b2cae25 97 if (len * HOST_BITS_PER_WIDE_INT > precision)
dae6a0c3 98 val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
796b6678 99 if (top != 0 && top != (HOST_WIDE_INT)-1)
100 return len;
e913b5cd 101
796b6678 102 /* At this point we know that the top is either 0 or -1. Find the
103 first block that is not a copy of this. */
104 for (i = len - 2; i >= 0; i--)
e913b5cd 105 {
796b6678 106 HOST_WIDE_INT x = val[i];
107 if (x != top)
e913b5cd 108 {
796b6678 109 if (SIGN_MASK (x) == top)
110 return i + 1;
111
 112 /* We need an extra block because the top bit of block i does
 113 not match the extension. */
114 return i + 2;
e913b5cd 115 }
116 }
117
796b6678 118 /* The number is 0 or -1. */
119 return 1;
e913b5cd 120}
121
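/* An illustration, assuming 64-bit HOST_WIDE_INT blocks and 128-bit
   precision: { 1, 0 } canonizes to the single block { 1 }, because the
   second block merely repeats the sign of the first; { -1, 0 } must
   keep both blocks, since dropping the zero block would turn the value
   into -1; and { -1, -1 } canonizes to { -1 }.  */
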
70bca70e 122/* VAL[0] is the unsigned result of an operation. Canonize it by adding
123 another 0 block if needed, and return number of blocks needed. */
124
125static inline unsigned int
126canonize_uhwi (HOST_WIDE_INT *val, unsigned int precision)
127{
128 if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT)
129 {
130 val[1] = 0;
131 return 2;
132 }
133 return 1;
134}
135
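/* E.g. with 128-bit precision and 64-bit HOST_WIDE_INT, an unsigned
   single-block result of 0x8000000000000000 would read as negative
   under the sign-extending compressed scheme, so a zero block is
   appended and 2 is returned; a result with the top bit clear stays
   at length 1.  */
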
796b6678 136/*
9292279b 137 * Conversion routines in and out of wide_int.
796b6678 138 */
e913b5cd 139
796b6678 140/* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
141 result for an integer with precision PRECISION. Return the length
 142 of VAL (after any canonization). */
143unsigned int
144wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
145 unsigned int xlen, unsigned int precision, bool need_canon)
146{
147 for (unsigned i = 0; i < xlen; i++)
148 val[i] = xval[i];
149 return need_canon ? canonize (val, xlen, precision) : xlen;
e913b5cd 150}
151
 152/* Construct a wide int from a buffer of length BUFFER_LEN. BUFFER will be
d0abd9e0 153 read according to byte endianness and word endianness of the target.
e4712d1e 154 Only the lower BUFFER_LEN bytes of the result are set; the remaining
155 high bytes are cleared. */
796b6678 156wide_int
157wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
e913b5cd 158{
796b6678 159 unsigned int precision = buffer_len * BITS_PER_UNIT;
160 wide_int result = wide_int::create (precision);
161 unsigned int words = buffer_len / UNITS_PER_WORD;
e913b5cd 162
 163 /* We have to clear all the bits ourselves, as we merely or in values
164 below. */
796b6678 165 unsigned int len = BLOCKS_NEEDED (precision);
166 HOST_WIDE_INT *val = result.write_val ();
167 for (unsigned int i = 0; i < len; ++i)
168 val[i] = 0;
e913b5cd 169
796b6678 170 for (unsigned int byte = 0; byte < buffer_len; byte++)
e913b5cd 171 {
796b6678 172 unsigned int offset;
173 unsigned int index;
174 unsigned int bitpos = byte * BITS_PER_UNIT;
e913b5cd 175 unsigned HOST_WIDE_INT value;
176
796b6678 177 if (buffer_len > UNITS_PER_WORD)
e913b5cd 178 {
796b6678 179 unsigned int word = byte / UNITS_PER_WORD;
e913b5cd 180
181 if (WORDS_BIG_ENDIAN)
182 word = (words - 1) - word;
183
184 offset = word * UNITS_PER_WORD;
185
186 if (BYTES_BIG_ENDIAN)
187 offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
188 else
189 offset += byte % UNITS_PER_WORD;
190 }
191 else
796b6678 192 offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
e913b5cd 193
194 value = (unsigned HOST_WIDE_INT) buffer[offset];
195
196 index = bitpos / HOST_BITS_PER_WIDE_INT;
796b6678 197 val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
e913b5cd 198 }
199
796b6678 200 result.set_len (canonize (val, len, precision));
e913b5cd 201
202 return result;
203}
204
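/* A usage sketch, assuming a little-endian target with 8-bit
   BITS_PER_UNIT:

     const unsigned char buf[2] = { 0x34, 0x12 };
     wide_int x = wi::from_buffer (buf, 2);

   x then has precision 16 and value 0x1234.  */
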
28e557ef 205/* Set RESULT from X, interpreting the sign of X according to SGN. */
e913b5cd 206void
28e557ef 207wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
e913b5cd 208{
05363b4a 209 int len = x.get_len ();
210 const HOST_WIDE_INT *v = x.get_val ();
28e557ef 211 int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
e913b5cd 212
796b6678 213 if (wi::neg_p (x, sgn))
e913b5cd 214 {
e913b5cd 215 /* We use ones' complement to avoid the -0x80..0 edge case,
 216 on which unary minus does not work. */
28e557ef 217 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
218 for (int i = 0; i < len; i++)
219 t[i] = ~v[i];
220 if (excess > 0)
221 t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
222 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
223 mpz_com (result, result);
e913b5cd 224 }
28e557ef 225 else if (excess > 0)
05363b4a 226 {
28e557ef 227 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
05363b4a 228 for (int i = 0; i < len - 1; i++)
229 t[i] = v[i];
28e557ef 230 t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
05363b4a 231 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
232 }
233 else
234 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
e913b5cd 235}
236
e4712d1e 237/* Return X converted to TYPE. If WRAP is true, then out-of-range
e913b5cd 238 values of X will be wrapped; otherwise, they will be set to the
239 appropriate minimum or maximum TYPE bound. */
796b6678 240wide_int
241wi::from_mpz (const_tree type, mpz_t x, bool wrap)
e913b5cd 242{
243 size_t count, numb;
a140fb64 244 unsigned int prec = TYPE_PRECISION (type);
05363b4a 245 wide_int res = wide_int::create (prec);
e913b5cd 246
247 if (!wrap)
248 {
249 mpz_t min, max;
250
251 mpz_init (min);
252 mpz_init (max);
253 get_type_static_bounds (type, min, max);
254
796b6678 255 if (mpz_cmp (x, min) < 0)
256 mpz_set (x, min);
257 else if (mpz_cmp (x, max) > 0)
258 mpz_set (x, max);
e913b5cd 259
260 mpz_clear (min);
261 mpz_clear (max);
262 }
263
264 /* Determine the number of unsigned HOST_WIDE_INTs that are required
47dcac97 265 for representing the absolute value. The code to calculate count is
e913b5cd 266 extracted from the GMP manual, section "Integer Import and Export":
267 http://gmplib.org/manual/Integer-Import-and-Export.html */
a140fb64 268 numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
e4712d1e 269 count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
796b6678 270 HOST_WIDE_INT *val = res.write_val ();
47dcac97 271 /* Read the absolute value.
272
273 Write directly to the wide_int storage if possible, otherwise leave
a140fb64 274 GMP to allocate the memory for us. It might be slightly more efficient
275 to use mpz_tdiv_r_2exp for the latter case, but the situation is
276 pathological and it seems safer to operate on the original mpz value
277 in all cases. */
278 void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
279 &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
5b2cae25 280 if (count < 1)
281 {
282 val[0] = 0;
283 count = 1;
284 }
a140fb64 285 count = MIN (count, BLOCKS_NEEDED (prec));
286 if (valres != val)
287 {
288 memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
289 free (valres);
290 }
47dcac97 291 /* Zero-extend the absolute value to PREC bits. */
292 if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
293 val[count++] = 0;
294 else
295 count = canonize (val, count, prec);
296 res.set_len (count);
05363b4a 297
796b6678 298 if (mpz_sgn (x) < 0)
e913b5cd 299 res = -res;
300
301 return res;
302}
303
304/*
305 * Largest and smallest values in a mode.
306 */
307
796b6678 308/* Return the largest SGNed number that is representable in PRECISION bits.
50490037 309
 310 TODO: There is still code from the double_int era that tries to
 311 make up for the fact that double_ints could not represent the
312 min and max values of all types. This code should be removed
313 because the min and max values can always be represented in
9292279b 314 wide_ints and int-csts. */
796b6678 315wide_int
316wi::max_value (unsigned int precision, signop sgn)
e913b5cd 317{
40df56fe 318 gcc_checking_assert (precision != 0);
319 if (sgn == UNSIGNED)
796b6678 320 /* The unsigned max is just all ones. */
321 return shwi (-1, precision);
e913b5cd 322 else
323 /* The signed max is all ones except the top bit. This must be
324 explicitly represented. */
796b6678 325 return mask (precision - 1, false, precision);
e913b5cd 326}
327
796b6678 328/* Return the smallest SGNed number that is representable in PRECISION bits. */
329wide_int
330wi::min_value (unsigned int precision, signop sgn)
e913b5cd 331{
40df56fe 332 gcc_checking_assert (precision != 0);
333 if (sgn == UNSIGNED)
796b6678 334 return uhwi (0, precision);
e913b5cd 335 else
796b6678 336 /* The signed min is all zeros except the top bit. This must be
337 explicitly represented. */
338 return wi::set_bit_in_zero (precision - 1, precision);
e913b5cd 339}
340
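/* For example, wi::max_value (8, SIGNED) is 127, wi::min_value (8, SIGNED)
   is -128 and wi::max_value (8, UNSIGNED) is 255, each a wide_int of
   precision 8.  */
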
341/*
342 * Public utilities.
343 */
344
796b6678 345/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
346 signedness SGN, to an integer that has PRECISION bits. Store the blocks
347 in VAL and return the number of blocks used.
e913b5cd 348
796b6678 349 This function can handle both extension (PRECISION > XPRECISION)
350 and truncation (PRECISION < XPRECISION). */
351unsigned int
352wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
353 unsigned int xlen, unsigned int xprecision,
354 unsigned int precision, signop sgn)
e913b5cd 355{
796b6678 356 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
357 unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
358 for (unsigned i = 0; i < len; i++)
359 val[i] = xval[i];
e913b5cd 360
796b6678 361 if (precision > xprecision)
e913b5cd 362 {
5b2cae25 363 unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
364
796b6678 365 /* Expanding. */
e913b5cd 366 if (sgn == UNSIGNED)
367 {
796b6678 368 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
369 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
370 else if (val[len - 1] < 0)
e913b5cd 371 {
796b6678 372 while (len < BLOCKS_NEEDED (xprecision))
373 val[len++] = -1;
374 if (small_xprecision)
375 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
376 else
377 val[len++] = 0;
e913b5cd 378 }
379 }
5b2cae25 380 else
381 {
382 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
383 val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
384 }
e913b5cd 385 }
2e9ff529 386 len = canonize (val, len, precision);
e913b5cd 387
796b6678 388 return len;
e913b5cd 389}
390
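/* A sketch through the public wrapper: given an 8-bit wide_int x
   holding -1 (a single block of all ones),

     wide_int s = wide_int::from (x, 32, SIGNED);
     wide_int u = wide_int::from (x, 32, UNSIGNED);

   s is the 32-bit -1 while u is the 32-bit 255; the UNSIGNED case
   zero-extends the excess bits of the top block via the
   small_xprecision handling above.  */
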
391/* This function hides the fact that we cannot rely on the bits beyond
 392 the precision. This issue comes up in the relational comparisons
393 where we do allow comparisons of values of different precisions. */
394static inline HOST_WIDE_INT
395selt (const HOST_WIDE_INT *a, unsigned int len,
e4712d1e 396 unsigned int blocks_needed, unsigned int small_prec,
397 unsigned int index, signop sgn)
e913b5cd 398{
5b2cae25 399 HOST_WIDE_INT val;
400 if (index < len)
401 val = a[index];
402 else if (index < blocks_needed || sgn == SIGNED)
403 /* Signed or within the precision. */
404 val = SIGN_MASK (a[len - 1]);
405 else
406 /* Unsigned extension beyond the precision. */
407 val = 0;
e913b5cd 408
5b2cae25 409 if (small_prec && index == blocks_needed - 1)
410 return (sgn == SIGNED
411 ? sext_hwi (val, small_prec)
412 : zext_hwi (val, small_prec));
05363b4a 413 else
5b2cae25 414 return val;
e913b5cd 415}
416
05363b4a 417/* Return the highest bit represented in a wide int, which in
e913b5cd 418 general has the same value as the sign bit. */
419static inline HOST_WIDE_INT
5b2cae25 420top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
e913b5cd 421{
5b2cae25 422 int excess = len * HOST_BITS_PER_WIDE_INT - prec;
423 unsigned HOST_WIDE_INT val = a[len - 1];
424 if (excess > 0)
425 val <<= excess;
426 return val >> (HOST_BITS_PER_WIDE_INT - 1);
e913b5cd 427}
428
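/* E.g. the canonical 8-bit value -128 is the single block { -128 };
   with 64-bit HOST_WIDE_INT the excess is 56 bits, shifting which away
   leaves bit 7 in the MSB position, so top_bit_of returns 1.  */
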
429/*
430 * Comparisons, note that only equality is an operator. The other
e4712d1e 431 * comparisons cannot be operators since they are inherently signed or
e913b5cd 432 * unsigned and C++ has no such operators.
433 */
434
435/* Return true if OP0 == OP1. */
436bool
796b6678 437wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
438 const HOST_WIDE_INT *op1, unsigned int op1len,
439 unsigned int prec)
e913b5cd 440{
441 int l0 = op0len - 1;
442 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
443
e4712d1e 444 if (op0len != op1len)
e913b5cd 445 return false;
446
447 if (op0len == BLOCKS_NEEDED (prec) && small_prec)
448 {
449 /* It does not matter if we zext or sext here, we just have to
450 do both the same way. */
451 if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
452 return false;
453 l0--;
454 }
455
456 while (l0 >= 0)
457 if (op0[l0] != op1[l0])
458 return false;
459 else
460 l0--;
461
462 return true;
463}
464
465/* Return true if OP0 < OP1 using signed comparisons. */
466bool
796b6678 467wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
cd9b5516 468 unsigned int precision,
469 const HOST_WIDE_INT *op1, unsigned int op1len)
e913b5cd 470{
471 HOST_WIDE_INT s0, s1;
472 unsigned HOST_WIDE_INT u0, u1;
cd9b5516 473 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
474 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
e913b5cd 475 int l = MAX (op0len - 1, op1len - 1);
476
477 /* Only the top block is compared as signed. The rest are unsigned
478 comparisons. */
cd9b5516 479 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
480 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
e913b5cd 481 if (s0 < s1)
482 return true;
483 if (s0 > s1)
484 return false;
485
486 l--;
487 while (l >= 0)
488 {
cd9b5516 489 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
490 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
e913b5cd 491
492 if (u0 < u1)
493 return true;
494 if (u0 > u1)
495 return false;
496 l--;
497 }
498
499 return false;
500}
501
502/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
503 signed compares. */
504int
796b6678 505wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
cd9b5516 506 unsigned int precision,
507 const HOST_WIDE_INT *op1, unsigned int op1len)
e913b5cd 508{
509 HOST_WIDE_INT s0, s1;
510 unsigned HOST_WIDE_INT u0, u1;
cd9b5516 511 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
512 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
e913b5cd 513 int l = MAX (op0len - 1, op1len - 1);
514
515 /* Only the top block is compared as signed. The rest are unsigned
516 comparisons. */
cd9b5516 517 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
518 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
e913b5cd 519 if (s0 < s1)
520 return -1;
521 if (s0 > s1)
522 return 1;
523
524 l--;
525 while (l >= 0)
526 {
cd9b5516 527 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
528 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
e913b5cd 529
530 if (u0 < u1)
531 return -1;
532 if (u0 > u1)
533 return 1;
534 l--;
535 }
536
537 return 0;
538}
539
540/* Return true if OP0 < OP1 using unsigned comparisons. */
541bool
cd9b5516 542wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
543 unsigned int precision,
544 const HOST_WIDE_INT *op1, unsigned int op1len)
e913b5cd 545{
546 unsigned HOST_WIDE_INT x0;
547 unsigned HOST_WIDE_INT x1;
cd9b5516 548 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
549 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
e913b5cd 550 int l = MAX (op0len - 1, op1len - 1);
551
552 while (l >= 0)
553 {
cd9b5516 554 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
555 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
e913b5cd 556 if (x0 < x1)
557 return true;
558 if (x0 > x1)
559 return false;
560 l--;
561 }
562
563 return false;
564}
565
566/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
567 unsigned compares. */
568int
cd9b5516 569wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
570 unsigned int precision,
571 const HOST_WIDE_INT *op1, unsigned int op1len)
e913b5cd 572{
573 unsigned HOST_WIDE_INT x0;
574 unsigned HOST_WIDE_INT x1;
cd9b5516 575 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
576 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
e913b5cd 577 int l = MAX (op0len - 1, op1len - 1);
578
579 while (l >= 0)
580 {
cd9b5516 581 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
582 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
e913b5cd 583 if (x0 < x1)
584 return -1;
585 if (x0 > x1)
586 return 1;
587 l--;
588 }
589
590 return 0;
591}
592
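/* The signed/unsigned split matters even for identical bit patterns.
   A sketch with 8-bit operands x = 0x80 and y = 0x01:
   wi::cmps (x, y) is -1, since x reads as -128 when signed, while
   wi::cmpu (x, y) is 1, since x reads as 128 when unsigned.  */
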
e913b5cd 593/*
594 * Extension.
595 */
596
796b6678 597/* Sign-extend the number represented by XVAL and XLEN into VAL,
598 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
599 and VAL have PRECISION bits. */
600unsigned int
601wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
602 unsigned int xlen, unsigned int precision, unsigned int offset)
603{
604 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
605 /* Extending beyond the precision is a no-op. If we have only stored
606 OFFSET bits or fewer, the rest are already signs. */
607 if (offset >= precision || len >= xlen)
608 {
609 for (unsigned i = 0; i < xlen; ++i)
610 val[i] = xval[i];
611 return xlen;
612 }
613 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
614 for (unsigned int i = 0; i < len; i++)
615 val[i] = xval[i];
616 if (suboffset > 0)
617 {
618 val[len] = sext_hwi (xval[len], suboffset);
619 len += 1;
620 }
621 return canonize (val, len, precision);
622}
623
624/* Zero-extend the number represented by XVAL and XLEN into VAL,
625 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
626 and VAL have PRECISION bits. */
627unsigned int
628wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
629 unsigned int xlen, unsigned int precision, unsigned int offset)
630{
631 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
632 /* Extending beyond the precision is a no-op. If we have only stored
633 OFFSET bits or fewer, and the upper stored bit is zero, then there
634 is nothing to do. */
635 if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
636 {
637 for (unsigned i = 0; i < xlen; ++i)
638 val[i] = xval[i];
639 return xlen;
640 }
641 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
642 for (unsigned int i = 0; i < len; i++)
643 val[i] = i < xlen ? xval[i] : -1;
644 if (suboffset > 0)
645 val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
e913b5cd 646 else
796b6678 647 val[len] = 0;
648 return canonize (val, len + 1, precision);
e913b5cd 649}
650
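/* Through the public wrappers, for an 8-bit x holding 0xff:
   wi::zext (x, 4) keeps only the low four bits, giving 0x0f, while
   wi::sext (x, 4) sign-extends bit 3, giving 0xff (-1) again.  */
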
651/*
652 * Masking, inserting, shifting, rotating.
653 */
654
796b6678 655/* Insert WIDTH bits from Y into X starting at START. */
656wide_int
657wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
658 unsigned int width)
e913b5cd 659{
660 wide_int result;
661 wide_int mask;
662 wide_int tmp;
663
796b6678 664 unsigned int precision = x.get_precision ();
e913b5cd 665 if (start >= precision)
796b6678 666 return x;
e913b5cd 667
796b6678 668 gcc_checking_assert (precision >= width);
e913b5cd 669
670 if (start + width >= precision)
671 width = precision - start;
672
796b6678 673 mask = wi::shifted_mask (start, width, false, precision);
674 tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
e913b5cd 675 result = tmp & mask;
676
796b6678 677 tmp = wi::bit_and_not (x, mask);
e913b5cd 678 result = result | tmp;
679
e913b5cd 680 return result;
681}
682
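/* A usage sketch: inserting the 3-bit pattern 0b101 into an 8-bit zero
   at START = 2 gives 0b00010100 (20):

     wide_int x = wi::shwi (0, 8);
     wide_int y = wi::uhwi (5, 3);
     wide_int r = wi::insert (x, y, 2, 3);

   r is then the 8-bit value 20.  */
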
796b6678 683/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
684 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
685 bits. */
686unsigned int
687wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
688 unsigned int xlen, unsigned int precision, unsigned int bit)
689{
690 unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
691 unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
692
693 if (block + 1 >= xlen)
694 {
695 /* The operation either affects the last current block or needs
696 a new block. */
697 unsigned int len = block + 1;
698 for (unsigned int i = 0; i < len; i++)
699 val[i] = safe_uhwi (xval, xlen, i);
edc19fd0 700 val[block] |= HOST_WIDE_INT_1U << subbit;
796b6678 701
702 /* If the bit we just set is at the msb of the block, make sure
703 that any higher bits are zeros. */
05363b4a 704 if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
796b6678 705 val[len++] = 0;
706 return len;
707 }
708 else
709 {
710 for (unsigned int i = 0; i < xlen; i++)
711 val[i] = xval[i];
edc19fd0 712 val[block] |= HOST_WIDE_INT_1U << subbit;
796b6678 713 return canonize (val, xlen, precision);
714 }
715}
716
e913b5cd 717/* bswap THIS. */
796b6678 718wide_int
719wide_int_storage::bswap () const
e913b5cd 720{
796b6678 721 wide_int result = wide_int::create (precision);
722 unsigned int i, s;
723 unsigned int len = BLOCKS_NEEDED (precision);
724 unsigned int xlen = get_len ();
725 const HOST_WIDE_INT *xval = get_val ();
726 HOST_WIDE_INT *val = result.write_val ();
e913b5cd 727
728 /* This is not a well defined operation if the precision is not a
729 multiple of 8. */
730 gcc_assert ((precision & 0x7) == 0);
731
e913b5cd 732 for (i = 0; i < len; i++)
796b6678 733 val[i] = 0;
e913b5cd 734
735 /* Only swap the bytes that are not the padding. */
796b6678 736 for (s = 0; s < precision; s += 8)
e913b5cd 737 {
738 unsigned int d = precision - s - 8;
739 unsigned HOST_WIDE_INT byte;
740
796b6678 741 unsigned int block = s / HOST_BITS_PER_WIDE_INT;
742 unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
e913b5cd 743
796b6678 744 byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
e913b5cd 745
746 block = d / HOST_BITS_PER_WIDE_INT;
747 offset = d & (HOST_BITS_PER_WIDE_INT - 1);
748
796b6678 749 val[block] |= byte << offset;
e913b5cd 750 }
751
796b6678 752 result.set_len (canonize (val, len, precision));
e913b5cd 753 return result;
754}
755
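/* E.g. a 32-bit wide_int holding 0x11223344 bswaps to 0x44332211; the
   precision must be a multiple of 8, as asserted above.  */
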
796b6678 756/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
757 above that up to PREC are zeros. The result is inverted if NEGATE
758 is true. Return the number of blocks in VAL. */
759unsigned int
760wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
761 unsigned int prec)
e913b5cd 762{
9592fe41 763 if (width >= prec)
e913b5cd 764 {
796b6678 765 val[0] = negate ? 0 : -1;
766 return 1;
e913b5cd 767 }
768 else if (width == 0)
769 {
796b6678 770 val[0] = negate ? -1 : 0;
771 return 1;
e913b5cd 772 }
e913b5cd 773
796b6678 774 unsigned int i = 0;
775 while (i < width / HOST_BITS_PER_WIDE_INT)
776 val[i++] = negate ? 0 : -1;
e913b5cd 777
796b6678 778 unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
779 if (shift != 0)
780 {
edc19fd0 781 HOST_WIDE_INT last = (HOST_WIDE_INT_1U << shift) - 1;
796b6678 782 val[i++] = negate ? ~last : last;
e913b5cd 783 }
796b6678 784 else
785 val[i++] = negate ? -1 : 0;
e913b5cd 786
796b6678 787 return i;
e913b5cd 788}
789
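/* For example, wi::mask (4, false, 16) is 0x000f and
   wi::mask (4, true, 16) is 0xfff0, i.e. -16 in 16 bits.  */
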
796b6678 790/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
791 bits are ones, and the bits above that up to PREC are zeros. The result
792 is inverted if NEGATE is true. Return the number of blocks in VAL. */
793unsigned int
794wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
795 bool negate, unsigned int prec)
e913b5cd 796{
2cd0cb08 797 if (start >= prec || width == 0)
e913b5cd 798 {
796b6678 799 val[0] = negate ? -1 : 0;
800 return 1;
e913b5cd 801 }
802
2cd0cb08 803 if (width > prec - start)
804 width = prec - start;
805 unsigned int end = start + width;
806
796b6678 807 unsigned int i = 0;
e913b5cd 808 while (i < start / HOST_BITS_PER_WIDE_INT)
796b6678 809 val[i++] = negate ? -1 : 0;
e913b5cd 810
796b6678 811 unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
e913b5cd 812 if (shift)
813 {
edc19fd0 814 HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
b309c394 815 shift += width;
816 if (shift < HOST_BITS_PER_WIDE_INT)
e913b5cd 817 {
818 /* case 000111000 */
edc19fd0 819 block = (HOST_WIDE_INT_1U << shift) - block - 1;
796b6678 820 val[i++] = negate ? ~block : block;
821 return i;
e913b5cd 822 }
823 else
824 /* ...111000 */
796b6678 825 val[i++] = negate ? block : ~block;
e913b5cd 826 }
827
828 while (i < end / HOST_BITS_PER_WIDE_INT)
829 /* 1111111 */
796b6678 830 val[i++] = negate ? 0 : -1;
e913b5cd 831
832 shift = end & (HOST_BITS_PER_WIDE_INT - 1);
833 if (shift != 0)
834 {
835 /* 000011111 */
edc19fd0 836 HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
796b6678 837 val[i++] = negate ? ~block : block;
e913b5cd 838 }
839 else if (end < prec)
796b6678 840 val[i++] = negate ? -1 : 0;
e913b5cd 841
796b6678 842 return i;
e913b5cd 843}
844
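/* E.g. wi::shifted_mask (4, 8, false, 16) is 0x0ff0: four zeros, then
   eight ones, then zeros up to the 16-bit precision.  Negating gives
   0xf00f.  */
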
e913b5cd 845/*
846 * logical operations.
847 */
848
796b6678 849/* Set VAL to OP0 & OP1. Return the number of blocks used. */
850unsigned int
851wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
852 unsigned int op0len, const HOST_WIDE_INT *op1,
853 unsigned int op1len, unsigned int prec)
e913b5cd 854{
e913b5cd 855 int l0 = op0len - 1;
856 int l1 = op1len - 1;
857 bool need_canon = true;
858
796b6678 859 unsigned int len = MAX (op0len, op1len);
e913b5cd 860 if (l0 > l1)
861 {
5b2cae25 862 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
e4712d1e 863 if (op1mask == 0)
e913b5cd 864 {
865 l0 = l1;
796b6678 866 len = l1 + 1;
e913b5cd 867 }
868 else
869 {
870 need_canon = false;
871 while (l0 > l1)
872 {
796b6678 873 val[l0] = op0[l0];
e913b5cd 874 l0--;
875 }
876 }
877 }
878 else if (l1 > l0)
879 {
5b2cae25 880 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
e913b5cd 881 if (op0mask == 0)
796b6678 882 len = l0 + 1;
e913b5cd 883 else
884 {
885 need_canon = false;
886 while (l1 > l0)
887 {
796b6678 888 val[l1] = op1[l1];
e913b5cd 889 l1--;
890 }
891 }
892 }
893
894 while (l0 >= 0)
895 {
796b6678 896 val[l0] = op0[l0] & op1[l0];
e913b5cd 897 l0--;
898 }
899
900 if (need_canon)
796b6678 901 len = canonize (val, len, prec);
e913b5cd 902
796b6678 903 return len;
e913b5cd 904}
905
796b6678 906/* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
907unsigned int
908wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
909 unsigned int op0len, const HOST_WIDE_INT *op1,
910 unsigned int op1len, unsigned int prec)
e913b5cd 911{
912 wide_int result;
913 int l0 = op0len - 1;
914 int l1 = op1len - 1;
915 bool need_canon = true;
916
796b6678 917 unsigned int len = MAX (op0len, op1len);
e913b5cd 918 if (l0 > l1)
919 {
5b2cae25 920 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
e913b5cd 921 if (op1mask != 0)
922 {
923 l0 = l1;
796b6678 924 len = l1 + 1;
e913b5cd 925 }
926 else
927 {
928 need_canon = false;
929 while (l0 > l1)
930 {
796b6678 931 val[l0] = op0[l0];
e913b5cd 932 l0--;
933 }
934 }
935 }
936 else if (l1 > l0)
937 {
5b2cae25 938 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
e913b5cd 939 if (op0mask == 0)
796b6678 940 len = l0 + 1;
e913b5cd 941 else
942 {
943 need_canon = false;
944 while (l1 > l0)
945 {
796b6678 946 val[l1] = ~op1[l1];
e913b5cd 947 l1--;
948 }
949 }
950 }
951
952 while (l0 >= 0)
953 {
796b6678 954 val[l0] = op0[l0] & ~op1[l0];
e913b5cd 955 l0--;
956 }
957
958 if (need_canon)
796b6678 959 len = canonize (val, len, prec);
e913b5cd 960
796b6678 961 return len;
e913b5cd 962}
963
796b6678 964/* Set VAL to OP0 | OP1. Return the number of blocks used. */
965unsigned int
966wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
967 unsigned int op0len, const HOST_WIDE_INT *op1,
968 unsigned int op1len, unsigned int prec)
e913b5cd 969{
970 wide_int result;
971 int l0 = op0len - 1;
972 int l1 = op1len - 1;
973 bool need_canon = true;
974
796b6678 975 unsigned int len = MAX (op0len, op1len);
e913b5cd 976 if (l0 > l1)
977 {
5b2cae25 978 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
e913b5cd 979 if (op1mask != 0)
980 {
981 l0 = l1;
796b6678 982 len = l1 + 1;
e913b5cd 983 }
984 else
985 {
986 need_canon = false;
987 while (l0 > l1)
988 {
796b6678 989 val[l0] = op0[l0];
e913b5cd 990 l0--;
991 }
992 }
993 }
994 else if (l1 > l0)
995 {
5b2cae25 996 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
e913b5cd 997 if (op0mask != 0)
796b6678 998 len = l0 + 1;
e913b5cd 999 else
1000 {
1001 need_canon = false;
1002 while (l1 > l0)
1003 {
796b6678 1004 val[l1] = op1[l1];
e913b5cd 1005 l1--;
1006 }
1007 }
1008 }
1009
1010 while (l0 >= 0)
1011 {
796b6678 1012 val[l0] = op0[l0] | op1[l0];
e913b5cd 1013 l0--;
1014 }
1015
1016 if (need_canon)
796b6678 1017 len = canonize (val, len, prec);
e913b5cd 1018
796b6678 1019 return len;
e913b5cd 1020}
1021
796b6678 1022/* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
1023unsigned int
1024wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1025 unsigned int op0len, const HOST_WIDE_INT *op1,
1026 unsigned int op1len, unsigned int prec)
e913b5cd 1027{
1028 wide_int result;
1029 int l0 = op0len - 1;
1030 int l1 = op1len - 1;
1031 bool need_canon = true;
1032
796b6678 1033 unsigned int len = MAX (op0len, op1len);
e913b5cd 1034 if (l0 > l1)
1035 {
5b2cae25 1036 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
e913b5cd 1037 if (op1mask == 0)
1038 {
1039 l0 = l1;
796b6678 1040 len = l1 + 1;
e913b5cd 1041 }
1042 else
1043 {
1044 need_canon = false;
1045 while (l0 > l1)
1046 {
796b6678 1047 val[l0] = op0[l0];
e913b5cd 1048 l0--;
1049 }
1050 }
1051 }
1052 else if (l1 > l0)
1053 {
5b2cae25 1054 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
e913b5cd 1055 if (op0mask != 0)
796b6678 1056 len = l0 + 1;
e913b5cd 1057 else
1058 {
1059 need_canon = false;
1060 while (l1 > l0)
1061 {
796b6678 1062 val[l1] = ~op1[l1];
e913b5cd 1063 l1--;
1064 }
1065 }
1066 }
1067
1068 while (l0 >= 0)
1069 {
796b6678 1070 val[l0] = op0[l0] | ~op1[l0];
e913b5cd 1071 l0--;
1072 }
1073
1074 if (need_canon)
796b6678 1075 len = canonize (val, len, prec);
e913b5cd 1076
796b6678 1077 return len;
e913b5cd 1078}
1079
796b6678 1080/* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1081unsigned int
1082wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1083 unsigned int op0len, const HOST_WIDE_INT *op1,
1084 unsigned int op1len, unsigned int prec)
e913b5cd 1085{
1086 wide_int result;
1087 int l0 = op0len - 1;
1088 int l1 = op1len - 1;
1089
796b6678 1090 unsigned int len = MAX (op0len, op1len);
e913b5cd 1091 if (l0 > l1)
1092 {
5b2cae25 1093 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
e913b5cd 1094 while (l0 > l1)
1095 {
796b6678 1096 val[l0] = op0[l0] ^ op1mask;
e913b5cd 1097 l0--;
1098 }
1099 }
1100
1101 if (l1 > l0)
1102 {
5b2cae25 1103 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
e913b5cd 1104 while (l1 > l0)
1105 {
796b6678 1106 val[l1] = op0mask ^ op1[l1];
e913b5cd 1107 l1--;
1108 }
1109 }
1110
1111 while (l0 >= 0)
1112 {
796b6678 1113 val[l0] = op0[l0] ^ op1[l0];
e913b5cd 1114 l0--;
1115 }
1116
796b6678 1117 return canonize (val, len, prec);
e913b5cd 1118}
1119
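/* All of the binary bit operations above work directly on the
   compressed representation: missing high blocks of the shorter
   operand are read as copies of its sign.  A sketch using the
   overloaded operators (assumed to be the usual wide-int.h ones):

     wide_int a = wi::shwi (-1, 128);
     wide_int b = wi::shwi (0xf0, 128);
     wide_int c = a & b;

   a has length 1 despite the 128-bit precision, and c is 0xf0, again
   of length 1.  */
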
1120/*
1121 * math
1122 */
1123
796b6678 1124/* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1125 whether the result overflows when OP0 and OP1 are treated as having
1126 signedness SGN. Return the number of blocks in VAL. */
1127unsigned int
1128wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1129 unsigned int op0len, const HOST_WIDE_INT *op1,
1130 unsigned int op1len, unsigned int prec,
30b5769f 1131 signop sgn, wi::overflow_type *overflow)
e913b5cd 1132{
e913b5cd 1133 unsigned HOST_WIDE_INT o0 = 0;
1134 unsigned HOST_WIDE_INT o1 = 0;
1135 unsigned HOST_WIDE_INT x = 0;
1136 unsigned HOST_WIDE_INT carry = 0;
1137 unsigned HOST_WIDE_INT old_carry = 0;
1138 unsigned HOST_WIDE_INT mask0, mask1;
5b2cae25 1139 unsigned int i;
e913b5cd 1140
796b6678 1141 unsigned int len = MAX (op0len, op1len);
5b2cae25 1142 mask0 = -top_bit_of (op0, op0len, prec);
1143 mask1 = -top_bit_of (op1, op1len, prec);
e913b5cd 1144 /* Add all of the explicitly defined elements. */
1145
796b6678 1146 for (i = 0; i < len; i++)
e913b5cd 1147 {
5b2cae25 1148 o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
1149 o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
e913b5cd 1150 x = o0 + o1 + carry;
796b6678 1151 val[i] = x;
e913b5cd 1152 old_carry = carry;
1153 carry = carry == 0 ? x < o0 : x <= o0;
1154 }
1155
796b6678 1156 if (len * HOST_BITS_PER_WIDE_INT < prec)
e913b5cd 1157 {
796b6678 1158 val[len] = mask0 + mask1 + carry;
1159 len++;
e913b5cd 1160 if (overflow)
30b5769f 1161 *overflow
1162 = (sgn == UNSIGNED && carry) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
e913b5cd 1163 }
1164 else if (overflow)
1165 {
5b2cae25 1166 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
e913b5cd 1167 if (sgn == SIGNED)
1168 {
5b2cae25 1169 unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
30b5769f 1170 if ((HOST_WIDE_INT) (x << shift) < 0)
1171 {
1172 if (o0 > (unsigned HOST_WIDE_INT) val[len - 1])
1173 *overflow = wi::OVF_UNDERFLOW;
1174 else if (o0 < (unsigned HOST_WIDE_INT) val[len - 1])
1175 *overflow = wi::OVF_OVERFLOW;
1176 else
1177 *overflow = wi::OVF_NONE;
1178 }
1179 else
1180 *overflow = wi::OVF_NONE;
e913b5cd 1181 }
1182 else
1183 {
5b2cae25 1184 /* Put the MSBs of X and O0 in the top of the HWI. */
1185 x <<= shift;
1186 o0 <<= shift;
e913b5cd 1187 if (old_carry)
30b5769f 1188 *overflow = (x <= o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
e913b5cd 1189 else
30b5769f 1190 *overflow = (x < o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
e913b5cd 1191 }
1192 }
1193
796b6678 1194 return canonize (val, len, prec);
e913b5cd 1195}
1196
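/* A usage sketch through the public wrapper, with 8-bit operands:

     wi::overflow_type ovf;
     wide_int a = wi::uhwi (200, 8);
     wide_int b = wi::uhwi (100, 8);
     wide_int s = wi::add (a, b, UNSIGNED, &ovf);

   s is then 44 (300 mod 256) and ovf is wi::OVF_OVERFLOW.  */
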
e913b5cd 1197/* Subroutines of the multiplication and division operations. Unpack
1198 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1199 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1200 uncompressing the top bit of INPUT[IN_LEN - 1]. */
1201static void
74eae799 1202wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
796b6678 1203 unsigned int in_len, unsigned int out_len,
1204 unsigned int prec, signop sgn)
e913b5cd 1205{
796b6678 1206 unsigned int i;
1207 unsigned int j = 0;
1208 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
1209 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
e913b5cd 1210 HOST_WIDE_INT mask;
1211
1212 if (sgn == SIGNED)
1213 {
5b2cae25 1214 mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
e913b5cd 1215 mask &= HALF_INT_MASK;
1216 }
1217 else
1218 mask = 0;
1219
74eae799 1220 for (i = 0; i < blocks_needed - 1; i++)
e913b5cd 1221 {
74eae799 1222 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
e913b5cd 1223 result[j++] = x;
1224 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1225 }
1226
74eae799 1227 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1228 if (small_prec)
1229 {
1230 if (sgn == SIGNED)
1231 x = sext_hwi (x, small_prec);
1232 else
1233 x = zext_hwi (x, small_prec);
1234 }
1235 result[j++] = x;
1236 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1237
e913b5cd 1238 /* Smear the sign bit. */
1239 while (j < out_len)
1240 result[j++] = mask;
1241}
1242
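/* For instance, with 64-bit HOST_WIDE_INT and 32-bit halves, the
   single input block 0x1122334455667788 unpacks to the half-blocks
   { 0x55667788, 0x11223344 }, followed by half-blocks of the smeared
   sign (all zeros here) up to OUT_LEN.  */
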
ef7be7f8 1243/* The inverse of wi_unpack. IN_LEN is the number of input
1244 blocks and PRECISION is the precision of the result. Return the
1245 number of blocks in the canonicalized result. */
1246static unsigned int
1247wi_pack (HOST_WIDE_INT *result,
e913b5cd 1248 const unsigned HOST_HALF_WIDE_INT *input,
ef7be7f8 1249 unsigned int in_len, unsigned int precision)
e913b5cd 1250{
796b6678 1251 unsigned int i = 0;
1252 unsigned int j = 0;
ef7be7f8 1253 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
e913b5cd 1254
ef7be7f8 1255 while (i + 1 < in_len)
e913b5cd 1256 {
ef7be7f8 1257 result[j++] = ((unsigned HOST_WIDE_INT) input[i]
1258 | ((unsigned HOST_WIDE_INT) input[i + 1]
1259 << HOST_BITS_PER_HALF_WIDE_INT));
796b6678 1260 i += 2;
e913b5cd 1261 }
e913b5cd 1262
796b6678 1263 /* Handle the case where in_len is odd. For this we zero extend. */
1264 if (in_len & 1)
ef7be7f8 1265 result[j++] = (unsigned HOST_WIDE_INT) input[i];
1266 else if (j < blocks_needed)
1267 result[j++] = 0;
1268 return canonize (result, j, precision);
e913b5cd 1269}
1270
1271/* Multiply Op1 by Op2. If HIGH is set, only the upper half of the
77d15f37 1272 result is returned.
1273
1274 If HIGH is not set, throw away the upper half after the check is
1275 made to see if it overflows. Unfortunately there is no better way
1276 to check for overflow than to do this. If OVERFLOW is nonnull,
1277 record in *OVERFLOW whether the result overflowed. SGN controls
30b5769f 1278 the signedness, and is used when checking for overflow or when HIGH is set.
1279
1280 NOTE: Overflow type for signed overflow is not yet implemented. */
796b6678 1281unsigned int
25b6c9eb 1282wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1283 unsigned int op1len, const HOST_WIDE_INT *op2val,
796b6678 1284 unsigned int op2len, unsigned int prec, signop sgn,
30b5769f 1285 wi::overflow_type *overflow, bool high)
e913b5cd 1286{
e913b5cd 1287 unsigned HOST_WIDE_INT o0, o1, k, t;
1288 unsigned int i;
1289 unsigned int j;
1290 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1291 unsigned int half_blocks_needed = blocks_needed * 2;
1292 /* The sizes here are scaled to support a 2x largest mode by 2x
1293 largest mode yielding a 4x largest mode result. This is what is
 1294 needed by VRP. */
1295
1296 unsigned HOST_HALF_WIDE_INT
242947db 1297 u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
e913b5cd 1298 unsigned HOST_HALF_WIDE_INT
242947db 1299 v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
e913b5cd 1300 /* The '2' in 'R' is because we are internally doing a full
1301 multiply. */
1302 unsigned HOST_HALF_WIDE_INT
242947db 1303 r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
e913b5cd 1304 HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1305
1306 /* If the top level routine did not really pass in an overflow, then
1307 just make sure that we never attempt to set it. */
796b6678 1308 bool needs_overflow = (overflow != 0);
1309 if (needs_overflow)
30b5769f 1310 *overflow = wi::OVF_NONE;
e913b5cd 1311
25b6c9eb 1312 wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1313 wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1314
003e7b67 1315 /* This is a surprisingly common case, so do it first. */
25b6c9eb 1316 if (op1 == 0 || op2 == 0)
003e7b67 1317 {
1318 val[0] = 0;
1319 return 1;
1320 }
1321
25b6c9eb 1322#ifdef umul_ppmm
1323 if (sgn == UNSIGNED)
1324 {
1325 /* If the inputs are single HWIs and the output has room for at
1326 least two HWIs, we can use umul_ppmm directly. */
1327 if (prec >= HOST_BITS_PER_WIDE_INT * 2
1328 && wi::fits_uhwi_p (op1)
1329 && wi::fits_uhwi_p (op2))
1330 {
ea796d8b 1331 /* This case never overflows. */
1332 if (high)
1333 {
1334 val[0] = 0;
1335 return 1;
1336 }
25b6c9eb 1337 umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
da002466 1338 if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
1339 {
1340 val[2] = 0;
1341 return 3;
1342 }
25b6c9eb 1343 return 1 + (val[1] != 0 || val[0] < 0);
1344 }
1345 /* Likewise if the output is a full single HWI, except that the
1346 upper HWI of the result is only used for determining overflow.
1347 (We handle this case inline when overflow isn't needed.) */
1348 else if (prec == HOST_BITS_PER_WIDE_INT)
1349 {
1350 unsigned HOST_WIDE_INT upper;
1351 umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1352 if (needs_overflow)
30b5769f 1353 /* Unsigned overflow can only be +OVERFLOW. */
1354 *overflow = (upper != 0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
ea796d8b 1355 if (high)
1356 val[0] = upper;
25b6c9eb 1357 return 1;
1358 }
1359 }
1360#endif
1361
530b746c 1362 /* Handle multiplications by 1. */
25b6c9eb 1363 if (op1 == 1)
530b746c 1364 {
ea796d8b 1365 if (high)
1366 {
1367 val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
1368 return 1;
1369 }
530b746c 1370 for (i = 0; i < op2len; i++)
25b6c9eb 1371 val[i] = op2val[i];
530b746c 1372 return op2len;
1373 }
25b6c9eb 1374 if (op2 == 1)
530b746c 1375 {
ea796d8b 1376 if (high)
1377 {
1378 val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
1379 return 1;
1380 }
530b746c 1381 for (i = 0; i < op1len; i++)
25b6c9eb 1382 val[i] = op1val[i];
530b746c 1383 return op1len;
1384 }
1385
e913b5cd 1386 /* If we need to check for overflow, we can only do half wide
1387 multiplies quickly because we need to look at the top bits to
1388 check for the overflow. */
77d15f37 1389 if ((high || needs_overflow)
e913b5cd 1390 && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1391 {
5b2cae25 1392 unsigned HOST_WIDE_INT r;
e913b5cd 1393
1394 if (sgn == SIGNED)
1395 {
25b6c9eb 1396 o0 = op1.to_shwi ();
1397 o1 = op2.to_shwi ();
e913b5cd 1398 }
1399 else
1400 {
25b6c9eb 1401 o0 = op1.to_uhwi ();
1402 o1 = op2.to_uhwi ();
e913b5cd 1403 }
1404
1405 r = o0 * o1;
1406 if (needs_overflow)
1407 {
e913b5cd 1408 if (sgn == SIGNED)
1409 {
e4712d1e 1410 if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
30b5769f 1411 /* FIXME: Signed overflow type is not implemented yet. */
1412 *overflow = OVF_UNKNOWN;
e913b5cd 1413 }
1414 else
5b2cae25 1415 {
1416 if ((r >> prec) != 0)
30b5769f 1417 /* Unsigned overflow can only be +OVERFLOW. */
1418 *overflow = OVF_OVERFLOW;
5b2cae25 1419 }
e913b5cd 1420 }
5b2cae25 1421 val[0] = high ? r >> prec : r;
796b6678 1422 return 1;
e913b5cd 1423 }
1424
1425 /* We do unsigned mul and then correct it. */
74eae799 1426 wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
1427 wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
e913b5cd 1428
1429 /* The 2 is for a full mult. */
1430 memset (r, 0, half_blocks_needed * 2
5a2e4914 1431 * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
e913b5cd 1432
1433 for (j = 0; j < half_blocks_needed; j++)
1434 {
1435 k = 0;
1436 for (i = 0; i < half_blocks_needed; i++)
1437 {
1438 t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1439 + r[i + j] + k);
1440 r[i + j] = t & HALF_INT_MASK;
1441 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1442 }
1443 r[j + half_blocks_needed] = k;
1444 }
1445
1446 /* We did unsigned math above. For signed we must adjust the
1447 product (assuming we need to see that). */
77d15f37 1448 if (sgn == SIGNED && (high || needs_overflow))
e913b5cd 1449 {
1450 unsigned HOST_WIDE_INT b;
25b6c9eb 1451 if (wi::neg_p (op1))
e913b5cd 1452 {
1453 b = 0;
1454 for (i = 0; i < half_blocks_needed; i++)
1455 {
1456 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1457 - (unsigned HOST_WIDE_INT)v[i] - b;
1458 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1459 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1460 }
1461 }
25b6c9eb 1462 if (wi::neg_p (op2))
e913b5cd 1463 {
1464 b = 0;
1465 for (i = 0; i < half_blocks_needed; i++)
1466 {
1467 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1468 - (unsigned HOST_WIDE_INT)u[i] - b;
1469 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1470 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1471 }
1472 }
1473 }
1474
1475 if (needs_overflow)
1476 {
1477 HOST_WIDE_INT top;
1478
1479 /* For unsigned, overflow is true if any of the top bits are set.
1480 For signed, overflow is true if any of the top bits are not equal
1481 to the sign bit. */
1482 if (sgn == UNSIGNED)
1483 top = 0;
1484 else
1485 {
1486 top = r[(half_blocks_needed) - 1];
1487 top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
1488 top &= mask;
1489 }
1490
1491 for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1492 if (((HOST_WIDE_INT)(r[i] & mask)) != top)
30b5769f 1493 /* FIXME: Signed overflow type is not implemented yet. */
1494 *overflow = (sgn == UNSIGNED) ? wi::OVF_OVERFLOW : wi::OVF_UNKNOWN;
e913b5cd 1495 }
1496
ef7be7f8 1497 int r_offset = high ? half_blocks_needed : 0;
1498 return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
e913b5cd 1499}
1500
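/* A usage sketch through the public wrapper, with 8-bit operands:

     wi::overflow_type ovf;
     wide_int a = wi::uhwi (200, 8);
     wide_int b = wi::uhwi (2, 8);
     wide_int p = wi::mul (a, b, UNSIGNED, &ovf);

   p then represents 144 (400 mod 256) and ovf is wi::OVF_OVERFLOW,
   since the true product does not fit in 8 bits.  */
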
796b6678 1501/* Compute the population count of X. */
1502int
1503wi::popcount (const wide_int_ref &x)
e913b5cd 1504{
796b6678 1505 unsigned int i;
e913b5cd 1506 int count;
e913b5cd 1507
e913b5cd 1508 /* The high order block is special if it is the last block and the
1509 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1510 have to clear out any ones above the precision before doing
1511 popcount on this block. */
796b6678 1512 count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1513 unsigned int stop = x.len;
1514 if (count < 0)
e913b5cd 1515 {
796b6678 1516 count = popcount_hwi (x.uhigh () << -count);
1517 stop -= 1;
e913b5cd 1518 }
1519 else
1520 {
796b6678 1521 if (x.sign_mask () >= 0)
1522 count = 0;
e913b5cd 1523 }
1524
796b6678 1525 for (i = 0; i < stop; ++i)
1526 count += popcount_hwi (x.val[i]);
e913b5cd 1527
796b6678 1528 return count;
e913b5cd 1529}
1530
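/* E.g. the 8-bit value -1 is stored as a single block of 64 ones
   (assuming 64-bit HOST_WIDE_INT), but only the 8 bits within the
   precision are counted: wi::popcount (wi::shwi (-1, 8)) is 8.  */
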
796b6678 1531/* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1532 whether the result overflows when OP0 and OP1 are treated as having
1533 signedness SGN. Return the number of blocks in VAL. */
1534unsigned int
1535wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1536 unsigned int op0len, const HOST_WIDE_INT *op1,
1537 unsigned int op1len, unsigned int prec,
30b5769f 1538 signop sgn, wi::overflow_type *overflow)
e913b5cd 1539{
e913b5cd 1540 unsigned HOST_WIDE_INT o0 = 0;
1541 unsigned HOST_WIDE_INT o1 = 0;
1542 unsigned HOST_WIDE_INT x = 0;
1543 /* We implement subtraction as an in place negate and add. Negation
1544 is just inversion and add 1, so we can do the add of 1 by just
1545 starting the borrow in of the first element at 1. */
1546 unsigned HOST_WIDE_INT borrow = 0;
1547 unsigned HOST_WIDE_INT old_borrow = 0;
1548
1549 unsigned HOST_WIDE_INT mask0, mask1;
5b2cae25 1550 unsigned int i;
e913b5cd 1551
796b6678 1552 unsigned int len = MAX (op0len, op1len);
5b2cae25 1553 mask0 = -top_bit_of (op0, op0len, prec);
1554 mask1 = -top_bit_of (op1, op1len, prec);
e913b5cd 1555
1556 /* Subtract all of the explicitly defined elements. */
796b6678 1557 for (i = 0; i < len; i++)
e913b5cd 1558 {
1559 o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1560 o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1561 x = o0 - o1 - borrow;
796b6678 1562 val[i] = x;
e913b5cd 1563 old_borrow = borrow;
1564 borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1565 }
1566
796b6678 1567 if (len * HOST_BITS_PER_WIDE_INT < prec)
e913b5cd 1568 {
796b6678 1569 val[len] = mask0 - mask1 - borrow;
1570 len++;
e913b5cd 1571 if (overflow)
30b5769f 1572 *overflow = (sgn == UNSIGNED && borrow) ? OVF_UNDERFLOW : OVF_NONE;
e913b5cd 1573 }
1574 else if (overflow)
1575 {
5b2cae25 1576 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
e913b5cd 1577 if (sgn == SIGNED)
1578 {
5b2cae25 1579 unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
30b5769f 1580 if ((HOST_WIDE_INT) (x << shift) < 0)
1581 {
1582 if (o0 > o1)
1583 *overflow = OVF_UNDERFLOW;
1584 else if (o0 < o1)
1585 *overflow = OVF_OVERFLOW;
1586 else
1587 *overflow = OVF_NONE;
1588 }
1589 else
1590 *overflow = OVF_NONE;
e913b5cd 1591 }
1592 else
1593 {
5b2cae25 1594 /* Put the MSBs of X and O0 in the top of the HWI. */
1595 x <<= shift;
1596 o0 <<= shift;
e913b5cd 1597 if (old_borrow)
30b5769f 1598 *overflow = (x >= o0) ? OVF_UNDERFLOW : OVF_NONE;
e913b5cd 1599 else
30b5769f 1600 *overflow = (x > o0) ? OVF_UNDERFLOW : OVF_NONE;
e913b5cd 1601 }
1602 }
1603
796b6678 1604 return canonize (val, len, prec);
e913b5cd 1605}
1606
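/* A usage sketch through the public wrapper, with 8-bit operands:

     wi::overflow_type ovf;
     wide_int d = wi::sub (wi::uhwi (5, 8), wi::uhwi (10, 8),
                           UNSIGNED, &ovf);

   d is then 251 (5 - 10 mod 256) and ovf is wi::OVF_UNDERFLOW.  */
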
1607
1608/*
1609 * Division and Mod
1610 */
1611
1612/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1613 algorithm is a small modification of the algorithm in Hacker's
1614 Delight by Warren, which itself is a small modification of Knuth's
 1615 algorithm. M is the number of significant elements of B_DIVIDEND;
 1616 however, there needs to be at least one extra element of B_DIVIDEND
 1617 allocated. N is the number of elements of B_DIVISOR. */
796b6678 1618static void
1619divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1620 unsigned HOST_HALF_WIDE_INT *b_remainder,
1621 unsigned HOST_HALF_WIDE_INT *b_dividend,
1622 unsigned HOST_HALF_WIDE_INT *b_divisor,
74eae799 1623 int m, int n)
e913b5cd 1624{
 1625 /* The "digits" are HOST_HALF_WIDE_INTs, each the size of half of a
 1626 HOST_WIDE_INT and stored in the lower bits of each word. This
1627 algorithm should work properly on both 32 and 64 bit
1628 machines. */
1629 unsigned HOST_WIDE_INT b
1630 = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
1631 unsigned HOST_WIDE_INT qhat; /* Estimate of quotient digit. */
1632 unsigned HOST_WIDE_INT rhat; /* A remainder. */
1633 unsigned HOST_WIDE_INT p; /* Product of two digits. */
74eae799 1634 HOST_WIDE_INT t, k;
1635 int i, j, s;
e913b5cd 1636
1637 /* Single digit divisor. */
1638 if (n == 1)
1639 {
1640 k = 0;
1641 for (j = m - 1; j >= 0; j--)
1642 {
1643 b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1644 k = ((k * b + b_dividend[j])
1645 - ((unsigned HOST_WIDE_INT)b_quotient[j]
1646 * (unsigned HOST_WIDE_INT)b_divisor[0]));
1647 }
1648 b_remainder[0] = k;
1649 return;
1650 }
1651
1652 s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1653
1654 if (s)
1655 {
1656 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1657 algorithm, we can overwrite b_dividend and b_divisor, so we do
1658 that. */
1659 for (i = n - 1; i > 0; i--)
1660 b_divisor[i] = (b_divisor[i] << s)
1661 | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1662 b_divisor[0] = b_divisor[0] << s;
0ebd4fb5 1663
e913b5cd 1664 b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1665 for (i = m - 1; i > 0; i--)
1666 b_dividend[i] = (b_dividend[i] << s)
1667 | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1668 b_dividend[0] = b_dividend[0] << s;
1669 }
1670
1671 /* Main loop. */
1672 for (j = m - n; j >= 0; j--)
1673 {
1674 qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1675 rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1676 again:
1677 if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1678 {
1679 qhat -= 1;
1680 rhat += b_divisor[n-1];
1681 if (rhat < b)
1682 goto again;
1683 }
1684
1685 /* Multiply and subtract. */
1686 k = 0;
1687 for (i = 0; i < n; i++)
1688 {
1689 p = qhat * b_divisor[i];
1690 t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1691 b_dividend[i + j] = t;
1692 k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1693 - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1694 }
1695 t = b_dividend[j+n] - k;
1696 b_dividend[j+n] = t;
1697
1698 b_quotient[j] = qhat;
1699 if (t < 0)
1700 {
1701 b_quotient[j] -= 1;
1702 k = 0;
1703 for (i = 0; i < n; i++)
1704 {
1705 t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1706 b_dividend[i+j] = t;
1707 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1708 }
1709 b_dividend[j+n] += k;
1710 }
1711 }
1712 if (s)
1713 for (i = 0; i < n; i++)
1714 b_remainder[i] = (b_dividend[i] >> s)
1715 | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1716 else
1717 for (i = 0; i < n; i++)
1718 b_remainder[i] = b_dividend[i];
1719}
1720
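/* A worked single-digit example, assuming 64-bit HOST_WIDE_INT so that
   the half-digit base b is 2^32: dividing the two-digit dividend
   { 5, 1 } (the value 1*b + 5) by the divisor { 7 } runs the n == 1
   loop with j = 1: q = 1/7 = 0, k = 1; then j = 0:
   q = (b + 5)/7 = 613566757, k = 2.  The quotient digits are
   { 613566757, 0 } and the remainder is { 2 }.  */
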
1721
796b6678 1722/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1723 the result. If QUOTIENT is nonnull, store the value of the quotient
1724 there and return the number of blocks in it. The return value is
1725 not defined otherwise. If REMAINDER is nonnull, store the value
1726 of the remainder there and store the number of blocks in
1727 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1728 the division overflowed. */
1729unsigned int
1730wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
d1314cdb 1731 HOST_WIDE_INT *remainder,
1732 const HOST_WIDE_INT *dividend_val,
796b6678 1733 unsigned int dividend_len, unsigned int dividend_prec,
d1314cdb 1734 const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
796b6678 1735 unsigned int divisor_prec, signop sgn,
30b5769f 1736 wi::overflow_type *oflow)
796b6678 1737{
1738 unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1739 unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
e913b5cd 1740 unsigned HOST_HALF_WIDE_INT
1741 b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1742 unsigned HOST_HALF_WIDE_INT
1743 b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1744 unsigned HOST_HALF_WIDE_INT
1745 b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1746 unsigned HOST_HALF_WIDE_INT
1747 b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
796b6678 1748 unsigned int m, n;
e913b5cd 1749 bool dividend_neg = false;
1750 bool divisor_neg = false;
1751 bool overflow = false;
d1314cdb 1752 wide_int neg_dividend, neg_divisor;
e913b5cd 1753
d1314cdb 1754 wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1755 dividend_prec);
1756 wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1757 divisor_prec);
1758 if (divisor == 0)
e913b5cd 1759 overflow = true;
1760
d1314cdb 1761 /* The smallest signed number / -1 causes overflow. The dividend_len
1762 check is for speed rather than correctness. */
5b2cae25 1763 if (sgn == SIGNED
1764 && dividend_len == BLOCKS_NEEDED (dividend_prec)
d1314cdb 1765 && divisor == -1
1766 && wi::only_sign_bit_p (dividend))
1767 overflow = true;
e913b5cd 1768
3a343682 1769 /* Handle the overflow cases. Viewed as unsigned value, the quotient of
1770    (signed min / -1) has the same representation as the original dividend.
1771 We have traditionally made division by zero act as division by one,
1772 so there too we use the original dividend. */
e913b5cd 1773 if (overflow)
1774 {
796b6678 1775 if (remainder)
e913b5cd 1776 {
796b6678 1777 *remainder_len = 1;
1778 remainder[0] = 0;
e913b5cd 1779 }
30b5769f 1780 if (oflow)
1781 *oflow = OVF_OVERFLOW;
796b6678 1782 if (quotient)
3a343682 1783 for (unsigned int i = 0; i < dividend_len; ++i)
1784 quotient[i] = dividend_val[i];
1785 return dividend_len;
e913b5cd 1786 }
1787
796b6678 1788 if (oflow)
30b5769f 1789 *oflow = OVF_NONE;
796b6678 1790
e913b5cd 1791  /* Do the division on the host if both operands fit in a HOST_WIDE_INT.  */
d1314cdb 1792 if (sgn == SIGNED
1793 && wi::fits_shwi_p (dividend)
1794 && wi::fits_shwi_p (divisor))
e913b5cd 1795 {
d1314cdb 1796 HOST_WIDE_INT o0 = dividend.to_shwi ();
1797 HOST_WIDE_INT o1 = divisor.to_shwi ();
e913b5cd 1798
d1314cdb 1799 if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1800 {
1801 gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
796b6678 1802 if (quotient)
d1314cdb 1803 {
1804 quotient[0] = HOST_WIDE_INT_MIN;
1805 quotient[1] = 0;
1806 }
796b6678 1807 if (remainder)
1808 {
d1314cdb 1809 remainder[0] = 0;
796b6678 1810 *remainder_len = 1;
1811 }
d1314cdb 1812 return 2;
e913b5cd 1813 }
1814 else
1815 {
796b6678 1816 if (quotient)
5b2cae25 1817 quotient[0] = o0 / o1;
796b6678 1818 if (remainder)
1819 {
5b2cae25 1820 remainder[0] = o0 % o1;
796b6678 1821 *remainder_len = 1;
1822 }
d1314cdb 1823 return 1;
e913b5cd 1824 }
d1314cdb 1825 }
1826
1827 if (sgn == UNSIGNED
1828 && wi::fits_uhwi_p (dividend)
1829 && wi::fits_uhwi_p (divisor))
1830 {
1831 unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1832 unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
3b8b902b 1833 unsigned int quotient_len = 1;
e913b5cd 1834
d1314cdb 1835 if (quotient)
3b8b902b 1836 {
1837 quotient[0] = o0 / o1;
70bca70e 1838 quotient_len = canonize_uhwi (quotient, dividend_prec);
3b8b902b 1839 }
d1314cdb 1840 if (remainder)
1841 {
1842 remainder[0] = o0 % o1;
70bca70e 1843 *remainder_len = canonize_uhwi (remainder, dividend_prec);
d1314cdb 1844 }
3b8b902b 1845 return quotient_len;
e913b5cd 1846 }
1847
1848 /* Make the divisor and dividend positive and remember what we
1849 did. */
1850 if (sgn == SIGNED)
1851 {
d1314cdb 1852 if (wi::neg_p (dividend))
e913b5cd 1853 {
d1314cdb 1854 neg_dividend = -dividend;
1855 dividend = neg_dividend;
e913b5cd 1856 dividend_neg = true;
1857 }
d1314cdb 1858 if (wi::neg_p (divisor))
e913b5cd 1859 {
d1314cdb 1860 neg_divisor = -divisor;
1861 divisor = neg_divisor;
e913b5cd 1862 divisor_neg = true;
1863 }
1864 }
1865
74eae799 1866 wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1867 dividend_blocks_needed, dividend_prec, sgn);
1868 wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1869 divisor_blocks_needed, divisor_prec, sgn);
e913b5cd 1870
74eae799 1871 m = dividend_blocks_needed;
e80d050d 1872 b_dividend[m] = 0;
74eae799 1873 while (m > 1 && b_dividend[m - 1] == 0)
1874 m--;
e913b5cd 1875
74eae799 1876 n = divisor_blocks_needed;
1877 while (n > 1 && b_divisor[n - 1] == 0)
e913b5cd 1878 n--;
1879
1880 memset (b_quotient, 0, sizeof (b_quotient));
1881
1882 divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1883
796b6678 1884 unsigned int quotient_len = 0;
1885 if (quotient)
e913b5cd 1886 {
ef7be7f8 1887 quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
e913b5cd 1888 /* The quotient is neg if exactly one of the divisor or dividend is
1889 neg. */
1890 if (dividend_neg != divisor_neg)
796b6678 1891 quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1892 quotient_len, dividend_prec,
1893 UNSIGNED, 0);
e913b5cd 1894 }
e913b5cd 1895
796b6678 1896 if (remainder)
e913b5cd 1897 {
ef7be7f8 1898 *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
e913b5cd 1899 /* The remainder is always the same sign as the dividend. */
1900 if (dividend_neg)
796b6678 1901 *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1902 *remainder_len, dividend_prec,
1903 UNSIGNED, 0);
e913b5cd 1904 }
e913b5cd 1905
796b6678 1906 return quotient_len;
e913b5cd 1907}
1908
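/* A minimal usage sketch, for illustration only.  It assumes the
   wi::div_trunc and wi::mod_trunc wrappers from wide-int.h, both of
   which funnel into divmod_internal:

     wi::overflow_type ovf;
     wide_int q = wi::div_trunc (wi::shwi (7, 32), wi::shwi (3, 32),
                                 SIGNED, &ovf);
     wide_int r = wi::mod_trunc (wi::shwi (7, 32), wi::shwi (3, 32),
                                 SIGNED);

   Q is 2, R is 1 and OVF is wi::OVF_NONE: the quotient truncates
   towards zero and the remainder takes the sign of the dividend.  */
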
796b6678 1909/*
1910 * Shifting, rotating and extraction.
1911 */
e913b5cd 1912
796b6678 1913/* Left shift XVAL by SHIFT and store the result in VAL. Return the
1914 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
1915unsigned int
1916wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1917 unsigned int xlen, unsigned int precision,
1918 unsigned int shift)
1919{
1920 /* Split the shift into a whole-block shift and a subblock shift. */
1921 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1922 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1923
1924 /* The whole-block shift fills with zeros. */
1925 unsigned int len = BLOCKS_NEEDED (precision);
1926 for (unsigned int i = 0; i < skip; ++i)
1927 val[i] = 0;
1928
1929 /* It's easier to handle the simple block case specially. */
1930 if (small_shift == 0)
1931 for (unsigned int i = skip; i < len; ++i)
1932 val[i] = safe_uhwi (xval, xlen, i - skip);
1933 else
e913b5cd 1934 {
796b6678 1935 /* The first unfilled output block is a left shift of the first
1936 block in XVAL. The other output blocks contain bits from two
1937 consecutive input blocks. */
1938 unsigned HOST_WIDE_INT carry = 0;
1939 for (unsigned int i = skip; i < len; ++i)
1940 {
1941 unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
1942 val[i] = (x << small_shift) | carry;
1943 carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
1944 }
e913b5cd 1945 }
796b6678 1946 return canonize (val, len, precision);
e913b5cd 1947}
1948
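/* For example, in terms of the public wi::lshift wrapper (a sketch;
   wi::lshift is the usual entry point for this routine):

     wi::lshift (wi::uhwi (0x81, 8), 1)

   yields 0x02 in 8 bits: the bit shifted beyond the 8-bit precision
   is simply lost.  */
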
796b6678 1949/* Right shift XVAL by SHIFT and store the result in VAL. Return the
1950 number of blocks in VAL. The input has XPRECISION bits and the
1951 output has XPRECISION - SHIFT bits. */
1952static unsigned int
1953rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1954 unsigned int xlen, unsigned int xprecision,
1955 unsigned int shift)
e913b5cd 1956{
796b6678 1957 /* Split the shift into a whole-block shift and a subblock shift. */
1958 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1959 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
e913b5cd 1960
796b6678 1961 /* Work out how many blocks are needed to store the significant bits
1962 (excluding the upper zeros or signs). */
1963 unsigned int len = BLOCKS_NEEDED (xprecision - shift);
e913b5cd 1964
796b6678 1965 /* It's easier to handle the simple block case specially. */
1966 if (small_shift == 0)
1967 for (unsigned int i = 0; i < len; ++i)
1968 val[i] = safe_uhwi (xval, xlen, i + skip);
e913b5cd 1969 else
1970 {
796b6678 1971 /* Each output block but the last is a combination of two input blocks.
1972 The last block is a right shift of the last block in XVAL. */
1973 unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
1974 for (unsigned int i = 0; i < len; ++i)
1975 {
1976 val[i] = curr >> small_shift;
1977 curr = safe_uhwi (xval, xlen, i + skip + 1);
1978 val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
1979 }
e913b5cd 1980 }
796b6678 1981 return len;
1982}
e913b5cd 1983
796b6678 1984/* Logically right shift XVAL by SHIFT and store the result in VAL.
1985 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1986 VAL has PRECISION bits. */
1987unsigned int
1988wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1989 unsigned int xlen, unsigned int xprecision,
1990 unsigned int precision, unsigned int shift)
1991{
1992 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
e913b5cd 1993
796b6678 1994 /* The value we just created has precision XPRECISION - SHIFT.
1995 Zero-extend it to wider precisions. */
1996 if (precision > xprecision - shift)
1997 {
1998 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1999 if (small_prec)
2000 val[len - 1] = zext_hwi (val[len - 1], small_prec);
2001 else if (val[len - 1] < 0)
2002 {
2003 /* Add a new block with a zero. */
2004 val[len++] = 0;
2005 return len;
2006 }
e913b5cd 2007 }
796b6678 2008 return canonize (val, len, precision);
e913b5cd 2009}
2010
796b6678 2011/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
2012 Return the number of blocks in VAL. XVAL has XPRECISION bits and
2013 VAL has PRECISION bits. */
2014unsigned int
2015wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
2016 unsigned int xlen, unsigned int xprecision,
2017 unsigned int precision, unsigned int shift)
e913b5cd 2018{
796b6678 2019 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
e913b5cd 2020
796b6678 2021 /* The value we just created has precision XPRECISION - SHIFT.
2022    Sign-extend it to wider precisions. */
2023 if (precision > xprecision - shift)
e913b5cd 2024 {
796b6678 2025 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
2026 if (small_prec)
2027 val[len - 1] = sext_hwi (val[len - 1], small_prec);
e913b5cd 2028 }
796b6678 2029 return canonize (val, len, precision);
2030}
e913b5cd 2031
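/* A sketch contrasting the two, using the public wi::lrshift and
   wi::arshift wrappers from wide-int.h:

     wide_int x = wi::shwi (-16, 8);     bit pattern 0xf0
     wi::lrshift (x, 2)                  is 0x3c (60): zeros shift in
     wi::arshift (x, 2)                  is 0xfc (-4): sign bits shift in  */
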
796b6678 2032/* Return the number of leading (upper) zeros in X. */
2033int
2034wi::clz (const wide_int_ref &x)
2035{
2036  /* Calculate how many bits there are above the highest represented block. */
2037 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2038
2039 unsigned HOST_WIDE_INT high = x.uhigh ();
2040 if (count < 0)
2041 /* The upper -COUNT bits of HIGH are not part of the value.
2042 Clear them out. */
2043 high = (high << -count) >> -count;
2044 else if (x.sign_mask () < 0)
2045 /* The upper bit is set, so there are no leading zeros. */
2046 return 0;
e913b5cd 2047
796b6678 2048 /* We don't need to look below HIGH. Either HIGH is nonzero,
2049 or the top bit of the block below is nonzero; clz_hwi is
2050 HOST_BITS_PER_WIDE_INT in the latter case. */
2051 return count + clz_hwi (high);
e913b5cd 2052}
2053
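/* For example, at 32-bit precision, wi::clz (wi::shwi (1, 32)) is 31,
   wi::clz (wi::shwi (0, 32)) is 32 and wi::clz (wi::shwi (-1, 32))
   is 0.  */
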
796b6678 2054/* Return the number of redundant sign bits in X. (That is, the number
2055 of bits immediately below the sign bit that have the same value as
2056 the sign bit.) */
2057int
2058wi::clrsb (const wide_int_ref &x)
e913b5cd 2059{
796b6678 2060  /* Calculate how many bits there are above the highest represented block. */
2061 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
e913b5cd 2062
796b6678 2063 unsigned HOST_WIDE_INT high = x.uhigh ();
2064 unsigned HOST_WIDE_INT mask = -1;
2065 if (count < 0)
e913b5cd 2066 {
796b6678 2067 /* The upper -COUNT bits of HIGH are not part of the value.
2068 Clear them from both MASK and HIGH. */
2069 mask >>= -count;
2070 high &= mask;
e913b5cd 2071 }
2072
796b6678 2073  /* If the top bit is set, count the number of leading 1s.  If the top
2074     bit is clear, count the number of leading zeros. */
2075 if (high > mask / 2)
2076 high ^= mask;
e913b5cd 2077
796b6678 2078 /* There are no sign bits below the top block, so we don't need to look
2079 beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2080 HIGH is 0. */
2081 return count + clz_hwi (high) - 1;
2082}
e913b5cd 2083
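/* For example, at 32-bit precision (matching __builtin_clrsb),
   wi::clrsb (wi::shwi (0, 32)) and wi::clrsb (wi::shwi (-1, 32)) are
   both 31, while wi::clrsb (wi::shwi (1, 32)) is 30.  */
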
796b6678 2084/* Return the number of trailing (lower) zeros in X. */
2085int
2086wi::ctz (const wide_int_ref &x)
2087{
2088 if (x.len == 1 && x.ulow () == 0)
1cee90ad 2089 return x.precision;
e913b5cd 2090
796b6678 2091 /* Having dealt with the zero case, there must be a block with a
2092 nonzero bit. We don't care about the bits above the first 1. */
2093 unsigned int i = 0;
2094 while (x.val[i] == 0)
2095 ++i;
2096 return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
e913b5cd 2097}
2098
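/* For example, wi::ctz (wi::shwi (8, 32)) is 3, and, as handled by
   the zero case above, wi::ctz (wi::shwi (0, 32)) is the full
   precision, 32.  */
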
796b6678 2099/* If X is an exact power of 2, return the base-2 logarithm, otherwise
2100 return -1. */
2101int
2102wi::exact_log2 (const wide_int_ref &x)
e913b5cd 2103{
796b6678 2104 /* Reject cases where there are implicit -1 blocks above HIGH. */
2105 if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2106 return -1;
e913b5cd 2107
796b6678 2108 /* Set CRUX to the index of the entry that should be nonzero.
2109 If the top block is zero then the next lowest block (if any)
2110 must have the high bit set. */
2111 unsigned int crux = x.len - 1;
2112 if (crux > 0 && x.val[crux] == 0)
2113 crux -= 1;
2114
2115 /* Check that all lower blocks are zero. */
2116 for (unsigned int i = 0; i < crux; ++i)
2117 if (x.val[i] != 0)
2118 return -1;
2119
2120 /* Get a zero-extended form of block CRUX. */
2121 unsigned HOST_WIDE_INT hwi = x.val[crux];
e9880ac8 2122 if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
796b6678 2123 hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2124
2125 /* Now it's down to whether HWI is a power of 2. */
2126 int res = ::exact_log2 (hwi);
2127 if (res >= 0)
2128 res += crux * HOST_BITS_PER_WIDE_INT;
2129 return res;
2130}
e913b5cd 2131
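/* For example, wi::exact_log2 (wi::shwi (8, 32)) is 3, while
   wi::exact_log2 (wi::shwi (6, 32)) and wi::exact_log2 (wi::shwi (0, 32))
   are both -1.  */
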
796b6678 2132/* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2133int
2134wi::floor_log2 (const wide_int_ref &x)
2135{
2136 return x.precision - 1 - clz (x);
2137}
e913b5cd 2138
796b6678 2139/* Return the index of the first (lowest) set bit in X, counting from 1.
2140 Return 0 if X is 0. */
2141int
2142wi::ffs (const wide_int_ref &x)
2143{
2144 return eq_p (x, 0) ? 0 : ctz (x) + 1;
2145}
e913b5cd 2146
796b6678 2147/* Return true if sign-extending X to have precision PRECISION would give
2148 the minimum signed value at that precision. */
2149bool
2150wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2151{
2152 return ctz (x) + 1 == int (precision);
2153}
e913b5cd 2154
796b6678 2155/* Return true if X represents the minimum signed value. */
2156bool
2157wi::only_sign_bit_p (const wide_int_ref &x)
2158{
2159 return only_sign_bit_p (x, x.precision);
e913b5cd 2160}
2161
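/* For example, wi::only_sign_bit_p (wi::min_value (32, SIGNED)) is
   true, whereas wi::only_sign_bit_p (wi::shwi (-1, 32)) is false.  */
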
a5c5f57e 2162/* Return VAL if VAL has no bits set outside MASK. Otherwise round VAL
2163 down to the previous value that has no bits set outside MASK.
2164 This rounding wraps for signed values if VAL is negative and
2165 the top bit of MASK is clear.
2166
2167 For example, round_down_for_mask (6, 0xf1) would give 1 and
2168 round_down_for_mask (24, 0xf1) would give 17. */
2169
2170wide_int
2171wi::round_down_for_mask (const wide_int &val, const wide_int &mask)
2172{
2173 /* Get the bits in VAL that are outside the mask. */
2174 wide_int extra_bits = wi::bit_and_not (val, mask);
2175 if (extra_bits == 0)
2176 return val;
2177
2178 /* Get a mask that includes the top bit in EXTRA_BITS and is all 1s
2179 below that bit. */
2180 unsigned int precision = val.get_precision ();
2181 wide_int lower_mask = wi::mask (precision - wi::clz (extra_bits),
2182 false, precision);
2183
2184 /* Clear the bits that aren't in MASK, but ensure that all bits
2185 in MASK below the top cleared bit are set. */
2186 return (val & mask) | (mask & lower_mask);
2187}
2188
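/* A worked instance of the steps above: round_down_for_mask (24, 0xf1)
   gives 17 because

     extra_bits = 24 & ~0xf1           = 0x08  (bits outside the mask)
     lower_mask = ones through bit 3   = 0x0f
     result     = (24 & 0xf1) | (0xf1 & 0x0f) = 0x10 | 0x01 = 17.  */
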
2189/* Return VAL if VAL has no bits set outside MASK. Otherwise round VAL
2190 up to the next value that has no bits set outside MASK. The rounding
2191 wraps if there are no suitable values greater than VAL.
2192
2193 For example, round_up_for_mask (6, 0xf1) would give 16 and
2194 round_up_for_mask (24, 0xf1) would give 32. */
2195
2196wide_int
2197wi::round_up_for_mask (const wide_int &val, const wide_int &mask)
2198{
2199 /* Get the bits in VAL that are outside the mask. */
2200 wide_int extra_bits = wi::bit_and_not (val, mask);
2201 if (extra_bits == 0)
2202 return val;
2203
2204 /* Get a mask that is all 1s above the top bit in EXTRA_BITS. */
2205 unsigned int precision = val.get_precision ();
2206 wide_int upper_mask = wi::mask (precision - wi::clz (extra_bits),
2207 true, precision);
2208
2209 /* Get the bits of the mask that are above the top bit in EXTRA_BITS. */
2210 upper_mask &= mask;
2211
2212 /* Conceptually we need to:
2213
2214 - clear bits of VAL outside UPPER_MASK
2215 - add the lowest bit in UPPER_MASK to VAL (or add 0 if UPPER_MASK is 0)
2216 - propagate the carry through the bits of VAL in UPPER_MASK
2217
2218 If (~VAL & UPPER_MASK) is nonzero, the carry eventually
2219 reaches that bit and the process leaves all lower bits clear.
2220 If (~VAL & UPPER_MASK) is zero then the result is also zero. */
2221 wide_int tmp = wi::bit_and_not (upper_mask, val);
2222
2223 return (val | tmp) & -tmp;
2224}
2225
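/* A worked instance of the steps above: round_up_for_mask (6, 0xf1)
   gives 16 because

     extra_bits = 6 & ~0xf1               = 0x06  (bits outside the mask)
     upper_mask = ones above bit 2 & 0xf1 = 0xf0
     tmp        = upper_mask & ~6         = 0xf0
     result     = (6 | 0xf0) & -0xf0      = 0x10.  */
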
e913b5cd 2226/*
2227 * Private utilities.
2228 */
2229
5de9d3ed 2230void gt_ggc_mx (widest_int *) { }
2231void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2232void gt_pch_nx (widest_int *) { }
c2393a0b 2233
2234template void wide_int::dump () const;
2235template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2236template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2237template void offset_int::dump () const;
2238template void widest_int::dump () const;
99b4f3a2 2239
0d87e1c4 2240/* We could add all the above ::dump variants here, but wide_int and
2241 widest_int should handle the common cases. Besides, you can always
2242 call the dump method directly. */
2243
2244DEBUG_FUNCTION void
2245debug (const wide_int &ref)
2246{
2247 ref.dump ();
2248}
2249
2250DEBUG_FUNCTION void
2251debug (const wide_int *ptr)
2252{
2253 if (ptr)
2254 debug (*ptr);
2255 else
2256 fprintf (stderr, "<nil>\n");
2257}
2258
2259DEBUG_FUNCTION void
2260debug (const widest_int &ref)
2261{
2262 ref.dump ();
2263}
2264
2265DEBUG_FUNCTION void
2266debug (const widest_int *ptr)
2267{
2268 if (ptr)
2269 debug (*ptr);
2270 else
2271 fprintf (stderr, "<nil>\n");
2272}
99b4f3a2 2273
2274#if CHECKING_P
2275
2276namespace selftest {
2277
2278/* Selftests for wide ints. We run these multiple times, once per type. */
2279
2280/* Helper function for building a test value. */
2281
2282template <class VALUE_TYPE>
2283static VALUE_TYPE
2284from_int (int i);
2285
2286 /* Specializations of from_int for each wide-int type. */
2287
2288/* Specialization for VALUE_TYPE == wide_int. */
2289
2290template <>
2291wide_int
2292from_int (int i)
2293{
2294 return wi::shwi (i, 32);
2295}
2296
2297/* Specialization for VALUE_TYPE == offset_int. */
2298
2299template <>
2300offset_int
2301from_int (int i)
2302{
2303 return offset_int (i);
2304}
2305
2306/* Specialization for VALUE_TYPE == widest_int. */
2307
2308template <>
2309widest_int
2310from_int (int i)
2311{
2312 return widest_int (i);
2313}
2314
2315/* Verify that print_dec (WI, ..., SGN) gives the expected string
2316 representation (using base 10). */
2317
2318static void
2319assert_deceq (const char *expected, const wide_int_ref &wi, signop sgn)
2320{
2321 char buf[WIDE_INT_PRINT_BUFFER_SIZE];
2322 print_dec (wi, buf, sgn);
2323 ASSERT_STREQ (expected, buf);
2324}
2325
2326/* Likewise for base 16. */
2327
2328static void
2329assert_hexeq (const char *expected, const wide_int_ref &wi)
2330{
2331 char buf[WIDE_INT_PRINT_BUFFER_SIZE];
2332 print_hex (wi, buf);
2333 ASSERT_STREQ (expected, buf);
2334}
2335
2336/* Test cases. */
2337
2338/* Verify that print_dec and print_hex work for VALUE_TYPE. */
2339
2340template <class VALUE_TYPE>
2341static void
2342test_printing ()
2343{
2344 VALUE_TYPE a = from_int<VALUE_TYPE> (42);
2345 assert_deceq ("42", a, SIGNED);
2346 assert_hexeq ("0x2a", a);
bf39e18e 2347 assert_hexeq ("0x1fffffffffffffffff", wi::shwi (-1, 69));
2348 assert_hexeq ("0xffffffffffffffff", wi::mask (64, false, 69));
2349 assert_hexeq ("0xffffffffffffffff", wi::mask <widest_int> (64, false));
2350 if (WIDE_INT_MAX_PRECISION > 128)
2351 {
2352 assert_hexeq ("0x20000000000000000fffffffffffffffe",
2353 wi::lshift (1, 129) + wi::lshift (1, 64) - 2);
2354 assert_hexeq ("0x200000000000004000123456789abcdef",
2355 wi::lshift (1, 129) + wi::lshift (1, 74)
2356 + wi::lshift (0x1234567, 32) + 0x89abcdef);
2357 }
99b4f3a2 2358}
2359
2360/* Verify that various operations work correctly for VALUE_TYPE,
2361    unary and binary, using both function syntax and
2362    overloaded operators. */
2363
2364template <class VALUE_TYPE>
2365static void
2366test_ops ()
2367{
2368 VALUE_TYPE a = from_int<VALUE_TYPE> (7);
2369 VALUE_TYPE b = from_int<VALUE_TYPE> (3);
2370
2371 /* Using functions. */
2372 assert_deceq ("-7", wi::neg (a), SIGNED);
2373 assert_deceq ("10", wi::add (a, b), SIGNED);
2374 assert_deceq ("4", wi::sub (a, b), SIGNED);
2375 assert_deceq ("-4", wi::sub (b, a), SIGNED);
2376 assert_deceq ("21", wi::mul (a, b), SIGNED);
2377
2378 /* Using operators. */
2379 assert_deceq ("-7", -a, SIGNED);
2380 assert_deceq ("10", a + b, SIGNED);
2381 assert_deceq ("4", a - b, SIGNED);
2382 assert_deceq ("-4", b - a, SIGNED);
2383 assert_deceq ("21", a * b, SIGNED);
2384}
2385
2386/* Verify that various comparisons work correctly for VALUE_TYPE. */
2387
2388template <class VALUE_TYPE>
2389static void
2390test_comparisons ()
2391{
2392 VALUE_TYPE a = from_int<VALUE_TYPE> (7);
2393 VALUE_TYPE b = from_int<VALUE_TYPE> (3);
2394
2395 /* == */
2396 ASSERT_TRUE (wi::eq_p (a, a));
2397 ASSERT_FALSE (wi::eq_p (a, b));
2398
2399 /* != */
2400 ASSERT_TRUE (wi::ne_p (a, b));
2401 ASSERT_FALSE (wi::ne_p (a, a));
2402
2403 /* < */
2404 ASSERT_FALSE (wi::lts_p (a, a));
2405 ASSERT_FALSE (wi::lts_p (a, b));
2406 ASSERT_TRUE (wi::lts_p (b, a));
2407
2408 /* <= */
2409 ASSERT_TRUE (wi::les_p (a, a));
2410 ASSERT_FALSE (wi::les_p (a, b));
2411 ASSERT_TRUE (wi::les_p (b, a));
2412
2413 /* > */
2414 ASSERT_FALSE (wi::gts_p (a, a));
2415 ASSERT_TRUE (wi::gts_p (a, b));
2416 ASSERT_FALSE (wi::gts_p (b, a));
2417
2418 /* >= */
2419 ASSERT_TRUE (wi::ges_p (a, a));
2420 ASSERT_TRUE (wi::ges_p (a, b));
2421 ASSERT_FALSE (wi::ges_p (b, a));
2422
2423 /* comparison */
2424 ASSERT_EQ (-1, wi::cmps (b, a));
2425 ASSERT_EQ (0, wi::cmps (a, a));
2426 ASSERT_EQ (1, wi::cmps (a, b));
2427}
2428
2429/* Run all of the selftests, using the given VALUE_TYPE. */
2430
2431template <class VALUE_TYPE>
2432 static void
run_all_wide_int_tests ()
2433{
2434 test_printing <VALUE_TYPE> ();
2435 test_ops <VALUE_TYPE> ();
2436 test_comparisons <VALUE_TYPE> ();
2437}
2438
2061c393 2439/* Test overflow conditions. */
2440
2441static void
2442test_overflow ()
2443{
2444 static int precs[] = { 31, 32, 33, 63, 64, 65, 127, 128 };
2445 static int offsets[] = { 16, 1, 0 };
2446 for (unsigned int i = 0; i < ARRAY_SIZE (precs); ++i)
2447 for (unsigned int j = 0; j < ARRAY_SIZE (offsets); ++j)
2448 {
2449 int prec = precs[i];
2450 int offset = offsets[j];
30b5769f 2451 wi::overflow_type overflow;
2061c393 2452 wide_int sum, diff;
2453
2454 sum = wi::add (wi::max_value (prec, UNSIGNED) - offset, 1,
2455 UNSIGNED, &overflow);
2456 ASSERT_EQ (sum, -offset);
30b5769f 2457 ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);
2061c393 2458
2459 sum = wi::add (1, wi::max_value (prec, UNSIGNED) - offset,
2460 UNSIGNED, &overflow);
2461 ASSERT_EQ (sum, -offset);
30b5769f 2462 ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);
2061c393 2463
2464 diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
2465 wi::max_value (prec, UNSIGNED),
2466 UNSIGNED, &overflow);
2467 ASSERT_EQ (diff, -offset);
30b5769f 2468 ASSERT_EQ (overflow != wi::OVF_NONE, offset != 0);
2061c393 2469
2470 diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
2471 wi::max_value (prec, UNSIGNED) - 1,
2472 UNSIGNED, &overflow);
2473 ASSERT_EQ (diff, 1 - offset);
30b5769f 2474 ASSERT_EQ (overflow != wi::OVF_NONE, offset > 1);
2061c393 2475 }
2476}
2477
a5c5f57e 2478/* Test the round_{down,up}_for_mask functions. */
2479
2480static void
2481test_round_for_mask ()
2482{
2483 unsigned int prec = 18;
2484 ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (17, prec),
2485 wi::shwi (0xf1, prec)));
2486 ASSERT_EQ (17, wi::round_up_for_mask (wi::shwi (17, prec),
2487 wi::shwi (0xf1, prec)));
2488
2489 ASSERT_EQ (1, wi::round_down_for_mask (wi::shwi (6, prec),
2490 wi::shwi (0xf1, prec)));
2491 ASSERT_EQ (16, wi::round_up_for_mask (wi::shwi (6, prec),
2492 wi::shwi (0xf1, prec)));
2493
2494 ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (24, prec),
2495 wi::shwi (0xf1, prec)));
2496 ASSERT_EQ (32, wi::round_up_for_mask (wi::shwi (24, prec),
2497 wi::shwi (0xf1, prec)));
2498
2499 ASSERT_EQ (0x011, wi::round_down_for_mask (wi::shwi (0x22, prec),
2500 wi::shwi (0x111, prec)));
2501 ASSERT_EQ (0x100, wi::round_up_for_mask (wi::shwi (0x22, prec),
2502 wi::shwi (0x111, prec)));
2503
2504 ASSERT_EQ (100, wi::round_down_for_mask (wi::shwi (101, prec),
2505 wi::shwi (0xfc, prec)));
2506 ASSERT_EQ (104, wi::round_up_for_mask (wi::shwi (101, prec),
2507 wi::shwi (0xfc, prec)));
2508
2509 ASSERT_EQ (0x2bc, wi::round_down_for_mask (wi::shwi (0x2c2, prec),
2510 wi::shwi (0xabc, prec)));
2511 ASSERT_EQ (0x800, wi::round_up_for_mask (wi::shwi (0x2c2, prec),
2512 wi::shwi (0xabc, prec)));
2513
2514 ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0xabd, prec),
2515 wi::shwi (0xabc, prec)));
2516 ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0xabd, prec),
2517 wi::shwi (0xabc, prec)));
2518
2519 ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0x1000, prec),
2520 wi::shwi (0xabc, prec)));
2521 ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0x1000, prec),
2522 wi::shwi (0xabc, prec)));
2523}
2524
99b4f3a2 2525/* Run all of the selftests within this file, for all value types. */
2526
2527void
2528wide_int_cc_tests ()
2529{
2061c393 2530 run_all_wide_int_tests <wide_int> ();
2531 run_all_wide_int_tests <offset_int> ();
2532 run_all_wide_int_tests <widest_int> ();
2533 test_overflow ();
a5c5f57e 2534 test_round_for_mask ();
99b4f3a2 2535}
2536
2537} // namespace selftest
2538#endif /* CHECKING_P */