1 /* Operations with very long integers.
2 Copyright (C) 2012-2015 Free Software Foundation, Inc.
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "alias.h"
27 #include "dumpfile.h"
28
29
30 #define HOST_BITS_PER_HALF_WIDE_INT 32
31 #if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
32 # define HOST_HALF_WIDE_INT long
33 #elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
34 # define HOST_HALF_WIDE_INT int
35 #else
36 #error Please add support for HOST_HALF_WIDE_INT
37 #endif
38
39 #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
40 /* Do not include longlong.h when the compiler is clang-based.  See PR61146.  */
41 #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
42 typedef unsigned HOST_HALF_WIDE_INT UHWtype;
43 typedef unsigned HOST_WIDE_INT UWtype;
44 typedef unsigned int UQItype __attribute__ ((mode (QI)));
45 typedef unsigned int USItype __attribute__ ((mode (SI)));
46 typedef unsigned int UDItype __attribute__ ((mode (DI)));
47 #if W_TYPE_SIZE == 32
48 typedef unsigned int UDWtype __attribute__ ((mode (DI)));
49 #else
50 typedef unsigned int UDWtype __attribute__ ((mode (TI)));
51 #endif
52 #include "longlong.h"
53 #endif
54
55 static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
56
57 /*
58 * Internal utilities.
59 */
60
61 /* Quantities to deal with values that hold half of a wide int. Used
62 in multiply and divide. */
63 #define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
64
65 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
66 #define BLOCKS_NEEDED(PREC) \
67 (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
68 #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
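/* Illustrative values, assuming HOST_BITS_PER_WIDE_INT == 64:
   BLOCKS_NEEDED (0) == 1, BLOCKS_NEEDED (1) == 1, BLOCKS_NEEDED (64) == 1,
   BLOCKS_NEEDED (65) == 2 and BLOCKS_NEEDED (128) == 2, while SIGN_MASK (X)
   is -1 for any X whose top bit is set and 0 otherwise.  */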
69
70 /* Return the value VAL[I] if I < LEN; otherwise return 0 or -1
71    based on the top existing bit of VAL. */
72
73 static unsigned HOST_WIDE_INT
74 safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
75 {
76 return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
77 }
78
79 /* Convert the integer in VAL to canonical form, returning its new length.
80 LEN is the number of blocks currently in VAL and PRECISION is the number
81 of bits in the integer it represents.
82
83 This function only changes the representation, not the value. */
84 static unsigned int
85 canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
86 {
87 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
88 HOST_WIDE_INT top;
89 int i;
90
91 if (len > blocks_needed)
92 len = blocks_needed;
93
94 if (len == 1)
95 return len;
96
97 top = val[len - 1];
98 if (len * HOST_BITS_PER_WIDE_INT > precision)
99 val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
100 if (top != 0 && top != (HOST_WIDE_INT)-1)
101 return len;
102
103 /* At this point we know that the top is either 0 or -1. Find the
104 first block that is not a copy of this. */
105 for (i = len - 2; i >= 0; i--)
106 {
107 HOST_WIDE_INT x = val[i];
108 if (x != top)
109 {
110 if (SIGN_MASK (x) == top)
111 return i + 1;
112
113 /* We need an extra block because the top bit of block I does
114    not match the extension.  */
115 return i + 2;
116 }
117 }
118
119 /* The number is 0 or -1. */
120 return 1;
121 }
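/* A worked example of the canonical form, assuming 64-bit blocks and a
   precision of 128 bits: the two-block array {1, 0} canonizes to the single
   block {1}, because the upper block is just the sign extension of the
   non-negative low block; {-1, -1} likewise canonizes to {-1}.  By contrast
   {-1, 0} must keep both blocks, since dropping the zero block would turn
   the (positive) value 2^64 - 1 into -1.  */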
122
123 /*
124 * Conversion routines in and out of wide_int.
125 */
126
127 /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
128 result for an integer with precision PRECISION. Return the length
129    of VAL (after any canonization).  */
130 unsigned int
131 wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
132 unsigned int xlen, unsigned int precision, bool need_canon)
133 {
134 for (unsigned i = 0; i < xlen; i++)
135 val[i] = xval[i];
136 return need_canon ? canonize (val, xlen, precision) : xlen;
137 }
138
139 /* Construct a wide int from a buffer of length BUFFER_LEN.  BUFFER will be
140    read according to the byte and word endianness of the target.
141 Only the lower BUFFER_LEN bytes of the result are set; the remaining
142 high bytes are cleared. */
143 wide_int
144 wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
145 {
146 unsigned int precision = buffer_len * BITS_PER_UNIT;
147 wide_int result = wide_int::create (precision);
148 unsigned int words = buffer_len / UNITS_PER_WORD;
149
150   /* We have to clear all the bits ourselves, as we merely OR in values
151 below. */
152 unsigned int len = BLOCKS_NEEDED (precision);
153 HOST_WIDE_INT *val = result.write_val ();
154 for (unsigned int i = 0; i < len; ++i)
155 val[i] = 0;
156
157 for (unsigned int byte = 0; byte < buffer_len; byte++)
158 {
159 unsigned int offset;
160 unsigned int index;
161 unsigned int bitpos = byte * BITS_PER_UNIT;
162 unsigned HOST_WIDE_INT value;
163
164 if (buffer_len > UNITS_PER_WORD)
165 {
166 unsigned int word = byte / UNITS_PER_WORD;
167
168 if (WORDS_BIG_ENDIAN)
169 word = (words - 1) - word;
170
171 offset = word * UNITS_PER_WORD;
172
173 if (BYTES_BIG_ENDIAN)
174 offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
175 else
176 offset += byte % UNITS_PER_WORD;
177 }
178 else
179 offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
180
181 value = (unsigned HOST_WIDE_INT) buffer[offset];
182
183 index = bitpos / HOST_BITS_PER_WIDE_INT;
184 val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
185 }
186
187 result.set_len (canonize (val, len, precision));
188
189 return result;
190 }
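/* A minimal usage sketch with illustrative values; the exact numeric result
   depends on BYTES_BIG_ENDIAN and WORDS_BIG_ENDIAN for the target:

     unsigned char buf[2] = { 0x01, 0x02 };
     wide_int w = wi::from_buffer (buf, 2);

   On a little-endian target this yields a 16-bit wide_int with value 0x0201,
   since byte 0 lands in bits 0-7 and byte 1 in bits 8-15.  */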
191
192 /* Set RESULT from X; the sign is taken according to SGN.  */
193 void
194 wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
195 {
196 int len = x.get_len ();
197 const HOST_WIDE_INT *v = x.get_val ();
198 int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
199
200 if (wi::neg_p (x, sgn))
201 {
202       /* We use ones' complement to avoid the -0x80..0 edge case that
203          negation won't work on.  */
204 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
205 for (int i = 0; i < len; i++)
206 t[i] = ~v[i];
207 if (excess > 0)
208 t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
209 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
210 mpz_com (result, result);
211 }
212 else if (excess > 0)
213 {
214 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
215 for (int i = 0; i < len - 1; i++)
216 t[i] = v[i];
217 t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
218 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
219 }
220 else
221 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
222 }
223
224 /* Returns X converted to TYPE. If WRAP is true, then out-of-range
225    values of X will be wrapped; otherwise, they will be set to the
226 appropriate minimum or maximum TYPE bound. */
227 wide_int
228 wi::from_mpz (const_tree type, mpz_t x, bool wrap)
229 {
230 size_t count, numb;
231 unsigned int prec = TYPE_PRECISION (type);
232 wide_int res = wide_int::create (prec);
233
234 if (!wrap)
235 {
236 mpz_t min, max;
237
238 mpz_init (min);
239 mpz_init (max);
240 get_type_static_bounds (type, min, max);
241
242 if (mpz_cmp (x, min) < 0)
243 mpz_set (x, min);
244 else if (mpz_cmp (x, max) > 0)
245 mpz_set (x, max);
246
247 mpz_clear (min);
248 mpz_clear (max);
249 }
250
251 /* Determine the number of unsigned HOST_WIDE_INTs that are required
252 for representing the absolute value. The code to calculate count is
253 extracted from the GMP manual, section "Integer Import and Export":
254 http://gmplib.org/manual/Integer-Import-and-Export.html */
255 numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
256 count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
257 HOST_WIDE_INT *val = res.write_val ();
258 /* Read the absolute value.
259
260 Write directly to the wide_int storage if possible, otherwise leave
261 GMP to allocate the memory for us. It might be slightly more efficient
262 to use mpz_tdiv_r_2exp for the latter case, but the situation is
263 pathological and it seems safer to operate on the original mpz value
264 in all cases. */
265 void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
266 &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
267 if (count < 1)
268 {
269 val[0] = 0;
270 count = 1;
271 }
272 count = MIN (count, BLOCKS_NEEDED (prec));
273 if (valres != val)
274 {
275 memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
276 free (valres);
277 }
278 /* Zero-extend the absolute value to PREC bits. */
279 if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
280 val[count++] = 0;
281 else
282 count = canonize (val, count, prec);
283 res.set_len (count);
284
285 if (mpz_sgn (x) < 0)
286 res = -res;
287
288 return res;
289 }
290
291 /*
292 * Largest and smallest values in a mode.
293 */
294
295 /* Return the largest SGNed number that is representable in PRECISION bits.
296
297    TODO: There is still code from the double_int era that tries to
298    make up for the fact that double_ints could not represent the
299 min and max values of all types. This code should be removed
300 because the min and max values can always be represented in
301 wide_ints and int-csts. */
302 wide_int
303 wi::max_value (unsigned int precision, signop sgn)
304 {
305 gcc_checking_assert (precision != 0);
306 if (sgn == UNSIGNED)
307 /* The unsigned max is just all ones. */
308 return shwi (-1, precision);
309 else
310 /* The signed max is all ones except the top bit. This must be
311 explicitly represented. */
312 return mask (precision - 1, false, precision);
313 }
314
315 /* Return the smallest SGNed number that is representable in PRECISION bits.  */
316 wide_int
317 wi::min_value (unsigned int precision, signop sgn)
318 {
319 gcc_checking_assert (precision != 0);
320 if (sgn == UNSIGNED)
321 return uhwi (0, precision);
322 else
323 /* The signed min is all zeros except the top bit. This must be
324 explicitly represented. */
325 return wi::set_bit_in_zero (precision - 1, precision);
326 }
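/* For example, with PRECISION == 8: max_value (8, UNSIGNED) is 255 (all
   ones), max_value (8, SIGNED) is 127 (0x7f), min_value (8, UNSIGNED) is 0,
   and min_value (8, SIGNED) is the sign bit alone, 0x80, i.e. -128.  */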
327
328 /*
329 * Public utilities.
330 */
331
332 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
333 signedness SGN, to an integer that has PRECISION bits. Store the blocks
334 in VAL and return the number of blocks used.
335
336 This function can handle both extension (PRECISION > XPRECISION)
337 and truncation (PRECISION < XPRECISION). */
338 unsigned int
339 wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
340 unsigned int xlen, unsigned int xprecision,
341 unsigned int precision, signop sgn)
342 {
343 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
344 unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
345 for (unsigned i = 0; i < len; i++)
346 val[i] = xval[i];
347
348 if (precision > xprecision)
349 {
350 unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
351
352 /* Expanding. */
353 if (sgn == UNSIGNED)
354 {
355 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
356 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
357 else if (val[len - 1] < 0)
358 {
359 while (len < BLOCKS_NEEDED (xprecision))
360 val[len++] = -1;
361 if (small_xprecision)
362 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
363 else
364 val[len++] = 0;
365 }
366 }
367 else
368 {
369 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
370 val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
371 }
372 }
373 len = canonize (val, len, precision);
374
375 return len;
376 }
377
378 /* This function hides the fact that we cannot rely on the bits beyond
379    the precision.  This issue comes up in the relational comparisons
380 where we do allow comparisons of values of different precisions. */
381 static inline HOST_WIDE_INT
382 selt (const HOST_WIDE_INT *a, unsigned int len,
383 unsigned int blocks_needed, unsigned int small_prec,
384 unsigned int index, signop sgn)
385 {
386 HOST_WIDE_INT val;
387 if (index < len)
388 val = a[index];
389 else if (index < blocks_needed || sgn == SIGNED)
390 /* Signed or within the precision. */
391 val = SIGN_MASK (a[len - 1]);
392 else
393 /* Unsigned extension beyond the precision. */
394 val = 0;
395
396 if (small_prec && index == blocks_needed - 1)
397 return (sgn == SIGNED
398 ? sext_hwi (val, small_prec)
399 : zext_hwi (val, small_prec));
400 else
401 return val;
402 }
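/* For example, take A = {-1} with LEN == 1 and a precision of 16 bits, so
   BLOCKS_NEEDED == 1 and SMALL_PREC == 16.  selt (..., 0, UNSIGNED) returns
   0xffff while selt (..., 0, SIGNED) returns -1; asking for block 1 (beyond
   the precision, as the mixed-precision comparisons below may do) returns 0
   for UNSIGNED and -1 for SIGNED.  */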
403
404 /* Find the highest bit represented in a wide int. This will in
405 general have the same value as the sign bit. */
406 static inline HOST_WIDE_INT
407 top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
408 {
409 int excess = len * HOST_BITS_PER_WIDE_INT - prec;
410 unsigned HOST_WIDE_INT val = a[len - 1];
411 if (excess > 0)
412 val <<= excess;
413 return val >> (HOST_BITS_PER_WIDE_INT - 1);
414 }
415
416 /*
417 * Comparisons, note that only equality is an operator. The other
418 * comparisons cannot be operators since they are inherently signed or
419 * unsigned and C++ has no such operators.
420 */
421
422 /* Return true if OP0 == OP1. */
423 bool
424 wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
425 const HOST_WIDE_INT *op1, unsigned int op1len,
426 unsigned int prec)
427 {
428 int l0 = op0len - 1;
429 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
430
431 if (op0len != op1len)
432 return false;
433
434 if (op0len == BLOCKS_NEEDED (prec) && small_prec)
435 {
436       /* It does not matter whether we zext or sext here; we just have
437          to do both the same way.  */
438 if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
439 return false;
440 l0--;
441 }
442
443 while (l0 >= 0)
444 if (op0[l0] != op1[l0])
445 return false;
446 else
447 l0--;
448
449 return true;
450 }
451
452 /* Return true if OP0 < OP1 using signed comparisons. */
453 bool
454 wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
455 unsigned int precision,
456 const HOST_WIDE_INT *op1, unsigned int op1len)
457 {
458 HOST_WIDE_INT s0, s1;
459 unsigned HOST_WIDE_INT u0, u1;
460 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
461 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
462 int l = MAX (op0len - 1, op1len - 1);
463
464 /* Only the top block is compared as signed. The rest are unsigned
465 comparisons. */
466 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
467 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
468 if (s0 < s1)
469 return true;
470 if (s0 > s1)
471 return false;
472
473 l--;
474 while (l >= 0)
475 {
476 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
477 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
478
479 if (u0 < u1)
480 return true;
481 if (u0 > u1)
482 return false;
483 l--;
484 }
485
486 return false;
487 }
488
489 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
490 signed compares. */
491 int
492 wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
493 unsigned int precision,
494 const HOST_WIDE_INT *op1, unsigned int op1len)
495 {
496 HOST_WIDE_INT s0, s1;
497 unsigned HOST_WIDE_INT u0, u1;
498 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
499 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
500 int l = MAX (op0len - 1, op1len - 1);
501
502 /* Only the top block is compared as signed. The rest are unsigned
503 comparisons. */
504 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
505 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
506 if (s0 < s1)
507 return -1;
508 if (s0 > s1)
509 return 1;
510
511 l--;
512 while (l >= 0)
513 {
514 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
515 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
516
517 if (u0 < u1)
518 return -1;
519 if (u0 > u1)
520 return 1;
521 l--;
522 }
523
524 return 0;
525 }
526
527 /* Return true if OP0 < OP1 using unsigned comparisons. */
528 bool
529 wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
530 unsigned int precision,
531 const HOST_WIDE_INT *op1, unsigned int op1len)
532 {
533 unsigned HOST_WIDE_INT x0;
534 unsigned HOST_WIDE_INT x1;
535 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
536 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
537 int l = MAX (op0len - 1, op1len - 1);
538
539 while (l >= 0)
540 {
541 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
542 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
543 if (x0 < x1)
544 return true;
545 if (x0 > x1)
546 return false;
547 l--;
548 }
549
550 return false;
551 }
552
553 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
554 unsigned compares. */
555 int
556 wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
557 unsigned int precision,
558 const HOST_WIDE_INT *op1, unsigned int op1len)
559 {
560 unsigned HOST_WIDE_INT x0;
561 unsigned HOST_WIDE_INT x1;
562 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
563 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
564 int l = MAX (op0len - 1, op1len - 1);
565
566 while (l >= 0)
567 {
568 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
569 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
570 if (x0 < x1)
571 return -1;
572 if (x0 > x1)
573 return 1;
574 l--;
575 }
576
577 return 0;
578 }
579
580 /*
581 * Extension.
582 */
583
584 /* Sign-extend the number represented by XVAL and XLEN into VAL,
585 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
586 and VAL have PRECISION bits. */
587 unsigned int
588 wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
589 unsigned int xlen, unsigned int precision, unsigned int offset)
590 {
591 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
592 /* Extending beyond the precision is a no-op. If we have only stored
593 OFFSET bits or fewer, the rest are already signs. */
594 if (offset >= precision || len >= xlen)
595 {
596 for (unsigned i = 0; i < xlen; ++i)
597 val[i] = xval[i];
598 return xlen;
599 }
600 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
601 for (unsigned int i = 0; i < len; i++)
602 val[i] = xval[i];
603 if (suboffset > 0)
604 {
605 val[len] = sext_hwi (xval[len], suboffset);
606 len += 1;
607 }
608 return canonize (val, len, precision);
609 }
610
611 /* Zero-extend the number represented by XVAL and XLEN into VAL,
612 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
613 and VAL have PRECISION bits. */
614 unsigned int
615 wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
616 unsigned int xlen, unsigned int precision, unsigned int offset)
617 {
618 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
619 /* Extending beyond the precision is a no-op. If we have only stored
620 OFFSET bits or fewer, and the upper stored bit is zero, then there
621 is nothing to do. */
622 if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
623 {
624 for (unsigned i = 0; i < xlen; ++i)
625 val[i] = xval[i];
626 return xlen;
627 }
628 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
629 for (unsigned int i = 0; i < len; i++)
630 val[i] = i < xlen ? xval[i] : -1;
631 if (suboffset > 0)
632 val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
633 else
634 val[len] = 0;
635 return canonize (val, len + 1, precision);
636 }
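/* For example, with XVAL = {0xff}, PRECISION == 32 and OFFSET == 8:
   sext_large treats bit 7 as a sign bit and produces {-1}, i.e. the 32-bit
   value 0xffffffff, while zext_large clears everything from bit 8 upwards
   and produces {0xff}.  */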
637
638 /*
639 * Masking, inserting, shifting, rotating.
640 */
641
642 /* Insert WIDTH bits from Y into X starting at START. */
643 wide_int
644 wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
645 unsigned int width)
646 {
647 wide_int result;
648 wide_int mask;
649 wide_int tmp;
650
651 unsigned int precision = x.get_precision ();
652 if (start >= precision)
653 return x;
654
655 gcc_checking_assert (precision >= width);
656
657 if (start + width >= precision)
658 width = precision - start;
659
660 mask = wi::shifted_mask (start, width, false, precision);
661 tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
662 result = tmp & mask;
663
664 tmp = wi::bit_and_not (x, mask);
665 result = result | tmp;
666
667 return result;
668 }
669
670 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
671 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
672 bits. */
673 unsigned int
674 wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
675 unsigned int xlen, unsigned int precision, unsigned int bit)
676 {
677 unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
678 unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
679
680 if (block + 1 >= xlen)
681 {
682 /* The operation either affects the last current block or needs
683 a new block. */
684 unsigned int len = block + 1;
685 for (unsigned int i = 0; i < len; i++)
686 val[i] = safe_uhwi (xval, xlen, i);
687 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
688
689 /* If the bit we just set is at the msb of the block, make sure
690 that any higher bits are zeros. */
691 if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
692 val[len++] = 0;
693 return len;
694 }
695 else
696 {
697 for (unsigned int i = 0; i < xlen; i++)
698 val[i] = xval[i];
699 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
700 return canonize (val, xlen, precision);
701 }
702 }
703
704 /* bswap THIS. */
705 wide_int
706 wide_int_storage::bswap () const
707 {
708 wide_int result = wide_int::create (precision);
709 unsigned int i, s;
710 unsigned int len = BLOCKS_NEEDED (precision);
711 unsigned int xlen = get_len ();
712 const HOST_WIDE_INT *xval = get_val ();
713 HOST_WIDE_INT *val = result.write_val ();
714
715   /* This is not a well-defined operation if the precision is not a
716 multiple of 8. */
717 gcc_assert ((precision & 0x7) == 0);
718
719 for (i = 0; i < len; i++)
720 val[i] = 0;
721
722 /* Only swap the bytes that are not the padding. */
723 for (s = 0; s < precision; s += 8)
724 {
725 unsigned int d = precision - s - 8;
726 unsigned HOST_WIDE_INT byte;
727
728 unsigned int block = s / HOST_BITS_PER_WIDE_INT;
729 unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
730
731 byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
732
733 block = d / HOST_BITS_PER_WIDE_INT;
734 offset = d & (HOST_BITS_PER_WIDE_INT - 1);
735
736 val[block] |= byte << offset;
737 }
738
739 result.set_len (canonize (val, len, precision));
740 return result;
741 }
742
743 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
744 above that up to PREC are zeros. The result is inverted if NEGATE
745 is true. Return the number of blocks in VAL. */
746 unsigned int
747 wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
748 unsigned int prec)
749 {
750 if (width >= prec)
751 {
752 val[0] = negate ? 0 : -1;
753 return 1;
754 }
755 else if (width == 0)
756 {
757 val[0] = negate ? -1 : 0;
758 return 1;
759 }
760
761 unsigned int i = 0;
762 while (i < width / HOST_BITS_PER_WIDE_INT)
763 val[i++] = negate ? 0 : -1;
764
765 unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
766 if (shift != 0)
767 {
768 HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
769 val[i++] = negate ? ~last : last;
770 }
771 else
772 val[i++] = negate ? -1 : 0;
773
774 return i;
775 }
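/* For example, mask (val, 5, false, 32) stores the single block 0x1f
   (bits 0-4 set) and returns 1; with NEGATE true it stores ~0x1f instead,
   which within the 32-bit precision reads as 0xffffffe0.  */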
776
777 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
778 bits are ones, and the bits above that up to PREC are zeros. The result
779 is inverted if NEGATE is true. Return the number of blocks in VAL. */
780 unsigned int
781 wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
782 bool negate, unsigned int prec)
783 {
784 if (start >= prec || width == 0)
785 {
786 val[0] = negate ? -1 : 0;
787 return 1;
788 }
789
790 if (width > prec - start)
791 width = prec - start;
792 unsigned int end = start + width;
793
794 unsigned int i = 0;
795 while (i < start / HOST_BITS_PER_WIDE_INT)
796 val[i++] = negate ? -1 : 0;
797
798 unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
799 if (shift)
800 {
801 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
802 shift += width;
803 if (shift < HOST_BITS_PER_WIDE_INT)
804 {
805 /* case 000111000 */
806 block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
807 val[i++] = negate ? ~block : block;
808 return i;
809 }
810 else
811 /* ...111000 */
812 val[i++] = negate ? block : ~block;
813 }
814
815 while (i < end / HOST_BITS_PER_WIDE_INT)
816 /* 1111111 */
817 val[i++] = negate ? 0 : -1;
818
819 shift = end & (HOST_BITS_PER_WIDE_INT - 1);
820 if (shift != 0)
821 {
822 /* 000011111 */
823 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
824 val[i++] = negate ? ~block : block;
825 }
826 else if (end < prec)
827 val[i++] = negate ? -1 : 0;
828
829 return i;
830 }
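/* For example, shifted_mask (val, 4, 8, false, 32) stores the single block
   0xff0 (bits 4-11 set) and returns 1; with NEGATE true the stored block is
   the complement, which within the 32-bit precision reads as 0xfffff00f.  */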
831
832 /*
833  * Logical operations.
834 */
835
836 /* Set VAL to OP0 & OP1. Return the number of blocks used. */
837 unsigned int
838 wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
839 unsigned int op0len, const HOST_WIDE_INT *op1,
840 unsigned int op1len, unsigned int prec)
841 {
842 int l0 = op0len - 1;
843 int l1 = op1len - 1;
844 bool need_canon = true;
845
846 unsigned int len = MAX (op0len, op1len);
847 if (l0 > l1)
848 {
849 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
850 if (op1mask == 0)
851 {
852 l0 = l1;
853 len = l1 + 1;
854 }
855 else
856 {
857 need_canon = false;
858 while (l0 > l1)
859 {
860 val[l0] = op0[l0];
861 l0--;
862 }
863 }
864 }
865 else if (l1 > l0)
866 {
867 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
868 if (op0mask == 0)
869 len = l0 + 1;
870 else
871 {
872 need_canon = false;
873 while (l1 > l0)
874 {
875 val[l1] = op1[l1];
876 l1--;
877 }
878 }
879 }
880
881 while (l0 >= 0)
882 {
883 val[l0] = op0[l0] & op1[l0];
884 l0--;
885 }
886
887 if (need_canon)
888 len = canonize (val, len, prec);
889
890 return len;
891 }
892
893 /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
894 unsigned int
895 wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
896 unsigned int op0len, const HOST_WIDE_INT *op1,
897 unsigned int op1len, unsigned int prec)
898 {
899 wide_int result;
900 int l0 = op0len - 1;
901 int l1 = op1len - 1;
902 bool need_canon = true;
903
904 unsigned int len = MAX (op0len, op1len);
905 if (l0 > l1)
906 {
907 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
908 if (op1mask != 0)
909 {
910 l0 = l1;
911 len = l1 + 1;
912 }
913 else
914 {
915 need_canon = false;
916 while (l0 > l1)
917 {
918 val[l0] = op0[l0];
919 l0--;
920 }
921 }
922 }
923 else if (l1 > l0)
924 {
925 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
926 if (op0mask == 0)
927 len = l0 + 1;
928 else
929 {
930 need_canon = false;
931 while (l1 > l0)
932 {
933 val[l1] = ~op1[l1];
934 l1--;
935 }
936 }
937 }
938
939 while (l0 >= 0)
940 {
941 val[l0] = op0[l0] & ~op1[l0];
942 l0--;
943 }
944
945 if (need_canon)
946 len = canonize (val, len, prec);
947
948 return len;
949 }
950
951 /* Set VAL to OP0 | OP1. Return the number of blocks used. */
952 unsigned int
953 wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
954 unsigned int op0len, const HOST_WIDE_INT *op1,
955 unsigned int op1len, unsigned int prec)
956 {
957 wide_int result;
958 int l0 = op0len - 1;
959 int l1 = op1len - 1;
960 bool need_canon = true;
961
962 unsigned int len = MAX (op0len, op1len);
963 if (l0 > l1)
964 {
965 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
966 if (op1mask != 0)
967 {
968 l0 = l1;
969 len = l1 + 1;
970 }
971 else
972 {
973 need_canon = false;
974 while (l0 > l1)
975 {
976 val[l0] = op0[l0];
977 l0--;
978 }
979 }
980 }
981 else if (l1 > l0)
982 {
983 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
984 if (op0mask != 0)
985 len = l0 + 1;
986 else
987 {
988 need_canon = false;
989 while (l1 > l0)
990 {
991 val[l1] = op1[l1];
992 l1--;
993 }
994 }
995 }
996
997 while (l0 >= 0)
998 {
999 val[l0] = op0[l0] | op1[l0];
1000 l0--;
1001 }
1002
1003 if (need_canon)
1004 len = canonize (val, len, prec);
1005
1006 return len;
1007 }
1008
1009 /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
1010 unsigned int
1011 wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1012 unsigned int op0len, const HOST_WIDE_INT *op1,
1013 unsigned int op1len, unsigned int prec)
1014 {
1015 wide_int result;
1016 int l0 = op0len - 1;
1017 int l1 = op1len - 1;
1018 bool need_canon = true;
1019
1020 unsigned int len = MAX (op0len, op1len);
1021 if (l0 > l1)
1022 {
1023 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1024 if (op1mask == 0)
1025 {
1026 l0 = l1;
1027 len = l1 + 1;
1028 }
1029 else
1030 {
1031 need_canon = false;
1032 while (l0 > l1)
1033 {
1034 val[l0] = op0[l0];
1035 l0--;
1036 }
1037 }
1038 }
1039 else if (l1 > l0)
1040 {
1041 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1042 if (op0mask != 0)
1043 len = l0 + 1;
1044 else
1045 {
1046 need_canon = false;
1047 while (l1 > l0)
1048 {
1049 val[l1] = ~op1[l1];
1050 l1--;
1051 }
1052 }
1053 }
1054
1055 while (l0 >= 0)
1056 {
1057 val[l0] = op0[l0] | ~op1[l0];
1058 l0--;
1059 }
1060
1061 if (need_canon)
1062 len = canonize (val, len, prec);
1063
1064 return len;
1065 }
1066
1067 /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1068 unsigned int
1069 wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1070 unsigned int op0len, const HOST_WIDE_INT *op1,
1071 unsigned int op1len, unsigned int prec)
1072 {
1073 wide_int result;
1074 int l0 = op0len - 1;
1075 int l1 = op1len - 1;
1076
1077 unsigned int len = MAX (op0len, op1len);
1078 if (l0 > l1)
1079 {
1080 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1081 while (l0 > l1)
1082 {
1083 val[l0] = op0[l0] ^ op1mask;
1084 l0--;
1085 }
1086 }
1087
1088 if (l1 > l0)
1089 {
1090 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1091 while (l1 > l0)
1092 {
1093 val[l1] = op0mask ^ op1[l1];
1094 l1--;
1095 }
1096 }
1097
1098 while (l0 >= 0)
1099 {
1100 val[l0] = op0[l0] ^ op1[l0];
1101 l0--;
1102 }
1103
1104 return canonize (val, len, prec);
1105 }
1106
1107 /*
1108  * Math.
1109 */
1110
1111 /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1112 whether the result overflows when OP0 and OP1 are treated as having
1113 signedness SGN. Return the number of blocks in VAL. */
1114 unsigned int
1115 wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1116 unsigned int op0len, const HOST_WIDE_INT *op1,
1117 unsigned int op1len, unsigned int prec,
1118 signop sgn, bool *overflow)
1119 {
1120 unsigned HOST_WIDE_INT o0 = 0;
1121 unsigned HOST_WIDE_INT o1 = 0;
1122 unsigned HOST_WIDE_INT x = 0;
1123 unsigned HOST_WIDE_INT carry = 0;
1124 unsigned HOST_WIDE_INT old_carry = 0;
1125 unsigned HOST_WIDE_INT mask0, mask1;
1126 unsigned int i;
1127
1128 unsigned int len = MAX (op0len, op1len);
1129 mask0 = -top_bit_of (op0, op0len, prec);
1130 mask1 = -top_bit_of (op1, op1len, prec);
1131 /* Add all of the explicitly defined elements. */
1132
1133 for (i = 0; i < len; i++)
1134 {
1135 o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
1136 o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
1137 x = o0 + o1 + carry;
1138 val[i] = x;
1139 old_carry = carry;
1140 carry = carry == 0 ? x < o0 : x <= o0;
1141 }
1142
1143 if (len * HOST_BITS_PER_WIDE_INT < prec)
1144 {
1145 val[len] = mask0 + mask1 + carry;
1146 len++;
1147 if (overflow)
1148 *overflow = false;
1149 }
1150 else if (overflow)
1151 {
1152 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1153 if (sgn == SIGNED)
1154 {
1155 unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
1156 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1157 }
1158 else
1159 {
1160           /* Put the MSBs of X and O0 in the top of the HWI.  */
1161 x <<= shift;
1162 o0 <<= shift;
1163 if (old_carry)
1164 *overflow = (x <= o0);
1165 else
1166 *overflow = (x < o0);
1167 }
1168 }
1169
1170 return canonize (val, len, prec);
1171 }
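/* A worked example of the overflow test for a single 64-bit block with
   PREC == 64: adding 0x7fffffffffffffff and 1 gives 0x8000000000000000, and
   (val ^ o0) & (val ^ o1) has its sign bit set because the result's sign
   differs from the sign of both operands, so signed overflow is reported.
   For the unsigned case, 0xffffffffffffffff + 2 wraps to 1, which is less
   than the first operand, so unsigned overflow is reported.  */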
1172
1173 /* Subroutines of the multiplication and division operations. Unpack
1174 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1175 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1176 uncompressing the top bit of INPUT[IN_LEN - 1]. */
1177 static void
1178 wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
1179 unsigned int in_len, unsigned int out_len,
1180 unsigned int prec, signop sgn)
1181 {
1182 unsigned int i;
1183 unsigned int j = 0;
1184 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
1185 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1186 HOST_WIDE_INT mask;
1187
1188 if (sgn == SIGNED)
1189 {
1190 mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
1191 mask &= HALF_INT_MASK;
1192 }
1193 else
1194 mask = 0;
1195
1196 for (i = 0; i < blocks_needed - 1; i++)
1197 {
1198 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1199 result[j++] = x;
1200 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1201 }
1202
1203 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1204 if (small_prec)
1205 {
1206 if (sgn == SIGNED)
1207 x = sext_hwi (x, small_prec);
1208 else
1209 x = zext_hwi (x, small_prec);
1210 }
1211 result[j++] = x;
1212 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1213
1214 /* Smear the sign bit. */
1215 while (j < out_len)
1216 result[j++] = mask;
1217 }
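/* For example, with 64-bit HOST_WIDE_INTs and 32-bit half blocks, the single
   input block 0x0123456789abcdef unpacks into the half blocks 0x89abcdef and
   0x01234567 (low half first); any remaining output half blocks are filled
   with 0, or with 0xffffffff when SGN is SIGNED and the value is negative.  */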
1218
1219 /* The inverse of wi_unpack.  IN_LEN is the number of input
1220 blocks. The number of output blocks will be half this amount. */
1221 static void
1222 wi_pack (unsigned HOST_WIDE_INT *result,
1223 const unsigned HOST_HALF_WIDE_INT *input,
1224 unsigned int in_len)
1225 {
1226 unsigned int i = 0;
1227 unsigned int j = 0;
1228
1229 while (i + 2 < in_len)
1230 {
1231 result[j++] = (unsigned HOST_WIDE_INT)input[i]
1232 | ((unsigned HOST_WIDE_INT)input[i + 1]
1233 << HOST_BITS_PER_HALF_WIDE_INT);
1234 i += 2;
1235 }
1236
1237 /* Handle the case where in_len is odd. For this we zero extend. */
1238 if (in_len & 1)
1239 result[j++] = (unsigned HOST_WIDE_INT)input[i];
1240 else
1241 result[j++] = (unsigned HOST_WIDE_INT)input[i]
1242 | ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
1243 }
1244
1245 /* Multiply OP1 by OP2.  If HIGH is set, only the upper half of the
1246 result is returned.
1247
1248 If HIGH is not set, throw away the upper half after the check is
1249 made to see if it overflows. Unfortunately there is no better way
1250 to check for overflow than to do this. If OVERFLOW is nonnull,
1251    record in *OVERFLOW whether the result overflowed.  SGN controls
1252    the signedness and is used in the overflow check and when HIGH is set.  */
1253 unsigned int
1254 wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1255 unsigned int op1len, const HOST_WIDE_INT *op2val,
1256 unsigned int op2len, unsigned int prec, signop sgn,
1257 bool *overflow, bool high)
1258 {
1259 unsigned HOST_WIDE_INT o0, o1, k, t;
1260 unsigned int i;
1261 unsigned int j;
1262 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1263 unsigned int half_blocks_needed = blocks_needed * 2;
1264 /* The sizes here are scaled to support a 2x largest mode by 2x
1265 largest mode yielding a 4x largest mode result. This is what is
1266 needed by vpn. */
1267
1268 unsigned HOST_HALF_WIDE_INT
1269 u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1270 unsigned HOST_HALF_WIDE_INT
1271 v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1272   /* The '2' in the size of R is because we are internally doing a full
1273 multiply. */
1274 unsigned HOST_HALF_WIDE_INT
1275 r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1276 HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1277
1278 /* If the top level routine did not really pass in an overflow, then
1279 just make sure that we never attempt to set it. */
1280 bool needs_overflow = (overflow != 0);
1281 if (needs_overflow)
1282 *overflow = false;
1283
1284 wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1285 wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1286
1287 /* This is a surprisingly common case, so do it first. */
1288 if (op1 == 0 || op2 == 0)
1289 {
1290 val[0] = 0;
1291 return 1;
1292 }
1293
1294 #ifdef umul_ppmm
1295 if (sgn == UNSIGNED)
1296 {
1297 /* If the inputs are single HWIs and the output has room for at
1298 least two HWIs, we can use umul_ppmm directly. */
1299 if (prec >= HOST_BITS_PER_WIDE_INT * 2
1300 && wi::fits_uhwi_p (op1)
1301 && wi::fits_uhwi_p (op2))
1302 {
1303 /* This case never overflows. */
1304 if (high)
1305 {
1306 val[0] = 0;
1307 return 1;
1308 }
1309 umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
1310 if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
1311 {
1312 val[2] = 0;
1313 return 3;
1314 }
1315 return 1 + (val[1] != 0 || val[0] < 0);
1316 }
1317 /* Likewise if the output is a full single HWI, except that the
1318 upper HWI of the result is only used for determining overflow.
1319 (We handle this case inline when overflow isn't needed.) */
1320 else if (prec == HOST_BITS_PER_WIDE_INT)
1321 {
1322 unsigned HOST_WIDE_INT upper;
1323 umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1324 if (needs_overflow)
1325 *overflow = (upper != 0);
1326 if (high)
1327 val[0] = upper;
1328 return 1;
1329 }
1330 }
1331 #endif
1332
1333 /* Handle multiplications by 1. */
1334 if (op1 == 1)
1335 {
1336 if (high)
1337 {
1338 val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
1339 return 1;
1340 }
1341 for (i = 0; i < op2len; i++)
1342 val[i] = op2val[i];
1343 return op2len;
1344 }
1345 if (op2 == 1)
1346 {
1347 if (high)
1348 {
1349 val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
1350 return 1;
1351 }
1352 for (i = 0; i < op1len; i++)
1353 val[i] = op1val[i];
1354 return op1len;
1355 }
1356
1357 /* If we need to check for overflow, we can only do half wide
1358 multiplies quickly because we need to look at the top bits to
1359 check for the overflow. */
1360 if ((high || needs_overflow)
1361 && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1362 {
1363 unsigned HOST_WIDE_INT r;
1364
1365 if (sgn == SIGNED)
1366 {
1367 o0 = op1.to_shwi ();
1368 o1 = op2.to_shwi ();
1369 }
1370 else
1371 {
1372 o0 = op1.to_uhwi ();
1373 o1 = op2.to_uhwi ();
1374 }
1375
1376 r = o0 * o1;
1377 if (needs_overflow)
1378 {
1379 if (sgn == SIGNED)
1380 {
1381 if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
1382 *overflow = true;
1383 }
1384 else
1385 {
1386 if ((r >> prec) != 0)
1387 *overflow = true;
1388 }
1389 }
1390 val[0] = high ? r >> prec : r;
1391 return 1;
1392 }
1393
1394 /* We do unsigned mul and then correct it. */
1395 wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
1396 wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
1397
1398 /* The 2 is for a full mult. */
1399 memset (r, 0, half_blocks_needed * 2
1400 * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
1401
1402 for (j = 0; j < half_blocks_needed; j++)
1403 {
1404 k = 0;
1405 for (i = 0; i < half_blocks_needed; i++)
1406 {
1407 t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1408 + r[i + j] + k);
1409 r[i + j] = t & HALF_INT_MASK;
1410 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1411 }
1412 r[j + half_blocks_needed] = k;
1413 }
1414
1415 /* We did unsigned math above. For signed we must adjust the
1416 product (assuming we need to see that). */
1417 if (sgn == SIGNED && (high || needs_overflow))
1418 {
1419 unsigned HOST_WIDE_INT b;
1420 if (wi::neg_p (op1))
1421 {
1422 b = 0;
1423 for (i = 0; i < half_blocks_needed; i++)
1424 {
1425 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1426 - (unsigned HOST_WIDE_INT)v[i] - b;
1427 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1428 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1429 }
1430 }
1431 if (wi::neg_p (op2))
1432 {
1433 b = 0;
1434 for (i = 0; i < half_blocks_needed; i++)
1435 {
1436 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1437 - (unsigned HOST_WIDE_INT)u[i] - b;
1438 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1439 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1440 }
1441 }
1442 }
1443
1444 if (needs_overflow)
1445 {
1446 HOST_WIDE_INT top;
1447
1448 /* For unsigned, overflow is true if any of the top bits are set.
1449 For signed, overflow is true if any of the top bits are not equal
1450 to the sign bit. */
1451 if (sgn == UNSIGNED)
1452 top = 0;
1453 else
1454 {
1455 top = r[(half_blocks_needed) - 1];
1456 top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
1457 top &= mask;
1458 }
1459
1460 for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1461 if (((HOST_WIDE_INT)(r[i] & mask)) != top)
1462 *overflow = true;
1463 }
1464
1465 if (high)
1466 {
1467 /* compute [prec] <- ([prec] * [prec]) >> [prec] */
1468 wi_pack ((unsigned HOST_WIDE_INT *) val,
1469 &r[half_blocks_needed], half_blocks_needed);
1470 return canonize (val, blocks_needed, prec);
1471 }
1472 else
1473 {
1474       /* compute [prec] <- ([prec] * [prec]) & ((1 << [prec]) - 1) */
1475 wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
1476 return canonize (val, blocks_needed, prec);
1477 }
1478 }
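/* As an illustration of the narrow-precision overflow path above, with
   32-bit half blocks and PREC == 8: an unsigned multiply of 200 by 2 gives
   400, and 400 >> 8 is nonzero, so *OVERFLOW is set; a signed multiply of
   100 by 2 gives 200, which no longer equals sext_hwi (200, 8) == -56, so
   *OVERFLOW is set there too.  */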
1479
1480 /* Compute the population count of X. */
1481 int
1482 wi::popcount (const wide_int_ref &x)
1483 {
1484 unsigned int i;
1485 int count;
1486
1487 /* The high order block is special if it is the last block and the
1488 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1489 have to clear out any ones above the precision before doing
1490 popcount on this block. */
1491 count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1492 unsigned int stop = x.len;
1493 if (count < 0)
1494 {
1495 count = popcount_hwi (x.uhigh () << -count);
1496 stop -= 1;
1497 }
1498 else
1499 {
1500 if (x.sign_mask () >= 0)
1501 count = 0;
1502 }
1503
1504 for (i = 0; i < stop; ++i)
1505 count += popcount_hwi (x.val[i]);
1506
1507 return count;
1508 }
1509
1510 /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1511 whether the result overflows when OP0 and OP1 are treated as having
1512 signedness SGN. Return the number of blocks in VAL. */
1513 unsigned int
1514 wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1515 unsigned int op0len, const HOST_WIDE_INT *op1,
1516 unsigned int op1len, unsigned int prec,
1517 signop sgn, bool *overflow)
1518 {
1519 unsigned HOST_WIDE_INT o0 = 0;
1520 unsigned HOST_WIDE_INT o1 = 0;
1521 unsigned HOST_WIDE_INT x = 0;
1522   /* Subtraction is done directly: each result block is OP0[i] - OP1[i]
1523      minus a borrow propagated up from the block below (conceptually the
1524      same as negating OP1 and adding).  */
1525 unsigned HOST_WIDE_INT borrow = 0;
1526 unsigned HOST_WIDE_INT old_borrow = 0;
1527
1528 unsigned HOST_WIDE_INT mask0, mask1;
1529 unsigned int i;
1530
1531 unsigned int len = MAX (op0len, op1len);
1532 mask0 = -top_bit_of (op0, op0len, prec);
1533 mask1 = -top_bit_of (op1, op1len, prec);
1534
1535 /* Subtract all of the explicitly defined elements. */
1536 for (i = 0; i < len; i++)
1537 {
1538 o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1539 o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1540 x = o0 - o1 - borrow;
1541 val[i] = x;
1542 old_borrow = borrow;
1543 borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1544 }
1545
1546 if (len * HOST_BITS_PER_WIDE_INT < prec)
1547 {
1548 val[len] = mask0 - mask1 - borrow;
1549 len++;
1550 if (overflow)
1551 *overflow = false;
1552 }
1553 else if (overflow)
1554 {
1555 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1556 if (sgn == SIGNED)
1557 {
1558 unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
1559 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1560 }
1561 else
1562 {
1563           /* Put the MSBs of X and O0 in the top of the HWI.  */
1564 x <<= shift;
1565 o0 <<= shift;
1566 if (old_borrow)
1567 *overflow = (x >= o0);
1568 else
1569 *overflow = (x > o0);
1570 }
1571 }
1572
1573 return canonize (val, len, prec);
1574 }
1575
1576
1577 /*
1578 * Division and Mod
1579 */
1580
1581 /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1582 algorithm is a small modification of the algorithm in Hacker's
1583 Delight by Warren, which itself is a small modification of Knuth's
1584    algorithm.  M is the number of significant elements of B_DIVIDEND;
1585    however, there needs to be at least one extra element of B_DIVIDEND
1586    allocated.  N is the number of elements of B_DIVISOR.  */
1587 static void
1588 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1589 unsigned HOST_HALF_WIDE_INT *b_remainder,
1590 unsigned HOST_HALF_WIDE_INT *b_dividend,
1591 unsigned HOST_HALF_WIDE_INT *b_divisor,
1592 int m, int n)
1593 {
1594 /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
1595 HOST_WIDE_INT and stored in the lower bits of each word. This
1596 algorithm should work properly on both 32 and 64 bit
1597 machines. */
1598 unsigned HOST_WIDE_INT b
1599 = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
1600 unsigned HOST_WIDE_INT qhat; /* Estimate of quotient digit. */
1601 unsigned HOST_WIDE_INT rhat; /* A remainder. */
1602 unsigned HOST_WIDE_INT p; /* Product of two digits. */
1603 HOST_WIDE_INT t, k;
1604 int i, j, s;
1605
1606 /* Single digit divisor. */
1607 if (n == 1)
1608 {
1609 k = 0;
1610 for (j = m - 1; j >= 0; j--)
1611 {
1612 b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1613 k = ((k * b + b_dividend[j])
1614 - ((unsigned HOST_WIDE_INT)b_quotient[j]
1615 * (unsigned HOST_WIDE_INT)b_divisor[0]));
1616 }
1617 b_remainder[0] = k;
1618 return;
1619 }
1620
1621 s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1622
1623 if (s)
1624 {
1625 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1626 algorithm, we can overwrite b_dividend and b_divisor, so we do
1627 that. */
1628 for (i = n - 1; i > 0; i--)
1629 b_divisor[i] = (b_divisor[i] << s)
1630 | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1631 b_divisor[0] = b_divisor[0] << s;
1632
1633 b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1634 for (i = m - 1; i > 0; i--)
1635 b_dividend[i] = (b_dividend[i] << s)
1636 | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1637 b_dividend[0] = b_dividend[0] << s;
1638 }
1639
1640 /* Main loop. */
1641 for (j = m - n; j >= 0; j--)
1642 {
1643 qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1644 rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1645 again:
1646 if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1647 {
1648 qhat -= 1;
1649 rhat += b_divisor[n-1];
1650 if (rhat < b)
1651 goto again;
1652 }
1653
1654 /* Multiply and subtract. */
1655 k = 0;
1656 for (i = 0; i < n; i++)
1657 {
1658 p = qhat * b_divisor[i];
1659 t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1660 b_dividend[i + j] = t;
1661 k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1662 - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1663 }
1664 t = b_dividend[j+n] - k;
1665 b_dividend[j+n] = t;
1666
1667 b_quotient[j] = qhat;
1668 if (t < 0)
1669 {
1670 b_quotient[j] -= 1;
1671 k = 0;
1672 for (i = 0; i < n; i++)
1673 {
1674 t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1675 b_dividend[i+j] = t;
1676 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1677 }
1678 b_dividend[j+n] += k;
1679 }
1680 }
1681 if (s)
1682 for (i = 0; i < n; i++)
1683 b_remainder[i] = (b_dividend[i] >> s)
1684 | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1685 else
1686 for (i = 0; i < n; i++)
1687 b_remainder[i] = b_dividend[i];
1688 }
1689
1690
1691 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1692 the result. If QUOTIENT is nonnull, store the value of the quotient
1693 there and return the number of blocks in it. The return value is
1694 not defined otherwise. If REMAINDER is nonnull, store the value
1695 of the remainder there and store the number of blocks in
1696 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1697 the division overflowed. */
1698 unsigned int
1699 wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1700 HOST_WIDE_INT *remainder,
1701 const HOST_WIDE_INT *dividend_val,
1702 unsigned int dividend_len, unsigned int dividend_prec,
1703 const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1704 unsigned int divisor_prec, signop sgn,
1705 bool *oflow)
1706 {
1707 unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1708 unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1709 unsigned HOST_HALF_WIDE_INT
1710 b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1711 unsigned HOST_HALF_WIDE_INT
1712 b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1713 unsigned HOST_HALF_WIDE_INT
1714 b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1715 unsigned HOST_HALF_WIDE_INT
1716 b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1717 unsigned int m, n;
1718 bool dividend_neg = false;
1719 bool divisor_neg = false;
1720 bool overflow = false;
1721 wide_int neg_dividend, neg_divisor;
1722
1723 wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1724 dividend_prec);
1725 wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1726 divisor_prec);
1727 if (divisor == 0)
1728 overflow = true;
1729
1730 /* The smallest signed number / -1 causes overflow. The dividend_len
1731 check is for speed rather than correctness. */
1732 if (sgn == SIGNED
1733 && dividend_len == BLOCKS_NEEDED (dividend_prec)
1734 && divisor == -1
1735 && wi::only_sign_bit_p (dividend))
1736 overflow = true;
1737
1738   /* Handle the overflow cases.  Viewed as an unsigned value, the quotient of
1739      (signed min / -1) has the same representation as the original dividend.
1740 We have traditionally made division by zero act as division by one,
1741 so there too we use the original dividend. */
1742 if (overflow)
1743 {
1744 if (remainder)
1745 {
1746 *remainder_len = 1;
1747 remainder[0] = 0;
1748 }
1749 if (oflow != 0)
1750 *oflow = true;
1751 if (quotient)
1752 for (unsigned int i = 0; i < dividend_len; ++i)
1753 quotient[i] = dividend_val[i];
1754 return dividend_len;
1755 }
1756
1757 if (oflow)
1758 *oflow = false;
1759
1760 /* Do it on the host if you can. */
1761 if (sgn == SIGNED
1762 && wi::fits_shwi_p (dividend)
1763 && wi::fits_shwi_p (divisor))
1764 {
1765 HOST_WIDE_INT o0 = dividend.to_shwi ();
1766 HOST_WIDE_INT o1 = divisor.to_shwi ();
1767
1768 if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1769 {
1770 gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1771 if (quotient)
1772 {
1773 quotient[0] = HOST_WIDE_INT_MIN;
1774 quotient[1] = 0;
1775 }
1776 if (remainder)
1777 {
1778 remainder[0] = 0;
1779 *remainder_len = 1;
1780 }
1781 return 2;
1782 }
1783 else
1784 {
1785 if (quotient)
1786 quotient[0] = o0 / o1;
1787 if (remainder)
1788 {
1789 remainder[0] = o0 % o1;
1790 *remainder_len = 1;
1791 }
1792 return 1;
1793 }
1794 }
1795
1796 if (sgn == UNSIGNED
1797 && wi::fits_uhwi_p (dividend)
1798 && wi::fits_uhwi_p (divisor))
1799 {
1800 unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1801 unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1802
1803 if (quotient)
1804 quotient[0] = o0 / o1;
1805 if (remainder)
1806 {
1807 remainder[0] = o0 % o1;
1808 *remainder_len = 1;
1809 }
1810 return 1;
1811 }
1812
1813 /* Make the divisor and dividend positive and remember what we
1814 did. */
1815 if (sgn == SIGNED)
1816 {
1817 if (wi::neg_p (dividend))
1818 {
1819 neg_dividend = -dividend;
1820 dividend = neg_dividend;
1821 dividend_neg = true;
1822 }
1823 if (wi::neg_p (divisor))
1824 {
1825 neg_divisor = -divisor;
1826 divisor = neg_divisor;
1827 divisor_neg = true;
1828 }
1829 }
1830
1831 wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1832 dividend_blocks_needed, dividend_prec, sgn);
1833 wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1834 divisor_blocks_needed, divisor_prec, sgn);
1835
1836 m = dividend_blocks_needed;
1837 b_dividend[m] = 0;
1838 while (m > 1 && b_dividend[m - 1] == 0)
1839 m--;
1840
1841 n = divisor_blocks_needed;
1842 while (n > 1 && b_divisor[n - 1] == 0)
1843 n--;
1844
1845 memset (b_quotient, 0, sizeof (b_quotient));
1846
1847 divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1848
1849 unsigned int quotient_len = 0;
1850 if (quotient)
1851 {
1852 wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
1853 quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
1854 /* The quotient is neg if exactly one of the divisor or dividend is
1855 neg. */
1856 if (dividend_neg != divisor_neg)
1857 quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1858 quotient_len, dividend_prec,
1859 UNSIGNED, 0);
1860 }
1861
1862 if (remainder)
1863 {
1864 wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
1865 *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
1866 /* The remainder is always the same sign as the dividend. */
1867 if (dividend_neg)
1868 *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1869 *remainder_len, dividend_prec,
1870 UNSIGNED, 0);
1871 }
1872
1873 return quotient_len;
1874 }
1875
1876 /*
1877 * Shifting, rotating and extraction.
1878 */
1879
1880 /* Left shift XVAL by SHIFT and store the result in VAL. Return the
1881 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
1882 unsigned int
1883 wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1884 unsigned int xlen, unsigned int precision,
1885 unsigned int shift)
1886 {
1887 /* Split the shift into a whole-block shift and a subblock shift. */
1888 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1889 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1890
1891 /* The whole-block shift fills with zeros. */
1892 unsigned int len = BLOCKS_NEEDED (precision);
1893 for (unsigned int i = 0; i < skip; ++i)
1894 val[i] = 0;
1895
1896 /* It's easier to handle the simple block case specially. */
1897 if (small_shift == 0)
1898 for (unsigned int i = skip; i < len; ++i)
1899 val[i] = safe_uhwi (xval, xlen, i - skip);
1900 else
1901 {
1902 /* The first unfilled output block is a left shift of the first
1903 block in XVAL. The other output blocks contain bits from two
1904 consecutive input blocks. */
1905 unsigned HOST_WIDE_INT carry = 0;
1906 for (unsigned int i = skip; i < len; ++i)
1907 {
1908 unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
1909 val[i] = (x << small_shift) | carry;
1910 carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
1911 }
1912 }
1913 return canonize (val, len, precision);
1914 }
1915
1916 /* Right shift XVAL by SHIFT and store the result in VAL. Return the
1917 number of blocks in VAL. The input has XPRECISION bits and the
1918 output has XPRECISION - SHIFT bits. */
1919 static unsigned int
1920 rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1921 unsigned int xlen, unsigned int xprecision,
1922 unsigned int shift)
1923 {
1924 /* Split the shift into a whole-block shift and a subblock shift. */
1925 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1926 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1927
1928 /* Work out how many blocks are needed to store the significant bits
1929 (excluding the upper zeros or signs). */
1930 unsigned int len = BLOCKS_NEEDED (xprecision - shift);
1931
1932 /* It's easier to handle the simple block case specially. */
1933 if (small_shift == 0)
1934 for (unsigned int i = 0; i < len; ++i)
1935 val[i] = safe_uhwi (xval, xlen, i + skip);
1936 else
1937 {
1938 /* Each output block but the last is a combination of two input blocks.
1939 The last block is a right shift of the last block in XVAL. */
1940 unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
1941 for (unsigned int i = 0; i < len; ++i)
1942 {
1943 val[i] = curr >> small_shift;
1944 curr = safe_uhwi (xval, xlen, i + skip + 1);
1945 val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
1946 }
1947 }
1948 return len;
1949 }
1950
1951 /* Logically right shift XVAL by SHIFT and store the result in VAL.
1952 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1953 VAL has PRECISION bits. */
1954 unsigned int
1955 wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1956 unsigned int xlen, unsigned int xprecision,
1957 unsigned int precision, unsigned int shift)
1958 {
1959 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1960
1961 /* The value we just created has precision XPRECISION - SHIFT.
1962 Zero-extend it to wider precisions. */
1963 if (precision > xprecision - shift)
1964 {
1965 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1966 if (small_prec)
1967 val[len - 1] = zext_hwi (val[len - 1], small_prec);
1968 else if (val[len - 1] < 0)
1969 {
1970 /* Add a new block with a zero. */
1971 val[len++] = 0;
1972 return len;
1973 }
1974 }
1975 return canonize (val, len, precision);
1976 }
1977
1978 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
1979 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1980 VAL has PRECISION bits. */
1981 unsigned int
1982 wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1983 unsigned int xlen, unsigned int xprecision,
1984 unsigned int precision, unsigned int shift)
1985 {
1986 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1987
1988 /* The value we just created has precision XPRECISION - SHIFT.
1989 Sign-extend it to wider types. */
1990 if (precision > xprecision - shift)
1991 {
1992 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1993 if (small_prec)
1994 val[len - 1] = sext_hwi (val[len - 1], small_prec);
1995 }
1996 return canonize (val, len, precision);
1997 }
1998
1999 /* Return the number of leading (upper) zeros in X. */
2000 int
2001 wi::clz (const wide_int_ref &x)
2002 {
2003   /* Calculate how many bits there are above the highest represented block.  */
2004 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2005
2006 unsigned HOST_WIDE_INT high = x.uhigh ();
2007 if (count < 0)
2008 /* The upper -COUNT bits of HIGH are not part of the value.
2009 Clear them out. */
2010 high = (high << -count) >> -count;
2011 else if (x.sign_mask () < 0)
2012 /* The upper bit is set, so there are no leading zeros. */
2013 return 0;
2014
2015 /* We don't need to look below HIGH. Either HIGH is nonzero,
2016 or the top bit of the block below is nonzero; clz_hwi is
2017 HOST_BITS_PER_WIDE_INT in the latter case. */
2018 return count + clz_hwi (high);
2019 }
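/* For example, clz of the value 1 at 32-bit precision: COUNT is 32 - 64 =
   -32 (assuming 64-bit blocks), HIGH stays 1 after masking, and the result
   is -32 + clz_hwi (1) = -32 + 63 = 31 leading zeros; for the value 0 the
   result is -32 + 64 = 32, the full precision.  */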
2020
2021 /* Return the number of redundant sign bits in X. (That is, the number
2022 of bits immediately below the sign bit that have the same value as
2023 the sign bit.) */
2024 int
2025 wi::clrsb (const wide_int_ref &x)
2026 {
2027   /* Calculate how many bits there are above the highest represented block.  */
2028 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2029
2030 unsigned HOST_WIDE_INT high = x.uhigh ();
2031 unsigned HOST_WIDE_INT mask = -1;
2032 if (count < 0)
2033 {
2034 /* The upper -COUNT bits of HIGH are not part of the value.
2035 Clear them from both MASK and HIGH. */
2036 mask >>= -count;
2037 high &= mask;
2038 }
2039
2040 /* If the top bit is 1, count the number of leading 1s. If the top
2041 bit is zero, count the number of leading zeros. */
2042 if (high > mask / 2)
2043 high ^= mask;
2044
2045 /* There are no sign bits below the top block, so we don't need to look
2046 beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2047 HIGH is 0. */
2048 return count + clz_hwi (high) - 1;
2049 }
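/* For example, clrsb of the value 3 at 8-bit precision (assuming 64-bit
   blocks): COUNT is 8 - 64 = -56, MASK becomes 0xff and HIGH stays 3, which
   is not above MASK / 2, so the result is -56 + clz_hwi (3) - 1 =
   -56 + 62 - 1 = 5: the sign bit is 0 and so are the five bits below it.  */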
2050
2051 /* Return the number of trailing (lower) zeros in X. */
2052 int
2053 wi::ctz (const wide_int_ref &x)
2054 {
2055 if (x.len == 1 && x.ulow () == 0)
2056 return x.precision;
2057
2058 /* Having dealt with the zero case, there must be a block with a
2059 nonzero bit. We don't care about the bits above the first 1. */
2060 unsigned int i = 0;
2061 while (x.val[i] == 0)
2062 ++i;
2063 return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2064 }
2065
2066 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2067 return -1. */
2068 int
2069 wi::exact_log2 (const wide_int_ref &x)
2070 {
2071 /* Reject cases where there are implicit -1 blocks above HIGH. */
2072 if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2073 return -1;
2074
2075 /* Set CRUX to the index of the entry that should be nonzero.
2076 If the top block is zero then the next lowest block (if any)
2077 must have the high bit set. */
2078 unsigned int crux = x.len - 1;
2079 if (crux > 0 && x.val[crux] == 0)
2080 crux -= 1;
2081
2082 /* Check that all lower blocks are zero. */
2083 for (unsigned int i = 0; i < crux; ++i)
2084 if (x.val[i] != 0)
2085 return -1;
2086
2087 /* Get a zero-extended form of block CRUX. */
2088 unsigned HOST_WIDE_INT hwi = x.val[crux];
2089 if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2090 hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2091
2092 /* Now it's down to whether HWI is a power of 2. */
2093 int res = ::exact_log2 (hwi);
2094 if (res >= 0)
2095 res += crux * HOST_BITS_PER_WIDE_INT;
2096 return res;
2097 }
2098
2099 /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2100 int
2101 wi::floor_log2 (const wide_int_ref &x)
2102 {
2103 return x.precision - 1 - clz (x);
2104 }
2105
2106 /* Return the index of the first (lowest) set bit in X, counting from 1.
2107 Return 0 if X is 0. */
2108 int
2109 wi::ffs (const wide_int_ref &x)
2110 {
2111 return eq_p (x, 0) ? 0 : ctz (x) + 1;
2112 }
2113
2114 /* Return true if sign-extending X to have precision PRECISION would give
2115 the minimum signed value at that precision. */
2116 bool
2117 wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2118 {
2119 return ctz (x) + 1 == int (precision);
2120 }
2121
2122 /* Return true if X represents the minimum signed value. */
2123 bool
2124 wi::only_sign_bit_p (const wide_int_ref &x)
2125 {
2126 return only_sign_bit_p (x, x.precision);
2127 }
2128
2129 /*
2130 * Private utilities.
2131 */
2132
2133 void gt_ggc_mx (widest_int *) { }
2134 void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2135 void gt_pch_nx (widest_int *) { }
2136
2137 template void wide_int::dump () const;
2138 template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2139 template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2140 template void offset_int::dump () const;
2141 template void widest_int::dump () const;