]>
Commit | Line | Data |
---|---|---|
807e902e | 1 | /* Operations with very long integers. |
aeee4812 | 2 | Copyright (C) 2012-2023 Free Software Foundation, Inc. |
807e902e KZ |
3 | Contributed by Kenneth Zadeck <zadeck@naturalbridge.com> |
4 | ||
5 | This file is part of GCC. | |
6 | ||
7 | GCC is free software; you can redistribute it and/or modify it | |
8 | under the terms of the GNU General Public License as published by the | |
9 | Free Software Foundation; either version 3, or (at your option) any | |
10 | later version. | |
11 | ||
12 | GCC is distributed in the hope that it will be useful, but WITHOUT | |
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
15 | for more details. | |
16 | ||
17 | You should have received a copy of the GNU General Public License | |
18 | along with GCC; see the file COPYING3. If not see | |
19 | <http://www.gnu.org/licenses/>. */ | |
20 | ||
21 | #include "config.h" | |
22 | #include "system.h" | |
23 | #include "coretypes.h" | |
24 | #include "tm.h" | |
807e902e | 25 | #include "tree.h" |
d9b950dd | 26 | #include "selftest.h" |
807e902e | 27 | |
4c8bd90f RB |
28 | |
29 | #define HOST_BITS_PER_HALF_WIDE_INT 32 | |
30 | #if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG | |
31 | # define HOST_HALF_WIDE_INT long | |
32 | #elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT | |
33 | # define HOST_HALF_WIDE_INT int | |
34 | #else | |
35 | #error Please add support for HOST_HALF_WIDE_INT | |
36 | #endif | |
37 | ||
807e902e | 38 | #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT |
ecc7533a FXC |
39 | /* Do not include longlong.h when compiler is clang-based. See PR61146. */ |
40 | #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__) | |
807e902e KZ |
41 | typedef unsigned HOST_HALF_WIDE_INT UHWtype; |
42 | typedef unsigned HOST_WIDE_INT UWtype; | |
43 | typedef unsigned int UQItype __attribute__ ((mode (QI))); | |
44 | typedef unsigned int USItype __attribute__ ((mode (SI))); | |
45 | typedef unsigned int UDItype __attribute__ ((mode (DI))); | |
60f82c42 RS |
46 | #if W_TYPE_SIZE == 32 |
47 | typedef unsigned int UDWtype __attribute__ ((mode (DI))); | |
48 | #else | |
49 | typedef unsigned int UDWtype __attribute__ ((mode (TI))); | |
50 | #endif | |
807e902e KZ |
51 | #include "longlong.h" |
52 | #endif | |
53 | ||
/* An all-zero block array sized for the largest supported integer.
   NOTE(review): not referenced within this part of the file --
   presumably a shared source of zero blocks; confirm against the rest
   of the file.  */
static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
55 | ||
56 | /* | |
57 | * Internal utilities. | |
58 | */ | |
59 | ||
60 | /* Quantities to deal with values that hold half of a wide int. Used | |
61 | in multiply and divide. */ | |
fecfbfa4 | 62 | #define HALF_INT_MASK ((HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1) |
807e902e KZ |
63 | |
64 | #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT) | |
65 | #define BLOCKS_NEEDED(PREC) \ | |
66 | (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1) | |
67 | #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0) | |
68 | ||
69 | /* Return the value a VAL[I] if I < LEN, otherwise, return 0 or -1 | |
70 | based on the top existing bit of VAL. */ | |
71 | ||
72 | static unsigned HOST_WIDE_INT | |
73 | safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i) | |
74 | { | |
fecfbfa4 | 75 | return i < len ? val[i] : val[len - 1] < 0 ? HOST_WIDE_INT_M1 : 0; |
807e902e KZ |
76 | } |
77 | ||
/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  /* Blocks above the precision carry no information.  */
  if (len > blocks_needed)
    len = blocks_needed;

  /* A single block is always canonical.  */
  if (len == 1)
    return len;

  top = val[len - 1];
  /* If the top block is only partially significant, sign-extend it in
     place so the stored bits agree with the PRECISION-bit value.  */
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  /* A top block that is neither all zeros nor all ones cannot be
     redundant, so the representation is already minimal.  */
  if (top != 0 && top != (HOST_WIDE_INT)-1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
	{
	  /* Block I's own sign bit already implies the dropped TOP
	     blocks, so the number can end at block I.  */
	  if (SIGN_MASK (x) == top)
	    return i + 1;

	  /* We need an extra block because the top bit block i does
	     not match the extension.  */
	  return i + 2;
	}
    }

  /* The number is 0 or -1.  */
  return 1;
}
121 | ||
321a2b65 JJ |
122 | /* VAL[0] is the unsigned result of an operation. Canonize it by adding |
123 | another 0 block if needed, and return number of blocks needed. */ | |
124 | ||
125 | static inline unsigned int | |
126 | canonize_uhwi (HOST_WIDE_INT *val, unsigned int precision) | |
127 | { | |
128 | if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT) | |
129 | { | |
130 | val[1] = 0; | |
131 | return 2; | |
132 | } | |
133 | return 1; | |
134 | } | |
135 | ||
807e902e KZ |
136 | /* |
137 | * Conversion routines in and out of wide_int. | |
138 | */ | |
139 | ||
140 | /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the | |
141 | result for an integer with precision PRECISION. Return the length | |
2a03d979 | 142 | of VAL (after any canonization). */ |
807e902e KZ |
143 | unsigned int |
144 | wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval, | |
145 | unsigned int xlen, unsigned int precision, bool need_canon) | |
146 | { | |
147 | for (unsigned i = 0; i < xlen; i++) | |
148 | val[i] = xval[i]; | |
149 | return need_canon ? canonize (val, xlen, precision) : xlen; | |
150 | } | |
151 | ||
/* Construct a wide int from a buffer of length LEN.  BUFFER will be
   read according to byte endianness and word endianness of the target.
   Only the lower BUFFER_LEN bytes of the result are set; the remaining
   high bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourself, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val ();
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      /* Map the logical byte position BYTE to the physical OFFSET in
	 BUFFER, honouring the target's word and byte endianness.  */
      if (buffer_len > UNITS_PER_WORD)
	{
	  unsigned int word = byte / UNITS_PER_WORD;

	  if (WORDS_BIG_ENDIAN)
	    word = (words - 1) - word;

	  offset = word * UNITS_PER_WORD;

	  if (BYTES_BIG_ENDIAN)
	    offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
	  else
	    offset += byte % UNITS_PER_WORD;
	}
      else
	/* Sub-word buffers need only byte-order adjustment.  */
	offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      /* OR the byte into the block that holds bit position BITPOS.  */
      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}
204 | ||
/* Sets RESULT from X, the sign is taken according to SGN.  */
void
wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
{
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  /* Number of stored bits in the top block that lie above the
     precision (negative when fewer blocks are stored than the
     precision requires).  */
  int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();

  if (wi::neg_p (x, sgn))
    {
      /* We use ones complement to avoid -x80..0 edge case that -
	 won't work on.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len; i++)
	t[i] = ~v[i];
      /* Clear the bits above the precision so they do not contribute
	 to the magnitude.  */
      if (excess > 0)
	t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
      mpz_com (result, result);
    }
  else if (excess > 0)
    {
      /* Nonnegative value with excess top bits: import a copy whose
	 excess bits are cleared.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len - 1; i++)
	t[i] = v[i];
      t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else if (excess < 0 && wi::neg_p (x))
    {
      /* Fewer blocks are stored than the precision needs and the value
	 is negative under an unsigned SGN: materialize the implicit
	 sign-extension blocks (all ones up to the precision) so the
	 unsigned magnitude is imported correctly.  */
      int extra
	= (-excess + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT;
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len + extra);
      for (int i = 0; i < len; i++)
	t[i] = v[i];
      for (int i = 0; i < extra; i++)
	t[len + i] = -1;
      /* Trim the topmost extension block to the bits actually inside
	 the precision.  */
      excess = (-excess) % HOST_BITS_PER_WIDE_INT;
      if (excess)
	t[len + extra - 1] = (HOST_WIDE_INT_1U << excess) - 1;
      mpz_import (result, len + extra, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
}
250 | ||
/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of VAL will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  unsigned int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      /* Saturate X to the type's bounds, modifying X in place.  */
      if (mpz_cmp (x, min) < 0)
	mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
	mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the absolute value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
  count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
  HOST_WIDE_INT *val = res.write_val ();
  /* Read the absolute value.

     Write directly to the wide_int storage if possible, otherwise leave
     GMP to allocate the memory for us.  It might be slightly more efficient
     to use mpz_tdiv_r_2exp for the latter case, but the situation is
     pathological and it seems safer to operate on the original mpz value
     in all cases.  */
  void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
			     &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  /* mpz_export produces no blocks for a zero value; represent zero
     explicitly as a single zero block.  */
  if (count < 1)
    {
      val[0] = 0;
      count = 1;
    }
  count = MIN (count, BLOCKS_NEEDED (prec));
  if (valres != val)
    {
      /* GMP allocated the buffer; copy the blocks we keep and release
	 it.  */
      memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
      free (valres);
    }
  /* Zero-extend the absolute value to PREC bits.  */
  if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
    val[count++] = 0;
  else
    count = canonize (val, count, prec);
  res.set_len (count);

  /* The import above was of the unsigned magnitude; restore the sign.  */
  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}
317 | ||
318 | /* | |
319 | * Largest and smallest values in a mode. | |
320 | */ | |
321 | ||
322 | /* Return the largest SGNed number that is representable in PRECISION bits. | |
323 | ||
324 | TODO: There is still code from the double_int era that trys to | |
325 | make up for the fact that double int's could not represent the | |
326 | min and max values of all types. This code should be removed | |
327 | because the min and max values can always be represented in | |
328 | wide_ints and int-csts. */ | |
329 | wide_int | |
330 | wi::max_value (unsigned int precision, signop sgn) | |
331 | { | |
332 | gcc_checking_assert (precision != 0); | |
333 | if (sgn == UNSIGNED) | |
334 | /* The unsigned max is just all ones. */ | |
335 | return shwi (-1, precision); | |
336 | else | |
337 | /* The signed max is all ones except the top bit. This must be | |
338 | explicitly represented. */ | |
339 | return mask (precision - 1, false, precision); | |
340 | } | |
341 | ||
/* Return the smallest SGNed number that is representable in PRECISION
   bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned minimum is zero.  */
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}
354 | ||
355 | /* | |
356 | * Public utilities. | |
357 | */ | |
358 | ||
/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  /* Copy no more blocks than the target precision can hold.  */
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
	{
	  /* If the partial top block of the source is present, simply
	     zero-extend it...  */
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	  else if (val[len - 1] < 0)
	    {
	      /* ...otherwise a negative source relies on implicit
		 sign-extension blocks; materialize them before
		 clearing the bits above XPRECISION.  */
	      while (len < BLOCKS_NEEDED (xprecision))
		val[len++] = -1;
	      if (small_xprecision)
		val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	      else
		val[len++] = 0;
	    }
	}
      else
	{
	  /* Signed extension only requires sign-extending a partial
	     top block; full blocks already extend correctly.  */
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
	}
    }
  len = canonize (val, len, precision);

  return len;
}
404 | ||
/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.
   Return block INDEX of A (which has LEN stored blocks), extending the
   stored representation according to SGN.  */
static inline HOST_WIDE_INT
selt (const HOST_WIDE_INT *a, unsigned int len,
      unsigned int blocks_needed, unsigned int small_prec,
      unsigned int index, signop sgn)
{
  HOST_WIDE_INT val;
  if (index < len)
    val = a[index];
  else if (index < blocks_needed || sgn == SIGNED)
    /* Signed or within the precision.  */
    val = SIGN_MASK (a[len - 1]);
  else
    /* Unsigned extension beyond the precision.  */
    val = 0;

  /* The highest block may contain bits beyond the precision; extend
     its SMALL_PREC significant bits according to SGN.  */
  if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
	    ? sext_hwi (val, small_prec)
	    : zext_hwi (val, small_prec));
  else
    return val;
}
430 | ||
431 | /* Find the highest bit represented in a wide int. This will in | |
432 | general have the same value as the sign bit. */ | |
433 | static inline HOST_WIDE_INT | |
434 | top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec) | |
435 | { | |
436 | int excess = len * HOST_BITS_PER_WIDE_INT - prec; | |
437 | unsigned HOST_WIDE_INT val = a[len - 1]; | |
438 | if (excess > 0) | |
439 | val <<= excess; | |
440 | return val >> (HOST_BITS_PER_WIDE_INT - 1); | |
441 | } | |
442 | ||
443 | /* | |
444 | * Comparisons, note that only equality is an operator. The other | |
445 | * comparisons cannot be operators since they are inherently signed or | |
446 | * unsigned and C++ has no such operators. | |
447 | */ | |
448 | ||
449 | /* Return true if OP0 == OP1. */ | |
450 | bool | |
451 | wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len, | |
452 | const HOST_WIDE_INT *op1, unsigned int op1len, | |
453 | unsigned int prec) | |
454 | { | |
455 | int l0 = op0len - 1; | |
456 | unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1); | |
457 | ||
458 | if (op0len != op1len) | |
459 | return false; | |
460 | ||
461 | if (op0len == BLOCKS_NEEDED (prec) && small_prec) | |
462 | { | |
463 | /* It does not matter if we zext or sext here, we just have to | |
464 | do both the same way. */ | |
465 | if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec)) | |
466 | return false; | |
467 | l0--; | |
468 | } | |
469 | ||
470 | while (l0 >= 0) | |
471 | if (op0[l0] != op1[l0]) | |
472 | return false; | |
473 | else | |
474 | l0--; | |
475 | ||
476 | return true; | |
477 | } | |
478 | ||
/* Return true if OP0 < OP1 using signed comparisons.  */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int precision,
		 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return true;
  if (s0 > s1)
    return false;

  l--;
  while (l >= 0)
    {
      /* SIGNED here only controls how selt extends blocks beyond the
	 stored length; the comparison itself is unsigned because
	 U0/U1 are unsigned.  */
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return true;
      if (u0 > u1)
	return false;
      l--;
    }

  /* All blocks compare equal.  */
  return false;
}
515 | ||
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   signed compares.  */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int precision,
		const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return -1;
  if (s0 > s1)
    return 1;

  l--;
  while (l >= 0)
    {
      /* SIGNED here only controls how selt extends blocks beyond the
	 stored length; the comparison itself is unsigned because
	 U0/U1 are unsigned.  */
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return -1;
      if (u0 > u1)
	return 1;
      l--;
    }

  /* All blocks compare equal.  */
  return 0;
}
553 | ||
554 | /* Return true if OP0 < OP1 using unsigned comparisons. */ | |
555 | bool | |
556 | wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len, | |
557 | unsigned int precision, | |
558 | const HOST_WIDE_INT *op1, unsigned int op1len) | |
559 | { | |
560 | unsigned HOST_WIDE_INT x0; | |
561 | unsigned HOST_WIDE_INT x1; | |
562 | unsigned int blocks_needed = BLOCKS_NEEDED (precision); | |
563 | unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1); | |
564 | int l = MAX (op0len - 1, op1len - 1); | |
565 | ||
566 | while (l >= 0) | |
567 | { | |
568 | x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED); | |
569 | x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED); | |
570 | if (x0 < x1) | |
571 | return true; | |
572 | if (x0 > x1) | |
573 | return false; | |
574 | l--; | |
575 | } | |
576 | ||
577 | return false; | |
578 | } | |
579 | ||
580 | /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using | |
581 | unsigned compares. */ | |
582 | int | |
583 | wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len, | |
584 | unsigned int precision, | |
585 | const HOST_WIDE_INT *op1, unsigned int op1len) | |
586 | { | |
587 | unsigned HOST_WIDE_INT x0; | |
588 | unsigned HOST_WIDE_INT x1; | |
589 | unsigned int blocks_needed = BLOCKS_NEEDED (precision); | |
590 | unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1); | |
591 | int l = MAX (op0len - 1, op1len - 1); | |
592 | ||
593 | while (l >= 0) | |
594 | { | |
595 | x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED); | |
596 | x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED); | |
597 | if (x0 < x1) | |
598 | return -1; | |
599 | if (x0 > x1) | |
600 | return 1; | |
601 | l--; | |
602 | } | |
603 | ||
604 | return 0; | |
605 | } | |
606 | ||
607 | /* | |
608 | * Extension. | |
609 | */ | |
610 | ||
/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already signs.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  /* Copy the blocks that lie entirely below OFFSET unchanged.  */
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  /* Sign-extend within the block containing bit OFFSET, if OFFSET is
     not block-aligned.  */
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}
637 | ||
/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  /* Copy blocks below OFFSET; blocks past XLEN are the implicit sign
     extension, which is all ones here since the top stored bit is
     known to be set at this point.  */
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  /* Zero-extend the partial block at OFFSET, or append an explicit
     zero block when OFFSET is block-aligned.  */
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}
664 | ||
665 | /* | |
666 | * Masking, inserting, shifting, rotating. | |
667 | */ | |
668 | ||
669 | /* Insert WIDTH bits from Y into X starting at START. */ | |
670 | wide_int | |
671 | wi::insert (const wide_int &x, const wide_int &y, unsigned int start, | |
672 | unsigned int width) | |
673 | { | |
674 | wide_int result; | |
675 | wide_int mask; | |
676 | wide_int tmp; | |
677 | ||
678 | unsigned int precision = x.get_precision (); | |
679 | if (start >= precision) | |
680 | return x; | |
681 | ||
682 | gcc_checking_assert (precision >= width); | |
683 | ||
684 | if (start + width >= precision) | |
685 | width = precision - start; | |
686 | ||
687 | mask = wi::shifted_mask (start, width, false, precision); | |
688 | tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start); | |
689 | result = tmp & mask; | |
690 | ||
691 | tmp = wi::bit_and_not (x, mask); | |
692 | result = result | tmp; | |
693 | ||
694 | return result; | |
695 | } | |
696 | ||
/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
	 a new block.  */
      unsigned int len = block + 1;
      /* Copy existing blocks, synthesizing the implicit extension for
	 blocks beyond XLEN.  */
      for (unsigned int i = 0; i < len; i++)
	val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= HOST_WIDE_INT_1U << subbit;

      /* If the bit we just set is at the msb of the block, make sure
	 that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
	{
	  val[len++] = 0;
	  return len;
	}
      return canonize (val, len, precision);
    }
  else
    {
      /* BIT lies within the existing blocks; copy them all and set
	 it.  */
      for (unsigned int i = 0; i < xlen; i++)
	val[i] = xval[i];
      val[block] |= HOST_WIDE_INT_1U << subbit;
      return canonize (val, xlen, precision);
    }
}
733 | ||
/* Byte swap the integer represented by XVAL and LEN into VAL.  Return
   the number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::bswap_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		 unsigned int len, unsigned int precision)
{
  unsigned int i, s;

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  /* Start from zero; bytes are OR-ed in below.  */
  for (i = 0; i < len; i++)
    val[i] = 0;

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      /* D is the destination bit position of the byte at source bit
	 position S: positions are mirrored about the middle.  */
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      /* Extract the source byte, reading implicit extension blocks
	 safely.  */
      byte = (safe_uhwi (xval, len, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  return canonize (val, len, precision);
}
768 | ||
108ff03b RS |
769 | /* Bitreverse the integer represented by XVAL and LEN into VAL. Return |
770 | the number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */ | |
771 | unsigned int | |
772 | wi::bitreverse_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval, | |
773 | unsigned int len, unsigned int precision) | |
774 | { | |
775 | unsigned int i, s; | |
776 | ||
777 | for (i = 0; i < len; i++) | |
778 | val[i] = 0; | |
779 | ||
780 | for (s = 0; s < precision; s++) | |
781 | { | |
782 | unsigned int block = s / HOST_BITS_PER_WIDE_INT; | |
783 | unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1); | |
784 | if (((safe_uhwi (xval, len, block) >> offset) & 1) != 0) | |
785 | { | |
786 | unsigned int d = (precision - 1) - s; | |
787 | block = d / HOST_BITS_PER_WIDE_INT; | |
788 | offset = d & (HOST_BITS_PER_WIDE_INT - 1); | |
e73a307f | 789 | val[block] |= HOST_WIDE_INT_1U << offset; |
108ff03b RS |
790 | } |
791 | } | |
792 | ||
793 | return canonize (val, len, precision); | |
794 | } | |
795 | ||
/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
	  unsigned int prec)
{
  /* A mask covering the whole precision is all ones (all zeros when
     negated) and needs a single block.  */
  if (width >= prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  /* An empty mask likewise needs a single block.  */
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  /* Full blocks entirely below WIDTH.  */
  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* Partial block containing the mask's top bits.  */
      HOST_WIDE_INT last = (HOST_WIDE_INT_1U << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    /* Block-aligned width: add an explicit block so the value does not
       read as sign-extended ones (or zeros when negated).  */
    val[i++] = negate ? -1 : 0;

  return i;
}
829 | ||
/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
		  bool negate, unsigned int prec)
{
  /* A field that starts beyond the precision or has no width is all
     zeros (all ones when negated).  */
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  /* Clip the field so it ends within the precision.  */
  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  /* Full zero blocks below START.  */
  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      /* BLOCK holds the zero bits below START within this block.  */
      HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
      shift += width;
      if (shift < HOST_BITS_PER_WIDE_INT)
	{
	  /* case 000111000 */
	  block = (HOST_WIDE_INT_1U << shift) - block - 1;
	  val[i++] = negate ? ~block : block;
	  return i;
	}
      else
	/* ...111000 */
	val[i++] = negate ? block : ~block;
    }

  /* Field runs to the top of the precision: emit the remaining block
     only if a partial start block was not already emitted above.  */
  if (end >= prec)
    {
      if (!shift)
	val[i++] = negate ? 0 : -1;
      return i;
    }

  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111 */
      HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else
    /* Block-aligned end: add an explicit terminating block so the mask
       is not read as sign-extended.  */
    val[i++] = negate ? -1 : 0;

  return i;
}
891 | ||
892 | /* | |
893 | * logical operations. | |
894 | */ | |
895 | ||
896 | /* Set VAL to OP0 & OP1. Return the number of blocks used. */ | |
897 | unsigned int | |
898 | wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0, | |
899 | unsigned int op0len, const HOST_WIDE_INT *op1, | |
900 | unsigned int op1len, unsigned int prec) | |
901 | { | |
902 | int l0 = op0len - 1; | |
903 | int l1 = op1len - 1; | |
904 | bool need_canon = true; | |
905 | ||
906 | unsigned int len = MAX (op0len, op1len); | |
907 | if (l0 > l1) | |
908 | { | |
909 | HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec); | |
910 | if (op1mask == 0) | |
911 | { | |
912 | l0 = l1; | |
913 | len = l1 + 1; | |
914 | } | |
915 | else | |
916 | { | |
917 | need_canon = false; | |
918 | while (l0 > l1) | |
919 | { | |
920 | val[l0] = op0[l0]; | |
921 | l0--; | |
922 | } | |
923 | } | |
924 | } | |
925 | else if (l1 > l0) | |
926 | { | |
927 | HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec); | |
928 | if (op0mask == 0) | |
929 | len = l0 + 1; | |
930 | else | |
931 | { | |
932 | need_canon = false; | |
933 | while (l1 > l0) | |
934 | { | |
935 | val[l1] = op1[l1]; | |
936 | l1--; | |
937 | } | |
938 | } | |
939 | } | |
940 | ||
941 | while (l0 >= 0) | |
942 | { | |
943 | val[l0] = op0[l0] & op1[l0]; | |
944 | l0--; | |
945 | } | |
946 | ||
947 | if (need_canon) | |
948 | len = canonize (val, len, prec); | |
949 | ||
950 | return len; | |
951 | } | |
952 | ||
953 | /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */ | |
954 | unsigned int | |
955 | wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0, | |
956 | unsigned int op0len, const HOST_WIDE_INT *op1, | |
957 | unsigned int op1len, unsigned int prec) | |
958 | { | |
959 | wide_int result; | |
960 | int l0 = op0len - 1; | |
961 | int l1 = op1len - 1; | |
962 | bool need_canon = true; | |
963 | ||
964 | unsigned int len = MAX (op0len, op1len); | |
965 | if (l0 > l1) | |
966 | { | |
967 | HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec); | |
968 | if (op1mask != 0) | |
969 | { | |
970 | l0 = l1; | |
971 | len = l1 + 1; | |
972 | } | |
973 | else | |
974 | { | |
975 | need_canon = false; | |
976 | while (l0 > l1) | |
977 | { | |
978 | val[l0] = op0[l0]; | |
979 | l0--; | |
980 | } | |
981 | } | |
982 | } | |
983 | else if (l1 > l0) | |
984 | { | |
985 | HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec); | |
986 | if (op0mask == 0) | |
987 | len = l0 + 1; | |
988 | else | |
989 | { | |
990 | need_canon = false; | |
991 | while (l1 > l0) | |
992 | { | |
993 | val[l1] = ~op1[l1]; | |
994 | l1--; | |
995 | } | |
996 | } | |
997 | } | |
998 | ||
999 | while (l0 >= 0) | |
1000 | { | |
1001 | val[l0] = op0[l0] & ~op1[l0]; | |
1002 | l0--; | |
1003 | } | |
1004 | ||
1005 | if (need_canon) | |
1006 | len = canonize (val, len, prec); | |
1007 | ||
1008 | return len; | |
1009 | } | |
1010 | ||
1011 | /* Set VAL to OP0 | OP1. Return the number of blocks used. */ | |
1012 | unsigned int | |
1013 | wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0, | |
1014 | unsigned int op0len, const HOST_WIDE_INT *op1, | |
1015 | unsigned int op1len, unsigned int prec) | |
1016 | { | |
1017 | wide_int result; | |
1018 | int l0 = op0len - 1; | |
1019 | int l1 = op1len - 1; | |
1020 | bool need_canon = true; | |
1021 | ||
1022 | unsigned int len = MAX (op0len, op1len); | |
1023 | if (l0 > l1) | |
1024 | { | |
1025 | HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec); | |
1026 | if (op1mask != 0) | |
1027 | { | |
1028 | l0 = l1; | |
1029 | len = l1 + 1; | |
1030 | } | |
1031 | else | |
1032 | { | |
1033 | need_canon = false; | |
1034 | while (l0 > l1) | |
1035 | { | |
1036 | val[l0] = op0[l0]; | |
1037 | l0--; | |
1038 | } | |
1039 | } | |
1040 | } | |
1041 | else if (l1 > l0) | |
1042 | { | |
1043 | HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec); | |
1044 | if (op0mask != 0) | |
1045 | len = l0 + 1; | |
1046 | else | |
1047 | { | |
1048 | need_canon = false; | |
1049 | while (l1 > l0) | |
1050 | { | |
1051 | val[l1] = op1[l1]; | |
1052 | l1--; | |
1053 | } | |
1054 | } | |
1055 | } | |
1056 | ||
1057 | while (l0 >= 0) | |
1058 | { | |
1059 | val[l0] = op0[l0] | op1[l0]; | |
1060 | l0--; | |
1061 | } | |
1062 | ||
1063 | if (need_canon) | |
1064 | len = canonize (val, len, prec); | |
1065 | ||
1066 | return len; | |
1067 | } | |
1068 | ||
1069 | /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */ | |
1070 | unsigned int | |
1071 | wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0, | |
1072 | unsigned int op0len, const HOST_WIDE_INT *op1, | |
1073 | unsigned int op1len, unsigned int prec) | |
1074 | { | |
1075 | wide_int result; | |
1076 | int l0 = op0len - 1; | |
1077 | int l1 = op1len - 1; | |
1078 | bool need_canon = true; | |
1079 | ||
1080 | unsigned int len = MAX (op0len, op1len); | |
1081 | if (l0 > l1) | |
1082 | { | |
1083 | HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec); | |
1084 | if (op1mask == 0) | |
1085 | { | |
1086 | l0 = l1; | |
1087 | len = l1 + 1; | |
1088 | } | |
1089 | else | |
1090 | { | |
1091 | need_canon = false; | |
1092 | while (l0 > l1) | |
1093 | { | |
1094 | val[l0] = op0[l0]; | |
1095 | l0--; | |
1096 | } | |
1097 | } | |
1098 | } | |
1099 | else if (l1 > l0) | |
1100 | { | |
1101 | HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec); | |
1102 | if (op0mask != 0) | |
1103 | len = l0 + 1; | |
1104 | else | |
1105 | { | |
1106 | need_canon = false; | |
1107 | while (l1 > l0) | |
1108 | { | |
1109 | val[l1] = ~op1[l1]; | |
1110 | l1--; | |
1111 | } | |
1112 | } | |
1113 | } | |
1114 | ||
1115 | while (l0 >= 0) | |
1116 | { | |
1117 | val[l0] = op0[l0] | ~op1[l0]; | |
1118 | l0--; | |
1119 | } | |
1120 | ||
1121 | if (need_canon) | |
1122 | len = canonize (val, len, prec); | |
1123 | ||
1124 | return len; | |
1125 | } | |
1126 | ||
1127 | /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */ | |
1128 | unsigned int | |
1129 | wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0, | |
1130 | unsigned int op0len, const HOST_WIDE_INT *op1, | |
1131 | unsigned int op1len, unsigned int prec) | |
1132 | { | |
1133 | wide_int result; | |
1134 | int l0 = op0len - 1; | |
1135 | int l1 = op1len - 1; | |
1136 | ||
1137 | unsigned int len = MAX (op0len, op1len); | |
1138 | if (l0 > l1) | |
1139 | { | |
1140 | HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec); | |
1141 | while (l0 > l1) | |
1142 | { | |
1143 | val[l0] = op0[l0] ^ op1mask; | |
1144 | l0--; | |
1145 | } | |
1146 | } | |
1147 | ||
1148 | if (l1 > l0) | |
1149 | { | |
1150 | HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec); | |
1151 | while (l1 > l0) | |
1152 | { | |
1153 | val[l1] = op0mask ^ op1[l1]; | |
1154 | l1--; | |
1155 | } | |
1156 | } | |
1157 | ||
1158 | while (l0 >= 0) | |
1159 | { | |
1160 | val[l0] = op0[l0] ^ op1[l0]; | |
1161 | l0--; | |
1162 | } | |
1163 | ||
1164 | return canonize (val, len, prec); | |
1165 | } | |
1166 | ||
1167 | /* | |
1168 | * math | |
1169 | */ | |
1170 | ||
/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, wi::overflow_type *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  /* Missing high blocks of each operand are sign extensions: all zeros
     or all ones depending on the operand's top bit.  */
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);
  /* Add all of the explicitly defined elements.  */

  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      /* Carry out: with no carry in, wraparound means X < O0; with a
	 carry in, equality also indicates wraparound.  */
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      /* There is room below the precision for one more block; emit the
	 sum of the two sign-extension blocks plus the final carry.  */
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
	*overflow
	  = (sgn == UNSIGNED && carry) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
    }
  else if (overflow)
    {
      /* SHIFT moves the bit at position PREC-1 into the HWI's MSB.  */
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  /* Signed overflow occurred iff both operands have the same
	     sign and the result's sign differs; X isolates that bit.  */
	  unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
	  if ((HOST_WIDE_INT) (x << shift) < 0)
	    {
	      if (o0 > (unsigned HOST_WIDE_INT) val[len - 1])
		*overflow = wi::OVF_UNDERFLOW;
	      else if (o0 < (unsigned HOST_WIDE_INT) val[len - 1])
		*overflow = wi::OVF_OVERFLOW;
	      else
		*overflow = wi::OVF_NONE;
	    }
	  else
	    *overflow = wi::OVF_NONE;
	}
      else
	{
	  /* Put the MSB of X and O0 and in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_carry)
	    *overflow = (x <= o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
	  else
	    *overflow = (x < o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
	}
    }

  return canonize (val, len, prec);
}
1243 | ||
1244 | /* Subroutines of the multiplication and division operations. Unpack | |
1245 | the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN | |
1246 | HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by | |
1247 | uncompressing the top bit of INPUT[IN_LEN - 1]. */ | |
1248 | static void | |
1249 | wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input, | |
1250 | unsigned int in_len, unsigned int out_len, | |
1251 | unsigned int prec, signop sgn) | |
1252 | { | |
1253 | unsigned int i; | |
1254 | unsigned int j = 0; | |
1255 | unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1); | |
1256 | unsigned int blocks_needed = BLOCKS_NEEDED (prec); | |
1257 | HOST_WIDE_INT mask; | |
1258 | ||
1259 | if (sgn == SIGNED) | |
1260 | { | |
1261 | mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec); | |
1262 | mask &= HALF_INT_MASK; | |
1263 | } | |
1264 | else | |
1265 | mask = 0; | |
1266 | ||
1267 | for (i = 0; i < blocks_needed - 1; i++) | |
1268 | { | |
1269 | HOST_WIDE_INT x = safe_uhwi (input, in_len, i); | |
1270 | result[j++] = x; | |
1271 | result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT; | |
1272 | } | |
1273 | ||
1274 | HOST_WIDE_INT x = safe_uhwi (input, in_len, i); | |
1275 | if (small_prec) | |
1276 | { | |
1277 | if (sgn == SIGNED) | |
1278 | x = sext_hwi (x, small_prec); | |
1279 | else | |
1280 | x = zext_hwi (x, small_prec); | |
1281 | } | |
1282 | result[j++] = x; | |
1283 | result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT; | |
1284 | ||
1285 | /* Smear the sign bit. */ | |
1286 | while (j < out_len) | |
1287 | result[j++] = mask; | |
1288 | } | |
1289 | ||
5ee31e57 RS |
1290 | /* The inverse of wi_unpack. IN_LEN is the number of input |
1291 | blocks and PRECISION is the precision of the result. Return the | |
1292 | number of blocks in the canonicalized result. */ | |
1293 | static unsigned int | |
1294 | wi_pack (HOST_WIDE_INT *result, | |
807e902e | 1295 | const unsigned HOST_HALF_WIDE_INT *input, |
5ee31e57 | 1296 | unsigned int in_len, unsigned int precision) |
807e902e KZ |
1297 | { |
1298 | unsigned int i = 0; | |
1299 | unsigned int j = 0; | |
5ee31e57 | 1300 | unsigned int blocks_needed = BLOCKS_NEEDED (precision); |
807e902e | 1301 | |
5ee31e57 | 1302 | while (i + 1 < in_len) |
807e902e | 1303 | { |
5ee31e57 RS |
1304 | result[j++] = ((unsigned HOST_WIDE_INT) input[i] |
1305 | | ((unsigned HOST_WIDE_INT) input[i + 1] | |
1306 | << HOST_BITS_PER_HALF_WIDE_INT)); | |
807e902e KZ |
1307 | i += 2; |
1308 | } | |
1309 | ||
1310 | /* Handle the case where in_len is odd. For this we zero extend. */ | |
1311 | if (in_len & 1) | |
5ee31e57 RS |
1312 | result[j++] = (unsigned HOST_WIDE_INT) input[i]; |
1313 | else if (j < blocks_needed) | |
1314 | result[j++] = 0; | |
1315 | return canonize (result, j, precision); | |
807e902e KZ |
1316 | } |
1317 | ||
/* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.

   NOTE: Overflow type for signed overflow is not yet implemented.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
		  unsigned int op1len, const HOST_WIDE_INT *op2val,
		  unsigned int op2len, unsigned int prec, signop sgn,
		  wi::overflow_type *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  /* The sizes here are scaled to support a 2x largest mode by 2x
     largest mode yielding a 4x largest mode result.  This is what is
     needed by vpn.  */

  unsigned HOST_HALF_WIDE_INT
    u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = wi::OVF_NONE;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }

#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
	 least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
	  && wi::fits_uhwi_p (op1)
	  && wi::fits_uhwi_p (op2))
	{
	  /* This case never overflows.  */
	  if (high)
	    {
	      val[0] = 0;
	      return 1;
	    }
	  umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
	  /* If the top block's sign bit is set but more precision is
	     available, append a zero block so the (unsigned) product is
	     not misread as negative.  */
	  if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
	    {
	      val[2] = 0;
	      return 3;
	    }
	  return 1 + (val[1] != 0 || val[0] < 0);
	}
      /* Likewise if the output is a full single HWI, except that the
	 upper HWI of the result is only used for determining overflow.
	 (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT upper;
	  umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
	  if (needs_overflow)
	    /* Unsigned overflow can only be +OVERFLOW.  */
	    *overflow = (upper != 0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
	  if (high)
	    val[0] = upper;
	  return 1;
	}
    }
#endif

  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      if (high)
	{
	  /* The high half of 1 * OP2 is just OP2's sign extension.  */
	  val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op2len; i++)
	val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      if (high)
	{
	  val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op1len; i++)
	val[i] = op1val[i];
      return op1len;
    }

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
	{
	  o0 = op1.to_shwi ();
	  o1 = op2.to_shwi ();
	}
      else
	{
	  o0 = op1.to_uhwi ();
	  o1 = op2.to_uhwi ();
	}

      /* The product of two half-wide values fits in one HWI, so both
	 result halves are visible here.  */
      r = o0 * o1;
      if (needs_overflow)
	{
	  if (sgn == SIGNED)
	    {
	      if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
		/* FIXME: Signed overflow type is not implemented yet.  */
		*overflow = OVF_UNKNOWN;
	    }
	  else
	    {
	      if ((r >> prec) != 0)
		/* Unsigned overflow can only be +OVERFLOW.  */
		*overflow = OVF_OVERFLOW;
	    }
	}
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
  wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
	  * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  /* Schoolbook multiplication on half-wide digits; K carries between
     digit positions.  */
  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
	{
	  t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
	       + r[i + j] + k);
	  r[i + j] = t & HALF_INT_MASK;
	  k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	}
      r[j + half_blocks_needed] = k;
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      /* For each negative operand, subtract the other operand from the
	 upper half of the product (standard signed correction of an
	 unsigned full multiply).  B is the borrow between digits.  */
      unsigned HOST_WIDE_INT b;
      if (wi::neg_p (op1))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)v[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
      if (wi::neg_p (op2))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)u[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
	 For signed, overflow is true if any of the top bits are not equal
	 to the sign bit.  */
      if (sgn == UNSIGNED)
	top = 0;
      else
	{
	  top = r[(half_blocks_needed) - 1];
	  top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
	  top &= mask;
	}

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
	if (((HOST_WIDE_INT)(r[i] & mask)) != top)
	  /* FIXME: Signed overflow type is not implemented yet.  */
	  *overflow = (sgn == UNSIGNED) ? wi::OVF_OVERFLOW : wi::OVF_UNKNOWN;
    }

  /* Pack the half-blocks back into HWIs, taking the upper half of the
     full product when HIGH is set.  */
  int r_offset = high ? half_blocks_needed : 0;
  return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
}
1547 | ||
1548 | /* Compute the population count of X. */ | |
1549 | int | |
1550 | wi::popcount (const wide_int_ref &x) | |
1551 | { | |
1552 | unsigned int i; | |
1553 | int count; | |
1554 | ||
1555 | /* The high order block is special if it is the last block and the | |
1556 | precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We | |
1557 | have to clear out any ones above the precision before doing | |
1558 | popcount on this block. */ | |
1559 | count = x.precision - x.len * HOST_BITS_PER_WIDE_INT; | |
1560 | unsigned int stop = x.len; | |
1561 | if (count < 0) | |
1562 | { | |
1563 | count = popcount_hwi (x.uhigh () << -count); | |
1564 | stop -= 1; | |
1565 | } | |
1566 | else | |
1567 | { | |
1568 | if (x.sign_mask () >= 0) | |
1569 | count = 0; | |
1570 | } | |
1571 | ||
1572 | for (i = 0; i < stop; ++i) | |
1573 | count += popcount_hwi (x.val[i]); | |
1574 | ||
1575 | return count; | |
1576 | } | |
1577 | ||
/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, wi::overflow_type *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  /* Missing high blocks of each operand are sign extensions: all zeros
     or all ones depending on the operand's top bit.  */
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      /* Borrow out: with no borrow in, wraparound means O0 < O1; with a
	 borrow in, equality also requires a borrow.  */
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      /* There is room below the precision for one more block; emit the
	 difference of the sign-extension blocks minus the final borrow.  */
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
	*overflow = (sgn == UNSIGNED && borrow) ? OVF_UNDERFLOW : OVF_NONE;
    }
  else if (overflow)
    {
      /* SHIFT moves the bit at position PREC-1 into the HWI's MSB.  */
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  /* Signed overflow occurred iff the operands have different
	     signs and the result's sign differs from OP0's; X isolates
	     that bit.  */
	  unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
	  if ((HOST_WIDE_INT) (x << shift) < 0)
	    {
	      if (o0 > o1)
		*overflow = OVF_UNDERFLOW;
	      else if (o0 < o1)
		*overflow = OVF_OVERFLOW;
	      else
		*overflow = OVF_NONE;
	    }
	  else
	    *overflow = OVF_NONE;
	}
      else
	{
	  /* Put the MSB of X and O0 and in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_borrow)
	    *overflow = (x >= o0) ? OVF_UNDERFLOW : OVF_NONE;
	  else
	    *overflow = (x > o0) ? OVF_UNDERFLOW : OVF_NONE;
	}
    }

  return canonize (val, len, prec);
}
1653 | ||
1654 | ||
1655 | /* | |
1656 | * Division and Mod | |
1657 | */ | |
1658 | ||
/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U however
   there needs to be at least one extra element of B_DIVIDEND
   allocated, N is the number of elements of B_DIVISOR.  */
static void
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
		   unsigned HOST_HALF_WIDE_INT *b_remainder,
		   unsigned HOST_HALF_WIDE_INT *b_dividend,
		   unsigned HOST_HALF_WIDE_INT *b_divisor,
		   int m, int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
     HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;	/* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;	/* A remainder.  */
  unsigned HOST_WIDE_INT p;	/* Product of two digits.  */
  HOST_WIDE_INT t, k;
  int i, j, s;

  /* Single digit divisor.  Simple long division; K is the running
     remainder carried between digits.  */
  if (n == 1)
    {
      k = 0;
      for (j = m - 1; j >= 0; j--)
	{
	  b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
	  k = ((k * b + b_dividend[j])
	       - ((unsigned HOST_WIDE_INT)b_quotient[j]
		  * (unsigned HOST_WIDE_INT)b_divisor[0]));
	}
      b_remainder[0] = k;
      return;
    }

  /* S is the shift that normalizes the divisor so its top digit has its
     high bit set, which Knuth's algorithm requires for the quotient
     digit estimate to be accurate.  */
  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
	 algorithm, we can overwrite b_dividend and b_divisor, so we do
	 that.  */
      for (i = n - 1; i > 0; i--)
	b_divisor[i] = (b_divisor[i] << s)
	  | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      /* The dividend gains one extra digit from the shift; this is why
	 the caller must allocate M + 1 elements.  */
      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
	b_dividend[i] = (b_dividend[i] << s)
	  | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  Note b_divisor[n-2] is safe here because the N == 1
     case returned above.  */
  for (j = m - n; j >= 0; j--)
    {
      /* Estimate the quotient digit from the top two dividend digits,
	 then correct the estimate downward (at most twice).  */
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
	{
	  qhat -= 1;
	  rhat += b_divisor[n-1];
	  if (rhat < b)
	    goto again;
	}

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
	{
	  p = qhat * b_divisor[i];
	  t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
	  b_dividend[i + j] = t;
	  k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
	       - (t >> HOST_BITS_PER_HALF_WIDE_INT));
	}
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      /* If the subtraction went negative, QHAT was still one too large:
	 decrement it and add the divisor back in.  */
      if (t < 0)
	{
	  b_quotient[j] -= 1;
	  k = 0;
	  for (i = 0; i < n; i++)
	    {
	      t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
	      b_dividend[i+j] = t;
	      k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	    }
	  b_dividend[j+n] += k;
	}
    }
  /* Undo the normalization shift to recover the true remainder.  */
  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
	| (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
}
1767 | ||
1768 | ||
/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.  */
unsigned int
wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
		     HOST_WIDE_INT *remainder,
		     const HOST_WIDE_INT *dividend_val,
		     unsigned int dividend_len, unsigned int dividend_prec,
		     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
		     unsigned int divisor_prec, signop sgn,
		     wi::overflow_type *oflow)
{
  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
  /* Working buffers in half-wide units for the schoolbook division in
     divmod_internal_2.  The dividend buffer has one extra slot for the
     normalization shift; see the b_dividend[m] = 0 below.  */
  unsigned HOST_HALF_WIDE_INT
    b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
  unsigned HOST_HALF_WIDE_INT
    b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned int m, n;
  bool dividend_neg = false;
  bool divisor_neg = false;
  bool overflow = false;
  wide_int neg_dividend, neg_divisor;

  wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
					   dividend_prec);
  wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
					  divisor_prec);
  if (divisor == 0)
    overflow = true;

  /* The smallest signed number / -1 causes overflow.  The dividend_len
     check is for speed rather than correctness.  */
  if (sgn == SIGNED
      && dividend_len == BLOCKS_NEEDED (dividend_prec)
      && divisor == -1
      && wi::only_sign_bit_p (dividend))
    overflow = true;

  /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
     (signed min / -1) has the same representation as the original dividend.
     We have traditionally made division by zero act as division by one,
     so there too we use the original dividend.  */
  if (overflow)
    {
      if (remainder)
	{
	  *remainder_len = 1;
	  remainder[0] = 0;
	}
      if (oflow)
	*oflow = OVF_OVERFLOW;
      if (quotient)
	for (unsigned int i = 0; i < dividend_len; ++i)
	  quotient[i] = dividend_val[i];
      return dividend_len;
    }

  if (oflow)
    *oflow = OVF_NONE;

  /* Do it on the host if you can.  */
  if (sgn == SIGNED
      && wi::fits_shwi_p (dividend)
      && wi::fits_shwi_p (divisor))
    {
      HOST_WIDE_INT o0 = dividend.to_shwi ();
      HOST_WIDE_INT o1 = divisor.to_shwi ();

      /* HOST_WIDE_INT_MIN / -1 would trap on the host; handle it by
	 hand.  The overflow check above only catches the case where
	 the dividend fills its own precision, so this can still occur
	 for wider precisions.  */
      if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
	{
	  gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
	  if (quotient)
	    {
	      /* The true quotient is -HOST_WIDE_INT_MIN, which needs
		 two blocks: low block all-sign-bit, high block zero.  */
	      quotient[0] = HOST_WIDE_INT_MIN;
	      quotient[1] = 0;
	    }
	  if (remainder)
	    {
	      remainder[0] = 0;
	      *remainder_len = 1;
	    }
	  return 2;
	}
      else
	{
	  if (quotient)
	    quotient[0] = o0 / o1;
	  if (remainder)
	    {
	      remainder[0] = o0 % o1;
	      *remainder_len = 1;
	    }
	  return 1;
	}
    }

  if (sgn == UNSIGNED
      && wi::fits_uhwi_p (dividend)
      && wi::fits_uhwi_p (divisor))
    {
      unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
      unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
      unsigned int quotient_len = 1;

      if (quotient)
	{
	  quotient[0] = o0 / o1;
	  quotient_len = canonize_uhwi (quotient, dividend_prec);
	}
      if (remainder)
	{
	  remainder[0] = o0 % o1;
	  *remainder_len = canonize_uhwi (remainder, dividend_prec);
	}
      return quotient_len;
    }

  /* Make the divisor and dividend positive and remember what we
     did.  */
  if (sgn == SIGNED)
    {
      if (wi::neg_p (dividend))
	{
	  neg_dividend = -dividend;
	  dividend = neg_dividend;
	  dividend_neg = true;
	}
      if (wi::neg_p (divisor))
	{
	  neg_divisor = -divisor;
	  divisor = neg_divisor;
	  divisor_neg = true;
	}
    }

  /* Split both operands into half-wide digits for the long division.  */
  wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
	     dividend_blocks_needed, dividend_prec, UNSIGNED);
  wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
	     divisor_blocks_needed, divisor_prec, UNSIGNED);

  /* Trim leading zero digits; M and N are the significant digit counts
     of the dividend and divisor respectively.  The extra slot written
     here is consumed by the normalization step inside
     divmod_internal_2.  */
  m = dividend_blocks_needed;
  b_dividend[m] = 0;
  while (m > 1 && b_dividend[m - 1] == 0)
    m--;

  n = divisor_blocks_needed;
  while (n > 1 && b_divisor[n - 1] == 0)
    n--;

  memset (b_quotient, 0, sizeof (b_quotient));

  divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);

  unsigned int quotient_len = 0;
  if (quotient)
    {
      quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
      /* The quotient is neg if exactly one of the divisor or dividend is
	 neg.  Negate by subtracting from zero.  */
      if (dividend_neg != divisor_neg)
	quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
				      quotient_len, dividend_prec,
				      UNSIGNED, 0);
    }

  if (remainder)
    {
      *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
      /* The remainder is always the same sign as the dividend.  */
      if (dividend_neg)
	*remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
					*remainder_len, dividend_prec,
					UNSIGNED, 0);
    }

  return quotient_len;
}
1955 | ||
1956 | /* | |
1957 | * Shifting, rotating and extraction. | |
1958 | */ | |
1959 | ||
/* Left shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		  unsigned int xlen, unsigned int precision,
		  unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* The whole-block shift fills with zeros.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  for (unsigned int i = 0; i < skip; ++i)
    val[i] = 0;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i - skip);
  else
    {
      /* The first unfilled output block is a left shift of the first
	 block in XVAL.  The other output blocks contain bits from two
	 consecutive input blocks.  */
      unsigned HOST_WIDE_INT carry = 0;
      for (unsigned int i = skip; i < len; ++i)
	{
	  unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
	  val[i] = (x << small_shift) | carry;
	  /* -SMALL_SHIFT % HOST_BITS_PER_WIDE_INT is
	     HOST_BITS_PER_WIDE_INT - SMALL_SHIFT here (SMALL_SHIFT is
	     nonzero in this branch), written this way to avoid any
	     shift by the full type width.  */
	  carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
  return canonize (val, len, precision);
}
1995 | ||
/* Right shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  The input has XPRECISION bits and the
   output has XPRECISION - SHIFT bits.  */
static unsigned int
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		     unsigned int xlen, unsigned int xprecision,
		     unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int len = BLOCKS_NEEDED (xprecision - shift);

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = 0; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i + skip);
  else
    {
      /* Each output block but the last is a combination of two input blocks.
	 The last block is a right shift of the last block in XVAL.
	 safe_uhwi extends with the sign mask past XLEN, so the final
	 iteration reading block I + SKIP + 1 is safe.  */
      unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
      for (unsigned int i = 0; i < len; ++i)
	{
	  val[i] = curr >> small_shift;
	  curr = safe_uhwi (xval, xlen, i + skip + 1);
	  /* As in lshift_large, this is a shift by
	     HOST_BITS_PER_WIDE_INT - SMALL_SHIFT that avoids a
	     full-width shift expression.  */
	  val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
  return len;
}
2030 | ||
/* Logically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Zero-extend it to wider precisions.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	/* The top block has spare bits; clearing them zero-extends.  */
	val[len - 1] = zext_hwi (val[len - 1], small_prec);
      else if (val[len - 1] < 0)
	{
	  /* The top block is full and reads as negative, which canonize
	     would treat as an implicit sign extension.  Add a new block
	     with a zero to force zero extension instead.  */
	  val[len++] = 0;
	  return len;
	}
    }
  return canonize (val, len, precision);
}
2057 | ||
/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Sign-extend it to wider types.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	/* Replicate the sign bit of the narrowed value through the
	   spare bits of the top block; a full top block already
	   carries its own sign and needs nothing.  */
	val[len - 1] = sext_hwi (val[len - 1], small_prec);
    }
  return canonize (val, len, precision);
}
2078 | ||
/* Return the number of leading (upper) zeros in X.  */
int
wi::clz (const wide_int_ref &x)
{
  if (x.sign_mask () < 0)
    /* The upper bit is set, so there are no leading zeros.  */
    return 0;

  /* Calculate how many bits there are above the highest represented
     block.  Negative when the top block extends past the precision.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  if (count < 0)
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out.  */
    high = (high << -count) >> -count;

  /* We don't need to look below HIGH.  Either HIGH is nonzero,
     or the top bit of the block below is nonzero; clz_hwi is
     HOST_BITS_PER_WIDE_INT in the latter case.  */
  return count + clz_hwi (high);
}
2101 | ||
/* Return the number of redundant sign bits in X.  (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.)  */
int
wi::clrsb (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented
     block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  unsigned HOST_WIDE_INT mask = -1;
  if (count < 0)
    {
      /* The upper -COUNT bits of HIGH are not part of the value.
	 Clear them from both MASK and HIGH.  */
      mask >>= -count;
      high &= mask;
    }

  /* If the top bit is 1, count the number of leading 1s.  If the top
     bit is zero, count the number of leading zeros.  XORing with MASK
     flips every in-range bit, turning leading 1s into leading 0s so a
     single clz covers both cases.  */
  if (high > mask / 2)
    high ^= mask;

  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is 0.  */
  return count + clz_hwi (high) - 1;
}
2131 | ||
2132 | /* Return the number of trailing (lower) zeros in X. */ | |
2133 | int | |
2134 | wi::ctz (const wide_int_ref &x) | |
2135 | { | |
2136 | if (x.len == 1 && x.ulow () == 0) | |
2137 | return x.precision; | |
2138 | ||
2139 | /* Having dealt with the zero case, there must be a block with a | |
2140 | nonzero bit. We don't care about the bits above the first 1. */ | |
2141 | unsigned int i = 0; | |
2142 | while (x.val[i] == 0) | |
2143 | ++i; | |
2144 | return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]); | |
2145 | } | |
2146 | ||
/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1.  */
int
wi::exact_log2 (const wide_int_ref &x)
{
  /* Reject cases where there are implicit -1 blocks above HIGH.
     Such values are negative and cannot be powers of 2.  */
  if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
    return -1;

  /* Set CRUX to the index of the entry that should be nonzero.
     If the top block is zero then the next lowest block (if any)
     must have the high bit set.  */
  unsigned int crux = x.len - 1;
  if (crux > 0 && x.val[crux] == 0)
    crux -= 1;

  /* Check that all lower blocks are zero.  A power of 2 has exactly
     one set bit, so any other nonzero block disqualifies X.  */
  for (unsigned int i = 0; i < crux; ++i)
    if (x.val[i] != 0)
      return -1;

  /* Get a zero-extended form of block CRUX.  */
  unsigned HOST_WIDE_INT hwi = x.val[crux];
  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
    hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);

  /* Now it's down to whether HWI is a power of 2.  */
  int res = ::exact_log2 (hwi);
  if (res >= 0)
    res += crux * HOST_BITS_PER_WIDE_INT;
  return res;
}
2179 | ||
2180 | /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */ | |
2181 | int | |
2182 | wi::floor_log2 (const wide_int_ref &x) | |
2183 | { | |
2184 | return x.precision - 1 - clz (x); | |
2185 | } | |
2186 | ||
2187 | /* Return the index of the first (lowest) set bit in X, counting from 1. | |
2188 | Return 0 if X is 0. */ | |
2189 | int | |
2190 | wi::ffs (const wide_int_ref &x) | |
2191 | { | |
2192 | return eq_p (x, 0) ? 0 : ctz (x) + 1; | |
2193 | } | |
2194 | ||
2195 | /* Return true if sign-extending X to have precision PRECISION would give | |
2196 | the minimum signed value at that precision. */ | |
2197 | bool | |
2198 | wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision) | |
2199 | { | |
2200 | return ctz (x) + 1 == int (precision); | |
2201 | } | |
2202 | ||
2203 | /* Return true if X represents the minimum signed value. */ | |
2204 | bool | |
2205 | wi::only_sign_bit_p (const wide_int_ref &x) | |
2206 | { | |
2207 | return only_sign_bit_p (x, x.precision); | |
2208 | } | |
2209 | ||
/* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
   down to the previous value that has no bits set outside MASK.
   This rounding wraps for signed values if VAL is negative and
   the top bit of MASK is clear.

   For example, round_down_for_mask (6, 0xf1) would give 1 and
   round_down_for_mask (24, 0xf1) would give 17.  */

wide_int
wi::round_down_for_mask (const wide_int &val, const wide_int &mask)
{
  /* Get the bits in VAL that are outside the mask.  */
  wide_int extra_bits = wi::bit_and_not (val, mask);
  if (extra_bits == 0)
    return val;

  /* Get a mask that includes the top bit in EXTRA_BITS and is all 1s
     below that bit.  */
  unsigned int precision = val.get_precision ();
  wide_int lower_mask = wi::mask (precision - wi::clz (extra_bits),
				  false, precision);

  /* Clear the bits that aren't in MASK, but ensure that all bits
     in MASK below the top cleared bit are set.  This produces the
     largest in-mask value not exceeding VAL.  */
  return (val & mask) | (mask & lower_mask);
}
2236 | ||
/* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
   up to the next value that has no bits set outside MASK.  The rounding
   wraps if there are no suitable values greater than VAL.

   For example, round_up_for_mask (6, 0xf1) would give 16 and
   round_up_for_mask (24, 0xf1) would give 32.  */

wide_int
wi::round_up_for_mask (const wide_int &val, const wide_int &mask)
{
  /* Get the bits in VAL that are outside the mask.  */
  wide_int extra_bits = wi::bit_and_not (val, mask);
  if (extra_bits == 0)
    return val;

  /* Get a mask that is all 1s above the top bit in EXTRA_BITS.  */
  unsigned int precision = val.get_precision ();
  wide_int upper_mask = wi::mask (precision - wi::clz (extra_bits),
				  true, precision);

  /* Get the bits of the mask that are above the top bit in EXTRA_BITS.  */
  upper_mask &= mask;

  /* Conceptually we need to:

     - clear bits of VAL outside UPPER_MASK
     - add the lowest bit in UPPER_MASK to VAL (or add 0 if UPPER_MASK is 0)
     - propagate the carry through the bits of VAL in UPPER_MASK

     If (~VAL & UPPER_MASK) is nonzero, the carry eventually
     reaches that bit and the process leaves all lower bits clear.
     If (~VAL & UPPER_MASK) is zero then the result is also zero.  */
  wide_int tmp = wi::bit_and_not (upper_mask, val);

  /* TMP's lowest set bit is where the carry lands; -TMP keeps that bit
     and everything above it, clearing all lower bits in one step.  */
  return (val | tmp) & -tmp;
}
2273 | ||
/* Compute the modular multiplicative inverse of A modulo B
   using extended Euclid's algorithm.  Assumes A and B are coprime,
   and that A and B have the same precision.  */
wide_int
wi::mod_inv (const wide_int &a, const wide_int &b)
{
  /* Verify the assumption.  */
  gcc_checking_assert (wi::eq_p (wi::gcd (a, b), 1));

  /* Work in one extra bit of precision so the signed intermediate
     coefficients (X0 can go negative) never overflow.  */
  unsigned int p = a.get_precision () + 1;
  gcc_checking_assert (b.get_precision () + 1 == p);
  wide_int c = wide_int::from (a, p, UNSIGNED);
  wide_int d = wide_int::from (b, p, UNSIGNED);
  wide_int x0 = wide_int::from (0, p, UNSIGNED);
  wide_int x1 = wide_int::from (1, p, UNSIGNED);

  if (wi::eq_p (b, 1))
    return wide_int::from (1, p, UNSIGNED);

  /* Standard extended-Euclid iteration: (c, d) <- (d, c mod d) while
     tracking the Bezout coefficient of A in (x1, x0).  */
  while (wi::gt_p (c, 1, UNSIGNED))
    {
      wide_int t = d;
      wide_int q = wi::divmod_trunc (c, d, UNSIGNED, &d);
      c = t;
      wide_int s = x0;
      x0 = wi::sub (x1, wi::mul (q, x0));
      x1 = s;
    }
  /* Normalize a negative coefficient into the range [0, B).  */
  if (wi::lt_p (x1, 0, SIGNED))
    x1 += d;
  return x1;
}
2306 | ||
/*
 * Private utilities.
 */

/* Garbage-collector and precompiled-header hooks for widest_int.
   These are no-ops -- presumably widest_int holds no GC-managed
   pointers that would need marking or relocation; TODO confirm
   against gengtype's requirements.  */
void gt_ggc_mx (widest_int *) { }
void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
void gt_pch_nx (widest_int *) { }

/* Explicit instantiations of the dump routines so debuggers can call
   them for the common wide-int flavors.  */
template void wide_int::dump () const;
template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
template void offset_int::dump () const;
template void widest_int::dump () const;
d9b950dd | 2320 | |
/* We could add all the above ::dump variants here, but wide_int and
   widest_int should handle the common cases.  Besides, you can always
   call the dump method directly.  */

/* Print REF to stderr via its dump method, for use from a debugger.  */
DEBUG_FUNCTION void
debug (const wide_int &ref)
{
  ref.dump ();
}
2330 | ||
2331 | DEBUG_FUNCTION void | |
2332 | debug (const wide_int *ptr) | |
2333 | { | |
2334 | if (ptr) | |
2335 | debug (*ptr); | |
2336 | else | |
2337 | fprintf (stderr, "<nil>\n"); | |
2338 | } | |
2339 | ||
/* Print REF to stderr via its dump method, for use from a debugger.  */
DEBUG_FUNCTION void
debug (const widest_int &ref)
{
  ref.dump ();
}
2345 | ||
2346 | DEBUG_FUNCTION void | |
2347 | debug (const widest_int *ptr) | |
2348 | { | |
2349 | if (ptr) | |
2350 | debug (*ptr); | |
2351 | else | |
2352 | fprintf (stderr, "<nil>\n"); | |
2353 | } | |
d9b950dd DM |
2354 | |
#if CHECKING_P

namespace selftest {

/* Selftests for wide ints.  We run these multiple times, once per type.  */

/* Helper function for building a test value.  */

template <class VALUE_TYPE>
static VALUE_TYPE
from_int (int i);

/* Specializations of the fixture for each wide-int type.  */

/* Specialization for VALUE_TYPE == wide_int.  */

template <>
wide_int
from_int (int i)
{
  /* 32 bits of precision is enough for the small constants the tests
     below use.  */
  return wi::shwi (i, 32);
}

/* Specialization for VALUE_TYPE == offset_int.  */

template <>
offset_int
from_int (int i)
{
  return offset_int (i);
}

/* Specialization for VALUE_TYPE == widest_int.  */

template <>
widest_int
from_int (int i)
{
  return widest_int (i);
}

/* Verify that print_dec (WI, ..., SGN) gives the expected string
   representation (using base 10).  */

static void
assert_deceq (const char *expected, const wide_int_ref &wi, signop sgn)
{
  char buf[WIDE_INT_PRINT_BUFFER_SIZE];
  print_dec (wi, buf, sgn);
  ASSERT_STREQ (expected, buf);
}

/* Likewise for base 16.  */

static void
assert_hexeq (const char *expected, const wide_int_ref &wi)
{
  char buf[WIDE_INT_PRINT_BUFFER_SIZE];
  print_hex (wi, buf);
  ASSERT_STREQ (expected, buf);
}

/* Test cases.  */

/* Verify that print_dec and print_hex work for VALUE_TYPE.  */

template <class VALUE_TYPE>
static void
test_printing ()
{
  VALUE_TYPE a = from_int<VALUE_TYPE> (42);
  assert_deceq ("42", a, SIGNED);
  assert_hexeq ("0x2a", a);
  /* Check printing of values whose precision is not a whole number of
     HOST_WIDE_INT blocks.  */
  assert_hexeq ("0x1fffffffffffffffff", wi::shwi (-1, 69));
  assert_hexeq ("0xffffffffffffffff", wi::mask (64, false, 69));
  assert_hexeq ("0xffffffffffffffff", wi::mask <widest_int> (64, false));
  if (WIDE_INT_MAX_PRECISION > 128)
    {
      assert_hexeq ("0x20000000000000000fffffffffffffffe",
		    wi::lshift (1, 129) + wi::lshift (1, 64) - 2);
      assert_hexeq ("0x200000000000004000123456789abcdef",
		    wi::lshift (1, 129) + wi::lshift (1, 74)
		    + wi::lshift (0x1234567, 32) + 0x89abcdef);
    }
}

/* Verify that various operations work correctly for VALUE_TYPE,
   unary and binary, using both function syntax, and
   overloaded-operators.  */

template <class VALUE_TYPE>
static void
test_ops ()
{
  VALUE_TYPE a = from_int<VALUE_TYPE> (7);
  VALUE_TYPE b = from_int<VALUE_TYPE> (3);

  /* Using functions.  */
  assert_deceq ("-7", wi::neg (a), SIGNED);
  assert_deceq ("10", wi::add (a, b), SIGNED);
  assert_deceq ("4", wi::sub (a, b), SIGNED);
  assert_deceq ("-4", wi::sub (b, a), SIGNED);
  assert_deceq ("21", wi::mul (a, b), SIGNED);

  /* Using operators.  */
  assert_deceq ("-7", -a, SIGNED);
  assert_deceq ("10", a + b, SIGNED);
  assert_deceq ("4", a - b, SIGNED);
  assert_deceq ("-4", b - a, SIGNED);
  assert_deceq ("21", a * b, SIGNED);
}

/* Verify that various comparisons work correctly for VALUE_TYPE.  */

template <class VALUE_TYPE>
static void
test_comparisons ()
{
  VALUE_TYPE a = from_int<VALUE_TYPE> (7);
  VALUE_TYPE b = from_int<VALUE_TYPE> (3);

  /* == */
  ASSERT_TRUE (wi::eq_p (a, a));
  ASSERT_FALSE (wi::eq_p (a, b));

  /* != */
  ASSERT_TRUE (wi::ne_p (a, b));
  ASSERT_FALSE (wi::ne_p (a, a));

  /* < */
  ASSERT_FALSE (wi::lts_p (a, a));
  ASSERT_FALSE (wi::lts_p (a, b));
  ASSERT_TRUE (wi::lts_p (b, a));

  /* <= */
  ASSERT_TRUE (wi::les_p (a, a));
  ASSERT_FALSE (wi::les_p (a, b));
  ASSERT_TRUE (wi::les_p (b, a));

  /* > */
  ASSERT_FALSE (wi::gts_p (a, a));
  ASSERT_TRUE (wi::gts_p (a, b));
  ASSERT_FALSE (wi::gts_p (b, a));

  /* >= */
  ASSERT_TRUE (wi::ges_p (a, a));
  ASSERT_TRUE (wi::ges_p (a, b));
  ASSERT_FALSE (wi::ges_p (b, a));

  /* comparison */
  ASSERT_EQ (-1, wi::cmps (b, a));
  ASSERT_EQ (0, wi::cmps (a, a));
  ASSERT_EQ (1, wi::cmps (a, b));
}

/* Run all of the selftests, using the given VALUE_TYPE.  */

template <class VALUE_TYPE>
static void run_all_wide_int_tests ()
{
  test_printing <VALUE_TYPE> ();
  test_ops <VALUE_TYPE> ();
  test_comparisons <VALUE_TYPE> ();
}

/* Test overflow conditions.  */

static void
test_overflow ()
{
  /* Precisions at, just below and just above block boundaries, with
     offsets chosen so each add/sub either just overflows or just
     doesn't.  */
  static int precs[] = { 31, 32, 33, 63, 64, 65, 127, 128 };
  static int offsets[] = { 16, 1, 0 };
  for (unsigned int i = 0; i < ARRAY_SIZE (precs); ++i)
    for (unsigned int j = 0; j < ARRAY_SIZE (offsets); ++j)
      {
	int prec = precs[i];
	int offset = offsets[j];
	wi::overflow_type overflow;
	wide_int sum, diff;

	sum = wi::add (wi::max_value (prec, UNSIGNED) - offset, 1,
		       UNSIGNED, &overflow);
	ASSERT_EQ (sum, -offset);
	ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);

	sum = wi::add (1, wi::max_value (prec, UNSIGNED) - offset,
		       UNSIGNED, &overflow);
	ASSERT_EQ (sum, -offset);
	ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);

	diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
			wi::max_value (prec, UNSIGNED),
			UNSIGNED, &overflow);
	ASSERT_EQ (diff, -offset);
	ASSERT_EQ (overflow != wi::OVF_NONE, offset != 0);

	diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
			wi::max_value (prec, UNSIGNED) - 1,
			UNSIGNED, &overflow);
	ASSERT_EQ (diff, 1 - offset);
	ASSERT_EQ (overflow != wi::OVF_NONE, offset > 1);
      }
}

/* Test the round_{down,up}_for_mask functions.  */

static void
test_round_for_mask ()
{
  unsigned int prec = 18;
  ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (17, prec),
					  wi::shwi (0xf1, prec)));
  ASSERT_EQ (17, wi::round_up_for_mask (wi::shwi (17, prec),
					wi::shwi (0xf1, prec)));

  ASSERT_EQ (1, wi::round_down_for_mask (wi::shwi (6, prec),
					 wi::shwi (0xf1, prec)));
  ASSERT_EQ (16, wi::round_up_for_mask (wi::shwi (6, prec),
					wi::shwi (0xf1, prec)));

  ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (24, prec),
					  wi::shwi (0xf1, prec)));
  ASSERT_EQ (32, wi::round_up_for_mask (wi::shwi (24, prec),
					wi::shwi (0xf1, prec)));

  ASSERT_EQ (0x011, wi::round_down_for_mask (wi::shwi (0x22, prec),
					     wi::shwi (0x111, prec)));
  ASSERT_EQ (0x100, wi::round_up_for_mask (wi::shwi (0x22, prec),
					   wi::shwi (0x111, prec)));

  ASSERT_EQ (100, wi::round_down_for_mask (wi::shwi (101, prec),
					   wi::shwi (0xfc, prec)));
  ASSERT_EQ (104, wi::round_up_for_mask (wi::shwi (101, prec),
					 wi::shwi (0xfc, prec)));

  ASSERT_EQ (0x2bc, wi::round_down_for_mask (wi::shwi (0x2c2, prec),
					     wi::shwi (0xabc, prec)));
  ASSERT_EQ (0x800, wi::round_up_for_mask (wi::shwi (0x2c2, prec),
					   wi::shwi (0xabc, prec)));

  /* Rounding up past the largest in-mask value wraps to zero.  */
  ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0xabd, prec),
					     wi::shwi (0xabc, prec)));
  ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0xabd, prec),
				       wi::shwi (0xabc, prec)));

  ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0x1000, prec),
					     wi::shwi (0xabc, prec)));
  ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0x1000, prec),
				       wi::shwi (0xabc, prec)));
}

/* Run all of the selftests within this file, for all value types.  */

void
wide_int_cc_tests ()
{
  run_all_wide_int_tests <wide_int> ();
  run_all_wide_int_tests <offset_int> ();
  run_all_wide_int_tests <widest_int> ();
  test_overflow ();
  test_round_for_mask ();
  ASSERT_EQ (wi::mask (128, false, 128),
	     wi::shifted_mask (0, 128, false, 128));
  ASSERT_EQ (wi::mask (128, true, 128),
	     wi::shifted_mask (0, 128, true, 128));
}

} // namespace selftest
#endif /* CHECKING_P */