1 /* Operations with long integers.
2 Copyright (C) 2006, 2007, 2009, 2010 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25
26 /* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
27 overflow. Suppose A, B and SUM have the same respective signs as A1, B1,
28 and SUM1. Then this yields nonzero if overflow occurred during the
29 addition.
30
31 Overflow occurs if A and B have the same sign, but A and SUM differ in
32 sign. Use `^' to test whether signs differ, and `< 0' to isolate the
33 sign. */
34 #define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
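/* For example, assuming a 64-bit HOST_WIDE_INT, adding
   a = b = 0x4000000000000000 gives sum = 0x8000000000000000:
   ~(a ^ b) is all ones and a ^ sum is 0xc000000000000000, so their AND
   has the sign bit set, is < 0, and overflow is reported.  */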
35
36 /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
37 We do that by representing the two-word integer in 4 words, with only
38 HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive
39 number. The value of each word is LOWPART + HIGHPART * BASE. */
40
41 #define LOWPART(x) \
42 ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1))
43 #define HIGHPART(x) \
44 ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2)
45 #define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2)
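/* For example, assuming a 64-bit HOST_WIDE_INT, BASE is 2^32,
   LOWPART (0x123456789abcdef0) is 0x9abcdef0 and
   HIGHPART (0x123456789abcdef0) is 0x12345678, so any word x satisfies
   x == LOWPART (x) + HIGHPART (x) * BASE.  */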
46
47 /* Unpack a two-word integer into 4 words.
48 LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
49 WORDS points to the array of HOST_WIDE_INTs. */
50
51 static void
52 encode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
53 {
54 words[0] = LOWPART (low);
55 words[1] = HIGHPART (low);
56 words[2] = LOWPART (hi);
57 words[3] = HIGHPART (hi);
58 }
59
60 /* Pack an array of 4 words into a two-word integer.
61 WORDS points to the array of words.
62 The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
63
64 static void
65 decode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT *low,
66 HOST_WIDE_INT *hi)
67 {
68 *low = words[0] + words[1] * BASE;
69 *hi = words[2] + words[3] * BASE;
70 }
71
72 /* Force the double-word integer L1, H1 to be within the range of the
73 integer type TYPE. Stores the properly truncated and sign-extended
74 double-word integer in *LV, *HV. Returns true if the operation
75 overflows, that is, argument and result are different. */
76
77 int
78 fit_double_type (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
79 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, const_tree type)
80 {
81 unsigned HOST_WIDE_INT low0 = l1;
82 HOST_WIDE_INT high0 = h1;
83 unsigned int prec = TYPE_PRECISION (type);
84 int sign_extended_type;
85
86 /* Size types *are* sign extended. */
87 sign_extended_type = (!TYPE_UNSIGNED (type)
88 || (TREE_CODE (type) == INTEGER_TYPE
89 && TYPE_IS_SIZETYPE (type)));
90
91 /* First clear all bits that are beyond the type's precision. */
92 if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
93 ;
94 else if (prec > HOST_BITS_PER_WIDE_INT)
95 h1 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
96 else
97 {
98 h1 = 0;
99 if (prec < HOST_BITS_PER_WIDE_INT)
100 l1 &= ~((HOST_WIDE_INT) (-1) << prec);
101 }
102
103 /* Then do sign extension if necessary. */
104 if (!sign_extended_type)
105 /* No sign extension */;
106 else if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
107 /* Correct width already. */;
108 else if (prec > HOST_BITS_PER_WIDE_INT)
109 {
110 /* Sign extend top half? */
111 if (h1 & ((unsigned HOST_WIDE_INT)1
112 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
113 h1 |= (HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT);
114 }
115 else if (prec == HOST_BITS_PER_WIDE_INT)
116 {
117 if ((HOST_WIDE_INT)l1 < 0)
118 h1 = -1;
119 }
120 else
121 {
122 /* Sign extend bottom half? */
123 if (l1 & ((unsigned HOST_WIDE_INT)1 << (prec - 1)))
124 {
125 h1 = -1;
126 l1 |= (HOST_WIDE_INT)(-1) << prec;
127 }
128 }
129
130 *lv = l1;
131 *hv = h1;
132
133 /* If the value didn't fit, signal overflow. */
134 return l1 != low0 || h1 != high0;
135 }
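/* For example, assuming a 64-bit HOST_WIDE_INT and a signed 8-bit TYPE,
   the argument l1 = 0x80, h1 = 0 is truncated to its low 8 bits and then
   sign extended, giving *lv = 0xffffffffffffff80, *hv = -1 (the value
   -128); since this differs from the argument, nonzero is returned to
   signal overflow.  */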
136
137 /* We force the double-word integer HIGH:LOW into the range of the
138 type TYPE by sign- or zero-extending it.
139 OVERFLOWABLE indicates whether we are interested
140 in overflow of the value: when >0 we are only interested in signed
141 overflow, for <0 we are interested in any overflow. OVERFLOWED
142 indicates whether overflow has already occurred.
143 We force the value to be within the range of TYPE (by clearing or
144 setting all the bits outside the type's precision). We set
145 TREE_OVERFLOW on the result if
146 OVERFLOWED is nonzero,
147 or OVERFLOWABLE is >0 and signed overflow occurs,
148 or OVERFLOWABLE is <0 and any overflow occurs.
149 We return a new tree node for the extended double-int. The node
150 is shared if no overflow flags are set. */
151
152 tree
153 force_fit_type_double (tree type, unsigned HOST_WIDE_INT low,
154 HOST_WIDE_INT high, int overflowable,
155 bool overflowed)
156 {
157 int sign_extended_type;
158 bool overflow;
159
160 /* Size types *are* sign extended. */
161 sign_extended_type = (!TYPE_UNSIGNED (type)
162 || (TREE_CODE (type) == INTEGER_TYPE
163 && TYPE_IS_SIZETYPE (type)));
164
165 overflow = fit_double_type (low, high, &low, &high, type);
166
167 /* If we need to set overflow flags, return a new unshared node. */
168 if (overflowed || overflow)
169 {
170 if (overflowed
171 || overflowable < 0
172 || (overflowable > 0 && sign_extended_type))
173 {
174 tree t = make_node (INTEGER_CST);
175 TREE_INT_CST_LOW (t) = low;
176 TREE_INT_CST_HIGH (t) = high;
177 TREE_TYPE (t) = type;
178 TREE_OVERFLOW (t) = 1;
179 return t;
180 }
181 }
182
183 /* Else build a shared node. */
184 return build_int_cst_wide (type, low, high);
185 }
186
187 /* Add two doubleword integers with doubleword result.
188 Return nonzero if the operation overflows according to UNSIGNED_P.
189 Each argument is given as two `HOST_WIDE_INT' pieces.
190 One argument is L1 and H1; the other, L2 and H2.
191 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
192
193 int
194 add_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
195 unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
196 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
197 bool unsigned_p)
198 {
199 unsigned HOST_WIDE_INT l;
200 HOST_WIDE_INT h;
201
202 l = l1 + l2;
203 h = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) h1
204 + (unsigned HOST_WIDE_INT) h2
205 + (l < l1));
206
207 *lv = l;
208 *hv = h;
209
210 if (unsigned_p)
211 return ((unsigned HOST_WIDE_INT) h < (unsigned HOST_WIDE_INT) h1
212 || (h == h1
213 && l < l1));
214 else
215 return OVERFLOW_SUM_SIGN (h1, h2, h);
216 }
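/* For example, assuming a 64-bit HOST_WIDE_INT, adding l2 = 1, h2 = 0 to
   l1 and h1 with all bits set gives *lv = 0, *hv = 0.  With UNSIGNED_P
   true this reports overflow (the unsigned sum 2^128 does not fit),
   while with UNSIGNED_P false it does not, since the signed sum is
   -1 + 1 = 0.  */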
217
218 /* Negate a doubleword integer with doubleword result.
219 Return nonzero if the operation overflows, assuming it's signed.
220 The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
221 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
222
223 int
224 neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
225 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
226 {
227 if (l1 == 0)
228 {
229 *lv = 0;
230 *hv = - h1;
231 return (*hv & h1) < 0;
232 }
233 else
234 {
235 *lv = -l1;
236 *hv = ~h1;
237 return 0;
238 }
239 }
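/* For example, negating l1 = 1, h1 = 0 gives *lv with all bits set and
   *hv = -1, i.e. the value -1, and returns 0.  Negating l1 = 0 with h1
   equal to the minimum HOST_WIDE_INT (only its sign bit set) wraps back
   to the same value, the only case in which nonzero is returned.  */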
240
241 /* Multiply two doubleword integers with doubleword result.
242 Return nonzero if the operation overflows according to UNSIGNED_P.
243 Each argument is given as two `HOST_WIDE_INT' pieces.
244 One argument is L1 and H1; the other, L2 and H2.
245 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
246
247 int
248 mul_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
249 unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
250 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
251 bool unsigned_p)
252 {
253 HOST_WIDE_INT arg1[4];
254 HOST_WIDE_INT arg2[4];
255 HOST_WIDE_INT prod[4 * 2];
256 unsigned HOST_WIDE_INT carry;
257 int i, j, k;
258 unsigned HOST_WIDE_INT toplow, neglow;
259 HOST_WIDE_INT tophigh, neghigh;
260
261 encode (arg1, l1, h1);
262 encode (arg2, l2, h2);
263
264 memset (prod, 0, sizeof prod);
265
266 for (i = 0; i < 4; i++)
267 {
268 carry = 0;
269 for (j = 0; j < 4; j++)
270 {
271 k = i + j;
272 /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */
273 carry += arg1[i] * arg2[j];
274 /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */
275 carry += prod[k];
276 prod[k] = LOWPART (carry);
277 carry = HIGHPART (carry);
278 }
279 prod[i + 4] = carry;
280 }
281
282 decode (prod, lv, hv);
283 decode (prod + 4, &toplow, &tophigh);
284
285 /* Unsigned overflow is immediate. */
286 if (unsigned_p)
287 return (toplow | tophigh) != 0;
288
289 /* Check for signed overflow by calculating the signed representation of the
290 top half of the result; it should agree with the low half's sign bit. */
291 if (h1 < 0)
292 {
293 neg_double (l2, h2, &neglow, &neghigh);
294 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
295 }
296 if (h2 < 0)
297 {
298 neg_double (l1, h1, &neglow, &neghigh);
299 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
300 }
301 return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
302 }
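/* For example, assuming a 64-bit HOST_WIDE_INT, multiplying l1 = 3,
   h1 = 0 by l2 = 5, h2 = 0 gives *lv = 15, *hv = 0 with no overflow;
   multiplying l1 = 0, h1 = 1 (the value 2^64) by itself gives
   *lv = *hv = 0 with a nonzero top half, so unsigned overflow is
   reported.  */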
303
304 /* Shift the doubleword integer in L1, H1 left by COUNT places
305 keeping only PREC bits of result.
306 Shift right if COUNT is negative.
307 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
308 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
309
310 void
311 lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
312 HOST_WIDE_INT count, unsigned int prec,
313 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, bool arith)
314 {
315 unsigned HOST_WIDE_INT signmask;
316
317 if (count < 0)
318 {
319 rshift_double (l1, h1, -count, prec, lv, hv, arith);
320 return;
321 }
322
323 if (SHIFT_COUNT_TRUNCATED)
324 count %= prec;
325
326 if (count >= 2 * HOST_BITS_PER_WIDE_INT)
327 {
328 /* Shifting by the host word size is undefined according to the
329 ANSI standard, so we must handle this as a special case. */
330 *hv = 0;
331 *lv = 0;
332 }
333 else if (count >= HOST_BITS_PER_WIDE_INT)
334 {
335 *hv = l1 << (count - HOST_BITS_PER_WIDE_INT);
336 *lv = 0;
337 }
338 else
339 {
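/* The shift of L1 is split in two so that a COUNT of zero does not
   lead to an (undefined) shift by HOST_BITS_PER_WIDE_INT.  */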
340 *hv = (((unsigned HOST_WIDE_INT) h1 << count)
341 | (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
342 *lv = l1 << count;
343 }
344
345 /* Sign extend all bits that are beyond the precision. */
346
347 signmask = -((prec > HOST_BITS_PER_WIDE_INT
348 ? ((unsigned HOST_WIDE_INT) *hv
349 >> (prec - HOST_BITS_PER_WIDE_INT - 1))
350 : (*lv >> (prec - 1))) & 1);
351
352 if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
353 ;
354 else if (prec >= HOST_BITS_PER_WIDE_INT)
355 {
356 *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
357 *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
358 }
359 else
360 {
361 *hv = signmask;
362 *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
363 *lv |= signmask << prec;
364 }
365 }
366
367 /* Shift the doubleword integer in L1, H1 right by COUNT places
368 keeping only PREC bits of result. Shift left if COUNT is negative.
369 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
370 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
371
372 void
373 rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
374 HOST_WIDE_INT count, unsigned int prec,
375 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
376 bool arith)
377 {
378 unsigned HOST_WIDE_INT signmask;
379
380 if (count < 0)
381 {
382 lshift_double (l1, h1, -count, prec, lv, hv, arith);
383 return;
384 }
385
386 signmask = (arith
387 ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
388 : 0);
389
390 if (SHIFT_COUNT_TRUNCATED)
391 count %= prec;
392
393 if (count >= 2 * HOST_BITS_PER_WIDE_INT)
394 {
395 /* Shifting by the host word size is undefined according to the
396 ANSI standard, so we must handle this as a special case. */
397 *hv = 0;
398 *lv = 0;
399 }
400 else if (count >= HOST_BITS_PER_WIDE_INT)
401 {
402 *hv = 0;
403 *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT);
404 }
405 else
406 {
407 *hv = (unsigned HOST_WIDE_INT) h1 >> count;
408 *lv = ((l1 >> count)
409 | ((unsigned HOST_WIDE_INT) h1
410 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
411 }
412
413 /* Zero / sign extend all bits that are beyond the precision. */
414
415 if (count >= (HOST_WIDE_INT)prec)
416 {
417 *hv = signmask;
418 *lv = signmask;
419 }
420 else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT)
421 ;
422 else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
423 {
424 *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
425 *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
426 }
427 else
428 {
429 *hv = signmask;
430 *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
431 *lv |= signmask << (prec - count);
432 }
433 }
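/* For example, assuming a 64-bit HOST_WIDE_INT and PREC = 128, shifting
   l1 = 0, h1 = -1 (the value -2^64) right by 64 places gives *lv with
   all bits set and *hv = -1 (the value -1) when ARITH is true, and the
   same *lv with *hv = 0 (the value 2^64 - 1) when ARITH is false.  */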
434
435 /* Rotate the doubleword integer in L1, H1 left by COUNT places
436 keeping only PREC bits of result.
437 Rotate right if COUNT is negative.
438 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
439
440 void
441 lrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
442 HOST_WIDE_INT count, unsigned int prec,
443 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
444 {
445 unsigned HOST_WIDE_INT s1l, s2l;
446 HOST_WIDE_INT s1h, s2h;
447
448 count %= prec;
449 if (count < 0)
450 count += prec;
451
452 lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
453 rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
454 *lv = s1l | s2l;
455 *hv = s1h | s2h;
456 }
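/* For example, with PREC = 8, rotating l1 = 0x81, h1 = 0 left by one
   place gives *lv = 0x03, *hv = 0: the bit rotated out of position 7
   reappears at position 0.  */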
457
458 /* Rotate the doubleword integer in L1, H1 right by COUNT places
459 keeping only PREC bits of result. Rotate left if COUNT is negative.
460 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
461
462 void
463 rrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
464 HOST_WIDE_INT count, unsigned int prec,
465 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
466 {
467 unsigned HOST_WIDE_INT s1l, s2l;
468 HOST_WIDE_INT s1h, s2h;
469
470 count %= prec;
471 if (count < 0)
472 count += prec;
473
474 rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
475 lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
476 *lv = s1l | s2l;
477 *hv = s1h | s2h;
478 }
479
480 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
481 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
482 CODE is a tree code for a kind of division, one of
483 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
484 or EXACT_DIV_EXPR.
485 It controls how the quotient is rounded to an integer.
486 Return nonzero if the operation overflows.
487 UNS nonzero says do unsigned division. */
488
489 int
490 div_and_round_double (unsigned code, int uns,
491 /* num == numerator == dividend */
492 unsigned HOST_WIDE_INT lnum_orig,
493 HOST_WIDE_INT hnum_orig,
494 /* den == denominator == divisor */
495 unsigned HOST_WIDE_INT lden_orig,
496 HOST_WIDE_INT hden_orig,
497 unsigned HOST_WIDE_INT *lquo,
498 HOST_WIDE_INT *hquo, unsigned HOST_WIDE_INT *lrem,
499 HOST_WIDE_INT *hrem)
500 {
501 int quo_neg = 0;
502 HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
503 HOST_WIDE_INT den[4], quo[4];
504 int i, j;
505 unsigned HOST_WIDE_INT work;
506 unsigned HOST_WIDE_INT carry = 0;
507 unsigned HOST_WIDE_INT lnum = lnum_orig;
508 HOST_WIDE_INT hnum = hnum_orig;
509 unsigned HOST_WIDE_INT lden = lden_orig;
510 HOST_WIDE_INT hden = hden_orig;
511 int overflow = 0;
512
513 if (hden == 0 && lden == 0)
514 overflow = 1, lden = 1;
515
516 /* Calculate quotient sign and convert operands to unsigned. */
517 if (!uns)
518 {
519 if (hnum < 0)
520 {
521 quo_neg = ~ quo_neg;
522 /* (minimum integer) / (-1) is the only overflow case. */
523 if (neg_double (lnum, hnum, &lnum, &hnum)
524 && ((HOST_WIDE_INT) lden & hden) == -1)
525 overflow = 1;
526 }
527 if (hden < 0)
528 {
529 quo_neg = ~ quo_neg;
530 neg_double (lden, hden, &lden, &hden);
531 }
532 }
533
534 if (hnum == 0 && hden == 0)
535 { /* single precision */
536 *hquo = *hrem = 0;
537 /* This unsigned division rounds toward zero. */
538 *lquo = lnum / lden;
539 goto finish_up;
540 }
541
542 if (hnum == 0)
543 { /* trivial case: dividend < divisor */
544 /* hden != 0 already checked. */
545 *hquo = *lquo = 0;
546 *hrem = hnum;
547 *lrem = lnum;
548 goto finish_up;
549 }
550
551 memset (quo, 0, sizeof quo);
552
553 memset (num, 0, sizeof num); /* to zero the extra (5th) element used for scaling */
554 memset (den, 0, sizeof den);
555
556 encode (num, lnum, hnum);
557 encode (den, lden, hden);
558
559 /* Special code for when the divisor < BASE. */
560 if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE)
561 {
562 /* hnum != 0 already checked. */
563 for (i = 4 - 1; i >= 0; i--)
564 {
565 work = num[i] + carry * BASE;
566 quo[i] = work / lden;
567 carry = work % lden;
568 }
569 }
570 else
571 {
572 /* Full double precision division,
573 with thanks to Don Knuth's "Seminumerical Algorithms". */
574 int num_hi_sig, den_hi_sig;
575 unsigned HOST_WIDE_INT quo_est, scale;
576
577 /* Find the highest nonzero divisor digit. */
578 for (i = 4 - 1;; i--)
579 if (den[i] != 0)
580 {
581 den_hi_sig = i;
582 break;
583 }
584
585 /* Ensure that the first digit of the divisor is at least BASE/2.
586 This is required by the quotient digit estimation algorithm. */
587
588 scale = BASE / (den[den_hi_sig] + 1);
589 if (scale > 1)
590 { /* scale divisor and dividend */
591 carry = 0;
592 for (i = 0; i <= 4 - 1; i++)
593 {
594 work = (num[i] * scale) + carry;
595 num[i] = LOWPART (work);
596 carry = HIGHPART (work);
597 }
598
599 num[4] = carry;
600 carry = 0;
601 for (i = 0; i <= 4 - 1; i++)
602 {
603 work = (den[i] * scale) + carry;
604 den[i] = LOWPART (work);
605 carry = HIGHPART (work);
606 if (den[i] != 0) den_hi_sig = i;
607 }
608 }
609
610 num_hi_sig = 4;
611
612 /* Main loop */
613 for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--)
614 {
615 /* Guess the next quotient digit, quo_est, by dividing the first
616 two remaining dividend digits by the high order divisor digit.
617 quo_est is never low and is at most 2 high. */
618 unsigned HOST_WIDE_INT tmp;
619
620 num_hi_sig = i + den_hi_sig + 1;
621 work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
622 if (num[num_hi_sig] != den[den_hi_sig])
623 quo_est = work / den[den_hi_sig];
624 else
625 quo_est = BASE - 1;
626
627 /* Refine quo_est so it's usually correct, and at most one high. */
628 tmp = work - quo_est * den[den_hi_sig];
629 if (tmp < BASE
630 && (den[den_hi_sig - 1] * quo_est
631 > (tmp * BASE + num[num_hi_sig - 2])))
632 quo_est--;
633
634 /* Try QUO_EST as the quotient digit, by multiplying the
635 divisor by QUO_EST and subtracting from the remaining dividend.
636 Keep in mind that QUO_EST is the I - 1st digit. */
637
638 carry = 0;
639 for (j = 0; j <= den_hi_sig; j++)
640 {
641 work = quo_est * den[j] + carry;
642 carry = HIGHPART (work);
643 work = num[i + j] - LOWPART (work);
644 num[i + j] = LOWPART (work);
645 carry += HIGHPART (work) != 0;
646 }
647
648 /* If quo_est was high by one, then num[i] went negative and
649 we need to correct things. */
650 if (num[num_hi_sig] < (HOST_WIDE_INT) carry)
651 {
652 quo_est--;
653 carry = 0; /* add divisor back in */
654 for (j = 0; j <= den_hi_sig; j++)
655 {
656 work = num[i + j] + den[j] + carry;
657 carry = HIGHPART (work);
658 num[i + j] = LOWPART (work);
659 }
660
661 num [num_hi_sig] += carry;
662 }
663
664 /* Store the quotient digit. */
665 quo[i] = quo_est;
666 }
667 }
668
669 decode (quo, lquo, hquo);
670
671 finish_up:
672 /* If the result should be negative, make it so. */
673 if (quo_neg)
674 neg_double (*lquo, *hquo, lquo, hquo);
675
676 /* Compute trial remainder: rem = num - (quo * den) */
677 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
678 neg_double (*lrem, *hrem, lrem, hrem);
679 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
680
681 switch (code)
682 {
683 case TRUNC_DIV_EXPR:
684 case TRUNC_MOD_EXPR: /* round toward zero */
685 case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
686 return overflow;
687
688 case FLOOR_DIV_EXPR:
689 case FLOOR_MOD_EXPR: /* round toward negative infinity */
690 if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
691 {
692 /* quo = quo - 1; */
693 add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
694 lquo, hquo);
695 }
696 else
697 return overflow;
698 break;
699
700 case CEIL_DIV_EXPR:
701 case CEIL_MOD_EXPR: /* round toward positive infinity */
702 if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
703 {
704 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
705 lquo, hquo);
706 }
707 else
708 return overflow;
709 break;
710
711 case ROUND_DIV_EXPR:
712 case ROUND_MOD_EXPR: /* round to closest integer */
713 {
714 unsigned HOST_WIDE_INT labs_rem = *lrem;
715 HOST_WIDE_INT habs_rem = *hrem;
716 unsigned HOST_WIDE_INT labs_den = lden, ltwice;
717 HOST_WIDE_INT habs_den = hden, htwice;
718
719 /* Get absolute values. */
720 if (*hrem < 0)
721 neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
722 if (hden < 0)
723 neg_double (lden, hden, &labs_den, &habs_den);
724
725 /* If (2 * abs (rem) >= abs (den)), adjust the quotient. */
726 mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
727 labs_rem, habs_rem, &ltwice, &htwice);
728
729 if (((unsigned HOST_WIDE_INT) habs_den
730 < (unsigned HOST_WIDE_INT) htwice)
731 || (((unsigned HOST_WIDE_INT) habs_den
732 == (unsigned HOST_WIDE_INT) htwice)
733 && (labs_den <= ltwice)))
734 {
735 if (*hquo < 0)
736 /* quo = quo - 1; */
737 add_double (*lquo, *hquo,
738 (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
739 else
740 /* quo = quo + 1; */
741 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
742 lquo, hquo);
743 }
744 else
745 return overflow;
746 }
747 break;
748
749 default:
750 gcc_unreachable ();
751 }
752
753 /* Compute true remainder: rem = num - (quo * den) */
754 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
755 neg_double (*lrem, *hrem, lrem, hrem);
756 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
757 return overflow;
758 }
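/* For example (a sketch assuming a 64-bit HOST_WIDE_INT, with
   illustrative locals lq, hq, lr, hr), dividing -7 by 2:

     unsigned HOST_WIDE_INT lq, lr;
     HOST_WIDE_INT hq, hr;
     div_and_round_double (TRUNC_DIV_EXPR, 0, -7, -1, 2, 0,
                           &lq, &hq, &lr, &hr);

   yields quotient -3 and remainder -1.  With FLOOR_DIV_EXPR the quotient
   is -4 and the remainder 1, with CEIL_DIV_EXPR they are -3 and -1, and
   with ROUND_DIV_EXPR they are -4 and 1 (since 2 * |rem| >= |den|).  */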
759
760
761 /* Returns mask for PREC bits. */
762
763 double_int
764 double_int_mask (unsigned prec)
765 {
766 unsigned HOST_WIDE_INT m;
767 double_int mask;
768
769 if (prec > HOST_BITS_PER_WIDE_INT)
770 {
771 prec -= HOST_BITS_PER_WIDE_INT;
772 m = ((unsigned HOST_WIDE_INT) 2 << (prec - 1)) - 1;
773 mask.high = (HOST_WIDE_INT) m;
774 mask.low = ALL_ONES;
775 }
776 else
777 {
778 mask.high = 0;
779 mask.low = ((unsigned HOST_WIDE_INT) 2 << (prec - 1)) - 1;
780 }
781
782 return mask;
783 }
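/* For example, assuming a 64-bit HOST_WIDE_INT, double_int_mask (8) has
   low = 0xff and high = 0, while double_int_mask (68) has low with all
   bits set (ALL_ONES) and high = 0xf.  */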
784
785 /* Clears the bits of CST over the precision PREC. If UNS is false, the bits
786 outside of the precision are set to the sign bit (i.e., the PREC-th one),
787 otherwise they are set to zero.
788
789 This corresponds to returning the value represented by PREC lowermost bits
790 of CST, with the given signedness. */
791
792 double_int
793 double_int_ext (double_int cst, unsigned prec, bool uns)
794 {
795 if (uns)
796 return double_int_zext (cst, prec);
797 else
798 return double_int_sext (cst, prec);
799 }
800
801 /* The same as double_int_ext with UNS = true. */
802
803 double_int
804 double_int_zext (double_int cst, unsigned prec)
805 {
806 double_int mask = double_int_mask (prec);
807 double_int r;
808
809 r.low = cst.low & mask.low;
810 r.high = cst.high & mask.high;
811
812 return r;
813 }
814
815 /* The same as double_int_ext with UNS = false. */
816
817 double_int
818 double_int_sext (double_int cst, unsigned prec)
819 {
820 double_int mask = double_int_mask (prec);
821 double_int r;
822 unsigned HOST_WIDE_INT snum;
823
824 if (prec <= HOST_BITS_PER_WIDE_INT)
825 snum = cst.low;
826 else
827 {
828 prec -= HOST_BITS_PER_WIDE_INT;
829 snum = (unsigned HOST_WIDE_INT) cst.high;
830 }
831 if (((snum >> (prec - 1)) & 1) == 1)
832 {
833 r.low = cst.low | ~mask.low;
834 r.high = cst.high | ~mask.high;
835 }
836 else
837 {
838 r.low = cst.low & mask.low;
839 r.high = cst.high & mask.high;
840 }
841
842 return r;
843 }
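/* For example, assuming a 64-bit HOST_WIDE_INT and PREC = 8, a CST with
   low = 0x80 and high = 0 zero-extends to low = 0x80, high = 0, but
   sign-extends to low = 0xffffffffffffff80, high = -1, i.e. the value
   -128.  */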
844
845 /* Constructs a double_int from the tree CST. The bits beyond the precision of
846 the number are filled with the sign bit if CST is signed, and with zeros if
847 it is unsigned. */
848
849 double_int
850 tree_to_double_int (const_tree cst)
851 {
852 /* We do not need to call double_int_restrict here to ensure the semantics
853 described above, as that is already the default form for trees. */
854 return TREE_INT_CST (cst);
855 }
856
857 /* Returns true if CST fits in unsigned HOST_WIDE_INT. */
858
859 bool
860 double_int_fits_in_uhwi_p (double_int cst)
861 {
862 return cst.high == 0;
863 }
864
865 /* Returns true if CST fits in signed HOST_WIDE_INT. */
866
867 bool
868 double_int_fits_in_shwi_p (double_int cst)
869 {
870 if (cst.high == 0)
871 return (HOST_WIDE_INT) cst.low >= 0;
872 else if (cst.high == -1)
873 return (HOST_WIDE_INT) cst.low < 0;
874 else
875 return false;
876 }
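/* For example, assuming a 64-bit HOST_WIDE_INT, a CST with
   low = 0x8000000000000000 and high = -1 represents -2^63 and fits,
   while the same low with high = 0 represents 2^63 and does not.  */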
877
878 /* Returns true if CST fits in HOST_WIDE_INT if UNS is false, or in
879 unsigned HOST_WIDE_INT if UNS is true. */
880
881 bool
882 double_int_fits_in_hwi_p (double_int cst, bool uns)
883 {
884 if (uns)
885 return double_int_fits_in_uhwi_p (cst);
886 else
887 return double_int_fits_in_shwi_p (cst);
888 }
889
890 /* Returns value of CST as a signed number. CST must satisfy
891 double_int_fits_in_shwi_p. */
892
893 HOST_WIDE_INT
894 double_int_to_shwi (double_int cst)
895 {
896 return (HOST_WIDE_INT) cst.low;
897 }
898
899 /* Returns value of CST as an unsigned number. CST must satisfy
900 double_int_fits_in_uhwi_p. */
901
902 unsigned HOST_WIDE_INT
903 double_int_to_uhwi (double_int cst)
904 {
905 return cst.low;
906 }
907
908 /* Returns A * B. */
909
910 double_int
911 double_int_mul (double_int a, double_int b)
912 {
913 double_int ret;
914 mul_double (a.low, a.high, b.low, b.high, &ret.low, &ret.high);
915 return ret;
916 }
917
918 /* Returns A + B. */
919
920 double_int
921 double_int_add (double_int a, double_int b)
922 {
923 double_int ret;
924 add_double (a.low, a.high, b.low, b.high, &ret.low, &ret.high);
925 return ret;
926 }
927
928 /* Returns -A. */
929
930 double_int
931 double_int_neg (double_int a)
932 {
933 double_int ret;
934 neg_double (a.low, a.high, &ret.low, &ret.high);
935 return ret;
936 }
937
938 /* Returns A / B (computed unsigned or signed according to UNS, and rounded as
939 specified by CODE). CODE is enum tree_code in fact, but double_int.h
940 must be included before tree.h. The remainder after the division is
941 stored in MOD. */
942
943 double_int
944 double_int_divmod (double_int a, double_int b, bool uns, unsigned code,
945 double_int *mod)
946 {
947 double_int ret;
948
949 div_and_round_double (code, uns, a.low, a.high,
950 b.low, b.high, &ret.low, &ret.high,
951 &mod->low, &mod->high);
952 return ret;
953 }
954
955 /* The same as double_int_divmod with UNS = false. */
956
957 double_int
958 double_int_sdivmod (double_int a, double_int b, unsigned code, double_int *mod)
959 {
960 return double_int_divmod (a, b, false, code, mod);
961 }
962
963 /* The same as double_int_divmod with UNS = true. */
964
965 double_int
966 double_int_udivmod (double_int a, double_int b, unsigned code, double_int *mod)
967 {
968 return double_int_divmod (a, b, true, code, mod);
969 }
970
971 /* Returns A / B (computed unsigned or signed according to UNS, and rounded as
972 specified by CODE). CODE is enum tree_code in fact, but double_int.h
973 must be included before tree.h. */
974
975 double_int
976 double_int_div (double_int a, double_int b, bool uns, unsigned code)
977 {
978 double_int mod;
979
980 return double_int_divmod (a, b, uns, code, &mod);
981 }
982
983 /* The same as double_int_div with UNS = false. */
984
985 double_int
986 double_int_sdiv (double_int a, double_int b, unsigned code)
987 {
988 return double_int_div (a, b, false, code);
989 }
990
991 /* The same as double_int_div with UNS = true. */
992
993 double_int
994 double_int_udiv (double_int a, double_int b, unsigned code)
995 {
996 return double_int_div (a, b, true, code);
997 }
998
999 /* Returns A % B (computed unsigned or signed according to UNS, and rounded as
1000 specified by CODE). CODE is enum tree_code in fact, but double_int.h
1001 must be included before tree.h. */
1002
1003 double_int
1004 double_int_mod (double_int a, double_int b, bool uns, unsigned code)
1005 {
1006 double_int mod;
1007
1008 double_int_divmod (a, b, uns, code, &mod);
1009 return mod;
1010 }
1011
1012 /* The same as double_int_mod with UNS = false. */
1013
1014 double_int
1015 double_int_smod (double_int a, double_int b, unsigned code)
1016 {
1017 return double_int_mod (a, b, false, code);
1018 }
1019
1020 /* The same as double_int_mod with UNS = true. */
1021
1022 double_int
1023 double_int_umod (double_int a, double_int b, unsigned code)
1024 {
1025 return double_int_mod (a, b, true, code);
1026 }
1027
1028 /* Shift A left by COUNT places keeping only PREC bits of result. Shift
1029 right if COUNT is negative. ARITH true specifies arithmetic shifting;
1030 otherwise use logical shift. */
1031
1032 double_int
1033 double_int_lshift (double_int a, HOST_WIDE_INT count, unsigned int prec, bool arith)
1034 {
1035 double_int ret;
1036 lshift_double (a.low, a.high, count, prec, &ret.low, &ret.high, arith);
1037 return ret;
1038 }
1039
1040 /* Shift A right by COUNT places keeping only PREC bits of result. Shift
1041 left if COUNT is negative. ARITH true specifies arithmetic shifting;
1042 otherwise use logical shift. */
1043
1044 double_int
1045 double_int_rshift (double_int a, HOST_WIDE_INT count, unsigned int prec, bool arith)
1046 {
1047 double_int ret;
1048 rshift_double (a.low, a.high, count, prec, &ret.low, &ret.high, arith);
1049 return ret;
1050 }
1051
1052 /* Constructs a tree of type TYPE with the value given by CST. The signedness
1053 of CST is assumed to be the same as the signedness of TYPE. */
1054
1055 tree
1056 double_int_to_tree (tree type, double_int cst)
1057 {
1058 cst = double_int_ext (cst, TYPE_PRECISION (type), TYPE_UNSIGNED (type));
1059
1060 return build_int_cst_wide (type, cst.low, cst.high);
1061 }
1062
1063 /* Returns true if CST fits into range of TYPE. Signedness of CST is assumed
1064 to be the same as the signedness of TYPE. */
1065
1066 bool
1067 double_int_fits_to_tree_p (const_tree type, double_int cst)
1068 {
1069 double_int ext = double_int_ext (cst,
1070 TYPE_PRECISION (type),
1071 TYPE_UNSIGNED (type));
1072
1073 return double_int_equal_p (cst, ext);
1074 }
1075
1076 /* Returns -1 if A < B, 0 if A == B and 1 if A > B. Signedness of the
1077 comparison is given by UNS. */
1078
1079 int
1080 double_int_cmp (double_int a, double_int b, bool uns)
1081 {
1082 if (uns)
1083 return double_int_ucmp (a, b);
1084 else
1085 return double_int_scmp (a, b);
1086 }
1087
1088 /* Compares two unsigned values A and B. Returns -1 if A < B, 0 if A == B,
1089 and 1 if A > B. */
1090
1091 int
1092 double_int_ucmp (double_int a, double_int b)
1093 {
1094 if ((unsigned HOST_WIDE_INT) a.high < (unsigned HOST_WIDE_INT) b.high)
1095 return -1;
1096 if ((unsigned HOST_WIDE_INT) a.high > (unsigned HOST_WIDE_INT) b.high)
1097 return 1;
1098 if (a.low < b.low)
1099 return -1;
1100 if (a.low > b.low)
1101 return 1;
1102
1103 return 0;
1104 }
1105
1106 /* Compares two signed values A and B. Returns -1 if A < B, 0 if A == B,
1107 and 1 if A > B. */
1108
1109 int
1110 double_int_scmp (double_int a, double_int b)
1111 {
1112 if (a.high < b.high)
1113 return -1;
1114 if (a.high > b.high)
1115 return 1;
1116 if (a.low < b.low)
1117 return -1;
1118 if (a.low > b.low)
1119 return 1;
1120
1121 return 0;
1122 }
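/* For example, assuming a 64-bit HOST_WIDE_INT, for a.low = 0,
   a.high = -1 and b.low = 1, b.high = 0, double_int_scmp returns -1
   (A is -2^64, less than 1), while double_int_ucmp returns 1
   (A is 2^128 - 2^64, greater than 1).  */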
1123
1124 /* Splits off the last digit of *CST (taken as unsigned) in base BASE and returns it. */
1125
1126 static unsigned
1127 double_int_split_digit (double_int *cst, unsigned base)
1128 {
1129 unsigned HOST_WIDE_INT resl, reml;
1130 HOST_WIDE_INT resh, remh;
1131
1132 div_and_round_double (FLOOR_DIV_EXPR, true, cst->low, cst->high, base, 0,
1133 &resl, &resh, &reml, &remh);
1134 cst->high = resh;
1135 cst->low = resl;
1136
1137 return reml;
1138 }
1139
1140 /* Dumps CST to FILE. If UNS is true, CST is considered to be unsigned,
1141 otherwise it is signed. */
1142
1143 void
1144 dump_double_int (FILE *file, double_int cst, bool uns)
1145 {
1146 unsigned digits[100], n;
1147 int i;
1148
1149 if (double_int_zero_p (cst))
1150 {
1151 fprintf (file, "0");
1152 return;
1153 }
1154
1155 if (!uns && double_int_negative_p (cst))
1156 {
1157 fprintf (file, "-");
1158 cst = double_int_neg (cst);
1159 }
1160
1161 for (n = 0; !double_int_zero_p (cst); n++)
1162 digits[n] = double_int_split_digit (&cst, 10);
1163 for (i = n - 1; i >= 0; i--)
1164 fprintf (file, "%u", digits[i]);
1165 }
1166
1167
1168 /* Sets RESULT to VAL, taken unsigned if UNS is true and as signed
1169 otherwise. */
1170
1171 void
1172 mpz_set_double_int (mpz_t result, double_int val, bool uns)
1173 {
1174 bool negate = false;
1175 unsigned HOST_WIDE_INT vp[2];
1176
1177 if (!uns && double_int_negative_p (val))
1178 {
1179 negate = true;
1180 val = double_int_neg (val);
1181 }
1182
1183 vp[0] = val.low;
1184 vp[1] = (unsigned HOST_WIDE_INT) val.high;
1185 mpz_import (result, 2, -1, sizeof (HOST_WIDE_INT), 0, 0, vp);
1186
1187 if (negate)
1188 mpz_neg (result, result);
1189 }
1190
1191 /* Returns VAL converted to TYPE. If WRAP is true, then out-of-range
1192 values of VAL will be wrapped; otherwise, they will be set to the
1193 appropriate minimum or maximum TYPE bound. */
1194
1195 double_int
1196 mpz_get_double_int (const_tree type, mpz_t val, bool wrap)
1197 {
1198 unsigned HOST_WIDE_INT *vp;
1199 size_t count, numb;
1200 double_int res;
1201
1202 if (!wrap)
1203 {
1204 mpz_t min, max;
1205
1206 mpz_init (min);
1207 mpz_init (max);
1208 get_type_static_bounds (type, min, max);
1209
1210 if (mpz_cmp (val, min) < 0)
1211 mpz_set (val, min);
1212 else if (mpz_cmp (val, max) > 0)
1213 mpz_set (val, max);
1214
1215 mpz_clear (min);
1216 mpz_clear (max);
1217 }
1218
1219 /* Determine the number of unsigned HOST_WIDE_INT that are required
1220 for representing the value. The code to calculate count is
1221 extracted from the GMP manual, section "Integer Import and Export":
1222 http://gmplib.org/manual/Integer-Import-and-Export.html */
1223 numb = 8*sizeof(HOST_WIDE_INT);
1224 count = (mpz_sizeinbase (val, 2) + numb-1) / numb;
1225 if (count < 2)
1226 count = 2;
1227 vp = (unsigned HOST_WIDE_INT *) alloca (count * sizeof(HOST_WIDE_INT));
1228
1229 vp[0] = 0;
1230 vp[1] = 0;
1231 mpz_export (vp, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, val);
1232
1233 gcc_assert (wrap || count <= 2);
1234
1235 res.low = vp[0];
1236 res.high = (HOST_WIDE_INT) vp[1];
1237
1238 res = double_int_ext (res, TYPE_PRECISION (type), TYPE_UNSIGNED (type));
1239 if (mpz_sgn (val) < 0)
1240 res = double_int_neg (res);
1241
1242 return res;
1243 }