1 /* Support routines for value ranges.
2 Copyright (C) 2019-2023 Free Software Foundation, Inc.
3 Major hacks by Aldy Hernandez <aldyh@redhat.com> and
4 Andrew MacLeod <amacleod@redhat.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "ssa.h"
29 #include "tree-pretty-print.h"
30 #include "value-range-pretty-print.h"
31 #include "fold-const.h"
32 #include "gimple-range.h"
33
34 void
35 irange::accept (const vrange_visitor &v) const
36 {
37 v.visit (*this);
38 }
39
40 void
41 unsupported_range::accept (const vrange_visitor &v) const
42 {
43 v.visit (*this);
44 }
45
46 // Convenience function only available for integers and pointers.
47
48 wide_int
49 Value_Range::lower_bound () const
50 {
51 if (is_a <irange> (*m_vrange))
52 return as_a <irange> (*m_vrange).lower_bound ();
53 gcc_unreachable ();
54 }
55
56 // Convenience function only available for integers and pointers.
57
58 wide_int
59 Value_Range::upper_bound () const
60 {
61 if (is_a <irange> (*m_vrange))
62 return as_a <irange> (*m_vrange).upper_bound ();
63 gcc_unreachable ();
64 }
65
66 void
67 Value_Range::dump (FILE *out) const
68 {
69 if (m_vrange)
70 m_vrange->dump (out);
71 else
72 fprintf (out, "NULL");
73 }
74
75 DEBUG_FUNCTION void
76 debug (const Value_Range &r)
77 {
78 r.dump (stderr);
79 fprintf (stderr, "\n");
80 }
81
82 DEBUG_FUNCTION void
83 debug (const irange_bitmask &bm)
84 {
85 bm.dump (stderr);
86 fprintf (stderr, "\n");
87 }
88
89 // Default vrange definitions.
90
91 bool
92 vrange::contains_p (tree) const
93 {
94 return varying_p ();
95 }
96
97 bool
98 vrange::singleton_p (tree *) const
99 {
100 return false;
101 }
102
103 void
104 vrange::set (tree min, tree, value_range_kind)
105 {
106 set_varying (TREE_TYPE (min));
107 }
108
109 tree
110 vrange::type () const
111 {
112 return void_type_node;
113 }
114
115 bool
116 vrange::supports_type_p (const_tree) const
117 {
118 return false;
119 }
120
121 void
122 vrange::set_undefined ()
123 {
124 m_kind = VR_UNDEFINED;
125 }
126
127 void
128 vrange::set_varying (tree)
129 {
130 m_kind = VR_VARYING;
131 }
132
133 bool
134 vrange::union_ (const vrange &r)
135 {
136 if (r.undefined_p () || varying_p ())
137 return false;
138 if (undefined_p () || r.varying_p ())
139 {
140 operator= (r);
141 return true;
142 }
143 gcc_unreachable ();
144 return false;
145 }
146
147 bool
148 vrange::intersect (const vrange &r)
149 {
150 if (undefined_p () || r.varying_p ())
151 return false;
152 if (r.undefined_p ())
153 {
154 set_undefined ();
155 return true;
156 }
157 if (varying_p ())
158 {
159 operator= (r);
160 return true;
161 }
162 gcc_unreachable ();
163 return false;
164 }
165
166 bool
167 vrange::zero_p () const
168 {
169 return false;
170 }
171
172 bool
173 vrange::nonzero_p () const
174 {
175 return false;
176 }
177
178 void
179 vrange::set_nonzero (tree type)
180 {
181 set_varying (type);
182 }
183
184 void
185 vrange::set_zero (tree type)
186 {
187 set_varying (type);
188 }
189
190 void
191 vrange::set_nonnegative (tree type)
192 {
193 set_varying (type);
194 }
195
196 bool
197 vrange::fits_p (const vrange &) const
198 {
199 return true;
200 }
201
202 // Assignment operator for generic ranges. Copying incompatible types
203 // is not allowed.
204
205 vrange &
206 vrange::operator= (const vrange &src)
207 {
208 if (is_a <irange> (src))
209 as_a <irange> (*this) = as_a <irange> (src);
210 else if (is_a <frange> (src))
211 as_a <frange> (*this) = as_a <frange> (src);
212 else
213 {
214 gcc_checking_assert (is_a <unsupported_range> (src));
215 m_kind = src.m_kind;
216 }
217 return *this;
218 }
219
220 // Equality operator for generic ranges.
221
222 bool
223 vrange::operator== (const vrange &src) const
224 {
225 if (is_a <irange> (src))
226 return as_a <irange> (*this) == as_a <irange> (src);
227 if (is_a <frange> (src))
228 return as_a <frange> (*this) == as_a <frange> (src);
229 gcc_unreachable ();
230 }
231
232 // Wrapper for vrange_printer to dump a range to a file.
233
234 void
235 vrange::dump (FILE *file) const
236 {
237 pretty_printer buffer;
238 pp_needs_newline (&buffer) = true;
239 buffer.buffer->stream = file;
240 vrange_printer vrange_pp (&buffer);
241 this->accept (vrange_pp);
242 pp_flush (&buffer);
243 }
244
245 void
246 irange_bitmask::dump (FILE *file) const
247 {
248 char buf[WIDE_INT_PRINT_BUFFER_SIZE];
249 pretty_printer buffer;
250
251 pp_needs_newline (&buffer) = true;
252 buffer.buffer->stream = file;
253 pp_string (&buffer, "MASK ");
254 print_hex (m_mask, buf);
255 pp_string (&buffer, buf);
256 pp_string (&buffer, " VALUE ");
257 print_hex (m_value, buf);
258 pp_string (&buffer, buf);
259 pp_flush (&buffer);
260 }
261
262 namespace inchash
263 {
264
265 void
266 add_vrange (const vrange &v, inchash::hash &hstate,
267 unsigned int)
268 {
269 if (v.undefined_p ())
270 {
271 hstate.add_int (VR_UNDEFINED);
272 return;
273 }
274 // Types are ignored throughout to prevent two equal ranges from
275 // having different hash values.  This can happen when two ranges
276 // are equal but their types are different (though
277 // types_compatible_p is true).
278 if (is_a <irange> (v))
279 {
280 const irange &r = as_a <irange> (v);
281 if (r.varying_p ())
282 hstate.add_int (VR_VARYING);
283 else
284 hstate.add_int (VR_RANGE);
285 for (unsigned i = 0; i < r.num_pairs (); ++i)
286 {
287 hstate.add_wide_int (r.lower_bound (i));
288 hstate.add_wide_int (r.upper_bound (i));
289 }
290 irange_bitmask bm = r.get_bitmask ();
291 hstate.add_wide_int (bm.value ());
292 hstate.add_wide_int (bm.mask ());
293 return;
294 }
295 if (is_a <frange> (v))
296 {
297 const frange &r = as_a <frange> (v);
298 if (r.known_isnan ())
299 hstate.add_int (VR_NAN);
300 else
301 {
302 hstate.add_int (r.varying_p () ? VR_VARYING : VR_RANGE);
303 hstate.add_real_value (r.lower_bound ());
304 hstate.add_real_value (r.upper_bound ());
305 }
306 nan_state nan = r.get_nan_state ();
307 hstate.add_int (nan.pos_p ());
308 hstate.add_int (nan.neg_p ());
309 return;
310 }
311 gcc_unreachable ();
312 }
313
314 } //namespace inchash
315
316 bool
317 irange::nonnegative_p () const
318 {
319 return wi::ge_p (lower_bound (), 0, TYPE_SIGN (type ()));
320 }
321
322 bool
323 irange::nonpositive_p () const
324 {
325 return wi::le_p (upper_bound (), 0, TYPE_SIGN (type ()));
326 }
327
328 bool
329 irange::supports_type_p (const_tree type) const
330 {
331 return supports_p (type);
332 }
333
334 // Return TRUE if R fits in THIS.
335
336 bool
337 irange::fits_p (const vrange &r) const
338 {
339 return m_max_ranges >= as_a <irange> (r).num_pairs ();
340 }
341
342 void
343 irange::set_nonnegative (tree type)
344 {
345 set (type,
346 wi::zero (TYPE_PRECISION (type)),
347 wi::to_wide (TYPE_MAX_VALUE (type)));
348 }
349
350 void
351 frange::accept (const vrange_visitor &v) const
352 {
353 v.visit (*this);
354 }
355
356 // Flush denormal endpoints to the appropriate 0.0.
357
358 void
359 frange::flush_denormals_to_zero ()
360 {
361 if (undefined_p () || known_isnan ())
362 return;
363
364 machine_mode mode = TYPE_MODE (type ());
365 // Flush [x, -DENORMAL] to [x, -0.0].
366 if (real_isdenormal (&m_max, mode) && real_isneg (&m_max))
367 {
368 if (HONOR_SIGNED_ZEROS (m_type))
369 m_max = dconstm0;
370 else
371 m_max = dconst0;
372 }
373 // Flush [+DENORMAL, x] to [+0.0, x].
374 if (real_isdenormal (&m_min, mode) && !real_isneg (&m_min))
375 m_min = dconst0;
376 }
377
378 // Setter for franges.
379
380 void
381 frange::set (tree type,
382 const REAL_VALUE_TYPE &min, const REAL_VALUE_TYPE &max,
383 const nan_state &nan, value_range_kind kind)
384 {
385 switch (kind)
386 {
387 case VR_UNDEFINED:
388 set_undefined ();
389 return;
390 case VR_VARYING:
391 case VR_ANTI_RANGE:
392 set_varying (type);
393 return;
394 case VR_RANGE:
395 break;
396 default:
397 gcc_unreachable ();
398 }
399
400 gcc_checking_assert (!real_isnan (&min) && !real_isnan (&max));
401
402 m_kind = kind;
403 m_type = type;
404 m_min = min;
405 m_max = max;
406 if (HONOR_NANS (m_type))
407 {
408 m_pos_nan = nan.pos_p ();
409 m_neg_nan = nan.neg_p ();
410 }
411 else
412 {
413 m_pos_nan = false;
414 m_neg_nan = false;
415 }
416
417 if (!MODE_HAS_SIGNED_ZEROS (TYPE_MODE (m_type)))
418 {
419 if (real_iszero (&m_min, 1))
420 m_min.sign = 0;
421 if (real_iszero (&m_max, 1))
422 m_max.sign = 0;
423 }
424 else if (!HONOR_SIGNED_ZEROS (m_type))
425 {
426 if (real_iszero (&m_max, 1))
427 m_max.sign = 0;
428 if (real_iszero (&m_min, 0))
429 m_min.sign = 1;
430 }
431
432 // For -ffinite-math-only we can drop ranges outside the
433 // representable numbers to min/max for the type.
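// For example, with -ffinite-math-only a float range of [-Inf, 2.0]
// is tightened to [-FLT_MAX, 2.0], since frange_val_min/frange_val_max
// return the extreme finite values when infinities are not honored.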
434 if (!HONOR_INFINITIES (m_type))
435 {
436 REAL_VALUE_TYPE min_repr = frange_val_min (m_type);
437 REAL_VALUE_TYPE max_repr = frange_val_max (m_type);
438 if (real_less (&m_min, &min_repr))
439 m_min = min_repr;
440 else if (real_less (&max_repr, &m_min))
441 m_min = max_repr;
442 if (real_less (&max_repr, &m_max))
443 m_max = max_repr;
444 else if (real_less (&m_max, &min_repr))
445 m_max = min_repr;
446 }
447
448 // Check for swapped ranges.
449 gcc_checking_assert (real_compare (LE_EXPR, &min, &max));
450
451 normalize_kind ();
452 }
453
454 // Setter for an frange defaulting the NAN possibility to +-NAN when
455 // HONOR_NANS.
456
457 void
458 frange::set (tree type,
459 const REAL_VALUE_TYPE &min, const REAL_VALUE_TYPE &max,
460 value_range_kind kind)
461 {
462 set (type, min, max, nan_state (true), kind);
463 }
464
465 void
466 frange::set (tree min, tree max, value_range_kind kind)
467 {
468 set (TREE_TYPE (min),
469 *TREE_REAL_CST_PTR (min), *TREE_REAL_CST_PTR (max), kind);
470 }
471
472 // Normalize range to VARYING or UNDEFINED, or vice versa. Return
473 // TRUE if anything changed.
474 //
475 // A range with no known properties can be dropped to VARYING.
476 // Conversely, a VARYING with any known properties should be dropped
477 // to a VR_RANGE.  Normalizing ranges upon changing them ensures there is
478 // only one representation for a given range.
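//
// For example, for a type where both NANs and infinities are honored:
//
//   [-INF, +INF] +-NAN   is canonicalized to   VARYING
//   VARYING !NAN         is canonicalized to   [-INF, +INF] !NAN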
479
480 bool
481 frange::normalize_kind ()
482 {
483 if (m_kind == VR_RANGE
484 && frange_val_is_min (m_min, m_type)
485 && frange_val_is_max (m_max, m_type))
486 {
487 if (!HONOR_NANS (m_type) || (m_pos_nan && m_neg_nan))
488 {
489 set_varying (m_type);
490 return true;
491 }
492 }
493 else if (m_kind == VR_VARYING)
494 {
495 if (HONOR_NANS (m_type) && (!m_pos_nan || !m_neg_nan))
496 {
497 m_kind = VR_RANGE;
498 m_min = frange_val_min (m_type);
499 m_max = frange_val_max (m_type);
500 if (flag_checking)
501 verify_range ();
502 return true;
503 }
504 }
505 else if (m_kind == VR_NAN && !m_pos_nan && !m_neg_nan)
506 set_undefined ();
507 return false;
508 }
509
510 // Union or intersect the zero endpoints of two ranges. For example:
511 // [-0, x] U [+0, x] => [-0, x]
512 // [ x, -0] U [ x, +0] => [ x, +0]
513 // [-0, x] ^ [+0, x] => [+0, x]
514 // [ x, -0] ^ [ x, +0] => [ x, -0]
515 //
516 // UNION_P is true when performing a union, or false when intersecting.
517
518 bool
519 frange::combine_zeros (const frange &r, bool union_p)
520 {
521 gcc_checking_assert (!undefined_p () && !known_isnan ());
522
523 bool changed = false;
524 if (real_iszero (&m_min) && real_iszero (&r.m_min)
525 && real_isneg (&m_min) != real_isneg (&r.m_min))
526 {
527 m_min.sign = union_p;
528 changed = true;
529 }
530 if (real_iszero (&m_max) && real_iszero (&r.m_max)
531 && real_isneg (&m_max) != real_isneg (&r.m_max))
532 {
533 m_max.sign = !union_p;
534 changed = true;
535 }
536 // If the signs are swapped, the resulting range is empty.
537 if (m_min.sign == 0 && m_max.sign == 1)
538 {
539 if (maybe_isnan ())
540 m_kind = VR_NAN;
541 else
542 set_undefined ();
543 changed = true;
544 }
545 return changed;
546 }
547
548 // Union two ranges when one is known to be a NAN.
549
550 bool
551 frange::union_nans (const frange &r)
552 {
553 gcc_checking_assert (known_isnan () || r.known_isnan ());
554
555 bool changed = false;
556 if (known_isnan () && m_kind != r.m_kind)
557 {
558 m_kind = r.m_kind;
559 m_min = r.m_min;
560 m_max = r.m_max;
561 changed = true;
562 }
563 if (m_pos_nan != r.m_pos_nan || m_neg_nan != r.m_neg_nan)
564 {
565 m_pos_nan |= r.m_pos_nan;
566 m_neg_nan |= r.m_neg_nan;
567 changed = true;
568 }
569 if (changed)
570 {
571 normalize_kind ();
572 return true;
573 }
574 return false;
575 }
576
577 bool
578 frange::union_ (const vrange &v)
579 {
580 const frange &r = as_a <frange> (v);
581
582 if (r.undefined_p () || varying_p ())
583 return false;
584 if (undefined_p () || r.varying_p ())
585 {
586 *this = r;
587 return true;
588 }
589
590 // Combine NAN info.
591 if (known_isnan () || r.known_isnan ())
592 return union_nans (r);
593 bool changed = false;
594 if (m_pos_nan != r.m_pos_nan || m_neg_nan != r.m_neg_nan)
595 {
596 m_pos_nan |= r.m_pos_nan;
597 m_neg_nan |= r.m_neg_nan;
598 changed = true;
599 }
600
601 // Combine endpoints.
602 if (real_less (&r.m_min, &m_min))
603 {
604 m_min = r.m_min;
605 changed = true;
606 }
607 if (real_less (&m_max, &r.m_max))
608 {
609 m_max = r.m_max;
610 changed = true;
611 }
612
613 if (HONOR_SIGNED_ZEROS (m_type))
614 changed |= combine_zeros (r, true);
615
616 changed |= normalize_kind ();
617 return changed;
618 }
619
620 // Intersect two ranges when one is known to be a NAN.
621
622 bool
623 frange::intersect_nans (const frange &r)
624 {
625 gcc_checking_assert (known_isnan () || r.known_isnan ());
626
627 m_pos_nan &= r.m_pos_nan;
628 m_neg_nan &= r.m_neg_nan;
629 if (maybe_isnan ())
630 m_kind = VR_NAN;
631 else
632 set_undefined ();
633 if (flag_checking)
634 verify_range ();
635 return true;
636 }
637
638 bool
639 frange::intersect (const vrange &v)
640 {
641 const frange &r = as_a <frange> (v);
642
643 if (undefined_p () || r.varying_p ())
644 return false;
645 if (r.undefined_p ())
646 {
647 set_undefined ();
648 return true;
649 }
650 if (varying_p ())
651 {
652 *this = r;
653 return true;
654 }
655
656 // Combine NAN info.
657 if (known_isnan () || r.known_isnan ())
658 return intersect_nans (r);
659 bool changed = false;
660 if (m_pos_nan != r.m_pos_nan || m_neg_nan != r.m_neg_nan)
661 {
662 m_pos_nan &= r.m_pos_nan;
663 m_neg_nan &= r.m_neg_nan;
664 changed = true;
665 }
666
667 // Combine endpoints.
668 if (real_less (&m_min, &r.m_min))
669 {
670 m_min = r.m_min;
671 changed = true;
672 }
673 if (real_less (&r.m_max, &m_max))
674 {
675 m_max = r.m_max;
676 changed = true;
677 }
678 // If the endpoints are swapped, the resulting range is empty.
679 if (real_less (&m_max, &m_min))
680 {
681 if (maybe_isnan ())
682 m_kind = VR_NAN;
683 else
684 set_undefined ();
685 if (flag_checking)
686 verify_range ();
687 return true;
688 }
689
690 if (HONOR_SIGNED_ZEROS (m_type))
691 changed |= combine_zeros (r, false);
692
693 changed |= normalize_kind ();
694 return changed;
695 }
696
697 frange &
698 frange::operator= (const frange &src)
699 {
700 m_kind = src.m_kind;
701 m_type = src.m_type;
702 m_min = src.m_min;
703 m_max = src.m_max;
704 m_pos_nan = src.m_pos_nan;
705 m_neg_nan = src.m_neg_nan;
706
707 if (flag_checking)
708 verify_range ();
709 return *this;
710 }
711
712 bool
713 frange::operator== (const frange &src) const
714 {
715 if (m_kind == src.m_kind)
716 {
717 if (undefined_p ())
718 return true;
719
720 if (varying_p ())
721 return types_compatible_p (m_type, src.m_type);
722
723 bool nan1 = known_isnan ();
724 bool nan2 = src.known_isnan ();
725 if (nan1 || nan2)
726 {
727 if (nan1 && nan2)
728 return (m_pos_nan == src.m_pos_nan
729 && m_neg_nan == src.m_neg_nan);
730 return false;
731 }
732
733 return (real_identical (&m_min, &src.m_min)
734 && real_identical (&m_max, &src.m_max)
735 && m_pos_nan == src.m_pos_nan
736 && m_neg_nan == src.m_neg_nan
737 && types_compatible_p (m_type, src.m_type));
738 }
739 return false;
740 }
741
742 // Return TRUE if range contains R.
743
744 bool
745 frange::contains_p (const REAL_VALUE_TYPE &r) const
746 {
747 gcc_checking_assert (m_kind != VR_ANTI_RANGE);
748
749 if (undefined_p ())
750 return false;
751
752 if (varying_p ())
753 return true;
754
755 if (real_isnan (&r))
756 {
757 // No NAN in range.
758 if (!m_pos_nan && !m_neg_nan)
759 return false;
760 // Both +NAN and -NAN are present.
761 if (m_pos_nan && m_neg_nan)
762 return true;
763 return m_neg_nan == r.sign;
764 }
765 if (known_isnan ())
766 return false;
767
768 if (real_compare (GE_EXPR, &r, &m_min) && real_compare (LE_EXPR, &r, &m_max))
769 {
770 // Make sure the signs are equal for signed zeros.
771 if (HONOR_SIGNED_ZEROS (m_type) && real_iszero (&r))
772 return r.sign == m_min.sign || r.sign == m_max.sign;
773 return true;
774 }
775 return false;
776 }
777
778 // If range is a singleton, place it in RESULT and return TRUE. If
779 // RESULT is NULL, just return TRUE.
780 //
781 // A NAN can never be a singleton.
782
783 bool
784 frange::internal_singleton_p (REAL_VALUE_TYPE *result) const
785 {
786 if (m_kind == VR_RANGE && real_identical (&m_min, &m_max))
787 {
788 // Return false for any singleton that may be a NAN.
789 if (HONOR_NANS (m_type) && maybe_isnan ())
790 return false;
791
792 if (MODE_COMPOSITE_P (TYPE_MODE (m_type)))
793 {
794 // For IBM long doubles, if the value is +-Inf or is exactly
795 // representable in double, the other double could be +0.0
796 // or -0.0. Since this means there is more than one way to
797 // represent a value, return false to avoid propagating it.
798 // See libgcc/config/rs6000/ibm-ldouble-format for details.
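// For instance, 1.0 is exactly representable in double, so the
// composite value could be either (1.0, +0.0) or (1.0, -0.0).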
799 if (real_isinf (&m_min))
800 return false;
801 REAL_VALUE_TYPE r;
802 real_convert (&r, DFmode, &m_min);
803 if (real_identical (&r, &m_min))
804 return false;
805 }
806
807 if (result)
808 *result = m_min;
809 return true;
810 }
811 return false;
812 }
813
814 bool
815 frange::singleton_p (tree *result) const
816 {
817 if (internal_singleton_p ())
818 {
819 if (result)
820 *result = build_real (m_type, m_min);
821 return true;
822 }
823 return false;
824 }
825
826 bool
827 frange::singleton_p (REAL_VALUE_TYPE &r) const
828 {
829 return internal_singleton_p (&r);
830 }
831
832 bool
833 frange::supports_type_p (const_tree type) const
834 {
835 return supports_p (type);
836 }
837
838 void
839 frange::verify_range ()
840 {
841 if (!undefined_p ())
842 gcc_checking_assert (HONOR_NANS (m_type) || !maybe_isnan ());
843 switch (m_kind)
844 {
845 case VR_UNDEFINED:
846 gcc_checking_assert (!m_type);
847 return;
848 case VR_VARYING:
849 gcc_checking_assert (m_type);
850 gcc_checking_assert (frange_val_is_min (m_min, m_type));
851 gcc_checking_assert (frange_val_is_max (m_max, m_type));
852 if (HONOR_NANS (m_type))
853 gcc_checking_assert (m_pos_nan && m_neg_nan);
854 else
855 gcc_checking_assert (!m_pos_nan && !m_neg_nan);
856 return;
857 case VR_RANGE:
858 gcc_checking_assert (m_type);
859 break;
860 case VR_NAN:
861 gcc_checking_assert (m_type);
862 gcc_checking_assert (m_pos_nan || m_neg_nan);
863 return;
864 default:
865 gcc_unreachable ();
866 }
867
868 // NANs cannot appear in the endpoints of a range.
869 gcc_checking_assert (!real_isnan (&m_min) && !real_isnan (&m_max));
870
871 // Make sure we don't have swapped ranges.
872 gcc_checking_assert (!real_less (&m_max, &m_min));
873
874 // [ +0.0, -0.0 ] is nonsensical.
875 gcc_checking_assert (!(real_iszero (&m_min, 0) && real_iszero (&m_max, 1)));
876
877 // If all the properties are clear, we better not span the entire
878 // domain, because that would make us varying.
879 if (m_pos_nan && m_neg_nan)
880 gcc_checking_assert (!frange_val_is_min (m_min, m_type)
881 || !frange_val_is_max (m_max, m_type));
882 }
883
884 // We can't do much with nonzeros yet.
885 void
886 frange::set_nonzero (tree type)
887 {
888 set_varying (type);
889 }
890
891 // We can't do much with nonzeros yet.
892 bool
893 frange::nonzero_p () const
894 {
895 return false;
896 }
897
898 // Set range to [-0.0, +0.0] if honoring signed zeros, or [0.0, 0.0]
899 // otherwise.
900
901 void
902 frange::set_zero (tree type)
903 {
904 if (HONOR_SIGNED_ZEROS (type))
905 {
906 set (type, dconstm0, dconst0);
907 clear_nan ();
908 }
909 else
910 set (type, dconst0, dconst0);
911 }
912
913 // Return TRUE for any zero regardless of sign.
914
915 bool
916 frange::zero_p () const
917 {
918 return (m_kind == VR_RANGE
919 && real_iszero (&m_min)
920 && real_iszero (&m_max));
921 }
922
923 // Set the range to non-negative numbers, that is [+0.0, +INF].
924 //
925 // The NAN in the resulting range (if HONOR_NANS) has a varying sign
926 // as there are no guarantees in IEEE 754 with respect to the sign of a NAN,
927 // except for copy, abs, and copysign. It is the responsibility of
928 // the caller to set the NAN's sign if desired.
929
930 void
931 frange::set_nonnegative (tree type)
932 {
933 set (type, dconst0, frange_val_max (type));
934 }
935
936 // Here we copy between any two iranges.
937
938 irange &
939 irange::operator= (const irange &src)
940 {
941 int needed = src.num_pairs ();
942 maybe_resize (needed);
943
944 unsigned x;
945 unsigned lim = src.m_num_ranges;
946 if (lim > m_max_ranges)
947 lim = m_max_ranges;
948
949 for (x = 0; x < lim * 2; ++x)
950 m_base[x] = src.m_base[x];
951
952 // If the range didn't fit, the last range should cover the rest.
953 if (lim != src.m_num_ranges)
954 m_base[x - 1] = src.m_base[src.m_num_ranges * 2 - 1];
955
956 m_num_ranges = lim;
957 m_type = src.m_type;
958 m_kind = src.m_kind;
959 m_bitmask = src.m_bitmask;
960 if (m_max_ranges == 1)
961 normalize_kind ();
962 if (flag_checking)
963 verify_range ();
964 return *this;
965 }
966
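// Convert irange R into the legacy kind/MIN/MAX form.  As an
// illustration, a 2-pair int range such as [-MIN, 4][11, +MAX] comes
// back as the anti-range ~[5, 10], whereas other multi-pair ranges
// are flattened to their overall [lower_bound, upper_bound].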
967 value_range_kind
968 get_legacy_range (const irange &r, tree &min, tree &max)
969 {
970 if (r.undefined_p ())
971 {
972 min = NULL_TREE;
973 max = NULL_TREE;
974 return VR_UNDEFINED;
975 }
976
977 tree type = r.type ();
978 if (r.varying_p ())
979 {
980 min = wide_int_to_tree (type, r.lower_bound ());
981 max = wide_int_to_tree (type, r.upper_bound ());
982 return VR_VARYING;
983 }
984
985 unsigned int precision = TYPE_PRECISION (type);
986 signop sign = TYPE_SIGN (type);
987 if (r.num_pairs () > 1
988 && precision > 1
989 && r.lower_bound () == wi::min_value (precision, sign)
990 && r.upper_bound () == wi::max_value (precision, sign))
991 {
992 int_range<3> inv (r);
993 inv.invert ();
994 min = wide_int_to_tree (type, inv.lower_bound (0));
995 max = wide_int_to_tree (type, inv.upper_bound (0));
996 return VR_ANTI_RANGE;
997 }
998
999 min = wide_int_to_tree (type, r.lower_bound ());
1000 max = wide_int_to_tree (type, r.upper_bound ());
1001 return VR_RANGE;
1002 }
1003
1004 /* Set value range to the canonical form of {VRTYPE, MIN, MAX, EQUIV}.
1005 This means adjusting VRTYPE, MIN and MAX representing the case of a
1006 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
1007 as the anti-range ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
1008 In corner cases where MAX+1 or MIN-1 wraps this will fall back
1009 to varying.
1010 This routine exists to ease canonicalization in the case where we
1011 extract ranges from var + CST op limit. */
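/* For example, setting ~[5, 10] (VR_ANTI_RANGE) on a 32-bit signed type
   builds the two sub-ranges [-MIN, 4][11, +MAX], while ~[-MIN, 10]
   drops the underflowing first sub-range and builds just [11, +MAX].  */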
1012
1013 void
1014 irange::set (tree type, const wide_int &min, const wide_int &max,
1015 value_range_kind kind)
1016 {
1017 unsigned prec = TYPE_PRECISION (type);
1018 signop sign = TYPE_SIGN (type);
1019 wide_int min_value = wi::min_value (prec, sign);
1020 wide_int max_value = wi::max_value (prec, sign);
1021
1022 m_type = type;
1023 m_bitmask.set_unknown (prec);
1024
1025 if (kind == VR_RANGE)
1026 {
1027 m_base[0] = min;
1028 m_base[1] = max;
1029 m_num_ranges = 1;
1030 if (min == min_value && max == max_value)
1031 m_kind = VR_VARYING;
1032 else
1033 m_kind = VR_RANGE;
1034 }
1035 else
1036 {
1037 gcc_checking_assert (kind == VR_ANTI_RANGE);
1038 gcc_checking_assert (m_max_ranges > 1);
1039
1040 m_kind = VR_UNDEFINED;
1041 m_num_ranges = 0;
1042 wi::overflow_type ovf;
1043 wide_int lim;
1044 if (sign == SIGNED)
1045 lim = wi::add (min, -1, sign, &ovf);
1046 else
1047 lim = wi::sub (min, 1, sign, &ovf);
1048
1049 if (!ovf)
1050 {
1051 m_kind = VR_RANGE;
1052 m_base[0] = min_value;
1053 m_base[1] = lim;
1054 ++m_num_ranges;
1055 }
1056 if (sign == SIGNED)
1057 lim = wi::sub (max, -1, sign, &ovf);
1058 else
1059 lim = wi::add (max, 1, sign, &ovf);
1060 if (!ovf)
1061 {
1062 m_kind = VR_RANGE;
1063 m_base[m_num_ranges * 2] = lim;
1064 m_base[m_num_ranges * 2 + 1] = max_value;
1065 ++m_num_ranges;
1066 }
1067 }
1068
1069 if (flag_checking)
1070 verify_range ();
1071 }
1072
1073 void
1074 irange::set (tree min, tree max, value_range_kind kind)
1075 {
1076 if (POLY_INT_CST_P (min) || POLY_INT_CST_P (max))
1077 {
1078 set_varying (TREE_TYPE (min));
1079 return;
1080 }
1081
1082 gcc_checking_assert (TREE_CODE (min) == INTEGER_CST);
1083 gcc_checking_assert (TREE_CODE (max) == INTEGER_CST);
1084
1085 return set (TREE_TYPE (min), wi::to_wide (min), wi::to_wide (max), kind);
1086 }
1087
1088 // Check the validity of the range.
1089
1090 void
1091 irange::verify_range ()
1092 {
1093 gcc_checking_assert (m_discriminator == VR_IRANGE);
1094 if (m_kind == VR_UNDEFINED)
1095 {
1096 gcc_checking_assert (m_num_ranges == 0);
1097 return;
1098 }
1099 gcc_checking_assert (m_num_ranges <= m_max_ranges);
1100
1101 // Legacy allowed these to represent VARYING for unknown types.
1102 // Leave this in for now, until all users are converted. Eventually
1103 // we should abort in set_varying.
1104 if (m_kind == VR_VARYING && m_type == error_mark_node)
1105 return;
1106
1107 unsigned prec = TYPE_PRECISION (m_type);
1108 if (m_kind == VR_VARYING)
1109 {
1110 gcc_checking_assert (m_bitmask.unknown_p ());
1111 gcc_checking_assert (m_num_ranges == 1);
1112 gcc_checking_assert (varying_compatible_p ());
1113 gcc_checking_assert (lower_bound ().get_precision () == prec);
1114 gcc_checking_assert (upper_bound ().get_precision () == prec);
1115 return;
1116 }
1117 gcc_checking_assert (m_num_ranges != 0);
1118 gcc_checking_assert (!varying_compatible_p ());
1119 for (unsigned i = 0; i < m_num_ranges; ++i)
1120 {
1121 wide_int lb = lower_bound (i);
1122 wide_int ub = upper_bound (i);
1123 gcc_checking_assert (lb.get_precision () == prec);
1124 gcc_checking_assert (ub.get_precision () == prec);
1125 int c = wi::cmp (lb, ub, TYPE_SIGN (m_type));
1126 gcc_checking_assert (c == 0 || c == -1);
1127 }
1128 m_bitmask.verify_mask ();
1129 }
1130
1131 bool
1132 irange::operator== (const irange &other) const
1133 {
1134 if (m_num_ranges != other.m_num_ranges)
1135 return false;
1136
1137 if (m_num_ranges == 0)
1138 return true;
1139
1140 signop sign1 = TYPE_SIGN (type ());
1141 signop sign2 = TYPE_SIGN (other.type ());
1142
1143 for (unsigned i = 0; i < m_num_ranges; ++i)
1144 {
1145 widest_int lb = widest_int::from (lower_bound (i), sign1);
1146 widest_int ub = widest_int::from (upper_bound (i), sign1);
1147 widest_int lb_other = widest_int::from (other.lower_bound (i), sign2);
1148 widest_int ub_other = widest_int::from (other.upper_bound (i), sign2);
1149 if (lb != lb_other || ub != ub_other)
1150 return false;
1151 }
1152
1153 irange_bitmask bm1 = get_bitmask ();
1154 irange_bitmask bm2 = other.get_bitmask ();
1155 widest_int tmp1 = widest_int::from (bm1.mask (), sign1);
1156 widest_int tmp2 = widest_int::from (bm2.mask (), sign2);
1157 if (tmp1 != tmp2)
1158 return false;
1159 if (bm1.unknown_p ())
1160 return true;
1161 tmp1 = widest_int::from (bm1.value (), sign1);
1162 tmp2 = widest_int::from (bm2.value (), sign2);
1163 return tmp1 == tmp2;
1164 }
1165
1166 /* If range is a singleton, place it in RESULT and return TRUE. */
1167
1168 bool
1169 irange::singleton_p (tree *result) const
1170 {
1171 if (num_pairs () == 1 && lower_bound () == upper_bound ())
1172 {
1173 if (result)
1174 *result = wide_int_to_tree (type (), lower_bound ());
1175 return true;
1176 }
1177 return false;
1178 }
1179
1180 bool
1181 irange::singleton_p (wide_int &w) const
1182 {
1183 if (num_pairs () == 1 && lower_bound () == upper_bound ())
1184 {
1185 w = lower_bound ();
1186 return true;
1187 }
1188 return false;
1189 }
1190
1191 /* Return TRUE if CST is inside the value range,
1192 FALSE otherwise.
1193
1194 Benchmark compile/20001226-1.c compilation time after changing this
1195 function. */
1196
1197 bool
1198 irange::contains_p (const wide_int &cst) const
1199 {
1200 if (undefined_p ())
1201 return false;
1202
1203 // See if we can exclude CST based on the known 0 bits.
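// For example, if the low four bits are known to be zero (nonzero
// bits of 0xfffffff0 in a 32-bit type), a CST of 7 is rejected here
// without walking the sub-ranges at all.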
1204 if (!m_bitmask.unknown_p ()
1205 && cst != 0
1206 && wi::bit_and (m_bitmask.get_nonzero_bits (), cst) == 0)
1207 return false;
1208
1209 signop sign = TYPE_SIGN (type ());
1210 for (unsigned r = 0; r < m_num_ranges; ++r)
1211 {
1212 if (wi::lt_p (cst, lower_bound (r), sign))
1213 return false;
1214 if (wi::le_p (cst, upper_bound (r), sign))
1215 return true;
1216 }
1217
1218 return false;
1219 }
1220
1221 // Perform an efficient union with R when both ranges have only a single pair.
1222 // Excluded are VARYING and UNDEFINED ranges.
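// For example, [1, 5] U [8, 10] stays [1, 5][8, 10] when THIS has room
// for a second pair, but collapses to [1, 10] on a single-pair target,
// while [1, 5] U [6, 10] always becomes [1, 10] because the two ranges
// touch.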
1223
1224 bool
1225 irange::irange_single_pair_union (const irange &r)
1226 {
1227 gcc_checking_assert (!undefined_p () && !varying_p ());
1228 gcc_checking_assert (!r.undefined_p () && !r.varying_p ());
1229
1230 signop sign = TYPE_SIGN (m_type);
1231 // Check if current lower bound is also the new lower bound.
1232 if (wi::le_p (m_base[0], r.m_base[0], sign))
1233 {
1234 // If current upper bound is new upper bound, we're done.
1235 if (wi::le_p (r.m_base[1], m_base[1], sign))
1236 return union_bitmask (r);
1237 // Otherwise R has the new upper bound.
1238 // Check for overlap/touching ranges, or single target range.
1239 if (m_max_ranges == 1
1240 || (widest_int::from (m_base[1], sign) + 1
1241 >= widest_int::from (r.m_base[0], TYPE_SIGN (r.m_type))))
1242 m_base[1] = r.m_base[1];
1243 else
1244 {
1245 // This is a dual range result.
1246 m_base[2] = r.m_base[0];
1247 m_base[3] = r.m_base[1];
1248 m_num_ranges = 2;
1249 }
1250 // The range has been altered, so normalize it even if nothing
1251 // changed in the mask.
1252 if (!union_bitmask (r))
1253 normalize_kind ();
1254 if (flag_checking)
1255 verify_range ();
1256 return true;
1257 }
1258
1259 // Set the new lower bound to R's lower bound.
1260 wide_int lb = m_base[0];
1261 m_base[0] = r.m_base[0];
1262
1263 // If R fully contains THIS range, just set the upper bound.
1264 if (wi::ge_p (r.m_base[1], m_base[1], sign))
1265 m_base[1] = r.m_base[1];
1266 // Check for overlapping ranges, or target limited to a single range.
1267 else if (m_max_ranges == 1
1268 || (widest_int::from (r.m_base[1], TYPE_SIGN (r.m_type)) + 1
1269 >= widest_int::from (lb, sign)))
1270 ;
1271 else
1272 {
1273 // Left with 2 pairs.
1274 m_num_ranges = 2;
1275 m_base[2] = lb;
1276 m_base[3] = m_base[1];
1277 m_base[1] = r.m_base[1];
1278 }
1279 // The range has been altered, so normalize it even if nothing
1280 // changed in the mask.
1281 if (!union_bitmask (r))
1282 normalize_kind ();
1283 if (flag_checking)
1284 verify_range ();
1285 return true;
1286 }
1287
1288 // Return TRUE if anything changes.
1289
1290 bool
1291 irange::union_ (const vrange &v)
1292 {
1293 const irange &r = as_a <irange> (v);
1294
1295 if (r.undefined_p ())
1296 return false;
1297
1298 if (undefined_p ())
1299 {
1300 operator= (r);
1301 if (flag_checking)
1302 verify_range ();
1303 return true;
1304 }
1305
1306 if (varying_p ())
1307 return false;
1308
1309 if (r.varying_p ())
1310 {
1311 set_varying (type ());
1312 return true;
1313 }
1314
1315 // Special case one range union one range.
1316 if (m_num_ranges == 1 && r.m_num_ranges == 1)
1317 return irange_single_pair_union (r);
1318
1319 // If this range fully contains R, then we need do nothing.
1320 if (irange_contains_p (r))
1321 return union_bitmask (r);
1322
1323 // Do not worry about merging and such by reserving twice as many
1324 // pairs as needed, and then simply sort the 2 ranges into this
1325 // intermediate form.
1326 //
1327 // The intermediate result will have the property that the beginning
1328 // of each range is <= the beginning of the next range. There may
1329 // be overlapping ranges at this point. I.e. this would be valid
1330 // [-20, 10], [-10, 0], [0, 20], [40, 90] as it satisfies the
1331 // constraint: -20 < -10 < 0 < 40.  When the result is rebuilt back
1332 // into this range, the merges are performed.
1333 //
1334 // [Xi,Yi]..[Xn,Yn] U [Xj,Yj]..[Xm,Ym] --> [Xk,Yk]..[Xp,Yp]
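//
// For example, [10,20][30,40] U [5,12][37,50] is first laid out as the
// sorted-by-lower-bound sequence [5,12][10,20][30,40][37,50], which the
// normalization loop below then collapses into [5,20][30,50].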
1335 auto_vec<wide_int, 20> res (m_num_ranges * 2 + r.m_num_ranges * 2);
1336 unsigned i = 0, j = 0, k = 0;
1337 signop sign = TYPE_SIGN (m_type);
1338
1339 while (i < m_num_ranges * 2 && j < r.m_num_ranges * 2)
1340 {
1341 // lower of Xi and Xj is the lowest point.
1342 if (widest_int::from (m_base[i], sign)
1343 <= widest_int::from (r.m_base[j], sign))
1344 {
1345 res.quick_push (m_base[i]);
1346 res.quick_push (m_base[i + 1]);
1347 k += 2;
1348 i += 2;
1349 }
1350 else
1351 {
1352 res.quick_push (r.m_base[j]);
1353 res.quick_push (r.m_base[j + 1]);
1354 k += 2;
1355 j += 2;
1356 }
1357 }
1358 for ( ; i < m_num_ranges * 2; i += 2)
1359 {
1360 res.quick_push (m_base[i]);
1361 res.quick_push (m_base[i + 1]);
1362 k += 2;
1363 }
1364 for ( ; j < r.m_num_ranges * 2; j += 2)
1365 {
1366 res.quick_push (r.m_base[j]);
1367 res.quick_push (r.m_base[j + 1]);
1368 k += 2;
1369 }
1370
1371 // Now normalize the vector removing any overlaps.
1372 i = 2;
1373 for (j = 2; j < k ; j += 2)
1374 {
1375 // If the current upper+1 is >= the lower bound of the next pair, merge the ranges.
1376 if (widest_int::from (res[i - 1], sign) + 1
1377 >= widest_int::from (res[j], sign))
1378 {
1379 // The new upper bound is the greater of the current one and the next one.
1380 if (widest_int::from (res[j + 1], sign)
1381 > widest_int::from (res[i - 1], sign))
1382 res[i - 1] = res[j + 1];
1383 }
1384 else
1385 {
1386 // This is a new distinct range, but no point in copying it
1387 // if it is already in the right place.
1388 if (i != j)
1389 {
1390 res[i++] = res[j];
1391 res[i++] = res[j + 1];
1392 }
1393 else
1394 i += 2;
1395 }
1396 }
1397
1398 // At this point, the vector should have i values (i/2 ranges), none
1399 // overlapping.  Now it simply needs to be copied, and if there are
1400 // too many ranges, some must be merged.  We won't do any analysis as
1401 // to what the "best" merges are; simply combine the final ranges into one.
1402 maybe_resize (i / 2);
1403 if (i > m_max_ranges * 2)
1404 {
1405 res[m_max_ranges * 2 - 1] = res[i - 1];
1406 i = m_max_ranges * 2;
1407 }
1408
1409 for (j = 0; j < i ; j++)
1410 m_base[j] = res [j];
1411 m_num_ranges = i / 2;
1412
1413 m_kind = VR_RANGE;
1414 // The range has been altered, so normalize it even if nothing
1415 // changed in the mask.
1416 if (!union_bitmask (r))
1417 normalize_kind ();
1418 if (flag_checking)
1419 verify_range ();
1420 return true;
1421 }
1422
1423 // Return TRUE if THIS fully contains R. No undefined or varying cases.
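// For example, [1, 10][20, 30] fully contains [2, 5][25, 30], but it
// does not contain [2, 15], which straddles the gap between the pairs.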
1424
1425 bool
1426 irange::irange_contains_p (const irange &r) const
1427 {
1428 gcc_checking_assert (!undefined_p () && !varying_p ());
1429 gcc_checking_assert (!r.undefined_p () && !r.varying_p ());
1430
1431 // In order for THIS to fully contain R, all of the pairs within R must
1432 // be fully contained by the pairs in this object.
1433 signop sign = TYPE_SIGN (m_type);
1434 unsigned ri = 0;
1435 unsigned i = 0;
1436 wide_int rl = r.m_base[0];
1437 wide_int ru = r.m_base[1];
1438 wide_int l = m_base[0];
1439 wide_int u = m_base[1];
1440 while (1)
1441 {
1442 // If r is contained within this range, move to the next R
1443 if (wi::ge_p (rl, l, sign)
1444 && wi::le_p (ru, u, sign))
1445 {
1446 // This pair is OK, Either done, or bump to the next.
1447 if (++ri >= r.num_pairs ())
1448 return true;
1449 rl = r.m_base[ri * 2];
1450 ru = r.m_base[ri * 2 + 1];
1451 continue;
1452 }
1453 // Otherwise, check whether this range's current pair occurs before R's.
1454 if (wi::lt_p (u, rl, sign))
1455 {
1456 // R still has at least one pair left, so if THIS has run out of pairs it cannot contain R.
1457 if (++i >= num_pairs ())
1458 return false;
1459 l = m_base[i * 2];
1460 u = m_base[i * 2 + 1];
1461 continue;
1462 }
1463 return false;
1464 }
1465 return false;
1466 }
1467
1468
1469 // Return TRUE if anything changes.
1470
1471 bool
1472 irange::intersect (const vrange &v)
1473 {
1474 const irange &r = as_a <irange> (v);
1475 gcc_checking_assert (undefined_p () || r.undefined_p ()
1476 || range_compatible_p (type (), r.type ()));
1477
1478 if (undefined_p ())
1479 return false;
1480 if (r.undefined_p ())
1481 {
1482 set_undefined ();
1483 return true;
1484 }
1485 if (r.varying_p ())
1486 return false;
1487 if (varying_p ())
1488 {
1489 operator= (r);
1490 return true;
1491 }
1492
1493 if (r.num_pairs () == 1)
1494 {
1495 bool res = intersect (r.lower_bound (), r.upper_bound ());
1496 if (undefined_p ())
1497 return true;
1498
1499 res |= intersect_bitmask (r);
1500 if (res)
1501 normalize_kind ();
1502 return res;
1503 }
1504
1505 // If R fully contains this, then intersection will change nothing.
1506 if (r.irange_contains_p (*this))
1507 return intersect_bitmask (r);
1508
1509 // ?? We could probably come up with something smarter than the
1510 // worst case scenario here.
1511 int needed = num_pairs () + r.num_pairs ();
1512 maybe_resize (needed);
1513
1514 signop sign = TYPE_SIGN (m_type);
1515 unsigned bld_pair = 0;
1516 unsigned bld_lim = m_max_ranges;
1517 int_range_max r2 (*this);
1518 unsigned r2_lim = r2.num_pairs ();
1519 unsigned i2 = 0;
1520 for (unsigned i = 0; i < r.num_pairs (); )
1521 {
1522 // If r1's upper is < r2's lower, we can skip r1's pair.
1523 wide_int ru = r.m_base[i * 2 + 1];
1524 wide_int r2l = r2.m_base[i2 * 2];
1525 if (wi::lt_p (ru, r2l, sign))
1526 {
1527 i++;
1528 continue;
1529 }
1530 // Likewise, skip r2's pair if it's excluded.
1531 wide_int r2u = r2.m_base[i2 * 2 + 1];
1532 wide_int rl = r.m_base[i * 2];
1533 if (wi::lt_p (r2u, rl, sign))
1534 {
1535 i2++;
1536 if (i2 < r2_lim)
1537 continue;
1538 // No more r2, break.
1539 break;
1540 }
1541
1542 // Must be some overlap. Find the highest of the lower bounds,
1543 // and set it, unless we have hit the build limit, in which case
1544 // the lower bound is already set.
1545 if (bld_pair < bld_lim)
1546 {
1547 if (wi::ge_p (rl, r2l, sign))
1548 m_base[bld_pair * 2] = rl;
1549 else
1550 m_base[bld_pair * 2] = r2l;
1551 }
1552 else
1553 // Otherwise we are out of pairs; back up so the store below extends the last pair's upper bound.
1554 bld_pair--;
1555
1556 // ...and choose the lower of the upper bounds.
1557 if (wi::le_p (ru, r2u, sign))
1558 {
1559 m_base[bld_pair * 2 + 1] = ru;
1560 bld_pair++;
1561 // Move past the r1 pair and keep trying.
1562 i++;
1563 continue;
1564 }
1565 else
1566 {
1567 m_base[bld_pair * 2 + 1] = r2u;
1568 bld_pair++;
1569 i2++;
1570 if (i2 < r2_lim)
1571 continue;
1572 // No more r2, break.
1573 break;
1574 }
1575 // r2 has the higher lower bound.
1576 }
1577
1578 // At the exit of this loop, one of two things happened: we ran out
1579 // of R's pairs or out of r2's, but either way we are done.
1580 m_num_ranges = bld_pair;
1581 if (m_num_ranges == 0)
1582 {
1583 set_undefined ();
1584 return true;
1585 }
1586
1587 m_kind = VR_RANGE;
1588 // The range has been altered, so normalize it even if nothing
1589 // changed in the mask.
1590 if (!intersect_bitmask (r))
1591 normalize_kind ();
1592 if (flag_checking)
1593 verify_range ();
1594 return true;
1595 }
1596
1597
1598 // Multirange intersect for a specified wide_int [lb, ub] range.
1599 // Return TRUE if intersect changed anything.
1600 //
1601 // NOTE: It is the caller's responsibility to intersect the mask.
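//
// For example, intersecting [1, 10][20, 30] with the pair [5, 25]
// leaves [5, 10][20, 25].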
1602
1603 bool
1604 irange::intersect (const wide_int& lb, const wide_int& ub)
1605 {
1606 // Undefined remains undefined.
1607 if (undefined_p ())
1608 return false;
1609
1610 tree range_type = type();
1611 signop sign = TYPE_SIGN (range_type);
1612
1613 gcc_checking_assert (TYPE_PRECISION (range_type) == wi::get_precision (lb));
1614 gcc_checking_assert (TYPE_PRECISION (range_type) == wi::get_precision (ub));
1615
1616 // If this range is fully contained, then intersection will do nothing.
1617 if (wi::ge_p (lower_bound (), lb, sign)
1618 && wi::le_p (upper_bound (), ub, sign))
1619 return false;
1620
1621 unsigned bld_index = 0;
1622 unsigned pair_lim = num_pairs ();
1623 for (unsigned i = 0; i < pair_lim; i++)
1624 {
1625 wide_int pairl = m_base[i * 2];
1626 wide_int pairu = m_base[i * 2 + 1];
1627 // Once UB is less than a pair's lower bound, we're done.
1628 if (wi::lt_p (ub, pairl, sign))
1629 break;
1630 // If LB is greater than this pair's upper bound, this pair is excluded.
1631 if (wi::lt_p (pairu, lb, sign))
1632 continue;
1633
1634 // Must be some overlap. Find the highest of the lower bounds,
1635 // and set it
1636 if (wi::gt_p (lb, pairl, sign))
1637 m_base[bld_index * 2] = lb;
1638 else
1639 m_base[bld_index * 2] = pairl;
1640
1641 // ...and choose the lower of the upper bounds.  If the base pair
1642 // has the lower upper bound, the next pair must be checked too.
1643 if (wi::lt_p (ub, pairu, sign))
1644 {
1645 m_base[bld_index++ * 2 + 1] = ub;
1646 break;
1647 }
1648 else
1649 m_base[bld_index++ * 2 + 1] = pairu;
1650 }
1651
1652 m_num_ranges = bld_index;
1653 if (m_num_ranges == 0)
1654 {
1655 set_undefined ();
1656 return true;
1657 }
1658
1659 m_kind = VR_RANGE;
1660 // The caller must normalize and verify the range, as the bitmask
1661 // still needs to be handled.
1662 return true;
1663 }
1664
1665
1666 // Signed 1-bit types are strange.  You can't subtract 1, because you can't
1667 // represent the number 1. This works around that for the invert routine.
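// For a signed 1-bit type the only values are -1 and 0, so "x - 1" is
// computed as "x + (-1)"; subtract_one (0) then yields -1 with no
// overflow, while subtract_one (-1) sets the overflow flag.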
1668
1669 static wide_int inline
1670 subtract_one (const wide_int &x, tree type, wi::overflow_type &overflow)
1671 {
1672 if (TYPE_SIGN (type) == SIGNED)
1673 return wi::add (x, -1, SIGNED, &overflow);
1674 else
1675 return wi::sub (x, 1, UNSIGNED, &overflow);
1676 }
1677
1678 // The analogous function for adding 1.
1679
1680 static wide_int inline
1681 add_one (const wide_int &x, tree type, wi::overflow_type &overflow)
1682 {
1683 if (TYPE_SIGN (type) == SIGNED)
1684 return wi::sub (x, -1, SIGNED, &overflow);
1685 else
1686 return wi::add (x, 1, UNSIGNED, &overflow);
1687 }
1688
1689 // Return the inverse of a range.
1690
1691 void
1692 irange::invert ()
1693 {
1694 gcc_checking_assert (!undefined_p () && !varying_p ());
1695
1696 // We always need one more set of bounds to represent an inverse, so
1697 // if we're at the limit, we can't properly represent things.
1698 //
1699 // For instance, to represent the inverse of a 2 sub-range set
1700 // [5, 10][20, 30], we would need a 3 sub-range set
1701 // [-MIN, 4][11, 19][31, MAX].
1702 //
1703 // In this case, return the most conservative thing.
1704 //
1705 // However, if any of the extremes of the range are -MIN/+MAX, we
1706 // know we will not need an extra bound. For example:
1707 //
1708 // INVERT([-MIN,20][30,40]) => [21,29][41,+MAX]
1709 // INVERT([-MIN,20][30,MAX]) => [21,29]
1710 tree ttype = type ();
1711 unsigned prec = TYPE_PRECISION (ttype);
1712 signop sign = TYPE_SIGN (ttype);
1713 wide_int type_min = wi::min_value (prec, sign);
1714 wide_int type_max = wi::max_value (prec, sign);
1715 m_bitmask.set_unknown (prec);
1716
1717 // At this point, we need one extra sub-range to represent the
1718 // inverse.
1719 maybe_resize (m_num_ranges + 1);
1720
1721 // The algorithm is as follows. To calculate INVERT ([a,b][c,d]), we
1722 // generate [-MIN, a-1][b+1, c-1][d+1, MAX].
1723 //
1724 // If there is an over/underflow in the calculation for any
1725 // sub-range, we eliminate that subrange. This allows us to easily
1726 // calculate INVERT([-MIN, 5]) with: [-MIN, -MIN-1][6, MAX]. And since
1727 // we eliminate the underflow, only [6, MAX] remains.
1728 unsigned i = 0;
1729 wi::overflow_type ovf;
1730 // Construct leftmost range.
1731 int_range_max orig_range (*this);
1732 unsigned nitems = 0;
1733 wide_int tmp;
1734 // If this is going to underflow on the MINUS 1, don't even bother
1735 // checking. This also handles subtracting one from an unsigned 0,
1736 // which doesn't set the underflow bit.
1737 if (type_min != orig_range.lower_bound ())
1738 {
1739 m_base[nitems++] = type_min;
1740 tmp = subtract_one (orig_range.lower_bound (), ttype, ovf);
1741 m_base[nitems++] = tmp;
1742 if (ovf)
1743 nitems = 0;
1744 }
1745 i++;
1746 // Construct middle ranges if applicable.
1747 if (orig_range.num_pairs () > 1)
1748 {
1749 unsigned j = i;
1750 for (; j < (orig_range.num_pairs () * 2) - 1; j += 2)
1751 {
1752 // The middle ranges cannot have MAX/MIN, so there's no need
1753 // to check for unsigned overflow on the +1 and -1 here.
1754 tmp = wi::add (orig_range.m_base[j], 1, sign, &ovf);
1755 m_base[nitems++] = tmp;
1756 tmp = subtract_one (orig_range.m_base[j + 1], ttype, ovf);
1757 m_base[nitems++] = tmp;
1758 if (ovf)
1759 nitems -= 2;
1760 }
1761 i = j;
1762 }
1763 // Construct rightmost range.
1764 //
1765 // However, if this will overflow on the PLUS 1, don't even bother.
1766 // This also handles adding one to an unsigned MAX, which doesn't
1767 // set the overflow bit.
1768 if (type_max != orig_range.m_base[i])
1769 {
1770 tmp = add_one (orig_range.m_base[i], ttype, ovf);
1771 m_base[nitems++] = tmp;
1772 m_base[nitems++] = type_max;
1773 if (ovf)
1774 nitems -= 2;
1775 }
1776 m_num_ranges = nitems / 2;
1777
1778 // We disallow undefined or varying coming in, so the result can
1779 // only be a VR_RANGE.
1780 gcc_checking_assert (m_kind == VR_RANGE);
1781
1782 if (flag_checking)
1783 verify_range ();
1784 }
1785
1786 // Return the bitmask inherent in the range.
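//
// For example, a 32-bit range of [0x10, 0x1f] yields
// MASK 0x1f VALUE 0x0 (the upper 27 bits are known to be zero), and a
// singleton such as [5, 5] yields MASK 0x0 VALUE 0x5.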
1787
1788 irange_bitmask
1789 irange::get_bitmask_from_range () const
1790 {
1791 unsigned prec = TYPE_PRECISION (type ());
1792 wide_int min = lower_bound ();
1793 wide_int max = upper_bound ();
1794
1795 // All the bits of a singleton are known.
1796 if (min == max)
1797 {
1798 wide_int mask = wi::zero (prec);
1799 wide_int value = lower_bound ();
1800 return irange_bitmask (value, mask);
1801 }
1802
1803 wide_int xorv = min ^ max;
1804
1805 if (xorv != 0)
1806 xorv = wi::mask (prec - wi::clz (xorv), false, prec);
1807
1808 return irange_bitmask (wi::zero (prec), min | xorv);
1809 }
1810
1811 // If the mask can be trivially converted to a range, do so and
1812 // return TRUE.
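//
// For example, MASK 0x0 VALUE 0x24 (all bits known) collapses the range
// to the singleton [0x24, 0x24], while a mask whose only possibly
// nonzero bit is bit 2 narrows a range containing both 0 and 4 down to
// [0, 0][4, 4].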
1813
1814 bool
1815 irange::set_range_from_bitmask ()
1816 {
1817 gcc_checking_assert (!undefined_p ());
1818 if (m_bitmask.unknown_p ())
1819 return false;
1820
1821 // If all the bits are known, this is a singleton.
1822 if (m_bitmask.mask () == 0)
1823 {
1824 set (m_type, m_bitmask.value (), m_bitmask.value ());
1825 return true;
1826 }
1827
1828 unsigned popcount = wi::popcount (m_bitmask.get_nonzero_bits ());
1829
1830 // If we have only one bit set in the mask, we can figure out the
1831 // range immediately.
1832 if (popcount == 1)
1833 {
1834 // Make sure we don't pessimize the range.
1835 if (!contains_p (m_bitmask.get_nonzero_bits ()))
1836 return false;
1837
1838 bool has_zero = contains_zero_p (*this);
1839 wide_int nz = m_bitmask.get_nonzero_bits ();
1840 set (m_type, nz, nz);
1841 m_bitmask.set_nonzero_bits (nz);
1842 if (has_zero)
1843 {
1844 int_range<2> zero;
1845 zero.set_zero (type ());
1846 union_ (zero);
1847 }
1848 if (flag_checking)
1849 verify_range ();
1850 return true;
1851 }
1852 else if (popcount == 0)
1853 {
1854 set_zero (type ());
1855 return true;
1856 }
1857 return false;
1858 }
1859
1860 void
1861 irange::update_bitmask (const irange_bitmask &bm)
1862 {
1863 gcc_checking_assert (!undefined_p ());
1864
1865 // Drop VARYINGs with known bits to a plain range.
1866 if (m_kind == VR_VARYING && !bm.unknown_p ())
1867 m_kind = VR_RANGE;
1868
1869 m_bitmask = bm;
1870 if (!set_range_from_bitmask ())
1871 normalize_kind ();
1872 if (flag_checking)
1873 verify_range ();
1874 }
1875
1876 // Return the bitmask of known bits that includes the bitmask inherent
1877 // in the range.
1878
1879 irange_bitmask
1880 irange::get_bitmask () const
1881 {
1882 gcc_checking_assert (!undefined_p ());
1883
1884 // The mask inherent in the range is calculated on-demand. For
1885 // example, [0,255] does not have known bits set by default. This
1886 // saves us considerable time, because setting it at creation incurs
1887 // a large penalty for irange::set. At the time of writing there
1888 // was a 5% slowdown in VRP if we kept the mask precisely up to date
1889 // at all times. Instead, we default to -1 and set it when
1890 // explicitly requested. However, this function will always return
1891 // the correct mask.
1892 //
1893 // This also means that the mask may have a finer granularity than
1894 // the range and thus contradict it. Think of the mask as an
1895 // enhancement to the range. For example:
1896 //
1897 // [3, 1000] MASK 0xfffffffe VALUE 0x0
1898 //
1899 // 3 is within the range's endpoints, but is excluded per the known 0 bits
1900 // in the mask.
1901 //
1902 // See also the note in irange_bitmask::intersect.
1903 irange_bitmask bm = get_bitmask_from_range ();
1904 if (!m_bitmask.unknown_p ())
1905 bm.intersect (m_bitmask);
1906 return bm;
1907 }
1908
1909 // Set the bits that may be nonzero in THIS to BITS, updating the
1910 // bitmask and normalizing the range accordingly.
1911
1912 void
1913 irange::set_nonzero_bits (const wide_int &bits)
1914 {
1915 gcc_checking_assert (!undefined_p ());
1916 irange_bitmask bm (wi::zero (TYPE_PRECISION (type ())), bits);
1917 update_bitmask (bm);
1918 }
1919
1920 // Return the bits in THIS that may be nonzero.
1921
1922 wide_int
1923 irange::get_nonzero_bits () const
1924 {
1925 gcc_checking_assert (!undefined_p ());
1926 irange_bitmask bm = get_bitmask ();
1927 return bm.value () | bm.mask ();
1928 }
1929
1930 // Intersect the bitmask in R into THIS and normalize the range.
1931 // Return TRUE if the intersection changed anything.
1932
1933 bool
1934 irange::intersect_bitmask (const irange &r)
1935 {
1936 gcc_checking_assert (!undefined_p () && !r.undefined_p ());
1937
1938 if (m_bitmask == r.m_bitmask)
1939 return false;
1940
1941 irange_bitmask bm = get_bitmask ();
1942 irange_bitmask save = bm;
1943 if (!bm.intersect (r.get_bitmask ()))
1944 return false;
1945
1946 m_bitmask = bm;
1947
1948 // Updating m_bitmask may still yield a semantic bitmask (as
1949 // returned by get_bitmask) which is functionally equivalent to what
1950 // we originally had, in which case there is still no change.
1951 if (save == get_bitmask ())
1952 return false;
1953
1954 if (!set_range_from_bitmask ())
1955 normalize_kind ();
1956 if (flag_checking)
1957 verify_range ();
1958 return true;
1959 }
1960
1961 // Union the bitmask in R into THIS. Return TRUE and normalize the
1962 // range if anything changed.
1963
1964 bool
1965 irange::union_bitmask (const irange &r)
1966 {
1967 gcc_checking_assert (!undefined_p () && !r.undefined_p ());
1968
1969 if (m_bitmask == r.m_bitmask)
1970 return false;
1971
1972 irange_bitmask bm = get_bitmask ();
1973 irange_bitmask save = bm;
1974 if (!bm.union_ (r.get_bitmask ()))
1975 return false;
1976
1977 m_bitmask = bm;
1978
1979 // Updating m_bitmask may still yield a semantic bitmask (as
1980 // returned by get_bitmask) which is functionally equivalent to what
1981 // we originally had, in which case there is still no change.
1982 if (save == get_bitmask ())
1983 return false;
1984
1985 // No need to call set_range_from_bitmask, because we'll never
1986 // narrow the range.  Besides, it would cause endless recursion
1987 // because of the union_ in set_range_from_bitmask.
1988 normalize_kind ();
1989 return true;
1990 }
1991
1992 void
1993 irange_bitmask::verify_mask () const
1994 {
1995 gcc_assert (m_value.get_precision () == m_mask.get_precision ());
1996 }
1997
1998 void
1999 dump_value_range (FILE *file, const vrange *vr)
2000 {
2001 vr->dump (file);
2002 }
2003
2004 DEBUG_FUNCTION void
2005 debug (const vrange *vr)
2006 {
2007 dump_value_range (stderr, vr);
2008 fprintf (stderr, "\n");
2009 }
2010
2011 DEBUG_FUNCTION void
2012 debug (const vrange &vr)
2013 {
2014 debug (&vr);
2015 }
2016
2017 DEBUG_FUNCTION void
2018 debug (const value_range *vr)
2019 {
2020 dump_value_range (stderr, vr);
2021 fprintf (stderr, "\n");
2022 }
2023
2024 DEBUG_FUNCTION void
2025 debug (const value_range &vr)
2026 {
2027 dump_value_range (stderr, &vr);
2028 fprintf (stderr, "\n");
2029 }
2030
2031 /* Return true if VAL1 and VAL2 are equal values for VRP purposes. */
2032
2033 bool
2034 vrp_operand_equal_p (const_tree val1, const_tree val2)
2035 {
2036 if (val1 == val2)
2037 return true;
2038 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
2039 return false;
2040 return true;
2041 }
2042
2043 void
2044 gt_ggc_mx (irange *x)
2045 {
2046 if (!x->undefined_p ())
2047 gt_ggc_mx (x->m_type);
2048 }
2049
2050 void
2051 gt_pch_nx (irange *x)
2052 {
2053 if (!x->undefined_p ())
2054 gt_pch_nx (x->m_type);
2055 }
2056
2057 void
2058 gt_pch_nx (irange *x, gt_pointer_operator op, void *cookie)
2059 {
2060 for (unsigned i = 0; i < x->m_num_ranges; ++i)
2061 {
2062 op (&x->m_base[i * 2], NULL, cookie);
2063 op (&x->m_base[i * 2 + 1], NULL, cookie);
2064 }
2065 }
2066
2067 void
2068 gt_ggc_mx (frange *x)
2069 {
2070 gt_ggc_mx (x->m_type);
2071 }
2072
2073 void
2074 gt_pch_nx (frange *x)
2075 {
2076 gt_pch_nx (x->m_type);
2077 }
2078
2079 void
2080 gt_pch_nx (frange *x, gt_pointer_operator op, void *cookie)
2081 {
2082 op (&x->m_type, NULL, cookie);
2083 }
2084
2085 void
2086 gt_ggc_mx (vrange *x)
2087 {
2088 if (is_a <irange> (*x))
2089 return gt_ggc_mx ((irange *) x);
2090 if (is_a <frange> (*x))
2091 return gt_ggc_mx ((frange *) x);
2092 gcc_unreachable ();
2093 }
2094
2095 void
2096 gt_pch_nx (vrange *x)
2097 {
2098 if (is_a <irange> (*x))
2099 return gt_pch_nx ((irange *) x);
2100 if (is_a <frange> (*x))
2101 return gt_pch_nx ((frange *) x);
2102 gcc_unreachable ();
2103 }
2104
2105 void
2106 gt_pch_nx (vrange *x, gt_pointer_operator op, void *cookie)
2107 {
2108 if (is_a <irange> (*x))
2109 gt_pch_nx ((irange *) x, op, cookie);
2110 else if (is_a <frange> (*x))
2111 gt_pch_nx ((frange *) x, op, cookie);
2112 else
2113 gcc_unreachable ();
2114 }
2115
2116 #define DEFINE_INT_RANGE_INSTANCE(N) \
2117 template int_range<N>::int_range(tree_node *, \
2118 const wide_int &, \
2119 const wide_int &, \
2120 value_range_kind); \
2121 template int_range<N>::int_range(tree); \
2122 template int_range<N>::int_range(const irange &); \
2123 template int_range<N>::int_range(const int_range &); \
2124 template int_range<N>& int_range<N>::operator= (const int_range &);
2125
2126 DEFINE_INT_RANGE_INSTANCE(1)
2127 DEFINE_INT_RANGE_INSTANCE(2)
2128 DEFINE_INT_RANGE_INSTANCE(3)
2129 DEFINE_INT_RANGE_INSTANCE(255)
2130
2131 #if CHECKING_P
2132 #include "selftest.h"
2133
2134 #define INT(x) wi::shwi ((x), TYPE_PRECISION (integer_type_node))
2135 #define UINT(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_type_node))
2136 #define SCHAR(x) wi::shwi ((x), TYPE_PRECISION (signed_char_type_node))
2137
2138 namespace selftest
2139 {
2140
2141 static int_range<2>
2142 range (tree type, int a, int b, value_range_kind kind = VR_RANGE)
2143 {
2144 wide_int w1, w2;
2145 if (TYPE_UNSIGNED (type))
2146 {
2147 w1 = wi::uhwi (a, TYPE_PRECISION (type));
2148 w2 = wi::uhwi (b, TYPE_PRECISION (type));
2149 }
2150 else
2151 {
2152 w1 = wi::shwi (a, TYPE_PRECISION (type));
2153 w2 = wi::shwi (b, TYPE_PRECISION (type));
2154 }
2155 return int_range<2> (type, w1, w2, kind);
2156 }
2157
2158 static int_range<2>
2159 range_int (int a, int b, value_range_kind kind = VR_RANGE)
2160 {
2161 return range (integer_type_node, a, b, kind);
2162 }
2163
2164 static int_range<2>
2165 range_uint (int a, int b, value_range_kind kind = VR_RANGE)
2166 {
2167 return range (unsigned_type_node, a, b, kind);
2168 }
2169
2170 static int_range<2>
2171 range_uint128 (int a, int b, value_range_kind kind = VR_RANGE)
2172 {
2173 tree u128_type_node = build_nonstandard_integer_type (128, 1);
2174 return range (u128_type_node, a, b, kind);
2175 }
2176
2177 static int_range<2>
2178 range_uchar (int a, int b, value_range_kind kind = VR_RANGE)
2179 {
2180 return range (unsigned_char_type_node, a, b, kind);
2181 }
2182
2183 static int_range<2>
2184 range_char (int a, int b, value_range_kind kind = VR_RANGE)
2185 {
2186 return range (signed_char_type_node, a, b, kind);
2187 }
2188
2189 static int_range<3>
2190 build_range3 (int a, int b, int c, int d, int e, int f)
2191 {
2192 int_range<3> i1 = range_int (a, b);
2193 int_range<3> i2 = range_int (c, d);
2194 int_range<3> i3 = range_int (e, f);
2195 i1.union_ (i2);
2196 i1.union_ (i3);
2197 return i1;
2198 }
2199
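// Test union behavior on multi-ranges holding up to three sub-ranges.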
2200 static void
2201 range_tests_irange3 ()
2202 {
2203 int_range<3> r0, r1, r2;
2204 int_range<3> i1, i2, i3;
2205
2206 // ([10,20] U [5,8]) U [1,3] ==> [1,3][5,8][10,20].
2207 r0 = range_int (10, 20);
2208 r1 = range_int (5, 8);
2209 r0.union_ (r1);
2210 r1 = range_int (1, 3);
2211 r0.union_ (r1);
2212 ASSERT_TRUE (r0 == build_range3 (1, 3, 5, 8, 10, 20));
2213
2214 // [1,3][5,8][10,20] U [-5,0] => [-5,3][5,8][10,20].
2215 r1 = range_int (-5, 0);
2216 r0.union_ (r1);
2217 ASSERT_TRUE (r0 == build_range3 (-5, 3, 5, 8, 10, 20));
2218
2219 // [10,20][30,40] U [50,60] ==> [10,20][30,40][50,60].
2220 r1 = range_int (50, 60);
2221 r0 = range_int (10, 20);
2222 r0.union_ (range_int (30, 40));
2223 r0.union_ (r1);
2224 ASSERT_TRUE (r0 == build_range3 (10, 20, 30, 40, 50, 60));
2225 // [10,20][30,40][50,60] U [70, 80] ==> [10,20][30,40][50,60][70,80].
2226 r1 = range_int (70, 80);
2227 r0.union_ (r1);
2228
2229 r2 = build_range3 (10, 20, 30, 40, 50, 60);
2230 r2.union_ (range_int (70, 80));
2231 ASSERT_TRUE (r0 == r2);
2232
2233 // [10,20][30,40][50,60] U [6,35] => [6,40][50,60].
2234 r0 = build_range3 (10, 20, 30, 40, 50, 60);
2235 r1 = range_int (6, 35);
2236 r0.union_ (r1);
2237 r1 = range_int (6, 40);
2238 r1.union_ (range_int (50, 60));
2239 ASSERT_TRUE (r0 == r1);
2240
2241 // [10,20][30,40][50,60] U [6,60] => [6,60].
2242 r0 = build_range3 (10, 20, 30, 40, 50, 60);
2243 r1 = range_int (6, 60);
2244 r0.union_ (r1);
2245 ASSERT_TRUE (r0 == range_int (6, 60));
2246
2247 // [10,20][30,40][50,60] U [6,70] => [6,70].
2248 r0 = build_range3 (10, 20, 30, 40, 50, 60);
2249 r1 = range_int (6, 70);
2250 r0.union_ (r1);
2251 ASSERT_TRUE (r0 == range_int (6, 70));
2252
2253 // [10,20][30,40][50,60] U [35,70] => [10,20][30,70].
2254 r0 = build_range3 (10, 20, 30, 40, 50, 60);
2255 r1 = range_int (35, 70);
2256 r0.union_ (r1);
2257 r1 = range_int (10, 20);
2258 r1.union_ (range_int (30, 70));
2259 ASSERT_TRUE (r0 == r1);
2260
2261 // [10,20][30,40][50,60] U [15,35] => [10,40][50,60].
2262 r0 = build_range3 (10, 20, 30, 40, 50, 60);
2263 r1 = range_int (15, 35);
2264 r0.union_ (r1);
2265 r1 = range_int (10, 40);
2266 r1.union_ (range_int (50, 60));
2267 ASSERT_TRUE (r0 == r1);
2268
2269 // [10,20][30,40][50,60] U [35,35] => [10,20][30,40][50,60].
2270 r0 = build_range3 (10, 20, 30, 40, 50, 60);
2271 r1 = range_int (35, 35);
2272 r0.union_ (r1);
2273 ASSERT_TRUE (r0 == build_range3 (10, 20, 30, 40, 50, 60));
2274 }
2275
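// Test that int_range_max can build, copy, invert and intersect a
// large number of sub-ranges without losing precision.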
2276 static void
2277 range_tests_int_range_max ()
2278 {
2279 int_range_max big;
2280 unsigned int nrange;
2281
2282 // Build a huge multi-range range.
2283 for (nrange = 0; nrange < 50; ++nrange)
2284 {
2285 int_range<1> tmp = range_int (nrange * 10, nrange * 10 + 5);
2286 big.union_ (tmp);
2287 }
2288 ASSERT_TRUE (big.num_pairs () == nrange);
2289
2290 // Verify that we can copy it without losing precision.
2291 int_range_max copy (big);
2292 ASSERT_TRUE (copy.num_pairs () == nrange);
2293
2294 // Inverting it should produce one more sub-range.
2295 big.invert ();
2296 ASSERT_TRUE (big.num_pairs () == nrange + 1);
2297
2298 int_range<1> tmp = range_int (5, 37);
2299 big.intersect (tmp);
2300 ASSERT_TRUE (big.num_pairs () == 4);
2301
2302 // Test that [10,10][20,20] does NOT contain 15.
2303 {
2304 int_range_max i1 = range_int (10, 10);
2305 int_range_max i2 = range_int (20, 20);
2306 i1.union_ (i2);
2307 ASSERT_FALSE (i1.contains_p (INT (15)));
2308 }
2309 }
2310
2311 // Simulate -fstrict-enums where the domain of a type is less than the
2312 // underlying type.
2313
2314 static void
2315 range_tests_strict_enum ()
2316 {
2317 // The enum can only hold [0, 3].
2318 tree rtype = copy_node (unsigned_type_node);
2319 TYPE_MIN_VALUE (rtype) = build_int_cstu (rtype, 0);
2320 TYPE_MAX_VALUE (rtype) = build_int_cstu (rtype, 3);
2321
2322 // Test that even though vr1 covers the strict enum domain ([0, 3]),
2323 // it does not cover the domain of the underlying type.
2324 int_range<1> vr1 = range (rtype, 0, 1);
2325 int_range<1> vr2 = range (rtype, 2, 3);
2326 vr1.union_ (vr2);
2327 ASSERT_TRUE (vr1 == range (rtype, 0, 3));
2328 ASSERT_FALSE (vr1.varying_p ());
2329
2330 // Test that copying to a multi-range does not change things.
2331 int_range<2> ir1 (vr1);
2332 ASSERT_TRUE (ir1 == vr1);
2333 ASSERT_FALSE (ir1.varying_p ());
2334
2335 // The same test as above, but using TYPE_{MIN,MAX}_VALUE instead of [0,3].
2336 vr1 = int_range<2> (rtype,
2337 wi::to_wide (TYPE_MIN_VALUE (rtype)),
2338 wi::to_wide (TYPE_MAX_VALUE (rtype)));
2339 ir1 = vr1;
2340 ASSERT_TRUE (ir1 == vr1);
2341 ASSERT_FALSE (ir1.varying_p ());
2342 }
2343
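// Miscellaneous integer and pointer range tests: 1-bit types,
// anti-ranges, inversion, union, intersection, and the contains_p,
// zero_p and nonzero_p predicates.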
2344 static void
2345 range_tests_misc ()
2346 {
2347 tree u128_type = build_nonstandard_integer_type (128, /*unsigned=*/1);
2348 int_range<2> i1, i2, i3;
2349 int_range<2> r0, r1, rold;
2350
2351 // Test 1-bit signed integer union.
2352 // [-1,-1] U [0,0] = VARYING.
2353 tree one_bit_type = build_nonstandard_integer_type (1, 0);
2354 wide_int one_bit_min = irange_val_min (one_bit_type);
2355 wide_int one_bit_max = irange_val_max (one_bit_type);
2356 {
2357 int_range<2> min = int_range<2> (one_bit_type, one_bit_min, one_bit_min);
2358 int_range<2> max = int_range<2> (one_bit_type, one_bit_max, one_bit_max);
2359 max.union_ (min);
2360 ASSERT_TRUE (max.varying_p ());
2361 }
2362 // Test that we can set a range of true+false for a 1-bit signed int.
2363 r0 = range_true_and_false (one_bit_type);
2364
2365 // Test inversion of 1-bit signed integers.
2366 {
2367 int_range<2> min = int_range<2> (one_bit_type, one_bit_min, one_bit_min);
2368 int_range<2> max = int_range<2> (one_bit_type, one_bit_max, one_bit_max);
2369 int_range<2> t;
2370 t = min;
2371 t.invert ();
2372 ASSERT_TRUE (t == max);
2373 t = max;
2374 t.invert ();
2375 ASSERT_TRUE (t == min);
2376 }
2377
2378 // Test that NOT(255) is [0..254] in 8-bit land.
2379 int_range<1> not_255 = range_uchar (255, 255, VR_ANTI_RANGE);
2380 ASSERT_TRUE (not_255 == range_uchar (0, 254));
2381
2382 // Test that NOT(0) is [1..255] in 8-bit land.
2383 int_range<2> not_zero = range_nonzero (unsigned_char_type_node);
2384 ASSERT_TRUE (not_zero == range_uchar (1, 255));
2385
2386 // Check that [0,127][0x..ffffff80,0x..ffffffff]
2387 // => ~[128, 0x..ffffff7f].
2388 r0 = range_uint128 (0, 127);
2389 wide_int high = wi::minus_one (128);
2390 // low = -1 - 127 => 0x..ffffff80.
2391 wide_int low = wi::sub (high, wi::uhwi (127, 128));
2392 r1 = int_range<1> (u128_type, low, high); // [0x..ffffff80, 0x..ffffffff]
2393 // r0 = [0,127][0x..ffffff80,0x..ffffffff].
2394 r0.union_ (r1);
2395 // r1 = [128, 0x..ffffff7f].
2396 r1 = int_range<1> (u128_type,
2397 wi::uhwi (128, 128),
2398 wi::sub (wi::minus_one (128), wi::uhwi (128, 128)));
2399 r0.invert ();
2400 ASSERT_TRUE (r0 == r1);
2401
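// Cache the extremes of int and unsigned int; they are used to spell
// out the expected results of the inversions below.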
2402 r0.set_varying (integer_type_node);
2403 wide_int minint = r0.lower_bound ();
2404 wide_int maxint = r0.upper_bound ();
2405
2406 r0.set_varying (short_integer_type_node);
2407
2408 r0.set_varying (unsigned_type_node);
2409 wide_int maxuint = r0.upper_bound ();
2410
2411 // Check that ~[0,5] => [6,MAX] for unsigned int.
2412 r0 = range_uint (0, 5);
2413 r0.invert ();
2414 ASSERT_TRUE (r0 == int_range<1> (unsigned_type_node,
2415 wi::uhwi (6, TYPE_PRECISION (unsigned_type_node)),
2416 maxuint));
2417
2418 // Check that ~[10,MAX] => [0,9] for unsigned int.
2419 r0 = int_range<1> (unsigned_type_node,
2420 wi::uhwi (10, TYPE_PRECISION (unsigned_type_node)),
2421 maxuint);
2422 r0.invert ();
2423 ASSERT_TRUE (r0 == range_uint (0, 9));
2424
2425 // Check that ~[0,5] => [6,MAX] for unsigned 128-bit numbers.
2426 r0 = range_uint128 (0, 5, VR_ANTI_RANGE);
2427 r1 = int_range<1> (u128_type, wi::uhwi (6, 128), wi::minus_one (128));
2428 ASSERT_TRUE (r0 == r1);
2429
2430 // Check that ~[5,5] is really [MIN,4][6,MAX].
2431 r0 = range_int (5, 5, VR_ANTI_RANGE);
2432 r1 = int_range<1> (integer_type_node, minint, INT (4));
2433 r1.union_ (int_range<1> (integer_type_node, INT (6), maxint));
2434 ASSERT_FALSE (r1.undefined_p ());
2435 ASSERT_TRUE (r0 == r1);
2436
2437 r1 = range_int (5, 5);
2438 int_range<2> r2 (r1);
2439 ASSERT_TRUE (r1 == r2);
2440
2443 r1 = range_int (5, 10);
2444 ASSERT_TRUE (r1.contains_p (INT (7)));
2445
2446 r1 = range_char (0, 20);
2447 ASSERT_TRUE (r1.contains_p (SCHAR (15)));
2448 ASSERT_FALSE (r1.contains_p (SCHAR (300)));
2449
2450 // NOT([10,20]) ==> [MIN,9][21,MAX].
2451 r0 = r1 = range_int (10, 20);
2452 r2 = int_range<1> (integer_type_node, minint, INT (9));
2453 r2.union_ (int_range<1> (integer_type_node, INT (21), maxint));
2454 ASSERT_FALSE (r2.undefined_p ());
2455 r1.invert ();
2456 ASSERT_TRUE (r1 == r2);
2457 // Test that NOT(NOT(x)) == x.
2458 r2.invert ();
2459 ASSERT_TRUE (r0 == r2);
2460
2461 // Test that booleans and their inverse work as expected.
2462 r0 = range_zero (boolean_type_node);
2463 ASSERT_TRUE (r0 == range_false ());
2464 r0.invert ();
2465 ASSERT_TRUE (r0 == range_true ());
2466
2467 // Make sure NULL and non-NULL of pointer types work, and that
2468 // inverses of them are consistent.
2469 tree voidp = build_pointer_type (void_type_node);
2470 r0 = range_zero (voidp);
2471 r1 = r0;
2472 r0.invert ();
2473 r0.invert ();
2474 ASSERT_TRUE (r0 == r1);
2475
2476 // [10,20] U [15, 30] => [10, 30].
2477 r0 = range_int (10, 20);
2478 r1 = range_int (15, 30);
2479 r0.union_ (r1);
2480 ASSERT_TRUE (r0 == range_int (10, 30));
2481
2482 // [15,40] U [] => [15,40].
2483 r0 = range_int (15, 40);
2484 r1.set_undefined ();
2485 r0.union_ (r1);
2486 ASSERT_TRUE (r0 == range_int (15, 40));
2487
2488 // [10,20] U [10,10] => [10,20].
2489 r0 = range_int (10, 20);
2490 r1 = range_int (10, 10);
2491 r0.union_ (r1);
2492 ASSERT_TRUE (r0 == range_int (10, 20));
2493
2494 // [10,20] U [9,9] => [9,20].
2495 r0 = range_int (10, 20);
2496 r1 = range_int (9, 9);
2497 r0.union_ (r1);
2498 ASSERT_TRUE (r0 == range_int (9, 20));
2499
2500 // [10,20] ^ [15,30] => [15,20].
2501 r0 = range_int (10, 20);
2502 r1 = range_int (15, 30);
2503 r0.intersect (r1);
2504 ASSERT_TRUE (r0 == range_int (15, 20));
2505
2506 // Test the internal sanity of wide_int with respect to HWIs.
2507 ASSERT_TRUE (wi::max_value (TYPE_PRECISION (boolean_type_node),
2508 TYPE_SIGN (boolean_type_node))
2509 == wi::uhwi (1, TYPE_PRECISION (boolean_type_node)));
2510
2511 // Test zero_p().
2512 r0 = range_int (0, 0);
2513 ASSERT_TRUE (r0.zero_p ());
2514
2515 // Test nonzero_p().
2516 r0 = range_int (0, 0);
2517 r0.invert ();
2518 ASSERT_TRUE (r0.nonzero_p ());
2519
2520 // r0 = ~[1,1]
2521 r0 = range_int (1, 1, VR_ANTI_RANGE);
2522 // r1 = ~[3,3]
2523 r1 = range_int (3, 3, VR_ANTI_RANGE);
2524
2525 // vv = [0,0][2,2][4, MAX]
2526 int_range<3> vv = r0;
2527 vv.intersect (r1);
2528
2529 ASSERT_TRUE (vv.contains_p (UINT (2)));
2530 ASSERT_TRUE (vv.num_pairs () == 3);
2531
2532 r0 = range_int (1, 1);
2533 // And union it with the [0,0][2,2][4,MAX] multi-range.
2534 r0.union_ (vv);
2535 // The result should be [0,2][4,MAX] (i.e. ~[3,3]); either way it must contain 2.
2536 ASSERT_TRUE (r0.contains_p (INT (2)));
2537 }
2538
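// Test the interaction of nonzero-bit masks with varying ranges,
// union, intersection and contains_p.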
2539 static void
2540 range_tests_nonzero_bits ()
2541 {
2542 int_range<2> r0, r1;
2543
2544 // Adding nonzero bits to a varying drops the varying.
2545 r0.set_varying (integer_type_node);
2546 r0.set_nonzero_bits (INT (255));
2547 ASSERT_TRUE (!r0.varying_p ());
2548 // Dropping the nonzero bits brings us back to varying.
2549 r0.set_nonzero_bits (INT (-1));
2550 ASSERT_TRUE (r0.varying_p ());
2551
2552 // Test contains_p with nonzero bits.
2553 r0.set_zero (integer_type_node);
2554 ASSERT_TRUE (r0.contains_p (INT (0)));
2555 ASSERT_FALSE (r0.contains_p (INT (1)));
2556 r0.set_nonzero_bits (INT (0xfe));
2557 ASSERT_FALSE (r0.contains_p (INT (0x100)));
2558 ASSERT_FALSE (r0.contains_p (INT (0x3)));
2559
2560 // Union of nonzero bits.
2561 r0.set_varying (integer_type_node);
2562 r0.set_nonzero_bits (INT (0xf0));
2563 r1.set_varying (integer_type_node);
2564 r1.set_nonzero_bits (INT (0xf));
2565 r0.union_ (r1);
2566 ASSERT_TRUE (r0.get_nonzero_bits () == 0xff);
2567
2568 // Intersect of nonzero bits.
2569 r0 = range_int (0, 255);
2570 r0.set_nonzero_bits (INT (0xfe));
2571 r1.set_varying (integer_type_node);
2572 r1.set_nonzero_bits (INT (0xf0));
2573 r0.intersect (r1);
2574 ASSERT_TRUE (r0.get_nonzero_bits () == 0xf0);
2575
2576 // Intersect where the mask of nonzero bits is implicit from the range.
2577 r0.set_varying (integer_type_node);
2578 r1 = range_int (0, 255);
2579 r0.intersect (r1);
2580 ASSERT_TRUE (r0.get_nonzero_bits () == 0xff);
2581
2582 // The union of a mask of 0xff..ffff00 with a mask of 0xff spans the
2583 // entire domain, and makes the range varying.
2584 r0.set_varying (integer_type_node);
2585 wide_int x = wi::shwi (0xff, TYPE_PRECISION (integer_type_node));
2586 x = wi::bit_not (x);
2587 r0.set_nonzero_bits (x); // 0xff..ff00
2588 r1.set_varying (integer_type_node);
2589 r1.set_nonzero_bits (INT (0xff));
2590 r0.union_ (r1);
2591 ASSERT_TRUE (r0.varying_p ());
2592
2593 // Test that setting a nonzero bit of 1 does not pessimize the range.
2594 r0.set_zero (integer_type_node);
2595 r0.set_nonzero_bits (INT (1));
2596 ASSERT_TRUE (r0.zero_p ());
2597 }
2598
2599 // Build an frange from string endpoints.
2600
2601 static inline frange
2602 frange_float (const char *lb, const char *ub, tree type = float_type_node)
2603 {
2604 REAL_VALUE_TYPE min, max;
2605 gcc_assert (real_from_string (&min, lb) == 0);
2606 gcc_assert (real_from_string (&max, ub) == 0);
2607 return frange (type, min, max);
2608 }
2609
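// Test NAN handling in franges: propagation through union and
// intersection, and the known_isnan / maybe_isnan predicates.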
2610 static void
2611 range_tests_nan ()
2612 {
2613 frange r0, r1;
2614 REAL_VALUE_TYPE q, r;
2615 bool signbit;
2616
2617 // Equal ranges but with differing NAN bits are not equal.
2618 if (HONOR_NANS (float_type_node))
2619 {
2620 r1 = frange_float ("10", "12");
2621 r0 = r1;
2622 ASSERT_EQ (r0, r1);
2623 r0.clear_nan ();
2624 ASSERT_NE (r0, r1);
2625 r0.update_nan ();
2626 ASSERT_EQ (r0, r1);
2627
2628 // [10, 20] NAN ^ [30, 40] NAN = NAN.
2629 r0 = frange_float ("10", "20");
2630 r1 = frange_float ("30", "40");
2631 r0.intersect (r1);
2632 ASSERT_TRUE (r0.known_isnan ());
2633
2634 // [3,5] U [5,10] NAN = ... NAN
2635 r0 = frange_float ("3", "5");
2636 r0.clear_nan ();
2637 r1 = frange_float ("5", "10");
2638 r0.union_ (r1);
2639 ASSERT_TRUE (r0.maybe_isnan ());
2640 }
2641
2642 // [5,6] U NAN = [5,6] NAN.
2643 r0 = frange_float ("5", "6");
2644 r0.clear_nan ();
2645 r1.set_nan (float_type_node);
2646 r0.union_ (r1);
2647 real_from_string (&q, "5");
2648 real_from_string (&r, "6");
2649 ASSERT_TRUE (real_identical (&q, &r0.lower_bound ()));
2650 ASSERT_TRUE (real_identical (&r, &r0.upper_bound ()));
2651 ASSERT_TRUE (r0.maybe_isnan ());
2652
2653 // NAN U NAN = NAN
2654 r0.set_nan (float_type_node);
2655 r1.set_nan (float_type_node);
2656 r0.union_ (r1);
2657 ASSERT_TRUE (r0.known_isnan ());
2658
2659 // [INF, INF] NAN ^ NAN = NAN
2660 r0.set_nan (float_type_node);
2661 r1 = frange_float ("+Inf", "+Inf");
2662 if (!HONOR_NANS (float_type_node))
2663 r1.update_nan ();
2664 r0.intersect (r1);
2665 ASSERT_TRUE (r0.known_isnan ());
2666
2667 // NAN ^ NAN = NAN
2668 r0.set_nan (float_type_node);
2669 r1.set_nan (float_type_node);
2670 r0.intersect (r1);
2671 ASSERT_TRUE (r0.known_isnan ());
2672
2673 // +NAN ^ -NAN = UNDEFINED
2674 r0.set_nan (float_type_node, false);
2675 r1.set_nan (float_type_node, true);
2676 r0.intersect (r1);
2677 ASSERT_TRUE (r0.undefined_p ());
2678
2679 // VARYING ^ NAN = NAN.
2680 r0.set_nan (float_type_node);
2681 r1.set_varying (float_type_node);
2682 r0.intersect (r1);
2683 ASSERT_TRUE (r0.known_isnan ());
2684
2685 // [3,4] ^ NAN = UNDEFINED.
2686 r0 = frange_float ("3", "4");
2687 r0.clear_nan ();
2688 r1.set_nan (float_type_node);
2689 r0.intersect (r1);
2690 ASSERT_TRUE (r0.undefined_p ());
2691
2692 // [-3, 5] ^ NAN = UNDEFINED
2693 r0 = frange_float ("-3", "5");
2694 r0.clear_nan ();
2695 r1.set_nan (float_type_node);
2696 r0.intersect (r1);
2697 ASSERT_TRUE (r0.undefined_p ());
2698
2699 // Setting the NAN bit to yes does not make us a known NAN.
2700 r0.set_varying (float_type_node);
2701 r0.update_nan ();
2702 ASSERT_FALSE (r0.known_isnan ());
2703
2704 // NAN is in a VARYING.
2705 r0.set_varying (float_type_node);
2706 real_nan (&r, "", 1, TYPE_MODE (float_type_node));
2707 REAL_VALUE_TYPE nan = r;
2708 ASSERT_TRUE (r0.contains_p (nan));
2709
2710 // -NAN is in a VARYING.
2711 r0.set_varying (float_type_node);
2712 q = real_value_negate (&r);
2713 REAL_VALUE_TYPE neg_nan = q;
2714 ASSERT_TRUE (r0.contains_p (neg_nan));
2715
2716 // Clearing the NAN on a [] NAN is the empty set.
2717 r0.set_nan (float_type_node);
2718 r0.clear_nan ();
2719 ASSERT_TRUE (r0.undefined_p ());
2720
2721 // [10,20] NAN ^ [21,25] NAN = [NAN]
2722 r0 = frange_float ("10", "20");
2723 r0.update_nan ();
2724 r1 = frange_float ("21", "25");
2725 r1.update_nan ();
2726 r0.intersect (r1);
2727 ASSERT_TRUE (r0.known_isnan ());
2728
2729 // NAN U [5,6] should be [5,6] +-NAN.
2730 r0.set_nan (float_type_node);
2731 r1 = frange_float ("5", "6");
2732 r1.clear_nan ();
2733 r0.union_ (r1);
2734 real_from_string (&q, "5");
2735 real_from_string (&r, "6");
2736 ASSERT_TRUE (real_identical (&q, &r0.lower_bound ()));
2737 ASSERT_TRUE (real_identical (&r, &r0.upper_bound ()));
2738 ASSERT_TRUE (!r0.signbit_p (signbit));
2739 ASSERT_TRUE (r0.maybe_isnan ());
2740
2741 // NAN U NAN shouldn't change anything.
2742 r0.set_nan (float_type_node);
2743 r1.set_nan (float_type_node);
2744 ASSERT_FALSE (r0.union_ (r1));
2745
2746 // [3,5] NAN U NAN shouldn't change anything.
2747 r0 = frange_float ("3", "5");
2748 r1.set_nan (float_type_node);
2749 ASSERT_FALSE (r0.union_ (r1));
2750
2751 // [3,5] U NAN *does* trigger a change.
2752 r0 = frange_float ("3", "5");
2753 r0.clear_nan ();
2754 r1.set_nan (float_type_node);
2755 ASSERT_TRUE (r0.union_ (r1));
2756 }
2757
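// Test the handling of -0.0 and +0.0: containment, union and
// intersection of signed zeros, and the sign bit of ranges that
// include a zero.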
2758 static void
2759 range_tests_signed_zeros ()
2760 {
2761 REAL_VALUE_TYPE zero = dconst0;
2762 REAL_VALUE_TYPE neg_zero = zero;
2763 neg_zero.sign = 1;
2764 frange r0, r1;
2765 bool signbit;
2766
2767 // [0,0] contains [0,0] but not [-0,-0] and vice versa.
2768 r0 = frange_float ("0.0", "0.0");
2769 r1 = frange_float ("-0.0", "-0.0");
2770 ASSERT_TRUE (r0.contains_p (zero));
2771 ASSERT_TRUE (!r0.contains_p (neg_zero));
2772 ASSERT_TRUE (r1.contains_p (neg_zero));
2773 ASSERT_TRUE (!r1.contains_p (zero));
2774
2775 // Test contains_p() when we know the sign of the zero.
2776 r0 = frange_float ("0.0", "0.0");
2777 ASSERT_TRUE (r0.contains_p (zero));
2778 ASSERT_FALSE (r0.contains_p (neg_zero));
2779 r0 = frange_float ("-0.0", "-0.0");
2780 ASSERT_TRUE (r0.contains_p (neg_zero));
2781 ASSERT_FALSE (r0.contains_p (zero));
2782
2783 r0 = frange_float ("-0.0", "0.0");
2784 ASSERT_TRUE (r0.contains_p (neg_zero));
2785 ASSERT_TRUE (r0.contains_p (zero));
2786
2787 r0 = frange_float ("-3", "5");
2788 ASSERT_TRUE (r0.contains_p (neg_zero));
2789 ASSERT_TRUE (r0.contains_p (zero));
2790
2791 // The intersection of zeros that differ in sign is a NAN (or
2792 // undefined if not honoring NANs).
2793 r0 = frange_float ("-0.0", "-0.0");
2794 r1 = frange_float ("0.0", "0.0");
2795 r0.intersect (r1);
2796 if (HONOR_NANS (float_type_node))
2797 ASSERT_TRUE (r0.known_isnan ());
2798 else
2799 ASSERT_TRUE (r0.undefined_p ());
2800
2801 // The union of zeros that differ in sign is a zero with unknown sign.
2802 r0 = frange_float ("0.0", "0.0");
2803 r1 = frange_float ("-0.0", "-0.0");
2804 r0.union_ (r1);
2805 ASSERT_TRUE (r0.zero_p () && !r0.signbit_p (signbit));
2806
2807 // [-0, +0] has an unknown sign.
2808 r0 = frange_float ("-0.0", "0.0");
2809 ASSERT_TRUE (r0.zero_p () && !r0.signbit_p (signbit));
2810
2811 // [-0, +0] ^ [0, 0] is [0, 0]
2812 r0 = frange_float ("-0.0", "0.0");
2813 r1 = frange_float ("0.0", "0.0");
2814 r0.intersect (r1);
2815 ASSERT_TRUE (r0.zero_p ());
2816
2817 r0 = frange_float ("+0", "5");
2818 r0.clear_nan ();
2819 ASSERT_TRUE (r0.signbit_p (signbit) && !signbit);
2820
2821 r0 = frange_float ("-0", "5");
2822 r0.clear_nan ();
2823 ASSERT_TRUE (!r0.signbit_p (signbit));
2824
2825 r0 = frange_float ("-0", "10");
2826 r1 = frange_float ("0", "5");
2827 r0.intersect (r1);
2828 ASSERT_TRUE (real_iszero (&r0.lower_bound (), false));
2829
2830 r0 = frange_float ("-0", "5");
2831 r1 = frange_float ("0", "5");
2832 r0.union_ (r1);
2833 ASSERT_TRUE (real_iszero (&r0.lower_bound (), true));
2834
2835 r0 = frange_float ("-5", "-0");
2836 r0.update_nan ();
2837 r1 = frange_float ("0", "0");
2838 r1.update_nan ();
2839 r0.intersect (r1);
2840 if (HONOR_NANS (float_type_node))
2841 ASSERT_TRUE (r0.known_isnan ());
2842 else
2843 ASSERT_TRUE (r0.undefined_p ());
2844
2845 r0.set_nonnegative (float_type_node);
2846 if (HONOR_NANS (float_type_node))
2847 ASSERT_TRUE (r0.maybe_isnan ());
2848
2849 // A range with a +0 lower bound has a known, clear SIGNBIT.
2850 r0 = frange_float ("0", "10");
2851 r0.clear_nan ();
2852 ASSERT_TRUE (r0.signbit_p (signbit) && !signbit);
2853 }
2854
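// Test the signbit_p predicate on negative, positive and mixed-sign
// ranges.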
2855 static void
2856 range_tests_signbit ()
2857 {
2858 frange r0, r1;
2859 bool signbit;
2860
2861 // Negative numbers should have the SIGNBIT set.
2862 r0 = frange_float ("-5", "-1");
2863 r0.clear_nan ();
2864 ASSERT_TRUE (r0.signbit_p (signbit) && signbit);
2865 // Positive numbers should have the SIGNBIT clear.
2866 r0 = frange_float ("1", "10");
2867 r0.clear_nan ();
2868 ASSERT_TRUE (r0.signbit_p (signbit) && !signbit);
2869 // Numbers spanning both positive and negative should have an
2870 // unknown SIGNBIT.
2871 r0 = frange_float ("-10", "10");
2872 r0.clear_nan ();
2873 ASSERT_TRUE (!r0.signbit_p (signbit));
2874 r0.set_varying (float_type_node);
2875 ASSERT_TRUE (!r0.signbit_p (signbit));
2876 }
2877
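// General frange tests: union, intersection, infinities, and
// round-tripping a range through the global range machinery.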
2878 static void
2879 range_tests_floats ()
2880 {
2881 frange r0, r1;
2882
2883 if (HONOR_NANS (float_type_node))
2884 range_tests_nan ();
2885 range_tests_signbit ();
2886
2887 if (HONOR_SIGNED_ZEROS (float_type_node))
2888 range_tests_signed_zeros ();
2889
2890 // A range of [-INF,+INF] is actually VARYING if no other properties
2891 // are set.
2892 r0 = frange_float ("-Inf", "+Inf");
2893 ASSERT_TRUE (r0.varying_p ());
2894 // ...unless it has some special property...
2895 if (HONOR_NANS (r0.type ()))
2896 {
2897 r0.clear_nan ();
2898 ASSERT_FALSE (r0.varying_p ());
2899 }
2900
2901 // For most architectures, where float and double are different
2902 // sizes, having the same endpoints does not necessarily mean the
2903 // ranges are equal.
2904 if (!types_compatible_p (float_type_node, double_type_node))
2905 {
2906 r0 = frange_float ("3.0", "3.0", float_type_node);
2907 r1 = frange_float ("3.0", "3.0", double_type_node);
2908 ASSERT_NE (r0, r1);
2909 }
2910
2911 // [3,5] U [10,12] = [3,12].
2912 r0 = frange_float ("3", "5");
2913 r1 = frange_float ("10", "12");
2914 r0.union_ (r1);
2915 ASSERT_EQ (r0, frange_float ("3", "12"));
2916
2917 // [5,10] U [4,8] = [4,10]
2918 r0 = frange_float ("5", "10");
2919 r1 = frange_float ("4", "8");
2920 r0.union_ (r1);
2921 ASSERT_EQ (r0, frange_float ("4", "10"));
2922
2923 // [3,5] U [4,10] = [3,10]
2924 r0 = frange_float ("3", "5");
2925 r1 = frange_float ("4", "10");
2926 r0.union_ (r1);
2927 ASSERT_EQ (r0, frange_float ("3", "10"));
2928
2929 // [4,10] U [5,11] = [4,11]
2930 r0 = frange_float ("4", "10");
2931 r1 = frange_float ("5", "11");
2932 r0.union_ (r1);
2933 ASSERT_EQ (r0, frange_float ("4", "11"));
2934
2935 // [3,12] ^ [10,12] = [10,12].
2936 r0 = frange_float ("3", "12");
2937 r1 = frange_float ("10", "12");
2938 r0.intersect (r1);
2939 ASSERT_EQ (r0, frange_float ("10", "12"));
2940
2941 // [10,12] ^ [11,11] = [11,11]
2942 r0 = frange_float ("10", "12");
2943 r1 = frange_float ("11", "11");
2944 r0.intersect (r1);
2945 ASSERT_EQ (r0, frange_float ("11", "11"));
2946
2947 // [10,20] ^ [5,15] = [10,15]
2948 r0 = frange_float ("10", "20");
2949 r1 = frange_float ("5", "15");
2950 r0.intersect (r1);
2951 ASSERT_EQ (r0, frange_float ("10", "15"));
2952
2953 // [10,20] ^ [15,25] = [15,20]
2954 r0 = frange_float ("10", "20");
2955 r1 = frange_float ("15", "25");
2956 r0.intersect (r1);
2957 ASSERT_EQ (r0, frange_float ("15", "20"));
2958
2959 // [10,20] ^ [21,25] = []
2960 r0 = frange_float ("10", "20");
2961 r0.clear_nan ();
2962 r1 = frange_float ("21", "25");
2963 r1.clear_nan ();
2964 r0.intersect (r1);
2965 ASSERT_TRUE (r0.undefined_p ());
2966
2967 if (HONOR_INFINITIES (float_type_node))
2968 {
2969 // Make sure [-Inf, -Inf] doesn't get normalized.
2970 r0 = frange_float ("-Inf", "-Inf");
2971 ASSERT_TRUE (real_isinf (&r0.lower_bound (), true));
2972 ASSERT_TRUE (real_isinf (&r0.upper_bound (), true));
2973 }
2974
2975 // Test that reading back a global range yields the same result as
2976 // what we wrote into it.
2977 tree ssa = make_temp_ssa_name (float_type_node, NULL, "blah");
2978 r0.set_varying (float_type_node);
2979 r0.clear_nan ();
2980 set_range_info (ssa, r0);
2981 get_global_range_query ()->range_of_expr (r1, ssa);
2982 ASSERT_EQ (r0, r1);
2983 }
2984
2985 // Run floating range tests for various combinations of NAN and INF
2986 // support.
2987
2988 static void
2989 range_tests_floats_various ()
2990 {
2991 int save_finite_math_only = flag_finite_math_only;
2992
2993 // Test -ffinite-math-only.
2994 flag_finite_math_only = 1;
2995 range_tests_floats ();
2996 // Test -fno-finite-math-only.
2997 flag_finite_math_only = 0;
2998 range_tests_floats ();
2999
3000 flag_finite_math_only = save_finite_math_only;
3001 }
3002
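// Run all the value range selftests.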
3003 void
3004 range_tests ()
3005 {
3006 range_tests_irange3 ();
3007 range_tests_int_range_max ();
3008 range_tests_strict_enum ();
3009 range_tests_nonzero_bits ();
3010 range_tests_floats_various ();
3011 range_tests_misc ();
3012 }
3013
3014 } // namespace selftest
3015
3016 #endif // CHECKING_P