1 /* Support routines for value ranges.
2 Copyright (C) 2019-2024 Free Software Foundation, Inc.
3 Major hacks by Aldy Hernandez <aldyh@redhat.com> and
4 Andrew MacLeod <amacleod@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
29 #include "tree-pretty-print.h"
30 #include "value-range-pretty-print.h"
31 #include "fold-const.h"
32 #include "gimple-range.h"
34 // Return the bitmask inherent in a range.
37 get_bitmask_from_range (tree type
,
38 const wide_int
&min
, const wide_int
&max
)
40 unsigned prec
= TYPE_PRECISION (type
);
42 // All the bits of a singleton are known.
45 wide_int mask
= wi::zero (prec
);
47 return irange_bitmask (value
, mask
);
50 wide_int xorv
= min
^ max
;
53 xorv
= wi::mask (prec
- wi::clz (xorv
), false, prec
);
55 return irange_bitmask (wi::zero (prec
), min
| xorv
);
59 irange::accept (const vrange_visitor
&v
) const
65 Value_Range::dump (FILE *out
) const
70 fprintf (out
, "NULL");
74 debug (const Value_Range
&r
)
77 fprintf (stderr
, "\n");
81 debug (const irange_bitmask
&bm
)
84 fprintf (stderr
, "\n");
87 // Definitions for unsupported_range.
90 unsupported_range::accept (const vrange_visitor
&v
) const
96 vrange::update_bitmask (const class irange_bitmask
&)
101 vrange::get_bitmask () const
103 // Return all unknown bits for the given precision.
104 return irange_bitmask (TYPE_PRECISION (type ()));
108 unsupported_range::contains_p (tree
) const
114 unsupported_range::singleton_p (tree
*) const
120 unsupported_range::set (tree min
, tree
, value_range_kind
)
122 set_varying (TREE_TYPE (min
));
126 unsupported_range::type () const
128 return void_type_node
;
132 unsupported_range::supports_type_p (const_tree
) const
138 unsupported_range::set_undefined ()
140 m_kind
= VR_UNDEFINED
;
144 unsupported_range::set_varying (tree
)
150 unsupported_range::union_ (const vrange
&v
)
152 const unsupported_range
&r
= as_a
<unsupported_range
> (v
);
154 if (r
.undefined_p () || varying_p ())
156 if (undefined_p () || r
.varying_p ())
166 unsupported_range::intersect (const vrange
&v
)
168 const unsupported_range
&r
= as_a
<unsupported_range
> (v
);
170 if (undefined_p () || r
.varying_p ())
172 if (r
.undefined_p ())
187 unsupported_range::zero_p () const
193 unsupported_range::nonzero_p () const
199 unsupported_range::set_nonzero (tree type
)
205 unsupported_range::set_zero (tree type
)
211 unsupported_range::set_nonnegative (tree type
)
217 unsupported_range::fits_p (const vrange
&) const
223 unsupported_range::operator= (const unsupported_range
&r
)
225 if (r
.undefined_p ())
227 else if (r
.varying_p ())
228 set_varying (void_type_node
);
235 unsupported_range::lbound () const
241 unsupported_range::ubound () const
246 // Assignment operator for generic ranges. Copying incompatible types
250 vrange::operator= (const vrange
&src
)
252 if (is_a
<irange
> (src
))
253 as_a
<irange
> (*this) = as_a
<irange
> (src
);
254 else if (is_a
<prange
> (src
))
255 as_a
<prange
> (*this) = as_a
<prange
> (src
);
256 else if (is_a
<frange
> (src
))
257 as_a
<frange
> (*this) = as_a
<frange
> (src
);
260 gcc_checking_assert (is_a
<unsupported_range
> (src
));
266 // Equality operator for generic ranges.
269 vrange::operator== (const vrange
&src
) const
271 if (is_a
<irange
> (src
))
272 return as_a
<irange
> (*this) == as_a
<irange
> (src
);
273 if (is_a
<prange
> (src
))
274 return as_a
<prange
> (*this) == as_a
<prange
> (src
);
275 if (is_a
<frange
> (src
))
276 return as_a
<frange
> (*this) == as_a
<frange
> (src
);
280 // Wrapper for vrange_printer to dump a range to a file.
283 vrange::dump (FILE *file
) const
285 pretty_printer buffer
;
286 pp_needs_newline (&buffer
) = true;
287 buffer
.buffer
->stream
= file
;
288 vrange_printer
vrange_pp (&buffer
);
289 this->accept (vrange_pp
);
294 irange_bitmask::dump (FILE *file
) const
296 char buf
[WIDE_INT_PRINT_BUFFER_SIZE
], *p
;
297 pretty_printer buffer
;
299 pp_needs_newline (&buffer
) = true;
300 buffer
.buffer
->stream
= file
;
301 pp_string (&buffer
, "MASK ");
302 unsigned len_mask
, len_val
;
303 if (print_hex_buf_size (m_mask
, &len_mask
)
304 | print_hex_buf_size (m_value
, &len_val
))
305 p
= XALLOCAVEC (char, MAX (len_mask
, len_val
));
308 print_hex (m_mask
, p
);
309 pp_string (&buffer
, p
);
310 pp_string (&buffer
, " VALUE ");
311 print_hex (m_value
, p
);
312 pp_string (&buffer
, p
);
320 add_vrange (const vrange
&v
, inchash::hash
&hstate
,
323 if (v
.undefined_p ())
325 hstate
.add_int (VR_UNDEFINED
);
328 // Types are ignored throughout to inhibit two ranges being equal
329 // but having different hash values. This can happen when two
330 // ranges are equal and their types are different (but
331 // types_compatible_p is true).
332 if (is_a
<irange
> (v
))
334 const irange
&r
= as_a
<irange
> (v
);
336 hstate
.add_int (VR_VARYING
);
338 hstate
.add_int (VR_RANGE
);
339 for (unsigned i
= 0; i
< r
.num_pairs (); ++i
)
341 hstate
.add_wide_int (r
.lower_bound (i
));
342 hstate
.add_wide_int (r
.upper_bound (i
));
344 irange_bitmask bm
= r
.get_bitmask ();
345 hstate
.add_wide_int (bm
.value ());
346 hstate
.add_wide_int (bm
.mask ());
349 if (is_a
<prange
> (v
))
351 const prange
&r
= as_a
<prange
> (v
);
353 hstate
.add_int (VR_VARYING
);
356 hstate
.add_int (VR_RANGE
);
357 hstate
.add_wide_int (r
.lower_bound ());
358 hstate
.add_wide_int (r
.upper_bound ());
359 irange_bitmask bm
= r
.get_bitmask ();
360 hstate
.add_wide_int (bm
.value ());
361 hstate
.add_wide_int (bm
.mask ());
365 if (is_a
<frange
> (v
))
367 const frange
&r
= as_a
<frange
> (v
);
368 if (r
.known_isnan ())
369 hstate
.add_int (VR_NAN
);
372 hstate
.add_int (r
.varying_p () ? VR_VARYING
: VR_RANGE
);
373 hstate
.add_real_value (r
.lower_bound ());
374 hstate
.add_real_value (r
.upper_bound ());
376 nan_state nan
= r
.get_nan_state ();
377 hstate
.add_int (nan
.pos_p ());
378 hstate
.add_int (nan
.neg_p ());
384 } //namespace inchash
387 irange::nonnegative_p () const
389 return wi::ge_p (lower_bound (), 0, TYPE_SIGN (type ()));
393 irange::nonpositive_p () const
395 return wi::le_p (upper_bound (), 0, TYPE_SIGN (type ()));
399 irange::supports_type_p (const_tree type
) const
401 return supports_p (type
);
404 // Return TRUE if R fits in THIS.
407 irange::fits_p (const vrange
&r
) const
409 return m_max_ranges
>= as_a
<irange
> (r
).num_pairs ();
413 irange::set_nonnegative (tree type
)
416 wi::zero (TYPE_PRECISION (type
)),
417 wi::to_wide (TYPE_MAX_VALUE (type
)));
420 // Prange implementation.
423 prange::accept (const vrange_visitor
&v
) const
429 prange::set_nonnegative (tree type
)
432 wi::zero (TYPE_PRECISION (type
)),
433 wi::max_value (TYPE_PRECISION (type
), UNSIGNED
));
437 prange::set (tree min
, tree max
, value_range_kind kind
)
439 return set (TREE_TYPE (min
), wi::to_wide (min
), wi::to_wide (max
), kind
);
443 prange::set (tree type
, const wide_int
&min
, const wide_int
&max
,
444 value_range_kind kind
)
446 if (kind
== VR_UNDEFINED
)
451 if (kind
== VR_VARYING
)
456 if (kind
== VR_ANTI_RANGE
)
458 gcc_checking_assert (min
== 0 && max
== 0);
465 if (m_min
== 0 && m_max
== -1)
468 m_bitmask
.set_unknown (TYPE_PRECISION (type
));
475 m_bitmask
= get_bitmask_from_range (type
, min
, max
);
481 prange::contains_p (const wide_int
&w
) const
489 return (wi::le_p (lower_bound (), w
, UNSIGNED
)
490 && wi::ge_p (upper_bound (), w
, UNSIGNED
));
494 prange::singleton_p (tree
*result
) const
496 if (m_kind
== VR_RANGE
&& lower_bound () == upper_bound ())
499 *result
= wide_int_to_tree (type (), m_min
);
506 prange::lbound () const
508 return wide_int_to_tree (type (), m_min
);
512 prange::ubound () const
514 return wide_int_to_tree (type (), m_max
);
518 prange::union_ (const vrange
&v
)
520 const prange
&r
= as_a
<prange
> (v
);
522 if (r
.undefined_p ())
535 set_varying (type ());
539 wide_int new_lb
= wi::min (r
.lower_bound (), lower_bound (), UNSIGNED
);
540 wide_int new_ub
= wi::max (r
.upper_bound (), upper_bound (), UNSIGNED
);
541 prange
new_range (type (), new_lb
, new_ub
);
542 new_range
.m_bitmask
.union_ (m_bitmask
);
543 new_range
.m_bitmask
.union_ (r
.m_bitmask
);
544 if (new_range
.varying_compatible_p ())
546 set_varying (type ());
550 new_range
.verify_range ();
551 if (new_range
== *this)
558 prange::intersect (const vrange
&v
)
560 const prange
&r
= as_a
<prange
> (v
);
561 gcc_checking_assert (undefined_p () || r
.undefined_p ()
562 || range_compatible_p (type (), r
.type ()));
566 if (r
.undefined_p ())
580 m_min
= wi::max (r
.lower_bound (), lower_bound (), UNSIGNED
);
581 m_max
= wi::min (r
.upper_bound (), upper_bound (), UNSIGNED
);
582 if (wi::gt_p (m_min
, m_max
, UNSIGNED
))
588 // Intersect all bitmasks: the old one, the new one, and the other operand's.
589 irange_bitmask new_bitmask
= get_bitmask_from_range (m_type
, m_min
, m_max
);
590 m_bitmask
.intersect (new_bitmask
);
591 m_bitmask
.intersect (r
.m_bitmask
);
601 prange::operator= (const prange
&src
)
607 m_bitmask
= src
.m_bitmask
;
614 prange::operator== (const prange
&src
) const
616 if (m_kind
== src
.m_kind
)
622 return types_compatible_p (type (), src
.type ());
624 return (m_min
== src
.m_min
&& m_max
== src
.m_max
625 && m_bitmask
== src
.m_bitmask
);
633 gcc_checking_assert (!undefined_p () && !varying_p ());
635 wide_int new_lb
, new_ub
;
636 unsigned prec
= TYPE_PRECISION (type ());
637 wide_int type_min
= wi::zero (prec
);
638 wide_int type_max
= wi::max_value (prec
, UNSIGNED
);
639 wi::overflow_type ovf
;
641 if (lower_bound () == type_min
)
643 new_lb
= wi::add (upper_bound (), 1, UNSIGNED
, &ovf
);
647 set (type (), new_lb
, new_ub
);
649 else if (upper_bound () == type_max
)
651 wi::overflow_type ovf
;
653 new_ub
= wi::sub (lower_bound (), 1, UNSIGNED
, &ovf
);
656 set (type (), new_lb
, new_ub
);
659 set_varying (type ());
663 prange::verify_range () const
665 gcc_checking_assert (m_discriminator
== VR_PRANGE
);
667 if (m_kind
== VR_UNDEFINED
)
670 gcc_checking_assert (supports_p (type ()));
672 if (m_kind
== VR_VARYING
)
674 gcc_checking_assert (varying_compatible_p ());
677 gcc_checking_assert (!varying_compatible_p ());
678 gcc_checking_assert (m_kind
== VR_RANGE
);
682 prange::update_bitmask (const irange_bitmask
&bm
)
684 gcc_checking_assert (!undefined_p ());
686 // If all the bits are known, this is a singleton.
689 set (type (), m_bitmask
.value (), m_bitmask
.value ());
693 // Drop VARYINGs with known bits to a plain range.
694 if (m_kind
== VR_VARYING
&& !bm
.unknown_p ())
698 if (varying_compatible_p ())
706 // Frange implementation.
709 frange::accept (const vrange_visitor
&v
) const
715 frange::fits_p (const vrange
&) const
720 // Flush denormal endpoints to the appropriate 0.0.
723 frange::flush_denormals_to_zero ()
725 if (undefined_p () || known_isnan ())
728 machine_mode mode
= TYPE_MODE (type ());
729 // Flush [x, -DENORMAL] to [x, -0.0].
730 if (real_isdenormal (&m_max
, mode
) && real_isneg (&m_max
))
732 if (HONOR_SIGNED_ZEROS (m_type
))
737 // Flush [+DENORMAL, x] to [+0.0, x].
738 if (real_isdenormal (&m_min
, mode
) && !real_isneg (&m_min
))
742 // Setter for franges.
745 frange::set (tree type
,
746 const REAL_VALUE_TYPE
&min
, const REAL_VALUE_TYPE
&max
,
747 const nan_state
&nan
, value_range_kind kind
)
764 gcc_checking_assert (!real_isnan (&min
) && !real_isnan (&max
));
770 if (HONOR_NANS (m_type
))
772 m_pos_nan
= nan
.pos_p ();
773 m_neg_nan
= nan
.neg_p ();
781 if (!MODE_HAS_SIGNED_ZEROS (TYPE_MODE (m_type
)))
783 if (real_iszero (&m_min
, 1))
785 if (real_iszero (&m_max
, 1))
788 else if (!HONOR_SIGNED_ZEROS (m_type
))
790 if (real_iszero (&m_max
, 1))
792 if (real_iszero (&m_min
, 0))
796 // For -ffinite-math-only we can drop ranges outside the
797 // representable numbers to min/max for the type.
798 if (!HONOR_INFINITIES (m_type
))
800 REAL_VALUE_TYPE min_repr
= frange_val_min (m_type
);
801 REAL_VALUE_TYPE max_repr
= frange_val_max (m_type
);
802 if (real_less (&m_min
, &min_repr
))
804 else if (real_less (&max_repr
, &m_min
))
806 if (real_less (&max_repr
, &m_max
))
808 else if (real_less (&m_max
, &min_repr
))
812 // Check for swapped ranges.
813 gcc_checking_assert (real_compare (LE_EXPR
, &min
, &max
));
818 // Setter for an frange defaulting the NAN possibility to +-NAN when
822 frange::set (tree type
,
823 const REAL_VALUE_TYPE
&min
, const REAL_VALUE_TYPE
&max
,
824 value_range_kind kind
)
826 set (type
, min
, max
, nan_state (true), kind
);
830 frange::set (tree min
, tree max
, value_range_kind kind
)
832 set (TREE_TYPE (min
),
833 *TREE_REAL_CST_PTR (min
), *TREE_REAL_CST_PTR (max
), kind
);
836 // Normalize range to VARYING or UNDEFINED, or vice versa. Return
837 // TRUE if anything changed.
839 // A range with no known properties can be dropped to VARYING.
840 // Similarly, a VARYING with any properties should be dropped to a
841 // VR_RANGE. Normalizing ranges upon changing them ensures there is
842 // only one representation for a given range.
845 frange::normalize_kind ()
847 if (m_kind
== VR_RANGE
848 && frange_val_is_min (m_min
, m_type
)
849 && frange_val_is_max (m_max
, m_type
))
851 if (!HONOR_NANS (m_type
) || (m_pos_nan
&& m_neg_nan
))
853 set_varying (m_type
);
857 else if (m_kind
== VR_VARYING
)
859 if (HONOR_NANS (m_type
) && (!m_pos_nan
|| !m_neg_nan
))
862 m_min
= frange_val_min (m_type
);
863 m_max
= frange_val_max (m_type
);
869 else if (m_kind
== VR_NAN
&& !m_pos_nan
&& !m_neg_nan
)
874 // Union or intersect the zero endpoints of two ranges. For example:
875 // [-0, x] U [+0, x] => [-0, x]
876 // [ x, -0] U [ x, +0] => [ x, +0]
877 // [-0, x] ^ [+0, x] => [+0, x]
878 // [ x, -0] ^ [ x, +0] => [ x, -0]
880 // UNION_P is true when performing a union, or false when intersecting.
883 frange::combine_zeros (const frange
&r
, bool union_p
)
885 gcc_checking_assert (!undefined_p () && !known_isnan ());
887 bool changed
= false;
888 if (real_iszero (&m_min
) && real_iszero (&r
.m_min
)
889 && real_isneg (&m_min
) != real_isneg (&r
.m_min
))
891 m_min
.sign
= union_p
;
894 if (real_iszero (&m_max
) && real_iszero (&r
.m_max
)
895 && real_isneg (&m_max
) != real_isneg (&r
.m_max
))
897 m_max
.sign
= !union_p
;
900 // If the signs are swapped, the resulting range is empty.
901 if (m_min
.sign
== 0 && m_max
.sign
== 1)
912 // Union two ranges when one is known to be a NAN.
915 frange::union_nans (const frange
&r
)
917 gcc_checking_assert (known_isnan () || r
.known_isnan ());
919 bool changed
= false;
920 if (known_isnan () && m_kind
!= r
.m_kind
)
927 if (m_pos_nan
!= r
.m_pos_nan
|| m_neg_nan
!= r
.m_neg_nan
)
929 m_pos_nan
|= r
.m_pos_nan
;
930 m_neg_nan
|= r
.m_neg_nan
;
942 frange::union_ (const vrange
&v
)
944 const frange
&r
= as_a
<frange
> (v
);
946 if (r
.undefined_p () || varying_p ())
948 if (undefined_p () || r
.varying_p ())
955 if (known_isnan () || r
.known_isnan ())
956 return union_nans (r
);
957 bool changed
= false;
958 if (m_pos_nan
!= r
.m_pos_nan
|| m_neg_nan
!= r
.m_neg_nan
)
960 m_pos_nan
|= r
.m_pos_nan
;
961 m_neg_nan
|= r
.m_neg_nan
;
965 // Combine endpoints.
966 if (real_less (&r
.m_min
, &m_min
))
971 if (real_less (&m_max
, &r
.m_max
))
977 if (HONOR_SIGNED_ZEROS (m_type
))
978 changed
|= combine_zeros (r
, true);
980 changed
|= normalize_kind ();
984 // Intersect two ranges when one is known to be a NAN.
987 frange::intersect_nans (const frange
&r
)
989 gcc_checking_assert (known_isnan () || r
.known_isnan ());
991 m_pos_nan
&= r
.m_pos_nan
;
992 m_neg_nan
&= r
.m_neg_nan
;
1003 frange::intersect (const vrange
&v
)
1005 const frange
&r
= as_a
<frange
> (v
);
1007 if (undefined_p () || r
.varying_p ())
1009 if (r
.undefined_p ())
1020 // Combine NAN info.
1021 if (known_isnan () || r
.known_isnan ())
1022 return intersect_nans (r
);
1023 bool changed
= false;
1024 if (m_pos_nan
!= r
.m_pos_nan
|| m_neg_nan
!= r
.m_neg_nan
)
1026 m_pos_nan
&= r
.m_pos_nan
;
1027 m_neg_nan
&= r
.m_neg_nan
;
1031 // Combine endpoints.
1032 if (real_less (&m_min
, &r
.m_min
))
1037 if (real_less (&r
.m_max
, &m_max
))
1042 // If the endpoints are swapped, the resulting range is empty.
1043 if (real_less (&m_max
, &m_min
))
1054 if (HONOR_SIGNED_ZEROS (m_type
))
1055 changed
|= combine_zeros (r
, false);
1057 changed
|= normalize_kind ();
1062 frange::operator= (const frange
&src
)
1064 m_kind
= src
.m_kind
;
1065 m_type
= src
.m_type
;
1068 m_pos_nan
= src
.m_pos_nan
;
1069 m_neg_nan
= src
.m_neg_nan
;
1077 frange::operator== (const frange
&src
) const
1079 if (m_kind
== src
.m_kind
)
1085 return types_compatible_p (m_type
, src
.m_type
);
1087 bool nan1
= known_isnan ();
1088 bool nan2
= src
.known_isnan ();
1092 return (m_pos_nan
== src
.m_pos_nan
1093 && m_neg_nan
== src
.m_neg_nan
);
1097 return (real_identical (&m_min
, &src
.m_min
)
1098 && real_identical (&m_max
, &src
.m_max
)
1099 && m_pos_nan
== src
.m_pos_nan
1100 && m_neg_nan
== src
.m_neg_nan
1101 && types_compatible_p (m_type
, src
.m_type
));
1106 // Return TRUE if range contains R.
1109 frange::contains_p (const REAL_VALUE_TYPE
&r
) const
1111 gcc_checking_assert (m_kind
!= VR_ANTI_RANGE
);
1119 if (real_isnan (&r
))
1122 if (!m_pos_nan
&& !m_neg_nan
)
1124 // Both +NAN and -NAN are present.
1125 if (m_pos_nan
&& m_neg_nan
)
1127 return m_neg_nan
== r
.sign
;
1132 if (real_compare (GE_EXPR
, &r
, &m_min
) && real_compare (LE_EXPR
, &r
, &m_max
))
1134 // Make sure the signs are equal for signed zeros.
1135 if (HONOR_SIGNED_ZEROS (m_type
) && real_iszero (&r
))
1136 return r
.sign
== m_min
.sign
|| r
.sign
== m_max
.sign
;
1142 // If range is a singleton, place it in RESULT and return TRUE. If
1143 // RESULT is NULL, just return TRUE.
1145 // A NAN can never be a singleton.
1148 frange::internal_singleton_p (REAL_VALUE_TYPE
*result
) const
1150 if (m_kind
== VR_RANGE
&& real_identical (&m_min
, &m_max
))
1152 // Return false for any singleton that may be a NAN.
1153 if (HONOR_NANS (m_type
) && maybe_isnan ())
1156 if (MODE_COMPOSITE_P (TYPE_MODE (m_type
)))
1158 // For IBM long doubles, if the value is +-Inf or is exactly
1159 // representable in double, the other double could be +0.0
1160 // or -0.0. Since this means there is more than one way to
1161 // represent a value, return false to avoid propagating it.
1162 // See libgcc/config/rs6000/ibm-ldouble-format for details.
1163 if (real_isinf (&m_min
))
1166 real_convert (&r
, DFmode
, &m_min
);
1167 if (real_identical (&r
, &m_min
))
1179 frange::singleton_p (tree
*result
) const
1181 if (internal_singleton_p ())
1184 *result
= build_real (m_type
, m_min
);
1191 frange::singleton_p (REAL_VALUE_TYPE
&r
) const
1193 return internal_singleton_p (&r
);
1197 frange::supports_type_p (const_tree type
) const
1199 return supports_p (type
);
1203 frange::verify_range ()
1205 if (!undefined_p ())
1206 gcc_checking_assert (HONOR_NANS (m_type
) || !maybe_isnan ());
1210 gcc_checking_assert (!m_type
);
1213 gcc_checking_assert (m_type
);
1214 gcc_checking_assert (frange_val_is_min (m_min
, m_type
));
1215 gcc_checking_assert (frange_val_is_max (m_max
, m_type
));
1216 if (HONOR_NANS (m_type
))
1217 gcc_checking_assert (m_pos_nan
&& m_neg_nan
);
1219 gcc_checking_assert (!m_pos_nan
&& !m_neg_nan
);
1222 gcc_checking_assert (m_type
);
1225 gcc_checking_assert (m_type
);
1226 gcc_checking_assert (m_pos_nan
|| m_neg_nan
);
1232 // NANs cannot appear in the endpoints of a range.
1233 gcc_checking_assert (!real_isnan (&m_min
) && !real_isnan (&m_max
));
1235 // Make sure we don't have swapped ranges.
1236 gcc_checking_assert (!real_less (&m_max
, &m_min
));
1238 // [ +0.0, -0.0 ] is nonsensical.
1239 gcc_checking_assert (!(real_iszero (&m_min
, 0) && real_iszero (&m_max
, 1)));
1241 // If all the properties are clear, we better not span the entire
1242 // domain, because that would make us varying.
1243 if (m_pos_nan
&& m_neg_nan
)
1244 gcc_checking_assert (!frange_val_is_min (m_min
, m_type
)
1245 || !frange_val_is_max (m_max
, m_type
));
1248 // We can't do much with nonzeros yet.
1250 frange::set_nonzero (tree type
)
1255 // We can't do much with nonzeros yet.
1257 frange::nonzero_p () const
1262 // Set range to [+0.0, +0.0] if honoring signed zeros, or [0.0, 0.0]
1266 frange::set_zero (tree type
)
1268 if (HONOR_SIGNED_ZEROS (type
))
1270 set (type
, dconstm0
, dconst0
);
1274 set (type
, dconst0
, dconst0
);
1277 // Return TRUE for any zero regardless of sign.
1280 frange::zero_p () const
1282 return (m_kind
== VR_RANGE
1283 && real_iszero (&m_min
)
1284 && real_iszero (&m_max
));
1287 // Set the range to non-negative numbers, that is [+0.0, +INF].
1289 // The NAN in the resulting range (if HONOR_NANS) has a varying sign
1290 // as there are no guarantees in IEEE 754 wrt to the sign of a NAN,
1291 // except for copy, abs, and copysign. It is the responsibility of
1292 // the caller to set the NAN's sign if desired.
1295 frange::set_nonnegative (tree type
)
1297 set (type
, dconst0
, frange_val_max (type
));
1301 frange::lbound () const
1303 return build_real (type (), lower_bound ());
1307 frange::ubound () const
1309 return build_real (type (), upper_bound ());
1312 // Here we copy between any two irange's.
1315 irange::operator= (const irange
&src
)
1317 int needed
= src
.num_pairs ();
1318 maybe_resize (needed
);
1321 unsigned lim
= src
.m_num_ranges
;
1322 if (lim
> m_max_ranges
)
1325 for (x
= 0; x
< lim
* 2; ++x
)
1326 m_base
[x
] = src
.m_base
[x
];
1328 // If the range didn't fit, the last range should cover the rest.
1329 if (lim
!= src
.m_num_ranges
)
1330 m_base
[x
- 1] = src
.m_base
[src
.m_num_ranges
* 2 - 1];
1333 m_type
= src
.m_type
;
1334 m_kind
= src
.m_kind
;
1335 m_bitmask
= src
.m_bitmask
;
1336 if (m_max_ranges
== 1)
1343 static value_range_kind
1344 get_legacy_range (const irange
&r
, tree
&min
, tree
&max
)
1346 if (r
.undefined_p ())
1350 return VR_UNDEFINED
;
1353 tree type
= r
.type ();
1356 min
= wide_int_to_tree (type
, r
.lower_bound ());
1357 max
= wide_int_to_tree (type
, r
.upper_bound ());
1361 unsigned int precision
= TYPE_PRECISION (type
);
1362 signop sign
= TYPE_SIGN (type
);
1363 if (r
.num_pairs () > 1
1365 && r
.lower_bound () == wi::min_value (precision
, sign
)
1366 && r
.upper_bound () == wi::max_value (precision
, sign
))
1368 int_range
<3> inv (r
);
1370 min
= wide_int_to_tree (type
, inv
.lower_bound (0));
1371 max
= wide_int_to_tree (type
, inv
.upper_bound (0));
1372 return VR_ANTI_RANGE
;
1375 min
= wide_int_to_tree (type
, r
.lower_bound ());
1376 max
= wide_int_to_tree (type
, r
.upper_bound ());
1380 static value_range_kind
1381 get_legacy_range (const prange
&r
, tree
&min
, tree
&max
)
1383 if (r
.undefined_p ())
1387 return VR_UNDEFINED
;
1390 tree type
= r
.type ();
1399 min
= max
= r
.lbound ();
1404 min
= max
= build_zero_cst (type
);
1405 return VR_ANTI_RANGE
;
1412 // Given a range in V, return an old-style legacy range consisting of
1413 // a value_range_kind with a MIN/MAX. This is to maintain
1414 // compatibility with passes that still depend on VR_ANTI_RANGE, and
1415 // only works for integers and pointers.
1418 get_legacy_range (const vrange
&v
, tree
&min
, tree
&max
)
1420 if (is_a
<irange
> (v
))
1421 return get_legacy_range (as_a
<irange
> (v
), min
, max
);
1423 return get_legacy_range (as_a
<prange
> (v
), min
, max
);
1426 /* Set value range to the canonical form of {VRTYPE, MIN, MAX, EQUIV}.
1427 This means adjusting VRTYPE, MIN and MAX representing the case of a
1428 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
1429 as anti-range ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
1430 In corner cases where MAX+1 or MIN-1 wraps this will fall back
1432 This routine exists to ease canonicalization in the case where we
1433 extract ranges from var + CST op limit. */
1436 irange::set (tree type
, const wide_int
&min
, const wide_int
&max
,
1437 value_range_kind kind
)
1439 unsigned prec
= TYPE_PRECISION (type
);
1440 signop sign
= TYPE_SIGN (type
);
1441 wide_int min_value
= wi::min_value (prec
, sign
);
1442 wide_int max_value
= wi::max_value (prec
, sign
);
1445 m_bitmask
.set_unknown (prec
);
1447 if (kind
== VR_RANGE
)
1452 if (min
== min_value
&& max
== max_value
)
1453 m_kind
= VR_VARYING
;
1459 gcc_checking_assert (kind
== VR_ANTI_RANGE
);
1460 gcc_checking_assert (m_max_ranges
> 1);
1462 m_kind
= VR_UNDEFINED
;
1464 wi::overflow_type ovf
;
1467 lim
= wi::add (min
, -1, sign
, &ovf
);
1469 lim
= wi::sub (min
, 1, sign
, &ovf
);
1474 m_base
[0] = min_value
;
1479 lim
= wi::sub (max
, -1, sign
, &ovf
);
1481 lim
= wi::add (max
, 1, sign
, &ovf
);
1485 m_base
[m_num_ranges
* 2] = lim
;
1486 m_base
[m_num_ranges
* 2 + 1] = max_value
;
1496 irange::set (tree min
, tree max
, value_range_kind kind
)
1498 if (POLY_INT_CST_P (min
) || POLY_INT_CST_P (max
))
1500 set_varying (TREE_TYPE (min
));
1504 gcc_checking_assert (TREE_CODE (min
) == INTEGER_CST
);
1505 gcc_checking_assert (TREE_CODE (max
) == INTEGER_CST
);
1507 return set (TREE_TYPE (min
), wi::to_wide (min
), wi::to_wide (max
), kind
);
1510 // Check the validity of the range.
1513 irange::verify_range ()
1515 gcc_checking_assert (m_discriminator
== VR_IRANGE
);
1516 if (m_kind
== VR_UNDEFINED
)
1518 gcc_checking_assert (m_num_ranges
== 0);
1521 gcc_checking_assert (m_num_ranges
<= m_max_ranges
);
1523 // Legacy allowed these to represent VARYING for unknown types.
1524 // Leave this in for now, until all users are converted. Eventually
1525 // we should abort in set_varying.
1526 if (m_kind
== VR_VARYING
&& m_type
== error_mark_node
)
1529 unsigned prec
= TYPE_PRECISION (m_type
);
1530 if (m_kind
== VR_VARYING
)
1532 gcc_checking_assert (m_bitmask
.unknown_p ());
1533 gcc_checking_assert (m_num_ranges
== 1);
1534 gcc_checking_assert (varying_compatible_p ());
1535 gcc_checking_assert (lower_bound ().get_precision () == prec
);
1536 gcc_checking_assert (upper_bound ().get_precision () == prec
);
1539 gcc_checking_assert (m_num_ranges
!= 0);
1540 gcc_checking_assert (!varying_compatible_p ());
1541 for (unsigned i
= 0; i
< m_num_ranges
; ++i
)
1543 wide_int lb
= lower_bound (i
);
1544 wide_int ub
= upper_bound (i
);
1545 gcc_checking_assert (lb
.get_precision () == prec
);
1546 gcc_checking_assert (ub
.get_precision () == prec
);
1547 int c
= wi::cmp (lb
, ub
, TYPE_SIGN (m_type
));
1548 gcc_checking_assert (c
== 0 || c
== -1);
1550 m_bitmask
.verify_mask ();
1554 irange::operator== (const irange
&other
) const
1556 if (m_num_ranges
!= other
.m_num_ranges
)
1559 if (m_num_ranges
== 0)
1562 signop sign1
= TYPE_SIGN (type ());
1563 signop sign2
= TYPE_SIGN (other
.type ());
1565 for (unsigned i
= 0; i
< m_num_ranges
; ++i
)
1567 widest_int lb
= widest_int::from (lower_bound (i
), sign1
);
1568 widest_int ub
= widest_int::from (upper_bound (i
), sign1
);
1569 widest_int lb_other
= widest_int::from (other
.lower_bound (i
), sign2
);
1570 widest_int ub_other
= widest_int::from (other
.upper_bound (i
), sign2
);
1571 if (lb
!= lb_other
|| ub
!= ub_other
)
1575 irange_bitmask bm1
= get_bitmask ();
1576 irange_bitmask bm2
= other
.get_bitmask ();
1577 widest_int tmp1
= widest_int::from (bm1
.mask (), sign1
);
1578 widest_int tmp2
= widest_int::from (bm2
.mask (), sign2
);
1581 if (bm1
.unknown_p ())
1583 tmp1
= widest_int::from (bm1
.value (), sign1
);
1584 tmp2
= widest_int::from (bm2
.value (), sign2
);
1585 return tmp1
== tmp2
;
1588 /* If range is a singleton, place it in RESULT and return TRUE. */
1591 irange::singleton_p (tree
*result
) const
1593 if (num_pairs () == 1 && lower_bound () == upper_bound ())
1596 *result
= wide_int_to_tree (type (), lower_bound ());
1603 irange::singleton_p (wide_int
&w
) const
1605 if (num_pairs () == 1 && lower_bound () == upper_bound ())
1613 /* Return 1 if CST is inside value range.
1614 0 if CST is not inside value range.
1616 Benchmark compile/20001226-1.c compilation time after changing this
1620 irange::contains_p (const wide_int
&cst
) const
1625 // See if we can exclude CST based on the known 0 bits.
1626 if (!m_bitmask
.unknown_p ()
1628 && wi::bit_and (m_bitmask
.get_nonzero_bits (), cst
) == 0)
1631 signop sign
= TYPE_SIGN (type ());
1632 for (unsigned r
= 0; r
< m_num_ranges
; ++r
)
1634 if (wi::lt_p (cst
, lower_bound (r
), sign
))
1636 if (wi::le_p (cst
, upper_bound (r
), sign
))
1643 // Perform an efficient union with R when both ranges have only a single pair.
1644 // Excluded are VARYING and UNDEFINED ranges.
1647 irange::irange_single_pair_union (const irange
&r
)
1649 gcc_checking_assert (!undefined_p () && !varying_p ());
1650 gcc_checking_assert (!r
.undefined_p () && !varying_p ());
1652 signop sign
= TYPE_SIGN (m_type
);
1653 // Check if current lower bound is also the new lower bound.
1654 if (wi::le_p (m_base
[0], r
.m_base
[0], sign
))
1656 // If current upper bound is new upper bound, we're done.
1657 if (wi::le_p (r
.m_base
[1], m_base
[1], sign
))
1658 return union_bitmask (r
);
1659 // Otherwise R has the new upper bound.
1660 // Check for overlap/touching ranges, or single target range.
1661 if (m_max_ranges
== 1
1662 || (widest_int::from (m_base
[1], sign
) + 1
1663 >= widest_int::from (r
.m_base
[0], TYPE_SIGN (r
.m_type
))))
1664 m_base
[1] = r
.m_base
[1];
1667 // This is a dual range result.
1668 m_base
[2] = r
.m_base
[0];
1669 m_base
[3] = r
.m_base
[1];
1672 // The range has been altered, so normalize it even if nothing
1673 // changed in the mask.
1674 if (!union_bitmask (r
))
1681 // Set the new lower bound to R's lower bound.
1682 wide_int lb
= m_base
[0];
1683 m_base
[0] = r
.m_base
[0];
1685 // If R fully contains THIS range, just set the upper bound.
1686 if (wi::ge_p (r
.m_base
[1], m_base
[1], sign
))
1687 m_base
[1] = r
.m_base
[1];
1688 // Check for overlapping ranges, or target limited to a single range.
1689 else if (m_max_ranges
== 1
1690 || (widest_int::from (r
.m_base
[1], TYPE_SIGN (r
.m_type
)) + 1
1691 >= widest_int::from (lb
, sign
)))
1695 // Left with 2 pairs.
1698 m_base
[3] = m_base
[1];
1699 m_base
[1] = r
.m_base
[1];
1701 // The range has been altered, so normalize it even if nothing
1702 // changed in the mask.
1703 if (!union_bitmask (r
))
1710 // Append R to this range, knowing that R occurs after all of these subranges.
1711 // Return TRUE as something must have changed.
1714 irange::union_append (const irange
&r
)
1716 // Check if the first range in R is an immediate successor to the last
1717 range, thus requiring a merge.
1718 signop sign
= TYPE_SIGN (m_type
);
1719 wide_int lb
= r
.lower_bound ();
1720 wide_int ub
= upper_bound ();
1722 if (widest_int::from (ub
, sign
) + 1
1723 == widest_int::from (lb
, sign
))
1725 m_base
[m_num_ranges
* 2 - 1] = r
.m_base
[1];
1728 maybe_resize (m_num_ranges
+ r
.m_num_ranges
- start
);
1729 for ( ; start
< r
.m_num_ranges
; start
++)
1731 // Merge the last ranges if it exceeds the maximum size.
1732 if (m_num_ranges
+ 1 > m_max_ranges
)
1734 m_base
[m_max_ranges
* 2 - 1] = r
.m_base
[r
.m_num_ranges
* 2 - 1];
1737 m_base
[m_num_ranges
* 2] = r
.m_base
[start
* 2];
1738 m_base
[m_num_ranges
* 2 + 1] = r
.m_base
[start
* 2 + 1];
1742 if (!union_bitmask (r
))
1749 // Return TRUE if anything changes.
1752 irange::union_ (const vrange
&v
)
1754 const irange
&r
= as_a
<irange
> (v
);
1756 if (r
.undefined_p ())
1772 set_varying (type ());
1776 // Special case one range union one range.
1777 if (m_num_ranges
== 1 && r
.m_num_ranges
== 1)
1778 return irange_single_pair_union (r
);
1780 signop sign
= TYPE_SIGN (m_type
);
1781 // Check for an append to the end.
1782 if (m_kind
== VR_RANGE
&& wi::gt_p (r
.lower_bound (), upper_bound (), sign
))
1783 return union_append (r
);
1785 // If this ranges fully contains R, then we need do nothing.
1786 if (irange_contains_p (r
))
1787 return union_bitmask (r
);
1789 // Do not worry about merging and such by reserving twice as many
1790 // pairs as needed, and then simply sort the 2 ranges into this
1791 // intermediate form.
1793 // The intermediate result will have the property that the beginning
1794 // of each range is <= the beginning of the next range. There may
1795 // be overlapping ranges at this point. I.e. this would be valid
1796 // [-20, 10], [-10, 0], [0, 20], [40, 90] as it satisfies this
1797 // constraint : -20 < -10 < 0 < 40. When the range is rebuilt into r,
1798 // the merge is performed.
1800 // [Xi,Yi]..[Xn,Yn] U [Xj,Yj]..[Xm,Ym] --> [Xk,Yk]..[Xp,Yp]
1801 auto_vec
<wide_int
, 20> res (m_num_ranges
* 2 + r
.m_num_ranges
* 2);
1802 unsigned i
= 0, j
= 0, k
= 0;
1804 while (i
< m_num_ranges
* 2 && j
< r
.m_num_ranges
* 2)
1806 // lower of Xi and Xj is the lowest point.
1807 if (widest_int::from (m_base
[i
], sign
)
1808 <= widest_int::from (r
.m_base
[j
], sign
))
1810 res
.quick_push (m_base
[i
]);
1811 res
.quick_push (m_base
[i
+ 1]);
1817 res
.quick_push (r
.m_base
[j
]);
1818 res
.quick_push (r
.m_base
[j
+ 1]);
1823 for ( ; i
< m_num_ranges
* 2; i
+= 2)
1825 res
.quick_push (m_base
[i
]);
1826 res
.quick_push (m_base
[i
+ 1]);
1829 for ( ; j
< r
.m_num_ranges
* 2; j
+= 2)
1831 res
.quick_push (r
.m_base
[j
]);
1832 res
.quick_push (r
.m_base
[j
+ 1]);
1836 // Now normalize the vector removing any overlaps.
1838 for (j
= 2; j
< k
; j
+= 2)
1840 // Current upper+1 is >= lower bound next pair, then we merge ranges.
1841 if (widest_int::from (res
[i
- 1], sign
) + 1
1842 >= widest_int::from (res
[j
], sign
))
1844 // New upper bounds is greater of current or the next one.
1845 if (widest_int::from (res
[j
+ 1], sign
)
1846 > widest_int::from (res
[i
- 1], sign
))
1847 res
[i
- 1] = res
[j
+ 1];
1851 // This is a new distinct range, but no point in copying it
1852 // if it is already in the right place.
1856 res
[i
++] = res
[j
+ 1];
1863 // At this point, the vector should have i ranges, none overlapping.
1864 // Now it simply needs to be copied, and if there are too many
1865 // ranges, merge some. We wont do any analysis as to what the
1866 // "best" merges are, simply combine the final ranges into one.
1867 maybe_resize (i
/ 2);
1868 if (i
> m_max_ranges
* 2)
1870 res
[m_max_ranges
* 2 - 1] = res
[i
- 1];
1871 i
= m_max_ranges
* 2;
1874 for (j
= 0; j
< i
; j
++)
1875 m_base
[j
] = res
[j
];
1876 m_num_ranges
= i
/ 2;
1879 // The range has been altered, so normalize it even if nothing
1880 // changed in the mask.
1881 if (!union_bitmask (r
))
1888 // Return TRUE if THIS fully contains R. No undefined or varying cases.
1891 irange::irange_contains_p (const irange
&r
) const
1893 gcc_checking_assert (!undefined_p () && !varying_p ());
1894 gcc_checking_assert (!r
.undefined_p () && !varying_p ());
1896 // In order for THIS to fully contain R, all of the pairs within R must
1897 // be fully contained by the pairs in this object.
1898 signop sign
= TYPE_SIGN (m_type
);
1901 wide_int rl
= r
.m_base
[0];
1902 wide_int ru
= r
.m_base
[1];
1903 wide_int l
= m_base
[0];
1904 wide_int u
= m_base
[1];
1907 // If r is contained within this range, move to the next R
1908 if (wi::ge_p (rl
, l
, sign
)
1909 && wi::le_p (ru
, u
, sign
))
1911 // This pair is OK, Either done, or bump to the next.
1912 if (++ri
>= r
.num_pairs ())
1914 rl
= r
.m_base
[ri
* 2];
1915 ru
= r
.m_base
[ri
* 2 + 1];
1918 // Otherwise, check if this's pair occurs before R's.
1919 if (wi::lt_p (u
, rl
, sign
))
1921 // There's still at least one pair of R left.
1922 if (++i
>= num_pairs ())
1925 u
= m_base
[i
* 2 + 1];
1934 // Return TRUE if anything changes.
1937 irange::intersect (const vrange
&v
)
1939 const irange
&r
= as_a
<irange
> (v
);
1940 gcc_checking_assert (undefined_p () || r
.undefined_p ()
1941 || range_compatible_p (type (), r
.type ()));
1945 if (r
.undefined_p ())
1958 if (r
.num_pairs () == 1)
1960 bool res
= intersect (r
.lower_bound (), r
.upper_bound ());
1964 res
|= intersect_bitmask (r
);
1970 // If R fully contains this, then intersection will change nothing.
1971 if (r
.irange_contains_p (*this))
1972 return intersect_bitmask (r
);
1974 // ?? We could probably come up with something smarter than the
1975 // worst case scenario here.
1976 int needed
= num_pairs () + r
.num_pairs ();
1977 maybe_resize (needed
);
1979 signop sign
= TYPE_SIGN (m_type
);
1980 unsigned bld_pair
= 0;
1981 unsigned bld_lim
= m_max_ranges
;
1982 int_range_max
r2 (*this);
1983 unsigned r2_lim
= r2
.num_pairs ();
1985 for (unsigned i
= 0; i
< r
.num_pairs (); )
1987 // If r1's upper is < r2's lower, we can skip r1's pair.
1988 wide_int ru
= r
.m_base
[i
* 2 + 1];
1989 wide_int r2l
= r2
.m_base
[i2
* 2];
1990 if (wi::lt_p (ru
, r2l
, sign
))
1995 // Likewise, skip r2's pair if its excluded.
1996 wide_int r2u
= r2
.m_base
[i2
* 2 + 1];
1997 wide_int rl
= r
.m_base
[i
* 2];
1998 if (wi::lt_p (r2u
, rl
, sign
))
2003 // No more r2, break.
2007 // Must be some overlap. Find the highest of the lower bounds,
2008 // and set it, unless the build limits lower bounds is already
2010 if (bld_pair
< bld_lim
)
2012 if (wi::ge_p (rl
, r2l
, sign
))
2013 m_base
[bld_pair
* 2] = rl
;
2015 m_base
[bld_pair
* 2] = r2l
;
2018 // Decrease and set a new upper.
2021 // ...and choose the lower of the upper bounds.
2022 if (wi::le_p (ru
, r2u
, sign
))
2024 m_base
[bld_pair
* 2 + 1] = ru
;
2026 // Move past the r1 pair and keep trying.
2032 m_base
[bld_pair
* 2 + 1] = r2u
;
2037 // No more r2, break.
2040 // r2 has the higher lower bound.
2043 // At the exit of this loop, it is one of 2 things:
2044 // ran out of r1, or r2, but either means we are done.
2045 m_num_ranges
= bld_pair
;
2046 if (m_num_ranges
== 0)
2053 // The range has been altered, so normalize it even if nothing
2054 // changed in the mask.
2055 if (!intersect_bitmask (r
))
2063 // Multirange intersect for a specified wide_int [lb, ub] range.
2064 // Return TRUE if intersect changed anything.
2066 // NOTE: It is the caller's responsibility to intersect the mask.
2069 irange::intersect (const wide_int
& lb
, const wide_int
& ub
)
2071 // Undefined remains undefined.
2075 tree range_type
= type();
2076 signop sign
= TYPE_SIGN (range_type
);
2078 gcc_checking_assert (TYPE_PRECISION (range_type
) == wi::get_precision (lb
));
2079 gcc_checking_assert (TYPE_PRECISION (range_type
) == wi::get_precision (ub
));
2081 // If this range is fully contained, then intersection will do nothing.
2082 if (wi::ge_p (lower_bound (), lb
, sign
)
2083 && wi::le_p (upper_bound (), ub
, sign
))
2086 unsigned bld_index
= 0;
2087 unsigned pair_lim
= num_pairs ();
2088 for (unsigned i
= 0; i
< pair_lim
; i
++)
2090 wide_int pairl
= m_base
[i
* 2];
2091 wide_int pairu
= m_base
[i
* 2 + 1];
2092 // Once UB is less than a pairs lower bound, we're done.
2093 if (wi::lt_p (ub
, pairl
, sign
))
2095 // if LB is greater than this pairs upper, this pair is excluded.
2096 if (wi::lt_p (pairu
, lb
, sign
))
2099 // Must be some overlap. Find the highest of the lower bounds,
2101 if (wi::gt_p (lb
, pairl
, sign
))
2102 m_base
[bld_index
* 2] = lb
;
2104 m_base
[bld_index
* 2] = pairl
;
2106 // ...and choose the lower of the upper bounds and if the base pair
2107 // has the lower upper bound, need to check next pair too.
2108 if (wi::lt_p (ub
, pairu
, sign
))
2110 m_base
[bld_index
++ * 2 + 1] = ub
;
2114 m_base
[bld_index
++ * 2 + 1] = pairu
;
2117 m_num_ranges
= bld_index
;
2118 if (m_num_ranges
== 0)
2125 // The caller must normalize and verify the range, as the bitmask
2126 // still needs to be handled.
2131 // Signed 1-bits are strange. You can't subtract 1, because you can't
2132 // represent the number 1. This works around that for the invert routine.
2134 static wide_int
inline
2135 subtract_one (const wide_int
&x
, tree type
, wi::overflow_type
&overflow
)
2137 if (TYPE_SIGN (type
) == SIGNED
)
2138 return wi::add (x
, -1, SIGNED
, &overflow
);
2140 return wi::sub (x
, 1, UNSIGNED
, &overflow
);
2143 // The analogous function for adding 1.
2145 static wide_int
inline
2146 add_one (const wide_int
&x
, tree type
, wi::overflow_type
&overflow
)
2148 if (TYPE_SIGN (type
) == SIGNED
)
2149 return wi::sub (x
, -1, SIGNED
, &overflow
);
2151 return wi::add (x
, 1, UNSIGNED
, &overflow
);
2154 // Return the inverse of a range.
2159 gcc_checking_assert (!undefined_p () && !varying_p ());
2161 // We always need one more set of bounds to represent an inverse, so
2162 // if we're at the limit, we can't properly represent things.
2164 // For instance, to represent the inverse of a 2 sub-range set
2165 // [5, 10][20, 30], we would need a 3 sub-range set
2166 // [-MIN, 4][11, 19][31, MAX].
2168 // In this case, return the most conservative thing.
2170 // However, if any of the extremes of the range are -MIN/+MAX, we
2171 // know we will not need an extra bound. For example:
2173 // INVERT([-MIN,20][30,40]) => [21,29][41,+MAX]
2174 // INVERT([-MIN,20][30,MAX]) => [21,29]
2175 tree ttype
= type ();
2176 unsigned prec
= TYPE_PRECISION (ttype
);
2177 signop sign
= TYPE_SIGN (ttype
);
2178 wide_int type_min
= wi::min_value (prec
, sign
);
2179 wide_int type_max
= wi::max_value (prec
, sign
);
2180 m_bitmask
.set_unknown (prec
);
2182 // At this point, we need one extra sub-range to represent the
2184 maybe_resize (m_num_ranges
+ 1);
2186 // The algorithm is as follows. To calculate INVERT ([a,b][c,d]), we
2187 // generate [-MIN, a-1][b+1, c-1][d+1, MAX].
2189 // If there is an over/underflow in the calculation for any
2190 // sub-range, we eliminate that subrange. This allows us to easily
2191 // calculate INVERT([-MIN, 5]) with: [-MIN, -MIN-1][6, MAX]. And since
2192 // we eliminate the underflow, only [6, MAX] remains.
2194 wi::overflow_type ovf
;
2195 // Construct leftmost range.
2196 int_range_max
orig_range (*this);
2197 unsigned nitems
= 0;
2199 // If this is going to underflow on the MINUS 1, don't even bother
2200 // checking. This also handles subtracting one from an unsigned 0,
2201 // which doesn't set the underflow bit.
2202 if (type_min
!= orig_range
.lower_bound ())
2204 m_base
[nitems
++] = type_min
;
2205 tmp
= subtract_one (orig_range
.lower_bound (), ttype
, ovf
);
2206 m_base
[nitems
++] = tmp
;
2211 // Construct middle ranges if applicable.
2212 if (orig_range
.num_pairs () > 1)
2215 for (; j
< (orig_range
.num_pairs () * 2) - 1; j
+= 2)
2217 // The middle ranges cannot have MAX/MIN, so there's no need
2218 // to check for unsigned overflow on the +1 and -1 here.
2219 tmp
= wi::add (orig_range
.m_base
[j
], 1, sign
, &ovf
);
2220 m_base
[nitems
++] = tmp
;
2221 tmp
= subtract_one (orig_range
.m_base
[j
+ 1], ttype
, ovf
);
2222 m_base
[nitems
++] = tmp
;
2228 // Construct rightmost range.
2230 // However, if this will overflow on the PLUS 1, don't even bother.
2231 // This also handles adding one to an unsigned MAX, which doesn't
2232 // set the overflow bit.
2233 if (type_max
!= orig_range
.m_base
[i
])
2235 tmp
= add_one (orig_range
.m_base
[i
], ttype
, ovf
);
2236 m_base
[nitems
++] = tmp
;
2237 m_base
[nitems
++] = type_max
;
2241 m_num_ranges
= nitems
/ 2;
2243 // We disallow undefined or varying coming in, so the result can
2244 // only be a VR_RANGE.
2245 gcc_checking_assert (m_kind
== VR_RANGE
);
2251 // Remove trailing ranges that this bitmask indicates can't exist.
2254 irange_bitmask::adjust_range (irange
&r
) const
2256 if (unknown_p () || r
.undefined_p ())
2259 int_range_max range
;
2260 tree type
= r
.type ();
2261 int prec
= TYPE_PRECISION (type
);
2262 // If there are trailing zeros, create a range representing those bits.
2263 gcc_checking_assert (m_mask
!= 0);
2264 int z
= wi::ctz (m_mask
);
2267 wide_int ub
= (wi::one (prec
) << z
) - 1;
2268 range
= int_range
<5> (type
, wi::zero (prec
), ub
);
2269 // Then remove the specific value these bits contain from the range.
2270 wide_int value
= m_value
& ub
;
2271 range
.intersect (int_range
<2> (type
, value
, value
, VR_ANTI_RANGE
));
2272 // Inverting produces a list of ranges which can be valid.
2274 // And finally select R from only those valid values.
2275 r
.intersect (range
);
2280 // If the mask can be trivially converted to a range, do so and
2284 irange::set_range_from_bitmask ()
2286 gcc_checking_assert (!undefined_p ());
2287 if (m_bitmask
.unknown_p ())
2290 // If all the bits are known, this is a singleton.
2291 if (m_bitmask
.mask () == 0)
2293 set (m_type
, m_bitmask
.value (), m_bitmask
.value ());
2297 unsigned popcount
= wi::popcount (m_bitmask
.get_nonzero_bits ());
2299 // If we have only one bit set in the mask, we can figure out the
2300 // range immediately.
2303 // Make sure we don't pessimize the range.
2304 if (!contains_p (m_bitmask
.get_nonzero_bits ()))
2307 bool has_zero
= contains_zero_p (*this);
2308 wide_int nz
= m_bitmask
.get_nonzero_bits ();
2309 set (m_type
, nz
, nz
);
2310 m_bitmask
.set_nonzero_bits (nz
);
2314 zero
.set_zero (type ());
2321 else if (popcount
== 0)
2330 irange::update_bitmask (const irange_bitmask
&bm
)
2332 gcc_checking_assert (!undefined_p ());
2334 // Drop VARYINGs with known bits to a plain range.
2335 if (m_kind
== VR_VARYING
&& !bm
.unknown_p ())
2339 if (!set_range_from_bitmask ())
2345 // Return the bitmask of known bits that includes the bitmask inherent
2349 irange::get_bitmask () const
2351 gcc_checking_assert (!undefined_p ());
2353 // The mask inherent in the range is calculated on-demand. For
2354 // example, [0,255] does not have known bits set by default. This
2355 // saves us considerable time, because setting it at creation incurs
2356 // a large penalty for irange::set. At the time of writing there
2357 // was a 5% slowdown in VRP if we kept the mask precisely up to date
2358 // at all times. Instead, we default to -1 and set it when
2359 // explicitly requested. However, this function will always return
2360 // the correct mask.
2362 // This also means that the mask may have a finer granularity than
2363 // the range and thus contradict it. Think of the mask as an
2364 // enhancement to the range. For example:
2366 // [3, 1000] MASK 0xfffffffe VALUE 0x0
2368 // 3 is in the range endpoints, but is excluded per the known 0 bits
2371 // See also the note in irange_bitmask::intersect.
2373 = get_bitmask_from_range (type (), lower_bound (), upper_bound ());
2374 if (!m_bitmask
.unknown_p ())
2375 bm
.intersect (m_bitmask
);
2379 // Set the nonzero bits in R into THIS. Return TRUE and
2380 // normalize the range if anything changed.
2383 vrange::set_nonzero_bits (const wide_int
&bits
)
2385 gcc_checking_assert (!undefined_p ());
2386 irange_bitmask
bm (wi::zero (TYPE_PRECISION (type ())), bits
);
2387 update_bitmask (bm
);
2390 // Return the nonzero bits in R.
2393 vrange::get_nonzero_bits () const
2395 gcc_checking_assert (!undefined_p ());
2396 irange_bitmask bm
= get_bitmask ();
2397 return bm
.value () | bm
.mask ();
2400 // Intersect the bitmask in R into THIS and normalize the range.
2401 // Return TRUE if the intersection changed anything.
2404 irange::intersect_bitmask (const irange
&r
)
2406 gcc_checking_assert (!undefined_p () && !r
.undefined_p ());
2408 if (m_bitmask
== r
.m_bitmask
)
2411 irange_bitmask bm
= get_bitmask ();
2412 irange_bitmask save
= bm
;
2413 bm
.intersect (r
.get_bitmask ());
2419 // Updating m_bitmask may still yield a semantic bitmask (as
2420 // returned by get_bitmask) which is functionally equivalent to what
2421 // we originally had. In which case, there's still no change.
2422 if (save
== get_bitmask ())
2425 if (!set_range_from_bitmask ())
2427 m_bitmask
.adjust_range (*this);
2433 // Union the bitmask in R into THIS. Return TRUE and normalize the
2434 // range if anything changed.
2437 irange::union_bitmask (const irange
&r
)
2439 gcc_checking_assert (!undefined_p () && !r
.undefined_p ());
2441 if (m_bitmask
== r
.m_bitmask
)
2444 irange_bitmask bm
= get_bitmask ();
2445 irange_bitmask save
= bm
;
2446 bm
.union_ (r
.get_bitmask ());
2452 // Updating m_bitmask may still yield a semantic bitmask (as
2453 // returned by get_bitmask) which is functionally equivalent to what
2454 // we originally had. In which case, there's still no change.
2455 if (save
== get_bitmask ())
2458 // No need to call set_range_from_mask, because we'll never
2459 // narrow the range. Besides, it would cause endless recursion
2460 // because of the union_ in set_range_from_mask.
2466 irange::lbound () const
2468 return wide_int_to_tree (type (), lower_bound ());
2472 irange::ubound () const
2474 return wide_int_to_tree (type (), upper_bound ());
2478 irange_bitmask::verify_mask () const
2480 gcc_assert (m_value
.get_precision () == m_mask
.get_precision ());
2481 gcc_checking_assert (wi::bit_and (m_mask
, m_value
) == 0);
2485 dump_value_range (FILE *file
, const vrange
*vr
)
2491 debug (const vrange
*vr
)
2493 dump_value_range (stderr
, vr
);
2494 fprintf (stderr
, "\n");
2498 debug (const vrange
&vr
)
2504 debug (const value_range
*vr
)
2506 dump_value_range (stderr
, vr
);
2507 fprintf (stderr
, "\n");
2511 debug (const value_range
&vr
)
2513 dump_value_range (stderr
, &vr
);
2514 fprintf (stderr
, "\n");
2517 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
2520 vrp_operand_equal_p (const_tree val1
, const_tree val2
)
2524 if (!val1
|| !val2
|| !operand_equal_p (val1
, val2
, 0))
2529 #define DEFINE_INT_RANGE_INSTANCE(N) \
2530 template int_range<N>::int_range(tree_node *, \
2533 value_range_kind); \
2534 template int_range<N>::int_range(tree); \
2535 template int_range<N>::int_range(const irange &); \
2536 template int_range<N>::int_range(const int_range &); \
2537 template int_range<N>& int_range<N>::operator= (const int_range &);
2539 DEFINE_INT_RANGE_INSTANCE(1)
2540 DEFINE_INT_RANGE_INSTANCE(2)
2541 DEFINE_INT_RANGE_INSTANCE(3)
2542 DEFINE_INT_RANGE_INSTANCE(255)
2545 #include "selftest.h"
2547 #define INT(x) wi::shwi ((x), TYPE_PRECISION (integer_type_node))
2548 #define UINT(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_type_node))
2549 #define SCHAR(x) wi::shwi ((x), TYPE_PRECISION (signed_char_type_node))
2555 range (tree type
, int a
, int b
, value_range_kind kind
= VR_RANGE
)
2558 if (TYPE_UNSIGNED (type
))
2560 w1
= wi::uhwi (a
, TYPE_PRECISION (type
));
2561 w2
= wi::uhwi (b
, TYPE_PRECISION (type
));
2565 w1
= wi::shwi (a
, TYPE_PRECISION (type
));
2566 w2
= wi::shwi (b
, TYPE_PRECISION (type
));
2568 return int_range
<2> (type
, w1
, w2
, kind
);
2572 range_int (int a
, int b
, value_range_kind kind
= VR_RANGE
)
2574 return range (integer_type_node
, a
, b
, kind
);
2578 range_uint (int a
, int b
, value_range_kind kind
= VR_RANGE
)
2580 return range (unsigned_type_node
, a
, b
, kind
);
2584 range_uint128 (int a
, int b
, value_range_kind kind
= VR_RANGE
)
2586 tree u128_type_node
= build_nonstandard_integer_type (128, 1);
2587 return range (u128_type_node
, a
, b
, kind
);
2591 range_uchar (int a
, int b
, value_range_kind kind
= VR_RANGE
)
2593 return range (unsigned_char_type_node
, a
, b
, kind
);
2597 range_char (int a
, int b
, value_range_kind kind
= VR_RANGE
)
2599 return range (signed_char_type_node
, a
, b
, kind
);
2603 build_range3 (int a
, int b
, int c
, int d
, int e
, int f
)
2605 int_range
<3> i1
= range_int (a
, b
);
2606 int_range
<3> i2
= range_int (c
, d
);
2607 int_range
<3> i3
= range_int (e
, f
);
2614 range_tests_irange3 ()
2616 int_range
<3> r0
, r1
, r2
;
2617 int_range
<3> i1
, i2
, i3
;
2619 // ([10,20] U [5,8]) U [1,3] ==> [1,3][5,8][10,20].
2620 r0
= range_int (10, 20);
2621 r1
= range_int (5, 8);
2623 r1
= range_int (1, 3);
2625 ASSERT_TRUE (r0
== build_range3 (1, 3, 5, 8, 10, 20));
2627 // [1,3][5,8][10,20] U [-5,0] => [-5,3][5,8][10,20].
2628 r1
= range_int (-5, 0);
2630 ASSERT_TRUE (r0
== build_range3 (-5, 3, 5, 8, 10, 20));
2632 // [10,20][30,40] U [50,60] ==> [10,20][30,40][50,60].
2633 r1
= range_int (50, 60);
2634 r0
= range_int (10, 20);
2635 r0
.union_ (range_int (30, 40));
2637 ASSERT_TRUE (r0
== build_range3 (10, 20, 30, 40, 50, 60));
2638 // [10,20][30,40][50,60] U [70, 80] ==> [10,20][30,40][50,60][70,80].
2639 r1
= range_int (70, 80);
2642 r2
= build_range3 (10, 20, 30, 40, 50, 60);
2643 r2
.union_ (range_int (70, 80));
2644 ASSERT_TRUE (r0
== r2
);
2646 // [10,20][30,40][50,60] U [6,35] => [6,40][50,60].
2647 r0
= build_range3 (10, 20, 30, 40, 50, 60);
2648 r1
= range_int (6, 35);
2650 r1
= range_int (6, 40);
2651 r1
.union_ (range_int (50, 60));
2652 ASSERT_TRUE (r0
== r1
);
2654 // [10,20][30,40][50,60] U [6,60] => [6,60].
2655 r0
= build_range3 (10, 20, 30, 40, 50, 60);
2656 r1
= range_int (6, 60);
2658 ASSERT_TRUE (r0
== range_int (6, 60));
2660 // [10,20][30,40][50,60] U [6,70] => [6,70].
2661 r0
= build_range3 (10, 20, 30, 40, 50, 60);
2662 r1
= range_int (6, 70);
2664 ASSERT_TRUE (r0
== range_int (6, 70));
2666 // [10,20][30,40][50,60] U [35,70] => [10,20][30,70].
2667 r0
= build_range3 (10, 20, 30, 40, 50, 60);
2668 r1
= range_int (35, 70);
2670 r1
= range_int (10, 20);
2671 r1
.union_ (range_int (30, 70));
2672 ASSERT_TRUE (r0
== r1
);
2674 // [10,20][30,40][50,60] U [15,35] => [10,40][50,60].
2675 r0
= build_range3 (10, 20, 30, 40, 50, 60);
2676 r1
= range_int (15, 35);
2678 r1
= range_int (10, 40);
2679 r1
.union_ (range_int (50, 60));
2680 ASSERT_TRUE (r0
== r1
);
2682 // [10,20][30,40][50,60] U [35,35] => [10,20][30,40][50,60].
2683 r0
= build_range3 (10, 20, 30, 40, 50, 60);
2684 r1
= range_int (35, 35);
2686 ASSERT_TRUE (r0
== build_range3 (10, 20, 30, 40, 50, 60));
2690 range_tests_int_range_max ()
2693 unsigned int nrange
;
2695 // Build a huge multi-range range.
2696 for (nrange
= 0; nrange
< 50; ++nrange
)
2698 int_range
<1> tmp
= range_int (nrange
*10, nrange
*10 + 5);
2701 ASSERT_TRUE (big
.num_pairs () == nrange
);
2703 // Verify that we can copy it without loosing precision.
2704 int_range_max
copy (big
);
2705 ASSERT_TRUE (copy
.num_pairs () == nrange
);
2707 // Inverting it should produce one more sub-range.
2709 ASSERT_TRUE (big
.num_pairs () == nrange
+ 1);
2711 int_range
<1> tmp
= range_int (5, 37);
2712 big
.intersect (tmp
);
2713 ASSERT_TRUE (big
.num_pairs () == 4);
2715 // Test that [10,10][20,20] does NOT contain 15.
2717 int_range_max i1
= range_int (10, 10);
2718 int_range_max i2
= range_int (20, 20);
2720 ASSERT_FALSE (i1
.contains_p (INT (15)));
2724 // Simulate -fstrict-enums where the domain of a type is less than the
2728 range_tests_strict_enum ()
2730 // The enum can only hold [0, 3].
2731 tree rtype
= copy_node (unsigned_type_node
);
2732 TYPE_MIN_VALUE (rtype
) = build_int_cstu (rtype
, 0);
2733 TYPE_MAX_VALUE (rtype
) = build_int_cstu (rtype
, 3);
2735 // Test that even though vr1 covers the strict enum domain ([0, 3]),
2736 // it does not cover the domain of the underlying type.
2737 int_range
<1> vr1
= range (rtype
, 0, 1);
2738 int_range
<1> vr2
= range (rtype
, 2, 3);
2740 ASSERT_TRUE (vr1
== range (rtype
, 0, 3));
2741 ASSERT_FALSE (vr1
.varying_p ());
2743 // Test that copying to a multi-range does not change things.
2744 int_range
<2> ir1 (vr1
);
2745 ASSERT_TRUE (ir1
== vr1
);
2746 ASSERT_FALSE (ir1
.varying_p ());
2748 // The same test as above, but using TYPE_{MIN,MAX}_VALUE instead of [0,3].
2749 vr1
= int_range
<2> (rtype
,
2750 wi::to_wide (TYPE_MIN_VALUE (rtype
)),
2751 wi::to_wide (TYPE_MAX_VALUE (rtype
)));
2753 ASSERT_TRUE (ir1
== vr1
);
2754 ASSERT_FALSE (ir1
.varying_p ());
2760 tree u128_type
= build_nonstandard_integer_type (128, /*unsigned=*/1);
2761 int_range
<2> i1
, i2
, i3
;
2762 int_range
<2> r0
, r1
, rold
;
2764 // Test 1-bit signed integer union.
2765 // [-1,-1] U [0,0] = VARYING.
2766 tree one_bit_type
= build_nonstandard_integer_type (1, 0);
2767 wide_int one_bit_min
= irange_val_min (one_bit_type
);
2768 wide_int one_bit_max
= irange_val_max (one_bit_type
);
2770 int_range
<2> min
= int_range
<2> (one_bit_type
, one_bit_min
, one_bit_min
);
2771 int_range
<2> max
= int_range
<2> (one_bit_type
, one_bit_max
, one_bit_max
);
2773 ASSERT_TRUE (max
.varying_p ());
2775 // Test that we can set a range of true+false for a 1-bit signed int.
2776 r0
= range_true_and_false (one_bit_type
);
2778 // Test inversion of 1-bit signed integers.
2780 int_range
<2> min
= int_range
<2> (one_bit_type
, one_bit_min
, one_bit_min
);
2781 int_range
<2> max
= int_range
<2> (one_bit_type
, one_bit_max
, one_bit_max
);
2785 ASSERT_TRUE (t
== max
);
2788 ASSERT_TRUE (t
== min
);
2791 // Test that NOT(255) is [0..254] in 8-bit land.
2792 int_range
<1> not_255
= range_uchar (255, 255, VR_ANTI_RANGE
);
2793 ASSERT_TRUE (not_255
== range_uchar (0, 254));
2795 // Test that NOT(0) is [1..255] in 8-bit land.
2796 int_range
<2> not_zero
;
2797 not_zero
.set_nonzero (unsigned_char_type_node
);
2798 ASSERT_TRUE (not_zero
== range_uchar (1, 255));
2800 // Check that [0,127][0x..ffffff80,0x..ffffff]
2801 // => ~[128, 0x..ffffff7f].
2802 r0
= range_uint128 (0, 127);
2803 wide_int high
= wi::minus_one (128);
2804 // low = -1 - 127 => 0x..ffffff80.
2805 wide_int low
= wi::sub (high
, wi::uhwi (127, 128));
2806 r1
= int_range
<1> (u128_type
, low
, high
); // [0x..ffffff80, 0x..ffffffff]
2807 // r0 = [0,127][0x..ffffff80,0x..fffffff].
2809 // r1 = [128, 0x..ffffff7f].
2810 r1
= int_range
<1> (u128_type
,
2811 wi::uhwi (128, 128),
2812 wi::sub (wi::minus_one (128), wi::uhwi (128, 128)));
2814 ASSERT_TRUE (r0
== r1
);
2816 r0
.set_varying (integer_type_node
);
2817 wide_int minint
= r0
.lower_bound ();
2818 wide_int maxint
= r0
.upper_bound ();
2820 r0
.set_varying (short_integer_type_node
);
2822 r0
.set_varying (unsigned_type_node
);
2823 wide_int maxuint
= r0
.upper_bound ();
2825 // Check that ~[0,5] => [6,MAX] for unsigned int.
2826 r0
= range_uint (0, 5);
2828 ASSERT_TRUE (r0
== int_range
<1> (unsigned_type_node
,
2829 wi::uhwi (6, TYPE_PRECISION (unsigned_type_node
)),
2832 // Check that ~[10,MAX] => [0,9] for unsigned int.
2833 r0
= int_range
<1> (unsigned_type_node
,
2834 wi::uhwi (10, TYPE_PRECISION (unsigned_type_node
)),
2837 ASSERT_TRUE (r0
== range_uint (0, 9));
2839 // Check that ~[0,5] => [6,MAX] for unsigned 128-bit numbers.
2840 r0
= range_uint128 (0, 5, VR_ANTI_RANGE
);
2841 r1
= int_range
<1> (u128_type
, wi::uhwi (6, 128), wi::minus_one (128));
2842 ASSERT_TRUE (r0
== r1
);
2844 // Check that [~5] is really [-MIN,4][6,MAX].
2845 r0
= range_int (5, 5, VR_ANTI_RANGE
);
2846 r1
= int_range
<1> (integer_type_node
, minint
, INT (4));
2847 r1
.union_ (int_range
<1> (integer_type_node
, INT (6), maxint
));
2848 ASSERT_FALSE (r1
.undefined_p ());
2849 ASSERT_TRUE (r0
== r1
);
2851 r1
= range_int (5, 5);
2852 int_range
<2> r2 (r1
);
2853 ASSERT_TRUE (r1
== r2
);
2855 r1
= range_int (5, 10);
2857 r1
= range_int (5, 10);
2858 ASSERT_TRUE (r1
.contains_p (INT (7)));
2860 r1
= range_char (0, 20);
2861 ASSERT_TRUE (r1
.contains_p (SCHAR(15)));
2862 ASSERT_FALSE (r1
.contains_p (SCHAR(300)));
2864 // NOT([10,20]) ==> [-MIN,9][21,MAX].
2865 r0
= r1
= range_int (10, 20);
2866 r2
= int_range
<1> (integer_type_node
, minint
, INT(9));
2867 r2
.union_ (int_range
<1> (integer_type_node
, INT(21), maxint
));
2868 ASSERT_FALSE (r2
.undefined_p ());
2870 ASSERT_TRUE (r1
== r2
);
2871 // Test that NOT(NOT(x)) == x.
2873 ASSERT_TRUE (r0
== r2
);
2875 // Test that booleans and their inverse work as expected.
2876 r0
.set_zero (boolean_type_node
);
2877 ASSERT_TRUE (r0
== range_false ());
2879 ASSERT_TRUE (r0
== range_true ());
2881 // Make sure NULL and non-NULL of pointer types work, and that
2882 // inverses of them are consistent.
2883 tree voidp
= build_pointer_type (void_type_node
);
2885 p0
.set_zero (voidp
);
2889 ASSERT_TRUE (p0
== p1
);
2891 // [10,20] U [15, 30] => [10, 30].
2892 r0
= range_int (10, 20);
2893 r1
= range_int (15, 30);
2895 ASSERT_TRUE (r0
== range_int (10, 30));
2897 // [15,40] U [] => [15,40].
2898 r0
= range_int (15, 40);
2899 r1
.set_undefined ();
2901 ASSERT_TRUE (r0
== range_int (15, 40));
2903 // [10,20] U [10,10] => [10,20].
2904 r0
= range_int (10, 20);
2905 r1
= range_int (10, 10);
2907 ASSERT_TRUE (r0
== range_int (10, 20));
2909 // [10,20] U [9,9] => [9,20].
2910 r0
= range_int (10, 20);
2911 r1
= range_int (9, 9);
2913 ASSERT_TRUE (r0
== range_int (9, 20));
2915 // [10,20] ^ [15,30] => [15,20].
2916 r0
= range_int (10, 20);
2917 r1
= range_int (15, 30);
2919 ASSERT_TRUE (r0
== range_int (15, 20));
2921 // Test the internal sanity of wide_int's wrt HWIs.
2922 ASSERT_TRUE (wi::max_value (TYPE_PRECISION (boolean_type_node
),
2923 TYPE_SIGN (boolean_type_node
))
2924 == wi::uhwi (1, TYPE_PRECISION (boolean_type_node
)));
2927 r0
= range_int (0, 0);
2928 ASSERT_TRUE (r0
.zero_p ());
2930 // Test nonzero_p().
2931 r0
= range_int (0, 0);
2933 ASSERT_TRUE (r0
.nonzero_p ());
2936 r0
= range_int (1, 1, VR_ANTI_RANGE
);
2938 r1
= range_int (3, 3, VR_ANTI_RANGE
);
2940 // vv = [0,0][2,2][4, MAX]
2941 int_range
<3> vv
= r0
;
2944 ASSERT_TRUE (vv
.contains_p (UINT (2)));
2945 ASSERT_TRUE (vv
.num_pairs () == 3);
2947 r0
= range_int (1, 1);
2948 // And union it with [0,0][2,2][4,MAX] multi range
2950 // The result should be [0,2][4,MAX], or ~[3,3] but it must contain 2
2951 ASSERT_TRUE (r0
.contains_p (INT (2)));
2955 range_tests_nonzero_bits ()
2957 int_range
<2> r0
, r1
;
2959 // Adding nonzero bits to a varying drops the varying.
2960 r0
.set_varying (integer_type_node
);
2961 r0
.set_nonzero_bits (INT (255));
2962 ASSERT_TRUE (!r0
.varying_p ());
2963 // Dropping the nonzero bits brings us back to varying.
2964 r0
.set_nonzero_bits (INT (-1));
2965 ASSERT_TRUE (r0
.varying_p ());
2967 // Test contains_p with nonzero bits.
2968 r0
.set_zero (integer_type_node
);
2969 ASSERT_TRUE (r0
.contains_p (INT (0)));
2970 ASSERT_FALSE (r0
.contains_p (INT (1)));
2971 r0
.set_nonzero_bits (INT (0xfe));
2972 ASSERT_FALSE (r0
.contains_p (INT (0x100)));
2973 ASSERT_FALSE (r0
.contains_p (INT (0x3)));
2975 // Union of nonzero bits.
2976 r0
.set_varying (integer_type_node
);
2977 r0
.set_nonzero_bits (INT (0xf0));
2978 r1
.set_varying (integer_type_node
);
2979 r1
.set_nonzero_bits (INT (0xf));
2981 ASSERT_TRUE (r0
.get_nonzero_bits () == 0xff);
2983 // Intersect of nonzero bits.
2984 r0
= range_int (0, 255);
2985 r0
.set_nonzero_bits (INT (0xfe));
2986 r1
.set_varying (integer_type_node
);
2987 r1
.set_nonzero_bits (INT (0xf0));
2989 ASSERT_TRUE (r0
.get_nonzero_bits () == 0xf0);
2991 // Intersect where the mask of nonzero bits is implicit from the range.
2992 r0
.set_varying (integer_type_node
);
2993 r1
= range_int (0, 255);
2995 ASSERT_TRUE (r0
.get_nonzero_bits () == 0xff);
2997 // The union of a mask of 0xff..ffff00 with a mask of 0xff spans the
2998 // entire domain, and makes the range a varying.
2999 r0
.set_varying (integer_type_node
);
3000 wide_int x
= wi::shwi (0xff, TYPE_PRECISION (integer_type_node
));
3001 x
= wi::bit_not (x
);
3002 r0
.set_nonzero_bits (x
); // 0xff..ff00
3003 r1
.set_varying (integer_type_node
);
3004 r1
.set_nonzero_bits (INT (0xff));
3006 ASSERT_TRUE (r0
.varying_p ());
3008 // Test that setting a nonzero bit of 1 does not pessimize the range.
3009 r0
.set_zero (integer_type_node
);
3010 r0
.set_nonzero_bits (INT (1));
3011 ASSERT_TRUE (r0
.zero_p ());
3014 // Build an frange from string endpoints.
3016 static inline frange
3017 frange_float (const char *lb
, const char *ub
, tree type
= float_type_node
)
3019 REAL_VALUE_TYPE min
, max
;
3020 gcc_assert (real_from_string (&min
, lb
) == 0);
3021 gcc_assert (real_from_string (&max
, ub
) == 0);
3022 return frange (type
, min
, max
);
3029 REAL_VALUE_TYPE q
, r
;
3032 // Equal ranges but with differing NAN bits are not equal.
3033 if (HONOR_NANS (float_type_node
))
3035 r1
= frange_float ("10", "12");
3043 // [10, 20] NAN ^ [30, 40] NAN = NAN.
3044 r0
= frange_float ("10", "20");
3045 r1
= frange_float ("30", "40");
3047 ASSERT_TRUE (r0
.known_isnan ());
3049 // [3,5] U [5,10] NAN = ... NAN
3050 r0
= frange_float ("3", "5");
3052 r1
= frange_float ("5", "10");
3054 ASSERT_TRUE (r0
.maybe_isnan ());
3057 // [5,6] U NAN = [5,6] NAN.
3058 r0
= frange_float ("5", "6");
3060 r1
.set_nan (float_type_node
);
3062 real_from_string (&q
, "5");
3063 real_from_string (&r
, "6");
3064 ASSERT_TRUE (real_identical (&q
, &r0
.lower_bound ()));
3065 ASSERT_TRUE (real_identical (&r
, &r0
.upper_bound ()));
3066 ASSERT_TRUE (r0
.maybe_isnan ());
3069 r0
.set_nan (float_type_node
);
3070 r1
.set_nan (float_type_node
);
3072 ASSERT_TRUE (r0
.known_isnan ());
3074 // [INF, INF] NAN ^ NAN = NAN
3075 r0
.set_nan (float_type_node
);
3076 r1
= frange_float ("+Inf", "+Inf");
3077 if (!HONOR_NANS (float_type_node
))
3080 ASSERT_TRUE (r0
.known_isnan ());
3083 r0
.set_nan (float_type_node
);
3084 r1
.set_nan (float_type_node
);
3086 ASSERT_TRUE (r0
.known_isnan ());
3088 // +NAN ^ -NAN = UNDEFINED
3089 r0
.set_nan (float_type_node
, false);
3090 r1
.set_nan (float_type_node
, true);
3092 ASSERT_TRUE (r0
.undefined_p ());
3094 // VARYING ^ NAN = NAN.
3095 r0
.set_nan (float_type_node
);
3096 r1
.set_varying (float_type_node
);
3098 ASSERT_TRUE (r0
.known_isnan ());
3100 // [3,4] ^ NAN = UNDEFINED.
3101 r0
= frange_float ("3", "4");
3103 r1
.set_nan (float_type_node
);
3105 ASSERT_TRUE (r0
.undefined_p ());
3107 // [-3, 5] ^ NAN = UNDEFINED
3108 r0
= frange_float ("-3", "5");
3110 r1
.set_nan (float_type_node
);
3112 ASSERT_TRUE (r0
.undefined_p ());
3114 // Setting the NAN bit to yes does not make us a known NAN.
3115 r0
.set_varying (float_type_node
);
3117 ASSERT_FALSE (r0
.known_isnan ());
3119 // NAN is in a VARYING.
3120 r0
.set_varying (float_type_node
);
3121 real_nan (&r
, "", 1, TYPE_MODE (float_type_node
));
3122 REAL_VALUE_TYPE nan
= r
;
3123 ASSERT_TRUE (r0
.contains_p (nan
));
3125 // -NAN is in a VARYING.
3126 r0
.set_varying (float_type_node
);
3127 q
= real_value_negate (&r
);
3128 REAL_VALUE_TYPE neg_nan
= q
;
3129 ASSERT_TRUE (r0
.contains_p (neg_nan
));
3131 // Clearing the NAN on a [] NAN is the empty set.
3132 r0
.set_nan (float_type_node
);
3134 ASSERT_TRUE (r0
.undefined_p ());
3136 // [10,20] NAN ^ [21,25] NAN = [NAN]
3137 r0
= frange_float ("10", "20");
3139 r1
= frange_float ("21", "25");
3142 ASSERT_TRUE (r0
.known_isnan ());
3144 // NAN U [5,6] should be [5,6] +-NAN.
3145 r0
.set_nan (float_type_node
);
3146 r1
= frange_float ("5", "6");
3149 real_from_string (&q
, "5");
3150 real_from_string (&r
, "6");
3151 ASSERT_TRUE (real_identical (&q
, &r0
.lower_bound ()));
3152 ASSERT_TRUE (real_identical (&r
, &r0
.upper_bound ()));
3153 ASSERT_TRUE (!r0
.signbit_p (signbit
));
3154 ASSERT_TRUE (r0
.maybe_isnan ());
3156 // NAN U NAN shouldn't change anything.
3157 r0
.set_nan (float_type_node
);
3158 r1
.set_nan (float_type_node
);
3159 ASSERT_FALSE (r0
.union_ (r1
));
3161 // [3,5] NAN U NAN shouldn't change anything.
3162 r0
= frange_float ("3", "5");
3163 r1
.set_nan (float_type_node
);
3164 ASSERT_FALSE (r0
.union_ (r1
));
3166 // [3,5] U NAN *does* trigger a change.
3167 r0
= frange_float ("3", "5");
3169 r1
.set_nan (float_type_node
);
3170 ASSERT_TRUE (r0
.union_ (r1
));
3174 range_tests_signed_zeros ()
3176 REAL_VALUE_TYPE zero
= dconst0
;
3177 REAL_VALUE_TYPE neg_zero
= zero
;
3182 // [0,0] contains [0,0] but not [-0,-0] and vice versa.
3183 r0
= frange_float ("0.0", "0.0");
3184 r1
= frange_float ("-0.0", "-0.0");
3185 ASSERT_TRUE (r0
.contains_p (zero
));
3186 ASSERT_TRUE (!r0
.contains_p (neg_zero
));
3187 ASSERT_TRUE (r1
.contains_p (neg_zero
));
3188 ASSERT_TRUE (!r1
.contains_p (zero
));
3190 // Test contains_p() when we know the sign of the zero.
3191 r0
= frange_float ("0.0", "0.0");
3192 ASSERT_TRUE (r0
.contains_p (zero
));
3193 ASSERT_FALSE (r0
.contains_p (neg_zero
));
3194 r0
= frange_float ("-0.0", "-0.0");
3195 ASSERT_TRUE (r0
.contains_p (neg_zero
));
3196 ASSERT_FALSE (r0
.contains_p (zero
));
3198 r0
= frange_float ("-0.0", "0.0");
3199 ASSERT_TRUE (r0
.contains_p (neg_zero
));
3200 ASSERT_TRUE (r0
.contains_p (zero
));
3202 r0
= frange_float ("-3", "5");
3203 ASSERT_TRUE (r0
.contains_p (neg_zero
));
3204 ASSERT_TRUE (r0
.contains_p (zero
));
3206 // The intersection of zeros that differ in sign is a NAN (or
3207 // undefined if not honoring NANs).
3208 r0
= frange_float ("-0.0", "-0.0");
3209 r1
= frange_float ("0.0", "0.0");
3211 if (HONOR_NANS (float_type_node
))
3212 ASSERT_TRUE (r0
.known_isnan ());
3214 ASSERT_TRUE (r0
.undefined_p ());
3216 // The union of zeros that differ in sign is a zero with unknown sign.
3217 r0
= frange_float ("0.0", "0.0");
3218 r1
= frange_float ("-0.0", "-0.0");
3220 ASSERT_TRUE (r0
.zero_p () && !r0
.signbit_p (signbit
));
3222 // [-0, +0] has an unknown sign.
3223 r0
= frange_float ("-0.0", "0.0");
3224 ASSERT_TRUE (r0
.zero_p () && !r0
.signbit_p (signbit
));
3226 // [-0, +0] ^ [0, 0] is [0, 0]
3227 r0
= frange_float ("-0.0", "0.0");
3228 r1
= frange_float ("0.0", "0.0");
3230 ASSERT_TRUE (r0
.zero_p ());
3232 r0
= frange_float ("+0", "5");
3234 ASSERT_TRUE (r0
.signbit_p (signbit
) && !signbit
);
3236 r0
= frange_float ("-0", "5");
3238 ASSERT_TRUE (!r0
.signbit_p (signbit
));
3240 r0
= frange_float ("-0", "10");
3241 r1
= frange_float ("0", "5");
3243 ASSERT_TRUE (real_iszero (&r0
.lower_bound (), false));
3245 r0
= frange_float ("-0", "5");
3246 r1
= frange_float ("0", "5");
3248 ASSERT_TRUE (real_iszero (&r0
.lower_bound (), true));
3250 r0
= frange_float ("-5", "-0");
3252 r1
= frange_float ("0", "0");
3255 if (HONOR_NANS (float_type_node
))
3256 ASSERT_TRUE (r0
.known_isnan ());
3258 ASSERT_TRUE (r0
.undefined_p ());
3260 r0
.set_nonnegative (float_type_node
);
3261 if (HONOR_NANS (float_type_node
))
3262 ASSERT_TRUE (r0
.maybe_isnan ());
3264 // Numbers containing zero should have an unknown SIGNBIT.
3265 r0
= frange_float ("0", "10");
3267 ASSERT_TRUE (r0
.signbit_p (signbit
) && !signbit
);
3271 range_tests_signbit ()
3276 // Negative numbers should have the SIGNBIT set.
3277 r0
= frange_float ("-5", "-1");
3279 ASSERT_TRUE (r0
.signbit_p (signbit
) && signbit
);
3280 // Positive numbers should have the SIGNBIT clear.
3281 r0
= frange_float ("1", "10");
3283 ASSERT_TRUE (r0
.signbit_p (signbit
) && !signbit
);
3284 // Numbers spanning both positive and negative should have an
3286 r0
= frange_float ("-10", "10");
3288 ASSERT_TRUE (!r0
.signbit_p (signbit
));
3289 r0
.set_varying (float_type_node
);
3290 ASSERT_TRUE (!r0
.signbit_p (signbit
));
3294 range_tests_floats ()
3298 if (HONOR_NANS (float_type_node
))
3300 range_tests_signbit ();
3302 if (HONOR_SIGNED_ZEROS (float_type_node
))
3303 range_tests_signed_zeros ();
3305 // A range of [-INF,+INF] is actually VARYING if no other properties
3307 r0
= frange_float ("-Inf", "+Inf");
3308 ASSERT_TRUE (r0
.varying_p ());
3309 // ...unless it has some special property...
3310 if (HONOR_NANS (r0
.type ()))
3313 ASSERT_FALSE (r0
.varying_p ());
3316 // For most architectures, where float and double are different
3317 // sizes, having the same endpoints does not necessarily mean the
3318 // ranges are equal.
3319 if (!types_compatible_p (float_type_node
, double_type_node
))
3321 r0
= frange_float ("3.0", "3.0", float_type_node
);
3322 r1
= frange_float ("3.0", "3.0", double_type_node
);
3326 // [3,5] U [10,12] = [3,12].
3327 r0
= frange_float ("3", "5");
3328 r1
= frange_float ("10", "12");
3330 ASSERT_EQ (r0
, frange_float ("3", "12"));
3332 // [5,10] U [4,8] = [4,10]
3333 r0
= frange_float ("5", "10");
3334 r1
= frange_float ("4", "8");
3336 ASSERT_EQ (r0
, frange_float ("4", "10"));
3338 // [3,5] U [4,10] = [3,10]
3339 r0
= frange_float ("3", "5");
3340 r1
= frange_float ("4", "10");
3342 ASSERT_EQ (r0
, frange_float ("3", "10"));
3344 // [4,10] U [5,11] = [4,11]
3345 r0
= frange_float ("4", "10");
3346 r1
= frange_float ("5", "11");
3348 ASSERT_EQ (r0
, frange_float ("4", "11"));
3350 // [3,12] ^ [10,12] = [10,12].
3351 r0
= frange_float ("3", "12");
3352 r1
= frange_float ("10", "12");
3354 ASSERT_EQ (r0
, frange_float ("10", "12"));
3356 // [10,12] ^ [11,11] = [11,11]
3357 r0
= frange_float ("10", "12");
3358 r1
= frange_float ("11", "11");
3360 ASSERT_EQ (r0
, frange_float ("11", "11"));
3362 // [10,20] ^ [5,15] = [10,15]
3363 r0
= frange_float ("10", "20");
3364 r1
= frange_float ("5", "15");
3366 ASSERT_EQ (r0
, frange_float ("10", "15"));
3368 // [10,20] ^ [15,25] = [15,20]
3369 r0
= frange_float ("10", "20");
3370 r1
= frange_float ("15", "25");
3372 ASSERT_EQ (r0
, frange_float ("15", "20"));
3374 // [10,20] ^ [21,25] = []
3375 r0
= frange_float ("10", "20");
3377 r1
= frange_float ("21", "25");
3380 ASSERT_TRUE (r0
.undefined_p ());
3382 if (HONOR_INFINITIES (float_type_node
))
3384 // Make sure [-Inf, -Inf] doesn't get normalized.
3385 r0
= frange_float ("-Inf", "-Inf");
3386 ASSERT_TRUE (real_isinf (&r0
.lower_bound (), true));
3387 ASSERT_TRUE (real_isinf (&r0
.upper_bound (), true));
3390 // Test that reading back a global range yields the same result as
3391 // what we wrote into it.
3392 tree ssa
= make_temp_ssa_name (float_type_node
, NULL
, "blah");
3393 r0
.set_varying (float_type_node
);
3395 set_range_info (ssa
, r0
);
3396 get_global_range_query ()->range_of_expr (r1
, ssa
);
3400 // Run floating range tests for various combinations of NAN and INF
3404 range_tests_floats_various ()
3406 int save_finite_math_only
= flag_finite_math_only
;
3408 // Test -ffinite-math-only.
3409 flag_finite_math_only
= 1;
3410 range_tests_floats ();
3411 // Test -fno-finite-math-only.
3412 flag_finite_math_only
= 0;
3413 range_tests_floats ();
3415 flag_finite_math_only
= save_finite_math_only
;
3421 range_tests_irange3 ();
3422 range_tests_int_range_max ();
3423 range_tests_strict_enum ();
3424 range_tests_nonzero_bits ();
3425 range_tests_floats_various ();
3426 range_tests_misc ();
3429 } // namespace selftest
3431 #endif // CHECKING_P