]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/value-range.cc
c++, mingw: Fix up types of dtor hooks to __cxa_{,thread_}atexit/__cxa_throw on mingw...
[thirdparty/gcc.git] / gcc / value-range.cc
1 /* Support routines for value ranges.
2 Copyright (C) 2019-2024 Free Software Foundation, Inc.
3 Major hacks by Aldy Hernandez <aldyh@redhat.com> and
4 Andrew MacLeod <amacleod@redhat.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "ssa.h"
29 #include "tree-pretty-print.h"
30 #include "value-range-pretty-print.h"
31 #include "fold-const.h"
32 #include "gimple-range.h"
33
// Return the bitmask inherent in a range, i.e. what can be deduced
// about the bits purely from the range's endpoints.

static irange_bitmask
get_bitmask_from_range (tree type,
			const wide_int &min, const wide_int &max)
{
  unsigned prec = TYPE_PRECISION (type);

  // All the bits of a singleton are known.
  if (min == max)
    {
      wide_int mask = wi::zero (prec);
      wide_int value = min;
      return irange_bitmask (value, mask);
    }

  // The XOR of the endpoints has a 1 wherever they differ.
  wide_int xorv = min ^ max;

  if (xorv != 0)
    // Widen to a mask covering everything from the highest differing
    // bit on down; all of those bits are considered unknown.
    xorv = wi::mask (prec - wi::clz (xorv), false, prec);

  // Conservative result: unknown bits are the widened difference mask
  // plus any bit set in MIN; the known-value bits are all zero.
  return irange_bitmask (wi::zero (prec), min | xorv);
}
57
// Visitor entry point: dispatch V to its irange overload.

void
irange::accept (const vrange_visitor &v) const
{
  v.visit (*this);
}
63
64 void
65 Value_Range::dump (FILE *out) const
66 {
67 if (m_vrange)
68 m_vrange->dump (out);
69 else
70 fprintf (out, "NULL");
71 }
72
// Dump R to stderr with a trailing newline (debugger convenience).

DEBUG_FUNCTION void
debug (const Value_Range &r)
{
  r.dump (stderr);
  fprintf (stderr, "\n");
}
79
// Dump BM to stderr with a trailing newline (debugger convenience).

DEBUG_FUNCTION void
debug (const irange_bitmask &bm)
{
  bm.dump (stderr);
  fprintf (stderr, "\n");
}
86
87 // Definitions for unsupported_range.
88
// Visitor entry point: dispatch V to its unsupported_range overload.

void
unsupported_range::accept (const vrange_visitor &v) const
{
  v.visit (*this);
}
94
// Default no-op: range kinds that do not track bitmasks ignore updates.

void
vrange::update_bitmask (const class irange_bitmask &)
{
}
99
// Default bitmask for range kinds that do not track one.

irange_bitmask
vrange::get_bitmask () const
{
  // Return all unknown bits for the given precision.
  return irange_bitmask (TYPE_PRECISION (type ()));
}
106
// An unsupported range can only claim containment when it is VARYING.

bool
unsupported_range::contains_p (tree) const
{
  return varying_p ();
}
112
// An unsupported range can never be a singleton.

bool
unsupported_range::singleton_p (tree *) const
{
  return false;
}
118
// Endpoints cannot be represented here; degrade to VARYING of MIN's type.

void
unsupported_range::set (tree min, tree, value_range_kind)
{
  set_varying (TREE_TYPE (min));
}
124
// Unsupported ranges carry no real type; report void.

tree
unsupported_range::type () const
{
  return void_type_node;
}
130
// No type is supported by an unsupported range.

bool
unsupported_range::supports_type_p (const_tree) const
{
  return false;
}
136
// Drop to UNDEFINED; only the kind is tracked for unsupported ranges.

void
unsupported_range::set_undefined ()
{
  m_kind = VR_UNDEFINED;
}
142
// Drop to VARYING; the type argument is irrelevant and ignored.

void
unsupported_range::set_varying (tree)
{
  m_kind = VR_VARYING;
}
148
149 bool
150 unsupported_range::union_ (const vrange &v)
151 {
152 const unsupported_range &r = as_a <unsupported_range> (v);
153
154 if (r.undefined_p () || varying_p ())
155 return false;
156 if (undefined_p () || r.varying_p ())
157 {
158 operator= (r);
159 return true;
160 }
161 gcc_unreachable ();
162 return false;
163 }
164
165 bool
166 unsupported_range::intersect (const vrange &v)
167 {
168 const unsupported_range &r = as_a <unsupported_range> (v);
169
170 if (undefined_p () || r.varying_p ())
171 return false;
172 if (r.undefined_p ())
173 {
174 set_undefined ();
175 return true;
176 }
177 if (varying_p ())
178 {
179 operator= (r);
180 return true;
181 }
182 gcc_unreachable ();
183 return false;
184 }
185
// An unsupported range is never known to be zero.

bool
unsupported_range::zero_p () const
{
  return false;
}
191
// An unsupported range is never known to be nonzero.

bool
unsupported_range::nonzero_p () const
{
  return false;
}
197
// Nonzero cannot be represented; conservatively drop to VARYING.

void
unsupported_range::set_nonzero (tree type)
{
  set_varying (type);
}
203
// Zero cannot be represented; conservatively drop to VARYING.

void
unsupported_range::set_zero (tree type)
{
  set_varying (type);
}
209
// Nonnegative cannot be represented; conservatively drop to VARYING.

void
unsupported_range::set_nonnegative (tree type)
{
  set_varying (type);
}
215
// Any unsupported range fits in another; there is no storage to overflow.

bool
unsupported_range::fits_p (const vrange &) const
{
  return true;
}
221
// Copy assignment: only the UNDEFINED/VARYING state is transferable.

unsupported_range &
unsupported_range::operator= (const unsupported_range &r)
{
  if (r.undefined_p ())
    set_undefined ();
  else if (r.varying_p ())
    set_varying (void_type_node);
  else
    gcc_unreachable ();
  return *this;
}
233
// No lower bound exists for an unsupported range.

tree
unsupported_range::lbound () const
{
  return NULL;
}

// No upper bound exists for an unsupported range.

tree
unsupported_range::ubound () const
{
  return NULL;
}
245
// Assignment operator for generic ranges.  Copying incompatible types
// is not allowed: the dynamic kind of *this must match SRC's, which
// the as_a casts check.

vrange &
vrange::operator= (const vrange &src)
{
  if (is_a <irange> (src))
    as_a <irange> (*this) = as_a <irange> (src);
  else if (is_a <prange> (src))
    as_a <prange> (*this) = as_a <prange> (src);
  else if (is_a <frange> (src))
    as_a <frange> (*this) = as_a <frange> (src);
  else
    {
      // Unsupported ranges carry no payload beyond the kind.
      gcc_checking_assert (is_a <unsupported_range> (src));
      m_kind = src.m_kind;
    }
  return *this;
}
265
// Equality operator for generic ranges.  Dispatches on the dynamic
// kind; comparing an unsupported range is not allowed.

bool
vrange::operator== (const vrange &src) const
{
  if (is_a <irange> (src))
    return as_a <irange> (*this) == as_a <irange> (src);
  if (is_a <prange> (src))
    return as_a <prange> (*this) == as_a <prange> (src);
  if (is_a <frange> (src))
    return as_a <frange> (*this) == as_a <frange> (src);
  gcc_unreachable ();
}
279
280 // Wrapper for vrange_printer to dump a range to a file.
281
282 void
283 vrange::dump (FILE *file) const
284 {
285 pretty_printer buffer;
286 pp_needs_newline (&buffer) = true;
287 buffer.buffer->stream = file;
288 vrange_printer vrange_pp (&buffer);
289 this->accept (vrange_pp);
290 pp_flush (&buffer);
291 }
292
// Dump the bitmask to FILE in "MASK <hex> VALUE <hex>" form.

void
irange_bitmask::dump (FILE *file) const
{
  char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p;
  pretty_printer buffer;

  pp_needs_newline (&buffer) = true;
  buffer.buffer->stream = file;
  pp_string (&buffer, "MASK ");
  unsigned len_mask, len_val;
  // Deliberately non-short-circuiting "|": both calls must execute so
  // LEN_MASK and LEN_VAL are both set before MAX is taken below.
  if (print_hex_buf_size (m_mask, &len_mask)
      | print_hex_buf_size (m_value, &len_val))
    // Either value needs more room than the static buffer provides.
    p = XALLOCAVEC (char, MAX (len_mask, len_val));
  else
    p = buf;
  print_hex (m_mask, p);
  pp_string (&buffer, p);
  pp_string (&buffer, " VALUE ");
  print_hex (m_value, p);
  pp_string (&buffer, p);
  pp_flush (&buffer);
}
315
namespace inchash
{

// Add range V to the hash state HSTATE.  The final (unnamed) argument
// is unused.

void
add_vrange (const vrange &v, inchash::hash &hstate,
	    unsigned int)
{
  if (v.undefined_p ())
    {
      hstate.add_int (VR_UNDEFINED);
      return;
    }
  // Types are ignored throughout to inhibit two ranges being equal
  // but having different hash values.  This can happen when two
  // ranges are equal and their types are different (but
  // types_compatible_p is true).
  if (is_a <irange> (v))
    {
      const irange &r = as_a <irange> (v);
      if (r.varying_p ())
	hstate.add_int (VR_VARYING);
      else
	hstate.add_int (VR_RANGE);
      // Hash every sub-range pair, then the known-bits mask.
      for (unsigned i = 0; i < r.num_pairs (); ++i)
	{
	  hstate.add_wide_int (r.lower_bound (i));
	  hstate.add_wide_int (r.upper_bound (i));
	}
      irange_bitmask bm = r.get_bitmask ();
      hstate.add_wide_int (bm.value ());
      hstate.add_wide_int (bm.mask ());
      return;
    }
  if (is_a <prange> (v))
    {
      const prange &r = as_a <prange> (v);
      if (r.varying_p ())
	hstate.add_int (VR_VARYING);
      else
	{
	  hstate.add_int (VR_RANGE);
	  hstate.add_wide_int (r.lower_bound ());
	  hstate.add_wide_int (r.upper_bound ());
	  irange_bitmask bm = r.get_bitmask ();
	  hstate.add_wide_int (bm.value ());
	  hstate.add_wide_int (bm.mask ());
	}
      return;
    }
  if (is_a <frange> (v))
    {
      const frange &r = as_a <frange> (v);
      if (r.known_isnan ())
	hstate.add_int (VR_NAN);
      else
	{
	  hstate.add_int (r.varying_p () ? VR_VARYING : VR_RANGE);
	  hstate.add_real_value (r.lower_bound ());
	  hstate.add_real_value (r.upper_bound ());
	}
      // The NAN state participates in the hash for all frange kinds.
      nan_state nan = r.get_nan_state ();
      hstate.add_int (nan.pos_p ());
      hstate.add_int (nan.neg_p ());
      return;
    }
  gcc_unreachable ();
}

} //namespace inchash
385
// Return TRUE if the entire range is >= 0 in the type's sign.

bool
irange::nonnegative_p () const
{
  return wi::ge_p (lower_bound (), 0, TYPE_SIGN (type ()));
}
391
// Return TRUE if the entire range is <= 0 in the type's sign.

bool
irange::nonpositive_p () const
{
  return wi::le_p (upper_bound (), 0, TYPE_SIGN (type ()));
}
397
// Virtual wrapper around the static irange::supports_p predicate.

bool
irange::supports_type_p (const_tree type) const
{
  return supports_p (type);
}
403
// Return TRUE if R fits in THIS, i.e. R's sub-range count does not
// exceed our storage capacity.

bool
irange::fits_p (const vrange &r) const
{
  return m_max_ranges >= as_a <irange> (r).num_pairs ();
}
411
// Set the range to [0, TYPE_MAX_VALUE] for TYPE.

void
irange::set_nonnegative (tree type)
{
  set (type,
       wi::zero (TYPE_PRECISION (type)),
       wi::to_wide (TYPE_MAX_VALUE (type)));
}
419
420 // Prange implementation.
421
// Visitor entry point: dispatch V to its prange overload.

void
prange::accept (const vrange_visitor &v) const
{
  v.visit (*this);
}
427
// Pointers are unsigned, so "nonnegative" is the full [0, MAX] range.

void
prange::set_nonnegative (tree type)
{
  set (type,
       wi::zero (TYPE_PRECISION (type)),
       wi::max_value (TYPE_PRECISION (type), UNSIGNED));
}
435
// Tree-based setter; forwards to the wide_int overload using MIN's type.

void
prange::set (tree min, tree max, value_range_kind kind)
{
  return set (TREE_TYPE (min), wi::to_wide (min), wi::to_wide (max), kind);
}
441
// Main setter for pointer ranges.  Establishes the canonical kind and
// derives the inherent bitmask from the endpoints.

void
prange::set (tree type, const wide_int &min, const wide_int &max,
	     value_range_kind kind)
{
  if (kind == VR_UNDEFINED)
    {
      set_undefined ();
      return;
    }
  if (kind == VR_VARYING)
    {
      set_varying (type);
      return;
    }
  if (kind == VR_ANTI_RANGE)
    {
      // The only representable anti-range for pointers is ~[0, 0],
      // i.e. nonzero.
      gcc_checking_assert (min == 0 && max == 0);
      set_nonzero (type);
      return;
    }
  m_type = type;
  m_min = min;
  m_max = max;
  // [0, -1] spans the whole unsigned domain: canonicalize to VARYING.
  if (m_min == 0 && m_max == -1)
    {
      m_kind = VR_VARYING;
      m_bitmask.set_unknown (TYPE_PRECISION (type));
      if (flag_checking)
	verify_range ();
      return;
    }

  m_kind = VR_RANGE;
  m_bitmask = get_bitmask_from_range (type, min, max);
  if (flag_checking)
    verify_range ();
}
479
480 bool
481 prange::contains_p (const wide_int &w) const
482 {
483 if (undefined_p ())
484 return false;
485
486 if (varying_p ())
487 return true;
488
489 return (wi::le_p (lower_bound (), w, UNSIGNED)
490 && wi::ge_p (upper_bound (), w, UNSIGNED));
491 }
492
// If the range is a single value, return TRUE and store it in
// *RESULT (when RESULT is non-NULL).

bool
prange::singleton_p (tree *result) const
{
  if (m_kind == VR_RANGE && lower_bound () == upper_bound ())
    {
      if (result)
	*result = wide_int_to_tree (type (), m_min);
      return true;
    }
  return false;
}
504
// Return the lower bound as a tree constant.

tree
prange::lbound () const
{
  return wide_int_to_tree (type (), m_min);
}

// Return the upper bound as a tree constant.

tree
prange::ubound () const
{
  return wide_int_to_tree (type (), m_max);
}
516
// Union V into this pointer range.  Return TRUE if anything changed.

bool
prange::union_ (const vrange &v)
{
  const prange &r = as_a <prange> (v);

  if (r.undefined_p ())
    return false;
  if (undefined_p ())
    {
      *this = r;
      if (flag_checking)
	verify_range ();
      return true;
    }
  if (varying_p ())
    return false;
  if (r.varying_p ())
    {
      set_varying (type ());
      return true;
    }

  // The union is the convex hull of the endpoints, with the bitmasks
  // of both operands merged in.
  wide_int new_lb = wi::min (r.lower_bound (), lower_bound (), UNSIGNED);
  wide_int new_ub = wi::max (r.upper_bound (), upper_bound (), UNSIGNED);
  prange new_range (type (), new_lb, new_ub);
  new_range.m_bitmask.union_ (m_bitmask);
  new_range.m_bitmask.union_ (r.m_bitmask);
  // Canonicalize a full-domain result to VARYING.
  if (new_range.varying_compatible_p ())
    {
      set_varying (type ());
      return true;
    }
  if (flag_checking)
    new_range.verify_range ();
  if (new_range == *this)
    return false;
  *this = new_range;
  return true;
}
556
// Intersect V into this pointer range.  Return TRUE if anything changed.

bool
prange::intersect (const vrange &v)
{
  const prange &r = as_a <prange> (v);
  gcc_checking_assert (undefined_p () || r.undefined_p ()
		       || range_compatible_p (type (), r.type ()));

  if (undefined_p ())
    return false;
  if (r.undefined_p ())
    {
      set_undefined ();
      return true;
    }
  if (r.varying_p ())
    return false;
  if (varying_p ())
    {
      *this = r;
      return true;
    }

  // Keep a copy so we can report whether anything changed.
  prange save = *this;
  m_min = wi::max (r.lower_bound (), lower_bound (), UNSIGNED);
  m_max = wi::min (r.upper_bound (), upper_bound (), UNSIGNED);
  // Disjoint operands leave an empty (UNDEFINED) range.
  if (wi::gt_p (m_min, m_max, UNSIGNED))
    {
      set_undefined ();
      return true;
    }

  // Intersect all bitmasks: the old one, the new one, and the other operand's.
  irange_bitmask new_bitmask = get_bitmask_from_range (m_type, m_min, m_max);
  m_bitmask.intersect (new_bitmask);
  m_bitmask.intersect (r.m_bitmask);

  if (flag_checking)
    verify_range ();
  if (*this == save)
    return false;
  return true;
}
599
600 prange &
601 prange::operator= (const prange &src)
602 {
603 m_type = src.m_type;
604 m_kind = src.m_kind;
605 m_min = src.m_min;
606 m_max = src.m_max;
607 m_bitmask = src.m_bitmask;
608 if (flag_checking)
609 verify_range ();
610 return *this;
611 }
612
613 bool
614 prange::operator== (const prange &src) const
615 {
616 if (m_kind == src.m_kind)
617 {
618 if (undefined_p ())
619 return true;
620
621 if (varying_p ())
622 return types_compatible_p (type (), src.type ());
623
624 return (m_min == src.m_min && m_max == src.m_max
625 && m_bitmask == src.m_bitmask);
626 }
627 return false;
628 }
629
630 void
631 prange::invert ()
632 {
633 gcc_checking_assert (!undefined_p () && !varying_p ());
634
635 wide_int new_lb, new_ub;
636 unsigned prec = TYPE_PRECISION (type ());
637 wide_int type_min = wi::zero (prec);
638 wide_int type_max = wi::max_value (prec, UNSIGNED);
639 wi::overflow_type ovf;
640
641 if (lower_bound () == type_min)
642 {
643 new_lb = wi::add (upper_bound (), 1, UNSIGNED, &ovf);
644 if (ovf)
645 new_lb = type_min;
646 new_ub = type_max;
647 set (type (), new_lb, new_ub);
648 }
649 else if (upper_bound () == type_max)
650 {
651 wi::overflow_type ovf;
652 new_lb = type_min;
653 new_ub = wi::sub (lower_bound (), 1, UNSIGNED, &ovf);
654 if (ovf)
655 new_ub = type_max;
656 set (type (), new_lb, new_ub);
657 }
658 else
659 set_varying (type ());
660 }
661
// Sanity-check the internal invariants of a pointer range.

void
prange::verify_range () const
{
  gcc_checking_assert (m_discriminator == VR_PRANGE);

  // UNDEFINED carries no further state to check.
  if (m_kind == VR_UNDEFINED)
    return;

  gcc_checking_assert (supports_p (type ()));

  if (m_kind == VR_VARYING)
    {
      gcc_checking_assert (varying_compatible_p ());
      return;
    }
  // A VR_RANGE must not secretly span the whole domain, and no other
  // kinds are valid for prange.
  gcc_checking_assert (!varying_compatible_p ());
  gcc_checking_assert (m_kind == VR_RANGE);
}
680
681 void
682 prange::update_bitmask (const irange_bitmask &bm)
683 {
684 gcc_checking_assert (!undefined_p ());
685
686 // If all the bits are known, this is a singleton.
687 if (bm.mask () == 0)
688 {
689 set (type (), m_bitmask.value (), m_bitmask.value ());
690 return;
691 }
692
693 // Drop VARYINGs with known bits to a plain range.
694 if (m_kind == VR_VARYING && !bm.unknown_p ())
695 m_kind = VR_RANGE;
696
697 m_bitmask = bm;
698 if (varying_compatible_p ())
699 m_kind = VR_VARYING;
700
701 if (flag_checking)
702 verify_range ();
703 }
704
705
706 // Frange implementation.
707
// Visitor entry point: dispatch V to its frange overload.

void
frange::accept (const vrange_visitor &v) const
{
  v.visit (*this);
}
713
// Any frange fits; there is no variable-sized storage.

bool
frange::fits_p (const vrange &) const
{
  return true;
}
719
// Flush denormal endpoints to the appropriate 0.0, preserving the
// sign of zero only when the type honors signed zeros.

void
frange::flush_denormals_to_zero ()
{
  if (undefined_p () || known_isnan ())
    return;

  machine_mode mode = TYPE_MODE (type ());
  // Flush [x, -DENORMAL] to [x, -0.0].
  if (real_isdenormal (&m_max, mode) && real_isneg (&m_max))
    {
      if (HONOR_SIGNED_ZEROS (m_type))
	m_max = dconstm0;
      else
	m_max = dconst0;
    }
  // Flush [+DENORMAL, x] to [+0.0, x].
  if (real_isdenormal (&m_min, mode) && !real_isneg (&m_min))
    m_min = dconst0;
}
741
// Setter for franges.  Set the range to [MIN, MAX] with NAN state
// NAN, canonicalizing the endpoints per the type's floating point
// semantics (signed zeros, infinities) and normalizing the kind.

void
frange::set (tree type,
	     const REAL_VALUE_TYPE &min, const REAL_VALUE_TYPE &max,
	     const nan_state &nan, value_range_kind kind)
{
  switch (kind)
    {
    case VR_UNDEFINED:
      set_undefined ();
      return;
    case VR_VARYING:
    case VR_ANTI_RANGE:
      // Anti-ranges are not representable for floats; drop to VARYING.
      set_varying (type);
      return;
    case VR_RANGE:
      break;
    default:
      gcc_unreachable ();
    }

  // NANs cannot appear as explicit endpoints.
  gcc_checking_assert (!real_isnan (&min) && !real_isnan (&max));

  m_kind = kind;
  m_type = type;
  m_min = min;
  m_max = max;
  if (HONOR_NANS (m_type))
    {
      m_pos_nan = nan.pos_p ();
      m_neg_nan = nan.neg_p ();
    }
  else
    {
      m_pos_nan = false;
      m_neg_nan = false;
    }

  if (!MODE_HAS_SIGNED_ZEROS (TYPE_MODE (m_type)))
    {
      // The mode cannot represent -0.0; flush any negative zero
      // endpoint to +0.0.
      if (real_iszero (&m_min, 1))
	m_min.sign = 0;
      if (real_iszero (&m_max, 1))
	m_max.sign = 0;
    }
  else if (!HONOR_SIGNED_ZEROS (m_type))
    {
      // The sign of zero is insignificant, so canonicalize zero
      // endpoints to the widest form: -0.0 for the lower bound,
      // +0.0 for the upper bound.
      if (real_iszero (&m_max, 1))
	m_max.sign = 0;
      if (real_iszero (&m_min, 0))
	m_min.sign = 1;
    }

  // For -ffinite-math-only we can drop ranges outside the
  // representable numbers to min/max for the type.
  if (!HONOR_INFINITIES (m_type))
    {
      REAL_VALUE_TYPE min_repr = frange_val_min (m_type);
      REAL_VALUE_TYPE max_repr = frange_val_max (m_type);
      if (real_less (&m_min, &min_repr))
	m_min = min_repr;
      else if (real_less (&max_repr, &m_min))
	m_min = max_repr;
      if (real_less (&max_repr, &m_max))
	m_max = max_repr;
      else if (real_less (&m_max, &min_repr))
	m_max = min_repr;
    }

  // Check for swapped ranges.
  gcc_checking_assert (real_compare (LE_EXPR, &min, &max));

  normalize_kind ();
}
817
// Setter for an frange defaulting the NAN possibility to +-NAN when
// HONOR_NANS.

void
frange::set (tree type,
	     const REAL_VALUE_TYPE &min, const REAL_VALUE_TYPE &max,
	     value_range_kind kind)
{
  set (type, min, max, nan_state (true), kind);
}
828
// Tree-based setter; extracts the REAL_VALUE_TYPEs and MIN's type.

void
frange::set (tree min, tree max, value_range_kind kind)
{
  set (TREE_TYPE (min),
       *TREE_REAL_CST_PTR (min), *TREE_REAL_CST_PTR (max), kind);
}
835
// Normalize range to VARYING or UNDEFINED, or vice versa.  Return
// TRUE if anything changed.
//
// A range with no known properties can be dropped to VARYING.
// Similarly, a VARYING with any properties should be dropped to a
// VR_RANGE.  Normalizing ranges upon changing them ensures there is
// only one representation for a given range.

bool
frange::normalize_kind ()
{
  if (m_kind == VR_RANGE
      && frange_val_is_min (m_min, m_type)
      && frange_val_is_max (m_max, m_type))
    {
      // A full-domain range with full NAN possibilities is VARYING.
      if (!HONOR_NANS (m_type) || (m_pos_nan && m_neg_nan))
	{
	  set_varying (m_type);
	  return true;
	}
    }
  else if (m_kind == VR_VARYING)
    {
      // A VARYING that excludes some NAN is really a VR_RANGE over
      // the full domain.
      if (HONOR_NANS (m_type) && (!m_pos_nan || !m_neg_nan))
	{
	  m_kind = VR_RANGE;
	  m_min = frange_val_min (m_type);
	  m_max = frange_val_max (m_type);
	  if (flag_checking)
	    verify_range ();
	  return true;
	}
    }
  else if (m_kind == VR_NAN && !m_pos_nan && !m_neg_nan)
    // A NAN with neither sign possible is an empty range.
    set_undefined ();
  return false;
}
873
// Union or intersect the zero endpoints of two ranges.  For example:
//   [-0, x] U [+0, x] => [-0, x]
//   [ x, -0] U [ x, +0] => [ x, +0]
//   [-0, x] ^ [+0, x] => [+0, x]
//   [ x, -0] ^ [ x, +0] => [ x, -0]
//
// UNION_P is true when performing a union, or false when intersecting.

bool
frange::combine_zeros (const frange &r, bool union_p)
{
  gcc_checking_assert (!undefined_p () && !known_isnan ());

  bool changed = false;
  // A union widens the lower bound to -0.0 (sign bit 1); an
  // intersection narrows it to +0.0 (sign bit 0).
  if (real_iszero (&m_min) && real_iszero (&r.m_min)
      && real_isneg (&m_min) != real_isneg (&r.m_min))
    {
      m_min.sign = union_p;
      changed = true;
    }
  // Symmetrically for the upper bound: union keeps +0.0, intersection
  // keeps -0.0.
  if (real_iszero (&m_max) && real_iszero (&r.m_max)
      && real_isneg (&m_max) != real_isneg (&r.m_max))
    {
      m_max.sign = !union_p;
      changed = true;
    }
  // If the signs are swapped, the resulting range is empty.
  if (m_min.sign == 0 && m_max.sign == 1)
    {
      // Only a possible NAN survives an empty numeric range.
      if (maybe_isnan ())
	m_kind = VR_NAN;
      else
	set_undefined ();
      changed = true;
    }
  return changed;
}
911
// Union two ranges when one is known to be a NAN.  Return TRUE if
// anything changed.

bool
frange::union_nans (const frange &r)
{
  gcc_checking_assert (known_isnan () || r.known_isnan ());

  bool changed = false;
  // If THIS is the pure NAN, adopt R's kind and endpoints; the NAN
  // possibilities are merged below.
  if (known_isnan () && m_kind != r.m_kind)
    {
      m_kind = r.m_kind;
      m_min = r.m_min;
      m_max = r.m_max;
      changed = true;
    }
  if (m_pos_nan != r.m_pos_nan || m_neg_nan != r.m_neg_nan)
    {
      m_pos_nan |= r.m_pos_nan;
      m_neg_nan |= r.m_neg_nan;
      changed = true;
    }
  if (changed)
    {
      normalize_kind ();
      return true;
    }
  return false;
}
940
// Union V into this frange.  Return TRUE if anything changed.

bool
frange::union_ (const vrange &v)
{
  const frange &r = as_a <frange> (v);

  if (r.undefined_p () || varying_p ())
    return false;
  if (undefined_p () || r.varying_p ())
    {
      *this = r;
      return true;
    }

  // Combine NAN info.
  if (known_isnan () || r.known_isnan ())
    return union_nans (r);
  bool changed = false;
  if (m_pos_nan != r.m_pos_nan || m_neg_nan != r.m_neg_nan)
    {
      m_pos_nan |= r.m_pos_nan;
      m_neg_nan |= r.m_neg_nan;
      changed = true;
    }

  // Combine endpoints: widen to the convex hull.
  if (real_less (&r.m_min, &m_min))
    {
      m_min = r.m_min;
      changed = true;
    }
  if (real_less (&m_max, &r.m_max))
    {
      m_max = r.m_max;
      changed = true;
    }

  // Zero endpoints of differing sign need merging as well.
  if (HONOR_SIGNED_ZEROS (m_type))
    changed |= combine_zeros (r, true);

  changed |= normalize_kind ();
  return changed;
}
983
// Intersect two ranges when one is known to be a NAN.  The result can
// only contain the NAN possibilities common to both; if none remain,
// the result is UNDEFINED.  Always returns TRUE (something changed).

bool
frange::intersect_nans (const frange &r)
{
  gcc_checking_assert (known_isnan () || r.known_isnan ());

  m_pos_nan &= r.m_pos_nan;
  m_neg_nan &= r.m_neg_nan;
  if (maybe_isnan ())
    m_kind = VR_NAN;
  else
    set_undefined ();
  if (flag_checking)
    verify_range ();
  return true;
}
1001
// Intersect V into this frange.  Return TRUE if anything changed.

bool
frange::intersect (const vrange &v)
{
  const frange &r = as_a <frange> (v);

  if (undefined_p () || r.varying_p ())
    return false;
  if (r.undefined_p ())
    {
      set_undefined ();
      return true;
    }
  if (varying_p ())
    {
      *this = r;
      return true;
    }

  // Combine NAN info.
  if (known_isnan () || r.known_isnan ())
    return intersect_nans (r);
  bool changed = false;
  if (m_pos_nan != r.m_pos_nan || m_neg_nan != r.m_neg_nan)
    {
      m_pos_nan &= r.m_pos_nan;
      m_neg_nan &= r.m_neg_nan;
      changed = true;
    }

  // Combine endpoints: narrow to the overlap.
  if (real_less (&m_min, &r.m_min))
    {
      m_min = r.m_min;
      changed = true;
    }
  if (real_less (&r.m_max, &m_max))
    {
      m_max = r.m_max;
      changed = true;
    }
  // If the endpoints are swapped, the resulting range is empty.
  if (real_less (&m_max, &m_min))
    {
      // Only a possible NAN survives an empty numeric intersection.
      if (maybe_isnan ())
	m_kind = VR_NAN;
      else
	set_undefined ();
      if (flag_checking)
	verify_range ();
      return true;
    }

  if (HONOR_SIGNED_ZEROS (m_type))
    changed |= combine_zeros (r, false);

  changed |= normalize_kind ();
  return changed;
}
1060
1061 frange &
1062 frange::operator= (const frange &src)
1063 {
1064 m_kind = src.m_kind;
1065 m_type = src.m_type;
1066 m_min = src.m_min;
1067 m_max = src.m_max;
1068 m_pos_nan = src.m_pos_nan;
1069 m_neg_nan = src.m_neg_nan;
1070
1071 if (flag_checking)
1072 verify_range ();
1073 return *this;
1074 }
1075
// Equality for franges: kinds must match, and either both are
// UNDEFINED, both VARYING over compatible types, matching pure NANs,
// or bitwise-identical endpoints plus identical NAN state.

bool
frange::operator== (const frange &src) const
{
  if (m_kind == src.m_kind)
    {
      if (undefined_p ())
	return true;

      if (varying_p ())
	return types_compatible_p (m_type, src.m_type);

      bool nan1 = known_isnan ();
      bool nan2 = src.known_isnan ();
      if (nan1 || nan2)
	{
	  // Pure NANs compare by which NAN signs are possible.
	  if (nan1 && nan2)
	    return (m_pos_nan == src.m_pos_nan
		    && m_neg_nan == src.m_neg_nan);
	  return false;
	}

      // real_identical distinguishes -0.0 from +0.0, unlike ==.
      return (real_identical (&m_min, &src.m_min)
	      && real_identical (&m_max, &src.m_max)
	      && m_pos_nan == src.m_pos_nan
	      && m_neg_nan == src.m_neg_nan
	      && types_compatible_p (m_type, src.m_type));
    }
  return false;
}
1105
// Return TRUE if range contains R.

bool
frange::contains_p (const REAL_VALUE_TYPE &r) const
{
  gcc_checking_assert (m_kind != VR_ANTI_RANGE);

  if (undefined_p ())
    return false;

  if (varying_p ())
    return true;

  if (real_isnan (&r))
    {
      // No NAN in range.
      if (!m_pos_nan && !m_neg_nan)
	return false;
      // Both +NAN and -NAN are present.
      if (m_pos_nan && m_neg_nan)
	return true;
      // Exactly one NAN sign is possible; R's sign bit (1 for
      // negative) must match the one we track.
      return m_neg_nan == r.sign;
    }
  if (known_isnan ())
    return false;

  if (real_compare (GE_EXPR, &r, &m_min) && real_compare (LE_EXPR, &r, &m_max))
    {
      // Make sure the signs are equal for signed zeros.
      if (HONOR_SIGNED_ZEROS (m_type) && real_iszero (&r))
	return r.sign == m_min.sign || r.sign == m_max.sign;
      return true;
    }
  return false;
}
1141
// If range is a singleton, place it in RESULT and return TRUE.  If
// RESULT is NULL, just return TRUE.
//
// A NAN can never be a singleton.

bool
frange::internal_singleton_p (REAL_VALUE_TYPE *result) const
{
  if (m_kind == VR_RANGE && real_identical (&m_min, &m_max))
    {
      // Return false for any singleton that may be a NAN.
      if (HONOR_NANS (m_type) && maybe_isnan ())
	return false;

      if (MODE_COMPOSITE_P (TYPE_MODE (m_type)))
	{
	  // For IBM long doubles, if the value is +-Inf or is exactly
	  // representable in double, the other double could be +0.0
	  // or -0.0.  Since this means there is more than one way to
	  // represent a value, return false to avoid propagating it.
	  // See libgcc/config/rs6000/ibm-ldouble-format for details.
	  if (real_isinf (&m_min))
	    return false;
	  REAL_VALUE_TYPE r;
	  real_convert (&r, DFmode, &m_min);
	  if (real_identical (&r, &m_min))
	    return false;
	}

      if (result)
	*result = m_min;
      return true;
    }
  return false;
}
1177
// If range is a singleton, store it as a tree constant in *RESULT
// (when non-NULL) and return TRUE.

bool
frange::singleton_p (tree *result) const
{
  if (internal_singleton_p ())
    {
      if (result)
	*result = build_real (m_type, m_min);
      return true;
    }
  return false;
}
1189
// REAL_VALUE_TYPE variant of singleton_p.

bool
frange::singleton_p (REAL_VALUE_TYPE &r) const
{
  return internal_singleton_p (&r);
}
1195
// Virtual wrapper around the static frange::supports_p predicate.

bool
frange::supports_type_p (const_tree type) const
{
  return supports_p (type);
}
1201
// Sanity-check the internal invariants of an frange.

void
frange::verify_range ()
{
  // NAN possibilities require a type that honors NANs.
  if (!undefined_p ())
    gcc_checking_assert (HONOR_NANS (m_type) || !maybe_isnan ());
  switch (m_kind)
    {
    case VR_UNDEFINED:
      gcc_checking_assert (!m_type);
      return;
    case VR_VARYING:
      // VARYING must span the whole domain with full NAN state.
      gcc_checking_assert (m_type);
      gcc_checking_assert (frange_val_is_min (m_min, m_type));
      gcc_checking_assert (frange_val_is_max (m_max, m_type));
      if (HONOR_NANS (m_type))
	gcc_checking_assert (m_pos_nan && m_neg_nan);
      else
	gcc_checking_assert (!m_pos_nan && !m_neg_nan);
      return;
    case VR_RANGE:
      gcc_checking_assert (m_type);
      break;
    case VR_NAN:
      // A pure NAN must have at least one possible sign.
      gcc_checking_assert (m_type);
      gcc_checking_assert (m_pos_nan || m_neg_nan);
      return;
    default:
      gcc_unreachable ();
    }

  // NANs cannot appear in the endpoints of a range.
  gcc_checking_assert (!real_isnan (&m_min) && !real_isnan (&m_max));

  // Make sure we don't have swapped ranges.
  gcc_checking_assert (!real_less (&m_max, &m_min));

  // [ +0.0, -0.0 ] is nonsensical.
  gcc_checking_assert (!(real_iszero (&m_min, 0) && real_iszero (&m_max, 1)));

  // If all the properties are clear, we better not span the entire
  // domain, because that would make us varying.
  if (m_pos_nan && m_neg_nan)
    gcc_checking_assert (!frange_val_is_min (m_min, m_type)
			 || !frange_val_is_max (m_max, m_type));
}
1247
// We can't do much with nonzeros yet; conservatively drop to VARYING.
void
frange::set_nonzero (tree type)
{
  set_varying (type);
}
1254
// We can't do much with nonzeros yet; never claim to be nonzero.
bool
frange::nonzero_p () const
{
  return false;
}
1261
// Set range to [-0.0, +0.0] if honoring signed zeros, or [0.0, 0.0]
// otherwise.

void
frange::set_zero (tree type)
{
  if (HONOR_SIGNED_ZEROS (type))
    {
      // Cover both zeros, and explicitly exclude any NAN.
      set (type, dconstm0, dconst0);
      clear_nan ();
    }
  else
    set (type, dconst0, dconst0);
}
1276
// Return TRUE for any zero regardless of sign.

bool
frange::zero_p () const
{
  return (m_kind == VR_RANGE
	  && real_iszero (&m_min)
	  && real_iszero (&m_max));
}
1286
// Set the range to non-negative numbers, that is [+0.0, +INF].
//
// The NAN in the resulting range (if HONOR_NANS) has a varying sign
// as there are no guarantees in IEEE 754 wrt to the sign of a NAN,
// except for copy, abs, and copysign.  It is the responsibility of
// the caller to set the NAN's sign if desired.

void
frange::set_nonnegative (tree type)
{
  set (type, dconst0, frange_val_max (type));
}
1299
// Return the lower bound as a tree constant.

tree
frange::lbound () const
{
  return build_real (type (), lower_bound ());
}

// Return the upper bound as a tree constant.

tree
frange::ubound () const
{
  return build_real (type (), upper_bound ());
}
1311
// Here we copy between any two irange's.  If SRC has more sub-ranges
// than fit in our storage, the copy is conservatively truncated: the
// kept final pair's upper bound is replaced with SRC's overall upper
// bound so the result still covers everything SRC did.

irange &
irange::operator= (const irange &src)
{
  int needed = src.num_pairs ();
  maybe_resize (needed);

  unsigned x;
  unsigned lim = src.m_num_ranges;
  if (lim > m_max_ranges)
    lim = m_max_ranges;

  // Each pair occupies two slots in m_base.
  for (x = 0; x < lim * 2; ++x)
    m_base[x] = src.m_base[x];

  // If the range didn't fit, the last range should cover the rest.
  if (lim != src.m_num_ranges)
    m_base[x - 1] = src.m_base[src.m_num_ranges * 2 - 1];

  m_num_ranges = lim;
  m_type = src.m_type;
  m_kind = src.m_kind;
  m_bitmask = src.m_bitmask;
  if (m_max_ranges == 1)
    normalize_kind ();
  if (flag_checking)
    verify_range ();
  return *this;
}
1342
// Convert irange R into a legacy (kind, MIN, MAX) triplet.  A
// multi-pair range spanning the entire domain is reported as the
// VR_ANTI_RANGE of its inverse; everything else becomes a plain
// VR_RANGE over the overall bounds.

static value_range_kind
get_legacy_range (const irange &r, tree &min, tree &max)
{
  if (r.undefined_p ())
    {
      min = NULL_TREE;
      max = NULL_TREE;
      return VR_UNDEFINED;
    }

  tree type = r.type ();
  if (r.varying_p ())
    {
      min = wide_int_to_tree (type, r.lower_bound ());
      max = wide_int_to_tree (type, r.upper_bound ());
      return VR_VARYING;
    }

  unsigned int precision = TYPE_PRECISION (type);
  signop sign = TYPE_SIGN (type);
  // A multi-pair range touching both domain extremes is really an
  // anti-range; recover it by inverting.
  if (r.num_pairs () > 1
      && precision > 1
      && r.lower_bound () == wi::min_value (precision, sign)
      && r.upper_bound () == wi::max_value (precision, sign))
    {
      int_range<3> inv (r);
      inv.invert ();
      min = wide_int_to_tree (type, inv.lower_bound (0));
      max = wide_int_to_tree (type, inv.upper_bound (0));
      return VR_ANTI_RANGE;
    }

  min = wide_int_to_tree (type, r.lower_bound ());
  max = wide_int_to_tree (type, r.upper_bound ());
  return VR_RANGE;
}
1379
// Convert prange R into the legacy (kind, MIN, MAX) triplet.  Pointer
// ranges map to VR_RANGE, except a known-nonzero pointer, which is
// expressed as the anti-range ~[0, 0].

static value_range_kind
get_legacy_range (const prange &r, tree &min, tree &max)
{
  if (r.undefined_p ())
    {
      min = NULL_TREE;
      max = NULL_TREE;
      return VR_UNDEFINED;
    }

  tree type = r.type ();
  if (r.varying_p ())
    {
      min = r.lbound ();
      max = r.ubound ();
      return VR_VARYING;
    }
  // A NULL pointer is the singleton [0, 0].
  if (r.zero_p ())
    {
      min = max = r.lbound ();
      return VR_RANGE;
    }
  // A known non-NULL pointer is everything but zero: ~[0, 0].
  if (r.nonzero_p ())
    {
      min = max = build_zero_cst (type);
      return VR_ANTI_RANGE;
    }
  min = r.lbound ();
  max = r.ubound ();
  return VR_RANGE;
}
1411
1412 // Given a range in V, return an old-style legacy range consisting of
1413 // a value_range_kind with a MIN/MAX. This is to maintain
1414 // compatibility with passes that still depend on VR_ANTI_RANGE, and
1415 // only works for integers and pointers.
1416
1417 value_range_kind
1418 get_legacy_range (const vrange &v, tree &min, tree &max)
1419 {
1420 if (is_a <irange> (v))
1421 return get_legacy_range (as_a <irange> (v), min, max);
1422
1423 return get_legacy_range (as_a <prange> (v), min, max);
1424 }
1425
/* Set value range to the canonical form of {VRTYPE, MIN, MAX, EQUIV}.
   This means adjusting VRTYPE, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */

void
irange::set (tree type, const wide_int &min, const wide_int &max,
	     value_range_kind kind)
{
  unsigned prec = TYPE_PRECISION (type);
  signop sign = TYPE_SIGN (type);
  wide_int min_value = wi::min_value (prec, sign);
  wide_int max_value = wi::max_value (prec, sign);

  m_type = type;
  // Start with no bit information; callers refine it separately.
  m_bitmask.set_unknown (prec);

  if (kind == VR_RANGE)
    {
      m_base[0] = min;
      m_base[1] = max;
      m_num_ranges = 1;
      // A range spanning the entire domain is really VARYING.
      if (min == min_value && max == max_value)
	m_kind = VR_VARYING;
      else
	m_kind = VR_RANGE;
    }
  else
    {
      gcc_checking_assert (kind == VR_ANTI_RANGE);
      gcc_checking_assert (m_max_ranges > 1);

      // Build ~[MIN, MAX] as [type_min, MIN-1] U [MAX+1, type_max],
      // dropping whichever piece overflows (i.e. when MIN is type_min
      // or MAX is type_max).
      m_kind = VR_UNDEFINED;
      m_num_ranges = 0;
      wi::overflow_type ovf;
      wide_int lim;
      // For signed types compute MIN-1 as MIN + (-1) so underflow at
      // the signed minimum is reported.
      if (sign == SIGNED)
	lim = wi::add (min, -1, sign, &ovf);
      else
	lim = wi::sub (min, 1, sign, &ovf);

      if (!ovf)
	{
	  m_kind = VR_RANGE;
	  m_base[0] = min_value;
	  m_base[1] = lim;
	  ++m_num_ranges;
	}
      // Likewise MAX+1 for signed types is MAX - (-1).
      if (sign == SIGNED)
	lim = wi::sub (max, -1, sign, &ovf);
      else
	lim = wi::add (max, 1, sign, &ovf);
      if (!ovf)
	{
	  m_kind = VR_RANGE;
	  m_base[m_num_ranges * 2] = lim;
	  m_base[m_num_ranges * 2 + 1] = max_value;
	  ++m_num_ranges;
	}
    }

  if (flag_checking)
    verify_range ();
}
1494
1495 void
1496 irange::set (tree min, tree max, value_range_kind kind)
1497 {
1498 if (POLY_INT_CST_P (min) || POLY_INT_CST_P (max))
1499 {
1500 set_varying (TREE_TYPE (min));
1501 return;
1502 }
1503
1504 gcc_checking_assert (TREE_CODE (min) == INTEGER_CST);
1505 gcc_checking_assert (TREE_CODE (max) == INTEGER_CST);
1506
1507 return set (TREE_TYPE (min), wi::to_wide (min), wi::to_wide (max), kind);
1508 }
1509
// Check the validity of the range.  Aborts (under checking) if any
// internal invariant of the representation is violated.

void
irange::verify_range ()
{
  gcc_checking_assert (m_discriminator == VR_IRANGE);
  // UNDEFINED is the only kind with zero subranges.
  if (m_kind == VR_UNDEFINED)
    {
      gcc_checking_assert (m_num_ranges == 0);
      return;
    }
  gcc_checking_assert (m_num_ranges <= m_max_ranges);

  // Legacy allowed these to represent VARYING for unknown types.
  // Leave this in for now, until all users are converted.  Eventually
  // we should abort in set_varying.
  if (m_kind == VR_VARYING && m_type == error_mark_node)
    return;

  unsigned prec = TYPE_PRECISION (m_type);
  if (m_kind == VR_VARYING)
    {
      // VARYING must be a single full-domain pair with no bit info.
      gcc_checking_assert (m_bitmask.unknown_p ());
      gcc_checking_assert (m_num_ranges == 1);
      gcc_checking_assert (varying_compatible_p ());
      gcc_checking_assert (lower_bound ().get_precision () == prec);
      gcc_checking_assert (upper_bound ().get_precision () == prec);
      return;
    }
  // A VR_RANGE must have at least one pair and must not be
  // expressible as VARYING.
  gcc_checking_assert (m_num_ranges != 0);
  gcc_checking_assert (!varying_compatible_p ());
  for (unsigned i = 0; i < m_num_ranges; ++i)
    {
      wide_int lb = lower_bound (i);
      wide_int ub = upper_bound (i);
      // Endpoints carry the type's precision and each pair is
      // ordered: lower bound <= upper bound.
      gcc_checking_assert (lb.get_precision () == prec);
      gcc_checking_assert (ub.get_precision () == prec);
      int c = wi::cmp (lb, ub, TYPE_SIGN (m_type));
      gcc_checking_assert (c == 0 || c == -1);
    }
  m_bitmask.verify_mask ();
}
1552
// Compare two iranges for equality.  Bounds and bitmasks are
// compared after extension into widest_int using each range's own
// sign, so ranges of different-but-compatible types compare equal
// when they denote the same set of values.

bool
irange::operator== (const irange &other) const
{
  if (m_num_ranges != other.m_num_ranges)
    return false;

  // Both UNDEFINED (zero subranges) compare equal; also, type ()
  // must not be queried on an undefined range.
  if (m_num_ranges == 0)
    return true;

  signop sign1 = TYPE_SIGN (type ());
  signop sign2 = TYPE_SIGN (other.type ());

  for (unsigned i = 0; i < m_num_ranges; ++i)
    {
      widest_int lb = widest_int::from (lower_bound (i), sign1);
      widest_int ub = widest_int::from (upper_bound (i), sign1);
      widest_int lb_other = widest_int::from (other.lower_bound (i), sign2);
      widest_int ub_other = widest_int::from (other.upper_bound (i), sign2);
      if (lb != lb_other || ub != ub_other)
	return false;
    }

  // The endpoints agree; equality now hinges on the semantic
  // bitmasks (see irange::get_bitmask).
  irange_bitmask bm1 = get_bitmask ();
  irange_bitmask bm2 = other.get_bitmask ();
  widest_int tmp1 = widest_int::from (bm1.mask (), sign1);
  widest_int tmp2 = widest_int::from (bm2.mask (), sign2);
  if (tmp1 != tmp2)
    return false;
  // If both masks are fully unknown, the value bits are irrelevant.
  if (bm1.unknown_p ())
    return true;
  tmp1 = widest_int::from (bm1.value (), sign1);
  tmp2 = widest_int::from (bm2.value (), sign2);
  return tmp1 == tmp2;
}
1587
1588 /* If range is a singleton, place it in RESULT and return TRUE. */
1589
1590 bool
1591 irange::singleton_p (tree *result) const
1592 {
1593 if (num_pairs () == 1 && lower_bound () == upper_bound ())
1594 {
1595 if (result)
1596 *result = wide_int_to_tree (type (), lower_bound ());
1597 return true;
1598 }
1599 return false;
1600 }
1601
1602 bool
1603 irange::singleton_p (wide_int &w) const
1604 {
1605 if (num_pairs () == 1 && lower_bound () == upper_bound ())
1606 {
1607 w = lower_bound ();
1608 return true;
1609 }
1610 return false;
1611 }
1612
/* Return 1 if CST is inside value range.
   0 if CST is not inside value range.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

bool
irange::contains_p (const wide_int &cst) const
{
  if (undefined_p ())
    return false;

  // See if we can exclude CST based on the known 0 bits.
  if (!m_bitmask.unknown_p ()
      && cst != 0
      && wi::bit_and (m_bitmask.get_nonzero_bits (), cst) == 0)
    return false;

  // Subranges are sorted, so once CST is not above a subrange's upper
  // bound, membership depends only on the lower bound checked first.
  signop sign = TYPE_SIGN (type ());
  for (unsigned r = 0; r < m_num_ranges; ++r)
    {
      if (wi::lt_p (cst, lower_bound (r), sign))
	return false;
      if (wi::le_p (cst, upper_bound (r), sign))
	return true;
    }

  // CST is above the last subrange.
  return false;
}
1642
1643 // Perform an efficient union with R when both ranges have only a single pair.
1644 // Excluded are VARYING and UNDEFINED ranges.
1645
1646 bool
1647 irange::irange_single_pair_union (const irange &r)
1648 {
1649 gcc_checking_assert (!undefined_p () && !varying_p ());
1650 gcc_checking_assert (!r.undefined_p () && !varying_p ());
1651
1652 signop sign = TYPE_SIGN (m_type);
1653 // Check if current lower bound is also the new lower bound.
1654 if (wi::le_p (m_base[0], r.m_base[0], sign))
1655 {
1656 // If current upper bound is new upper bound, we're done.
1657 if (wi::le_p (r.m_base[1], m_base[1], sign))
1658 return union_bitmask (r);
1659 // Otherwise R has the new upper bound.
1660 // Check for overlap/touching ranges, or single target range.
1661 if (m_max_ranges == 1
1662 || (widest_int::from (m_base[1], sign) + 1
1663 >= widest_int::from (r.m_base[0], TYPE_SIGN (r.m_type))))
1664 m_base[1] = r.m_base[1];
1665 else
1666 {
1667 // This is a dual range result.
1668 m_base[2] = r.m_base[0];
1669 m_base[3] = r.m_base[1];
1670 m_num_ranges = 2;
1671 }
1672 // The range has been altered, so normalize it even if nothing
1673 // changed in the mask.
1674 if (!union_bitmask (r))
1675 normalize_kind ();
1676 if (flag_checking)
1677 verify_range ();
1678 return true;
1679 }
1680
1681 // Set the new lower bound to R's lower bound.
1682 wide_int lb = m_base[0];
1683 m_base[0] = r.m_base[0];
1684
1685 // If R fully contains THIS range, just set the upper bound.
1686 if (wi::ge_p (r.m_base[1], m_base[1], sign))
1687 m_base[1] = r.m_base[1];
1688 // Check for overlapping ranges, or target limited to a single range.
1689 else if (m_max_ranges == 1
1690 || (widest_int::from (r.m_base[1], TYPE_SIGN (r.m_type)) + 1
1691 >= widest_int::from (lb, sign)))
1692 ;
1693 else
1694 {
1695 // Left with 2 pairs.
1696 m_num_ranges = 2;
1697 m_base[2] = lb;
1698 m_base[3] = m_base[1];
1699 m_base[1] = r.m_base[1];
1700 }
1701 // The range has been altered, so normalize it even if nothing
1702 // changed in the mask.
1703 if (!union_bitmask (r))
1704 normalize_kind ();
1705 if (flag_checking)
1706 verify_range ();
1707 return true;
1708 }
1709
// Append R to this range, knowing that R occurs after all of these subranges.
// Return TRUE as something must have changed.

bool
irange::union_append (const irange &r)
{
  // Check if the first range in R is an immediate successor to the last
  // range, thus requiring a merge.
  signop sign = TYPE_SIGN (m_type);
  wide_int lb = r.lower_bound ();
  wide_int ub = upper_bound ();
  unsigned start = 0;
  if (widest_int::from (ub, sign) + 1
      == widest_int::from (lb, sign))
    {
      // Adjacent: extend our last subrange rather than appending R's
      // first pair, and skip that pair below.
      m_base[m_num_ranges * 2 - 1] = r.m_base[1];
      start = 1;
    }
  maybe_resize (m_num_ranges + r.m_num_ranges - start);
  for ( ; start < r.m_num_ranges; start++)
    {
      // Merge the last ranges if it exceeds the maximum size.
      if (m_num_ranges + 1 > m_max_ranges)
	{
	  m_base[m_max_ranges * 2 - 1] = r.m_base[r.m_num_ranges * 2 - 1];
	  break;
	}
      m_base[m_num_ranges * 2] = r.m_base[start * 2];
      m_base[m_num_ranges * 2 + 1] = r.m_base[start * 2 + 1];
      m_num_ranges++;
    }

  // The range has been altered, so normalize the kind even if the
  // bitmask union reports no change.
  if (!union_bitmask (r))
    normalize_kind ();
  if (flag_checking)
    verify_range ();
  return true;
}
1748
// Union this range with V in place.  Return TRUE if anything changes.

bool
irange::union_ (const vrange &v)
{
  const irange &r = as_a <irange> (v);

  // Adding UNDEFINED changes nothing.
  if (r.undefined_p ())
    return false;

  // Union into UNDEFINED is a plain copy.
  if (undefined_p ())
    {
      operator= (r);
      if (flag_checking)
	verify_range ();
      return true;
    }

  // VARYING absorbs everything.
  if (varying_p ())
    return false;

  if (r.varying_p ())
    {
      set_varying (type ());
      return true;
    }

  // Special case one range union one range.
  if (m_num_ranges == 1 && r.m_num_ranges == 1)
    return irange_single_pair_union (r);

  signop sign = TYPE_SIGN (m_type);
  // Check for an append to the end.
  if (m_kind == VR_RANGE && wi::gt_p (r.lower_bound (), upper_bound (), sign))
    return union_append (r);

  // If this ranges fully contains R, then we need do nothing.
  if (irange_contains_p (r))
    return union_bitmask (r);

  // Do not worry about merging and such by reserving twice as many
  // pairs as needed, and then simply sort the 2 ranges into this
  // intermediate form.
  //
  // The intermediate result will have the property that the beginning
  // of each range is <= the beginning of the next range.  There may
  // be overlapping ranges at this point.  I.e. this would be valid
  // [-20, 10], [-10, 0], [0, 20], [40, 90] as it satisfies this
  // constraint : -20 < -10 < 0 < 40.  When the range is rebuilt into r,
  // the merge is performed.
  //
  // [Xi,Yi]..[Xn,Yn]  U  [Xj,Yj]..[Xm,Ym]   -->  [Xk,Yk]..[Xp,Yp]
  auto_vec<wide_int, 20> res (m_num_ranges * 2 + r.m_num_ranges * 2);
  unsigned i = 0, j = 0, k = 0;

  // Merge the endpoint pairs of both operands in ascending order of
  // lower bound (classic merge step of a merge sort).
  while (i < m_num_ranges * 2 && j < r.m_num_ranges * 2)
    {
      // lower of Xi and Xj is the lowest point.
      if (widest_int::from (m_base[i], sign)
	  <= widest_int::from (r.m_base[j], sign))
	{
	  res.quick_push (m_base[i]);
	  res.quick_push (m_base[i + 1]);
	  k += 2;
	  i += 2;
	}
      else
	{
	  res.quick_push (r.m_base[j]);
	  res.quick_push (r.m_base[j + 1]);
	  k += 2;
	  j += 2;
	}
    }
  // Copy whichever operand still has pairs remaining.
  for ( ; i < m_num_ranges * 2; i += 2)
    {
      res.quick_push (m_base[i]);
      res.quick_push (m_base[i + 1]);
      k += 2;
    }
  for ( ; j < r.m_num_ranges * 2; j += 2)
    {
      res.quick_push (r.m_base[j]);
      res.quick_push (r.m_base[j + 1]);
      k += 2;
    }

  // Now normalize the vector removing any overlaps.
  i = 2;
  for (j = 2; j < k ; j += 2)
    {
      // Current upper+1 is >= lower bound next pair, then we merge ranges.
      if (widest_int::from (res[i - 1], sign) + 1
	  >= widest_int::from (res[j], sign))
	{
	  // New upper bounds is greater of current or the next one.
	  if (widest_int::from (res[j + 1], sign)
	      > widest_int::from (res[i - 1], sign))
	    res[i - 1] = res[j + 1];
	}
      else
	{
	  // This is a new distinct range, but no point in copying it
	  // if it is already in the right place.
	  if (i != j)
	    {
	      res[i++] = res[j];
	      res[i++] = res[j + 1];
	    }
	  else
	    i += 2;
	}
    }

  // At this point, the vector should have i ranges, none overlapping.
  // Now it simply needs to be copied, and if there are too many
  // ranges, merge some.  We wont do any analysis as to what the
  // "best" merges are, simply combine the final ranges into one.
  maybe_resize (i / 2);
  if (i > m_max_ranges * 2)
    {
      res[m_max_ranges * 2 - 1] = res[i - 1];
      i = m_max_ranges * 2;
    }

  for (j = 0; j < i ; j++)
    m_base[j] = res [j];
  m_num_ranges = i / 2;

  m_kind = VR_RANGE;
  // The range has been altered, so normalize it even if nothing
  // changed in the mask.
  if (!union_bitmask (r))
    normalize_kind ();
  if (flag_checking)
    verify_range ();
  return true;
}
1887
1888 // Return TRUE if THIS fully contains R. No undefined or varying cases.
1889
1890 bool
1891 irange::irange_contains_p (const irange &r) const
1892 {
1893 gcc_checking_assert (!undefined_p () && !varying_p ());
1894 gcc_checking_assert (!r.undefined_p () && !varying_p ());
1895
1896 // In order for THIS to fully contain R, all of the pairs within R must
1897 // be fully contained by the pairs in this object.
1898 signop sign = TYPE_SIGN (m_type);
1899 unsigned ri = 0;
1900 unsigned i = 0;
1901 wide_int rl = r.m_base[0];
1902 wide_int ru = r.m_base[1];
1903 wide_int l = m_base[0];
1904 wide_int u = m_base[1];
1905 while (1)
1906 {
1907 // If r is contained within this range, move to the next R
1908 if (wi::ge_p (rl, l, sign)
1909 && wi::le_p (ru, u, sign))
1910 {
1911 // This pair is OK, Either done, or bump to the next.
1912 if (++ri >= r.num_pairs ())
1913 return true;
1914 rl = r.m_base[ri * 2];
1915 ru = r.m_base[ri * 2 + 1];
1916 continue;
1917 }
1918 // Otherwise, check if this's pair occurs before R's.
1919 if (wi::lt_p (u, rl, sign))
1920 {
1921 // There's still at least one pair of R left.
1922 if (++i >= num_pairs ())
1923 return false;
1924 l = m_base[i * 2];
1925 u = m_base[i * 2 + 1];
1926 continue;
1927 }
1928 return false;
1929 }
1930 return false;
1931 }
1932
1933
// Intersect this range with V in place.  Return TRUE if anything changes.

bool
irange::intersect (const vrange &v)
{
  const irange &r = as_a <irange> (v);
  gcc_checking_assert (undefined_p () || r.undefined_p ()
		       || range_compatible_p (type (), r.type ()));

  // UNDEFINED intersected with anything stays UNDEFINED.
  if (undefined_p ())
    return false;
  // Intersecting with UNDEFINED yields UNDEFINED.
  if (r.undefined_p ())
    {
      set_undefined ();
      return true;
    }
  // VARYING is the identity element for intersection.
  if (r.varying_p ())
    return false;
  if (varying_p ())
    {
      operator= (r);
      return true;
    }

  // A single pair in R can use the cheaper wide_int variant.
  if (r.num_pairs () == 1)
    {
      bool res = intersect (r.lower_bound (), r.upper_bound ());
      if (undefined_p ())
	return true;

      res |= intersect_bitmask (r);
      if (res)
	normalize_kind ();
      return res;
    }

  // If R fully contains this, then intersection will change nothing.
  if (r.irange_contains_p (*this))
    return intersect_bitmask (r);

  // ?? We could probably come up with something smarter than the
  // worst case scenario here.
  int needed = num_pairs () + r.num_pairs ();
  maybe_resize (needed);

  signop sign = TYPE_SIGN (m_type);
  unsigned bld_pair = 0;
  unsigned bld_lim = m_max_ranges;
  // The result is built in place over m_base, so iterate over a
  // saved copy of this range (R2) while writing into *this.
  int_range_max r2 (*this);
  unsigned r2_lim = r2.num_pairs ();
  unsigned i2 = 0;
  for (unsigned i = 0; i < r.num_pairs (); )
    {
      // If r1's upper is < r2's lower, we can skip r1's pair.
      wide_int ru = r.m_base[i * 2 + 1];
      wide_int r2l = r2.m_base[i2 * 2];
      if (wi::lt_p (ru, r2l, sign))
	{
	  i++;
	  continue;
	}
      // Likewise, skip r2's pair if its excluded.
      wide_int r2u = r2.m_base[i2 * 2 + 1];
      wide_int rl = r.m_base[i * 2];
      if (wi::lt_p (r2u, rl, sign))
	{
	  i2++;
	  if (i2 < r2_lim)
	    continue;
	  // No more r2, break.
	  break;
	}

      // Must be some overlap.  Find the highest of the lower bounds,
      // and set it, unless the build limits lower bounds is already
      // set.
      if (bld_pair < bld_lim)
	{
	  if (wi::ge_p (rl, r2l, sign))
	    m_base[bld_pair * 2] = rl;
	  else
	    m_base[bld_pair * 2] = r2l;
	}
      else
	// Decrease and set a new upper.
	bld_pair--;

      // ...and choose the lower of the upper bounds.
      if (wi::le_p (ru, r2u, sign))
	{
	  m_base[bld_pair * 2 + 1] = ru;
	  bld_pair++;
	  // Move past the r1 pair and keep trying.
	  i++;
	  continue;
	}
      else
	{
	  m_base[bld_pair * 2 + 1] = r2u;
	  bld_pair++;
	  i2++;
	  if (i2 < r2_lim)
	    continue;
	  // No more r2, break.
	  break;
	}
      // r2 has the higher lower bound.
    }

  // At the exit of this loop, it is one of 2 things:
  // ran out of r1, or r2, but either means we are done.
  m_num_ranges = bld_pair;
  // An empty intersection is UNDEFINED.
  if (m_num_ranges == 0)
    {
      set_undefined ();
      return true;
    }

  m_kind = VR_RANGE;
  // The range has been altered, so normalize it even if nothing
  // changed in the mask.
  if (!intersect_bitmask (r))
    normalize_kind ();
  if (flag_checking)
    verify_range ();
  return true;
}
2061
2062
// Multirange intersect for a specified wide_int [lb, ub] range.
// Return TRUE if intersect changed anything.
//
// NOTE: It is the caller's responsibility to intersect the mask.

bool
irange::intersect (const wide_int& lb, const wide_int& ub)
{
  // Undefined remains undefined.
  if (undefined_p ())
    return false;

  tree range_type = type();
  signop sign = TYPE_SIGN (range_type);

  gcc_checking_assert (TYPE_PRECISION (range_type) == wi::get_precision (lb));
  gcc_checking_assert (TYPE_PRECISION (range_type) == wi::get_precision (ub));

  // If this range is fully contained, then intersection will do nothing.
  if (wi::ge_p (lower_bound (), lb, sign)
      && wi::le_p (upper_bound (), ub, sign))
    return false;

  // Clip each subrange against [LB, UB], compacting the survivors in
  // place at BLD_INDEX.
  unsigned bld_index = 0;
  unsigned pair_lim = num_pairs ();
  for (unsigned i = 0; i < pair_lim; i++)
    {
      wide_int pairl = m_base[i * 2];
      wide_int pairu = m_base[i * 2 + 1];
      // Once UB is less than a pairs lower bound, we're done.
      if (wi::lt_p (ub, pairl, sign))
	break;
      // if LB is greater than this pairs upper, this pair is excluded.
      if (wi::lt_p (pairu, lb, sign))
	continue;

      // Must be some overlap.  Find the highest of the lower bounds,
      // and set it
      if (wi::gt_p (lb, pairl, sign))
	m_base[bld_index * 2] = lb;
      else
	m_base[bld_index * 2] = pairl;

      // ...and choose the lower of the upper bounds and if the base pair
      // has the lower upper bound, need to check next pair too.
      if (wi::lt_p (ub, pairu, sign))
	{
	  m_base[bld_index++ * 2 + 1] = ub;
	  break;
	}
      else
	m_base[bld_index++ * 2 + 1] = pairu;
    }

  m_num_ranges = bld_index;
  // Nothing survived the clip: the intersection is empty.
  if (m_num_ranges == 0)
    {
      set_undefined ();
      return true;
    }

  m_kind = VR_RANGE;
  // The caller must normalize and verify the range, as the bitmask
  // still needs to be handled.
  return true;
}
2129
2130
2131 // Signed 1-bits are strange. You can't subtract 1, because you can't
2132 // represent the number 1. This works around that for the invert routine.
2133
2134 static wide_int inline
2135 subtract_one (const wide_int &x, tree type, wi::overflow_type &overflow)
2136 {
2137 if (TYPE_SIGN (type) == SIGNED)
2138 return wi::add (x, -1, SIGNED, &overflow);
2139 else
2140 return wi::sub (x, 1, UNSIGNED, &overflow);
2141 }
2142
2143 // The analogous function for adding 1.
2144
2145 static wide_int inline
2146 add_one (const wide_int &x, tree type, wi::overflow_type &overflow)
2147 {
2148 if (TYPE_SIGN (type) == SIGNED)
2149 return wi::sub (x, -1, SIGNED, &overflow);
2150 else
2151 return wi::add (x, 1, UNSIGNED, &overflow);
2152 }
2153
// Replace this range with its inverse (set complement over the
// type's domain).

void
irange::invert ()
{
  gcc_checking_assert (!undefined_p () && !varying_p ());

  // We always need one more set of bounds to represent an inverse, so
  // if we're at the limit, we can't properly represent things.
  //
  // For instance, to represent the inverse of a 2 sub-range set
  // [5, 10][20, 30], we would need a 3 sub-range set
  // [-MIN, 4][11, 19][31, MAX].
  //
  // In this case, return the most conservative thing.
  //
  // However, if any of the extremes of the range are -MIN/+MAX, we
  // know we will not need an extra bound.  For example:
  //
  // INVERT([-MIN,20][30,40]) => [21,29][41,+MAX]
  // INVERT([-MIN,20][30,MAX]) => [21,29]
  tree ttype = type ();
  unsigned prec = TYPE_PRECISION (ttype);
  signop sign = TYPE_SIGN (ttype);
  wide_int type_min = wi::min_value (prec, sign);
  wide_int type_max = wi::max_value (prec, sign);
  // Bit information is not preserved across an inversion.
  m_bitmask.set_unknown (prec);

  // At this point, we need one extra sub-range to represent the
  // inverse.
  maybe_resize (m_num_ranges + 1);

  // The algorithm is as follows.  To calculate INVERT ([a,b][c,d]), we
  // generate [-MIN, a-1][b+1, c-1][d+1, MAX].
  //
  // If there is an over/underflow in the calculation for any
  // sub-range, we eliminate that subrange.  This allows us to easily
  // calculate INVERT([-MIN, 5]) with: [-MIN, -MIN-1][6, MAX].  And since
  // we eliminate the underflow, only [6, MAX] remains.
  unsigned i = 0;
  wi::overflow_type ovf;
  // Construct leftmost range.
  // m_base is rewritten in place below, so work from a saved copy.
  int_range_max orig_range (*this);
  unsigned nitems = 0;
  wide_int tmp;
  // If this is going to underflow on the MINUS 1, don't even bother
  // checking.  This also handles subtracting one from an unsigned 0,
  // which doesn't set the underflow bit.
  if (type_min != orig_range.lower_bound ())
    {
      m_base[nitems++] = type_min;
      tmp = subtract_one (orig_range.lower_bound (), ttype, ovf);
      m_base[nitems++] = tmp;
      if (ovf)
	nitems = 0;
    }
  i++;
  // Construct middle ranges if applicable.
  if (orig_range.num_pairs () > 1)
    {
      unsigned j = i;
      for (; j < (orig_range.num_pairs () * 2) - 1; j += 2)
	{
	  // The middle ranges cannot have MAX/MIN, so there's no need
	  // to check for unsigned overflow on the +1 and -1 here.
	  tmp = wi::add (orig_range.m_base[j], 1, sign, &ovf);
	  m_base[nitems++] = tmp;
	  tmp = subtract_one (orig_range.m_base[j + 1], ttype, ovf);
	  m_base[nitems++] = tmp;
	  if (ovf)
	    nitems -= 2;
	}
      i = j;
    }
  // Construct rightmost range.
  //
  // However, if this will overflow on the PLUS 1, don't even bother.
  // This also handles adding one to an unsigned MAX, which doesn't
  // set the overflow bit.
  if (type_max != orig_range.m_base[i])
    {
      tmp = add_one (orig_range.m_base[i], ttype, ovf);
      m_base[nitems++] = tmp;
      m_base[nitems++] = type_max;
      if (ovf)
	nitems -= 2;
    }
  m_num_ranges = nitems / 2;

  // We disallow undefined or varying coming in, so the result can
  // only be a VR_RANGE.
  gcc_checking_assert (m_kind == VR_RANGE);

  if (flag_checking)
    verify_range ();
}
2250
// Remove trailing ranges that this bitmask indicates can't exist.

void
irange_bitmask::adjust_range (irange &r) const
{
  if (unknown_p () || r.undefined_p ())
    return;

  int_range_max range;
  tree type = r.type ();
  int prec = TYPE_PRECISION (type);
  // If there are trailing zeros, create a range representing those bits.
  gcc_checking_assert (m_mask != 0);
  int z = wi::ctz (m_mask);
  if (z)
    {
      // The low Z bits of every value are fully known (mask bits are
      // zero there), so any value whose low Z bits differ from
      // M_VALUE's is impossible.
      wide_int ub = (wi::one (prec) << z) - 1;
      range = int_range<5> (type, wi::zero (prec), ub);
      // Then remove the specific value these bits contain from the range.
      wide_int value = m_value & ub;
      range.intersect (int_range<2> (type, value, value, VR_ANTI_RANGE));
      // Inverting produces a list of ranges which can be valid.
      range.invert ();
      // And finally select R from only those valid values.
      r.intersect (range);
      return;
    }
}
2279
// If the mask can be trivially converted to a range, do so and
// return TRUE.

bool
irange::set_range_from_bitmask ()
{
  gcc_checking_assert (!undefined_p ());
  if (m_bitmask.unknown_p ())
    return false;

  // If all the bits are known, this is a singleton.
  if (m_bitmask.mask () == 0)
    {
      set (m_type, m_bitmask.value (), m_bitmask.value ());
      return true;
    }

  unsigned popcount = wi::popcount (m_bitmask.get_nonzero_bits ());

  // If we have only one bit set in the mask, we can figure out the
  // range immediately.
  if (popcount == 1)
    {
      // Make sure we don't pessimize the range.
      if (!contains_p (m_bitmask.get_nonzero_bits ()))
	return false;

      // The only candidates are 0 and the single nonzero-bit value;
      // remember whether 0 was in the range before it is clobbered.
      bool has_zero = contains_zero_p (*this);
      wide_int nz = m_bitmask.get_nonzero_bits ();
      set (m_type, nz, nz);
      m_bitmask.set_nonzero_bits (nz);
      if (has_zero)
	{
	  int_range<2> zero;
	  zero.set_zero (type ());
	  union_ (zero);
	}
      if (flag_checking)
	verify_range ();
      return true;
    }
  else if (popcount == 0)
    {
      // No bit may be nonzero: the range collapses to [0, 0].
      set_zero (type ());
      return true;
    }
  return false;
}
2328
2329 void
2330 irange::update_bitmask (const irange_bitmask &bm)
2331 {
2332 gcc_checking_assert (!undefined_p ());
2333
2334 // Drop VARYINGs with known bits to a plain range.
2335 if (m_kind == VR_VARYING && !bm.unknown_p ())
2336 m_kind = VR_RANGE;
2337
2338 m_bitmask = bm;
2339 if (!set_range_from_bitmask ())
2340 normalize_kind ();
2341 if (flag_checking)
2342 verify_range ();
2343 }
2344
2345 // Return the bitmask of known bits that includes the bitmask inherent
2346 // in the range.
2347
2348 irange_bitmask
2349 irange::get_bitmask () const
2350 {
2351 gcc_checking_assert (!undefined_p ());
2352
2353 // The mask inherent in the range is calculated on-demand. For
2354 // example, [0,255] does not have known bits set by default. This
2355 // saves us considerable time, because setting it at creation incurs
2356 // a large penalty for irange::set. At the time of writing there
2357 // was a 5% slowdown in VRP if we kept the mask precisely up to date
2358 // at all times. Instead, we default to -1 and set it when
2359 // explicitly requested. However, this function will always return
2360 // the correct mask.
2361 //
2362 // This also means that the mask may have a finer granularity than
2363 // the range and thus contradict it. Think of the mask as an
2364 // enhancement to the range. For example:
2365 //
2366 // [3, 1000] MASK 0xfffffffe VALUE 0x0
2367 //
2368 // 3 is in the range endpoints, but is excluded per the known 0 bits
2369 // in the mask.
2370 //
2371 // See also the note in irange_bitmask::intersect.
2372 irange_bitmask bm
2373 = get_bitmask_from_range (type (), lower_bound (), upper_bound ());
2374 if (!m_bitmask.unknown_p ())
2375 bm.intersect (m_bitmask);
2376 return bm;
2377 }
2378
2379 // Set the nonzero bits in R into THIS. Return TRUE and
2380 // normalize the range if anything changed.
2381
2382 void
2383 vrange::set_nonzero_bits (const wide_int &bits)
2384 {
2385 gcc_checking_assert (!undefined_p ());
2386 irange_bitmask bm (wi::zero (TYPE_PRECISION (type ())), bits);
2387 update_bitmask (bm);
2388 }
2389
2390 // Return the nonzero bits in R.
2391
2392 wide_int
2393 vrange::get_nonzero_bits () const
2394 {
2395 gcc_checking_assert (!undefined_p ());
2396 irange_bitmask bm = get_bitmask ();
2397 return bm.value () | bm.mask ();
2398 }
2399
// Intersect the bitmask in R into THIS and normalize the range.
// Return TRUE if the intersection changed anything.

bool
irange::intersect_bitmask (const irange &r)
{
  gcc_checking_assert (!undefined_p () && !r.undefined_p ());

  if (m_bitmask == r.m_bitmask)
    return false;

  // Operate on the semantic bitmasks, which fold in the bit
  // information implied by the range endpoints.
  irange_bitmask bm = get_bitmask ();
  irange_bitmask save = bm;
  bm.intersect (r.get_bitmask ());
  if (save == bm)
    return false;

  m_bitmask = bm;

  // Updating m_bitmask may still yield a semantic bitmask (as
  // returned by get_bitmask) which is functionally equivalent to what
  // we originally had.  In which case, there's still no change.
  if (save == get_bitmask ())
    return false;

  // The tighter mask may collapse the range (e.g. to a singleton) or
  // prune now-impossible subranges.
  if (!set_range_from_bitmask ())
    normalize_kind ();
  m_bitmask.adjust_range (*this);
  if (flag_checking)
    verify_range ();
  return true;
}
2432
// Union the bitmask in R into THIS.  Return TRUE and normalize the
// range if anything changed.

bool
irange::union_bitmask (const irange &r)
{
  gcc_checking_assert (!undefined_p () && !r.undefined_p ());

  if (m_bitmask == r.m_bitmask)
    return false;

  // Operate on the semantic bitmasks, which fold in the bit
  // information implied by the range endpoints.
  irange_bitmask bm = get_bitmask ();
  irange_bitmask save = bm;
  bm.union_ (r.get_bitmask ());
  if (save == bm)
    return false;

  m_bitmask = bm;

  // Updating m_bitmask may still yield a semantic bitmask (as
  // returned by get_bitmask) which is functionally equivalent to what
  // we originally had.  In which case, there's still no change.
  if (save == get_bitmask ())
    return false;

  // No need to call set_range_from_mask, because we'll never
  // narrow the range.  Besides, it would cause endless recursion
  // because of the union_ in set_range_from_mask.
  normalize_kind ();
  return true;
}
2464
2465 tree
2466 irange::lbound () const
2467 {
2468 return wide_int_to_tree (type (), lower_bound ());
2469 }
2470
2471 tree
2472 irange::ubound () const
2473 {
2474 return wide_int_to_tree (type (), upper_bound ());
2475 }
2476
// Sanity check an irange_bitmask: value and mask must have matching
// precision, and no bit may be set in both, since a set mask bit
// means the corresponding value bit is unknown (and thus must be 0).

void
irange_bitmask::verify_mask () const
{
  gcc_assert (m_value.get_precision () == m_mask.get_precision ());
  gcc_checking_assert (wi::bit_and (m_mask, m_value) == 0);
}
2483
// Dump VR to FILE, dispatching to VR's own dump method.

void
dump_value_range (FILE *file, const vrange *vr)
{
  vr->dump (file);
}
2489
2490 DEBUG_FUNCTION void
2491 debug (const vrange *vr)
2492 {
2493 dump_value_range (stderr, vr);
2494 fprintf (stderr, "\n");
2495 }
2496
2497 DEBUG_FUNCTION void
2498 debug (const vrange &vr)
2499 {
2500 debug (&vr);
2501 }
2502
2503 DEBUG_FUNCTION void
2504 debug (const value_range *vr)
2505 {
2506 dump_value_range (stderr, vr);
2507 fprintf (stderr, "\n");
2508 }
2509
// Dump VR to stderr followed by a newline, for use from a debugger.

DEBUG_FUNCTION void
debug (const value_range &vr)
{
  dump_value_range (stderr, &vr);
  fprintf (stderr, "\n");
}
2516
2517 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
2518
2519 bool
2520 vrp_operand_equal_p (const_tree val1, const_tree val2)
2521 {
2522 if (val1 == val2)
2523 return true;
2524 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
2525 return false;
2526 return true;
2527 }
2528
// Explicitly instantiate the int_range<N> constructors and assignment
// operator for each sub-range count used elsewhere, so the member
// definitions in this translation unit get emitted.
// (255 presumably matches the largest supported sub-range count,
// e.g. int_range_max — TODO confirm against value-range.h.)

#define DEFINE_INT_RANGE_INSTANCE(N) \
  template int_range<N>::int_range(tree_node *, \
				   const wide_int &, \
				   const wide_int &, \
				   value_range_kind); \
  template int_range<N>::int_range(tree); \
  template int_range<N>::int_range(const irange &); \
  template int_range<N>::int_range(const int_range &); \
  template int_range<N>& int_range<N>::operator= (const int_range &);

DEFINE_INT_RANGE_INSTANCE(1)
DEFINE_INT_RANGE_INSTANCE(2)
DEFINE_INT_RANGE_INSTANCE(3)
DEFINE_INT_RANGE_INSTANCE(255)
2543
2544 #if CHECKING_P
2545 #include "selftest.h"
2546
// Shorthands for building wide_ints at the precision of common types,
// used throughout the selftests below.
#define INT(x) wi::shwi ((x), TYPE_PRECISION (integer_type_node))
#define UINT(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_type_node))
#define SCHAR(x) wi::shwi ((x), TYPE_PRECISION (signed_char_type_node))
2550
2551 namespace selftest
2552 {
2553
2554 static int_range<2>
2555 range (tree type, int a, int b, value_range_kind kind = VR_RANGE)
2556 {
2557 wide_int w1, w2;
2558 if (TYPE_UNSIGNED (type))
2559 {
2560 w1 = wi::uhwi (a, TYPE_PRECISION (type));
2561 w2 = wi::uhwi (b, TYPE_PRECISION (type));
2562 }
2563 else
2564 {
2565 w1 = wi::shwi (a, TYPE_PRECISION (type));
2566 w2 = wi::shwi (b, TYPE_PRECISION (type));
2567 }
2568 return int_range<2> (type, w1, w2, kind);
2569 }
2570
// Build [A, B] (per KIND) over integer_type_node.

static int_range<2>
range_int (int a, int b, value_range_kind kind = VR_RANGE)
{
  return range (integer_type_node, a, b, kind);
}
2576
// Build [A, B] (per KIND) over unsigned_type_node.

static int_range<2>
range_uint (int a, int b, value_range_kind kind = VR_RANGE)
{
  return range (unsigned_type_node, a, b, kind);
}
2582
2583 static int_range<2>
2584 range_uint128 (int a, int b, value_range_kind kind = VR_RANGE)
2585 {
2586 tree u128_type_node = build_nonstandard_integer_type (128, 1);
2587 return range (u128_type_node, a, b, kind);
2588 }
2589
// Build [A, B] (per KIND) over unsigned_char_type_node.

static int_range<2>
range_uchar (int a, int b, value_range_kind kind = VR_RANGE)
{
  return range (unsigned_char_type_node, a, b, kind);
}
2595
// Build [A, B] (per KIND) over signed_char_type_node.

static int_range<2>
range_char (int a, int b, value_range_kind kind = VR_RANGE)
{
  return range (signed_char_type_node, a, b, kind);
}
2601
2602 static int_range<3>
2603 build_range3 (int a, int b, int c, int d, int e, int f)
2604 {
2605 int_range<3> i1 = range_int (a, b);
2606 int_range<3> i2 = range_int (c, d);
2607 int_range<3> i3 = range_int (e, f);
2608 i1.union_ (i2);
2609 i1.union_ (i3);
2610 return i1;
2611 }
2612
2613 static void
2614 range_tests_irange3 ()
2615 {
2616 int_range<3> r0, r1, r2;
2617 int_range<3> i1, i2, i3;
2618
2619 // ([10,20] U [5,8]) U [1,3] ==> [1,3][5,8][10,20].
2620 r0 = range_int (10, 20);
2621 r1 = range_int (5, 8);
2622 r0.union_ (r1);
2623 r1 = range_int (1, 3);
2624 r0.union_ (r1);
2625 ASSERT_TRUE (r0 == build_range3 (1, 3, 5, 8, 10, 20));
2626
2627 // [1,3][5,8][10,20] U [-5,0] => [-5,3][5,8][10,20].
2628 r1 = range_int (-5, 0);
2629 r0.union_ (r1);
2630 ASSERT_TRUE (r0 == build_range3 (-5, 3, 5, 8, 10, 20));
2631
2632 // [10,20][30,40] U [50,60] ==> [10,20][30,40][50,60].
2633 r1 = range_int (50, 60);
2634 r0 = range_int (10, 20);
2635 r0.union_ (range_int (30, 40));
2636 r0.union_ (r1);
2637 ASSERT_TRUE (r0 == build_range3 (10, 20, 30, 40, 50, 60));
2638 // [10,20][30,40][50,60] U [70, 80] ==> [10,20][30,40][50,60][70,80].
2639 r1 = range_int (70, 80);
2640 r0.union_ (r1);
2641
2642 r2 = build_range3 (10, 20, 30, 40, 50, 60);
2643 r2.union_ (range_int (70, 80));
2644 ASSERT_TRUE (r0 == r2);
2645
2646 // [10,20][30,40][50,60] U [6,35] => [6,40][50,60].
2647 r0 = build_range3 (10, 20, 30, 40, 50, 60);
2648 r1 = range_int (6, 35);
2649 r0.union_ (r1);
2650 r1 = range_int (6, 40);
2651 r1.union_ (range_int (50, 60));
2652 ASSERT_TRUE (r0 == r1);
2653
2654 // [10,20][30,40][50,60] U [6,60] => [6,60].
2655 r0 = build_range3 (10, 20, 30, 40, 50, 60);
2656 r1 = range_int (6, 60);
2657 r0.union_ (r1);
2658 ASSERT_TRUE (r0 == range_int (6, 60));
2659
2660 // [10,20][30,40][50,60] U [6,70] => [6,70].
2661 r0 = build_range3 (10, 20, 30, 40, 50, 60);
2662 r1 = range_int (6, 70);
2663 r0.union_ (r1);
2664 ASSERT_TRUE (r0 == range_int (6, 70));
2665
2666 // [10,20][30,40][50,60] U [35,70] => [10,20][30,70].
2667 r0 = build_range3 (10, 20, 30, 40, 50, 60);
2668 r1 = range_int (35, 70);
2669 r0.union_ (r1);
2670 r1 = range_int (10, 20);
2671 r1.union_ (range_int (30, 70));
2672 ASSERT_TRUE (r0 == r1);
2673
2674 // [10,20][30,40][50,60] U [15,35] => [10,40][50,60].
2675 r0 = build_range3 (10, 20, 30, 40, 50, 60);
2676 r1 = range_int (15, 35);
2677 r0.union_ (r1);
2678 r1 = range_int (10, 40);
2679 r1.union_ (range_int (50, 60));
2680 ASSERT_TRUE (r0 == r1);
2681
2682 // [10,20][30,40][50,60] U [35,35] => [10,20][30,40][50,60].
2683 r0 = build_range3 (10, 20, 30, 40, 50, 60);
2684 r1 = range_int (35, 35);
2685 r0.union_ (r1);
2686 ASSERT_TRUE (r0 == build_range3 (10, 20, 30, 40, 50, 60));
2687 }
2688
// Stress int_range_max: build a 50-sub-range range, copy it, invert
// it, and intersect it down.

static void
range_tests_int_range_max ()
{
  int_range_max big;
  unsigned int nrange;

  // Build a huge multi-range range.
  for (nrange = 0; nrange < 50; ++nrange)
    {
      int_range<1> tmp = range_int (nrange*10, nrange *10 + 5);
      big.union_ (tmp);
    }
  ASSERT_TRUE (big.num_pairs () == nrange);

  // Verify that we can copy it without losing precision.
  int_range_max copy (big);
  ASSERT_TRUE (copy.num_pairs () == nrange);

  // Inverting it should produce one more sub-range.
  big.invert ();
  ASSERT_TRUE (big.num_pairs () == nrange + 1);

  // Intersecting the inverse with [5,37] leaves the gaps between the
  // original [0,5][10,15][20,25][30,35] pieces:
  // [6,9][16,19][26,29][36,37], i.e. 4 sub-ranges.
  int_range<1> tmp = range_int (5, 37);
  big.intersect (tmp);
  ASSERT_TRUE (big.num_pairs () == 4);

  // Test that [10,10][20,20] does NOT contain 15.
  {
    int_range_max i1 = range_int (10, 10);
    int_range_max i2 = range_int (20, 20);
    i1.union_ (i2);
    ASSERT_FALSE (i1.contains_p (INT (15)));
  }
}
2723
// Simulate -fstrict-enums where the domain of a type is less than the
// underlying type.

static void
range_tests_strict_enum ()
{
  // The enum can only hold [0, 3].  Clone unsigned int and narrow its
  // declared min/max to get such a type.
  tree rtype = copy_node (unsigned_type_node);
  TYPE_MIN_VALUE (rtype) = build_int_cstu (rtype, 0);
  TYPE_MAX_VALUE (rtype) = build_int_cstu (rtype, 3);

  // Test that even though vr1 covers the strict enum domain ([0, 3]),
  // it does not cover the domain of the underlying type.
  int_range<1> vr1 = range (rtype, 0, 1);
  int_range<1> vr2 = range (rtype, 2, 3);
  vr1.union_ (vr2);
  ASSERT_TRUE (vr1 == range (rtype, 0, 3));
  ASSERT_FALSE (vr1.varying_p ());

  // Test that copying to a multi-range does not change things.
  int_range<2> ir1 (vr1);
  ASSERT_TRUE (ir1 == vr1);
  ASSERT_FALSE (ir1.varying_p ());

  // The same test as above, but using TYPE_{MIN,MAX}_VALUE instead of [0,3].
  vr1 = int_range<2> (rtype,
		      wi::to_wide (TYPE_MIN_VALUE (rtype)),
		      wi::to_wide (TYPE_MAX_VALUE (rtype)));
  ir1 = vr1;
  ASSERT_TRUE (ir1 == vr1);
  ASSERT_FALSE (ir1.varying_p ());
}
2756
2757 static void
2758 range_tests_misc ()
2759 {
2760 tree u128_type = build_nonstandard_integer_type (128, /*unsigned=*/1);
2761 int_range<2> i1, i2, i3;
2762 int_range<2> r0, r1, rold;
2763
2764 // Test 1-bit signed integer union.
2765 // [-1,-1] U [0,0] = VARYING.
2766 tree one_bit_type = build_nonstandard_integer_type (1, 0);
2767 wide_int one_bit_min = irange_val_min (one_bit_type);
2768 wide_int one_bit_max = irange_val_max (one_bit_type);
2769 {
2770 int_range<2> min = int_range<2> (one_bit_type, one_bit_min, one_bit_min);
2771 int_range<2> max = int_range<2> (one_bit_type, one_bit_max, one_bit_max);
2772 max.union_ (min);
2773 ASSERT_TRUE (max.varying_p ());
2774 }
2775 // Test that we can set a range of true+false for a 1-bit signed int.
2776 r0 = range_true_and_false (one_bit_type);
2777
2778 // Test inversion of 1-bit signed integers.
2779 {
2780 int_range<2> min = int_range<2> (one_bit_type, one_bit_min, one_bit_min);
2781 int_range<2> max = int_range<2> (one_bit_type, one_bit_max, one_bit_max);
2782 int_range<2> t;
2783 t = min;
2784 t.invert ();
2785 ASSERT_TRUE (t == max);
2786 t = max;
2787 t.invert ();
2788 ASSERT_TRUE (t == min);
2789 }
2790
2791 // Test that NOT(255) is [0..254] in 8-bit land.
2792 int_range<1> not_255 = range_uchar (255, 255, VR_ANTI_RANGE);
2793 ASSERT_TRUE (not_255 == range_uchar (0, 254));
2794
2795 // Test that NOT(0) is [1..255] in 8-bit land.
2796 int_range<2> not_zero;
2797 not_zero.set_nonzero (unsigned_char_type_node);
2798 ASSERT_TRUE (not_zero == range_uchar (1, 255));
2799
2800 // Check that [0,127][0x..ffffff80,0x..ffffff]
2801 // => ~[128, 0x..ffffff7f].
2802 r0 = range_uint128 (0, 127);
2803 wide_int high = wi::minus_one (128);
2804 // low = -1 - 127 => 0x..ffffff80.
2805 wide_int low = wi::sub (high, wi::uhwi (127, 128));
2806 r1 = int_range<1> (u128_type, low, high); // [0x..ffffff80, 0x..ffffffff]
2807 // r0 = [0,127][0x..ffffff80,0x..fffffff].
2808 r0.union_ (r1);
2809 // r1 = [128, 0x..ffffff7f].
2810 r1 = int_range<1> (u128_type,
2811 wi::uhwi (128, 128),
2812 wi::sub (wi::minus_one (128), wi::uhwi (128, 128)));
2813 r0.invert ();
2814 ASSERT_TRUE (r0 == r1);
2815
2816 r0.set_varying (integer_type_node);
2817 wide_int minint = r0.lower_bound ();
2818 wide_int maxint = r0.upper_bound ();
2819
2820 r0.set_varying (short_integer_type_node);
2821
2822 r0.set_varying (unsigned_type_node);
2823 wide_int maxuint = r0.upper_bound ();
2824
2825 // Check that ~[0,5] => [6,MAX] for unsigned int.
2826 r0 = range_uint (0, 5);
2827 r0.invert ();
2828 ASSERT_TRUE (r0 == int_range<1> (unsigned_type_node,
2829 wi::uhwi (6, TYPE_PRECISION (unsigned_type_node)),
2830 maxuint));
2831
2832 // Check that ~[10,MAX] => [0,9] for unsigned int.
2833 r0 = int_range<1> (unsigned_type_node,
2834 wi::uhwi (10, TYPE_PRECISION (unsigned_type_node)),
2835 maxuint);
2836 r0.invert ();
2837 ASSERT_TRUE (r0 == range_uint (0, 9));
2838
2839 // Check that ~[0,5] => [6,MAX] for unsigned 128-bit numbers.
2840 r0 = range_uint128 (0, 5, VR_ANTI_RANGE);
2841 r1 = int_range<1> (u128_type, wi::uhwi (6, 128), wi::minus_one (128));
2842 ASSERT_TRUE (r0 == r1);
2843
2844 // Check that [~5] is really [-MIN,4][6,MAX].
2845 r0 = range_int (5, 5, VR_ANTI_RANGE);
2846 r1 = int_range<1> (integer_type_node, minint, INT (4));
2847 r1.union_ (int_range<1> (integer_type_node, INT (6), maxint));
2848 ASSERT_FALSE (r1.undefined_p ());
2849 ASSERT_TRUE (r0 == r1);
2850
2851 r1 = range_int (5, 5);
2852 int_range<2> r2 (r1);
2853 ASSERT_TRUE (r1 == r2);
2854
2855 r1 = range_int (5, 10);
2856
2857 r1 = range_int (5, 10);
2858 ASSERT_TRUE (r1.contains_p (INT (7)));
2859
2860 r1 = range_char (0, 20);
2861 ASSERT_TRUE (r1.contains_p (SCHAR(15)));
2862 ASSERT_FALSE (r1.contains_p (SCHAR(300)));
2863
2864 // NOT([10,20]) ==> [-MIN,9][21,MAX].
2865 r0 = r1 = range_int (10, 20);
2866 r2 = int_range<1> (integer_type_node, minint, INT(9));
2867 r2.union_ (int_range<1> (integer_type_node, INT(21), maxint));
2868 ASSERT_FALSE (r2.undefined_p ());
2869 r1.invert ();
2870 ASSERT_TRUE (r1 == r2);
2871 // Test that NOT(NOT(x)) == x.
2872 r2.invert ();
2873 ASSERT_TRUE (r0 == r2);
2874
2875 // Test that booleans and their inverse work as expected.
2876 r0.set_zero (boolean_type_node);
2877 ASSERT_TRUE (r0 == range_false ());
2878 r0.invert ();
2879 ASSERT_TRUE (r0 == range_true ());
2880
2881 // Make sure NULL and non-NULL of pointer types work, and that
2882 // inverses of them are consistent.
2883 tree voidp = build_pointer_type (void_type_node);
2884 prange p0;
2885 p0.set_zero (voidp);
2886 prange p1 = p0;
2887 p0.invert ();
2888 p0.invert ();
2889 ASSERT_TRUE (p0 == p1);
2890
2891 // [10,20] U [15, 30] => [10, 30].
2892 r0 = range_int (10, 20);
2893 r1 = range_int (15, 30);
2894 r0.union_ (r1);
2895 ASSERT_TRUE (r0 == range_int (10, 30));
2896
2897 // [15,40] U [] => [15,40].
2898 r0 = range_int (15, 40);
2899 r1.set_undefined ();
2900 r0.union_ (r1);
2901 ASSERT_TRUE (r0 == range_int (15, 40));
2902
2903 // [10,20] U [10,10] => [10,20].
2904 r0 = range_int (10, 20);
2905 r1 = range_int (10, 10);
2906 r0.union_ (r1);
2907 ASSERT_TRUE (r0 == range_int (10, 20));
2908
2909 // [10,20] U [9,9] => [9,20].
2910 r0 = range_int (10, 20);
2911 r1 = range_int (9, 9);
2912 r0.union_ (r1);
2913 ASSERT_TRUE (r0 == range_int (9, 20));
2914
2915 // [10,20] ^ [15,30] => [15,20].
2916 r0 = range_int (10, 20);
2917 r1 = range_int (15, 30);
2918 r0.intersect (r1);
2919 ASSERT_TRUE (r0 == range_int (15, 20));
2920
2921 // Test the internal sanity of wide_int's wrt HWIs.
2922 ASSERT_TRUE (wi::max_value (TYPE_PRECISION (boolean_type_node),
2923 TYPE_SIGN (boolean_type_node))
2924 == wi::uhwi (1, TYPE_PRECISION (boolean_type_node)));
2925
2926 // Test zero_p().
2927 r0 = range_int (0, 0);
2928 ASSERT_TRUE (r0.zero_p ());
2929
2930 // Test nonzero_p().
2931 r0 = range_int (0, 0);
2932 r0.invert ();
2933 ASSERT_TRUE (r0.nonzero_p ());
2934
2935 // r0 = ~[1,1]
2936 r0 = range_int (1, 1, VR_ANTI_RANGE);
2937 // r1 = ~[3,3]
2938 r1 = range_int (3, 3, VR_ANTI_RANGE);
2939
2940 // vv = [0,0][2,2][4, MAX]
2941 int_range<3> vv = r0;
2942 vv.intersect (r1);
2943
2944 ASSERT_TRUE (vv.contains_p (UINT (2)));
2945 ASSERT_TRUE (vv.num_pairs () == 3);
2946
2947 r0 = range_int (1, 1);
2948 // And union it with [0,0][2,2][4,MAX] multi range
2949 r0.union_ (vv);
2950 // The result should be [0,2][4,MAX], or ~[3,3] but it must contain 2
2951 ASSERT_TRUE (r0.contains_p (INT (2)));
2952 }
2953
// Exercise set_nonzero_bits/get_nonzero_bits and their interaction
// with union, intersection, and varying-ness.

static void
range_tests_nonzero_bits ()
{
  int_range<2> r0, r1;

  // Adding nonzero bits to a varying drops the varying.
  r0.set_varying (integer_type_node);
  r0.set_nonzero_bits (INT (255));
  ASSERT_TRUE (!r0.varying_p ());
  // Dropping the nonzero bits brings us back to varying.
  r0.set_nonzero_bits (INT (-1));
  ASSERT_TRUE (r0.varying_p ());

  // Test contains_p with nonzero bits.
  r0.set_zero (integer_type_node);
  ASSERT_TRUE (r0.contains_p (INT (0)));
  ASSERT_FALSE (r0.contains_p (INT (1)));
  r0.set_nonzero_bits (INT (0xfe));
  // Values with bits outside the 0xfe mask cannot be contained.
  ASSERT_FALSE (r0.contains_p (INT (0x100)));
  ASSERT_FALSE (r0.contains_p (INT (0x3)));

  // Union of nonzero bits.
  r0.set_varying (integer_type_node);
  r0.set_nonzero_bits (INT (0xf0));
  r1.set_varying (integer_type_node);
  r1.set_nonzero_bits (INT (0xf));
  r0.union_ (r1);
  ASSERT_TRUE (r0.get_nonzero_bits () == 0xff);

  // Intersect of nonzero bits.
  r0 = range_int (0, 255);
  r0.set_nonzero_bits (INT (0xfe));
  r1.set_varying (integer_type_node);
  r1.set_nonzero_bits (INT (0xf0));
  r0.intersect (r1);
  ASSERT_TRUE (r0.get_nonzero_bits () == 0xf0);

  // Intersect where the mask of nonzero bits is implicit from the range.
  r0.set_varying (integer_type_node);
  r1 = range_int (0, 255);
  r0.intersect (r1);
  ASSERT_TRUE (r0.get_nonzero_bits () == 0xff);

  // The union of a mask of 0xff..ffff00 with a mask of 0xff spans the
  // entire domain, and makes the range a varying.
  r0.set_varying (integer_type_node);
  wide_int x = wi::shwi (0xff, TYPE_PRECISION (integer_type_node));
  x = wi::bit_not (x);
  r0.set_nonzero_bits (x); 	// 0xff..ff00
  r1.set_varying (integer_type_node);
  r1.set_nonzero_bits (INT (0xff));
  r0.union_ (r1);
  ASSERT_TRUE (r0.varying_p ());

  // Test that setting a nonzero bit of 1 does not pessimize the range.
  r0.set_zero (integer_type_node);
  r0.set_nonzero_bits (INT (1));
  ASSERT_TRUE (r0.zero_p ());
}
3013
3014 // Build an frange from string endpoints.
3015
3016 static inline frange
3017 frange_float (const char *lb, const char *ub, tree type = float_type_node)
3018 {
3019 REAL_VALUE_TYPE min, max;
3020 gcc_assert (real_from_string (&min, lb) == 0);
3021 gcc_assert (real_from_string (&max, ub) == 0);
3022 return frange (type, min, max);
3023 }
3024
// Exercise NAN handling in franges: equality, propagation through
// union and intersection, and the known/maybe NAN predicates.

static void
range_tests_nan ()
{
  frange r0, r1;
  REAL_VALUE_TYPE q, r;
  bool signbit;

  // Equal ranges but with differing NAN bits are not equal.
  if (HONOR_NANS (float_type_node))
    {
      r1 = frange_float ("10", "12");
      r0 = r1;
      ASSERT_EQ (r0, r1);
      r0.clear_nan ();
      ASSERT_NE (r0, r1);
      r0.update_nan ();
      ASSERT_EQ (r0, r1);

      // [10, 20] NAN ^ [30, 40] NAN = NAN.
      r0 = frange_float ("10", "20");
      r1 = frange_float ("30", "40");
      r0.intersect (r1);
      ASSERT_TRUE (r0.known_isnan ());

      // [3,5] U [5,10] NAN = ... NAN
      r0 = frange_float ("3", "5");
      r0.clear_nan ();
      r1 = frange_float ("5", "10");
      r0.union_ (r1);
      ASSERT_TRUE (r0.maybe_isnan ());
    }

  // [5,6] U NAN = [5,6] NAN.
  r0 = frange_float ("5", "6");
  r0.clear_nan ();
  r1.set_nan (float_type_node);
  r0.union_ (r1);
  real_from_string (&q, "5");
  real_from_string (&r, "6");
  ASSERT_TRUE (real_identical (&q, &r0.lower_bound ()));
  ASSERT_TRUE (real_identical (&r, &r0.upper_bound ()));
  ASSERT_TRUE (r0.maybe_isnan ());

  // NAN U NAN = NAN
  r0.set_nan (float_type_node);
  r1.set_nan (float_type_node);
  r0.union_ (r1);
  ASSERT_TRUE (r0.known_isnan ());

  // [INF, INF] NAN ^ NAN = NAN
  r0.set_nan (float_type_node);
  r1 = frange_float ("+Inf", "+Inf");
  if (!HONOR_NANS (float_type_node))
    r1.update_nan ();
  r0.intersect (r1);
  ASSERT_TRUE (r0.known_isnan ());

  // NAN ^ NAN = NAN
  r0.set_nan (float_type_node);
  r1.set_nan (float_type_node);
  r0.intersect (r1);
  ASSERT_TRUE (r0.known_isnan ());

  // +NAN ^ -NAN = UNDEFINED
  r0.set_nan (float_type_node, false);
  r1.set_nan (float_type_node, true);
  r0.intersect (r1);
  ASSERT_TRUE (r0.undefined_p ());

  // VARYING ^ NAN = NAN.
  r0.set_nan (float_type_node);
  r1.set_varying (float_type_node);
  r0.intersect (r1);
  ASSERT_TRUE (r0.known_isnan ());

  // [3,4] ^ NAN = UNDEFINED.
  r0 = frange_float ("3", "4");
  r0.clear_nan ();
  r1.set_nan (float_type_node);
  r0.intersect (r1);
  ASSERT_TRUE (r0.undefined_p ());

  // [-3, 5] ^ NAN = UNDEFINED
  r0 = frange_float ("-3", "5");
  r0.clear_nan ();
  r1.set_nan (float_type_node);
  r0.intersect (r1);
  ASSERT_TRUE (r0.undefined_p ());

  // Setting the NAN bit to yes does not make us a known NAN.
  r0.set_varying (float_type_node);
  r0.update_nan ();
  ASSERT_FALSE (r0.known_isnan ());

  // NAN is in a VARYING.
  r0.set_varying (float_type_node);
  real_nan (&r, "", 1, TYPE_MODE (float_type_node));
  REAL_VALUE_TYPE nan = r;
  ASSERT_TRUE (r0.contains_p (nan));

  // -NAN is in a VARYING.
  r0.set_varying (float_type_node);
  q = real_value_negate (&r);
  REAL_VALUE_TYPE neg_nan = q;
  ASSERT_TRUE (r0.contains_p (neg_nan));

  // Clearing the NAN on a [] NAN is the empty set.
  r0.set_nan (float_type_node);
  r0.clear_nan ();
  ASSERT_TRUE (r0.undefined_p ());

  // [10,20] NAN ^ [21,25] NAN = [NAN]
  r0 = frange_float ("10", "20");
  r0.update_nan ();
  r1 = frange_float ("21", "25");
  r1.update_nan ();
  r0.intersect (r1);
  ASSERT_TRUE (r0.known_isnan ());

  // NAN U [5,6] should be [5,6] +-NAN.
  r0.set_nan (float_type_node);
  r1 = frange_float ("5", "6");
  r1.clear_nan ();
  r0.union_ (r1);
  real_from_string (&q, "5");
  real_from_string (&r, "6");
  ASSERT_TRUE (real_identical (&q, &r0.lower_bound ()));
  ASSERT_TRUE (real_identical (&r, &r0.upper_bound ()));
  // The sign of the possible NAN is unknown.
  ASSERT_TRUE (!r0.signbit_p (signbit));
  ASSERT_TRUE (r0.maybe_isnan ());

  // NAN U NAN shouldn't change anything.
  r0.set_nan (float_type_node);
  r1.set_nan (float_type_node);
  ASSERT_FALSE (r0.union_ (r1));

  // [3,5] NAN U NAN shouldn't change anything.
  r0 = frange_float ("3", "5");
  r1.set_nan (float_type_node);
  ASSERT_FALSE (r0.union_ (r1));

  // [3,5] U NAN *does* trigger a change.
  r0 = frange_float ("3", "5");
  r0.clear_nan ();
  r1.set_nan (float_type_node);
  ASSERT_TRUE (r0.union_ (r1));
}
3172
// Exercise signed-zero handling: containment, union, intersection,
// and sign-bit queries on ranges whose endpoints are +0 and/or -0.

static void
range_tests_signed_zeros ()
{
  REAL_VALUE_TYPE zero = dconst0;
  REAL_VALUE_TYPE neg_zero = zero;
  neg_zero.sign = 1;
  frange r0, r1;
  bool signbit;

  // [0,0] contains [0,0] but not [-0,-0] and vice versa.
  r0 = frange_float ("0.0", "0.0");
  r1 = frange_float ("-0.0", "-0.0");
  ASSERT_TRUE (r0.contains_p (zero));
  ASSERT_TRUE (!r0.contains_p (neg_zero));
  ASSERT_TRUE (r1.contains_p (neg_zero));
  ASSERT_TRUE (!r1.contains_p (zero));

  // Test contains_p() when we know the sign of the zero.
  r0 = frange_float ("0.0", "0.0");
  ASSERT_TRUE (r0.contains_p (zero));
  ASSERT_FALSE (r0.contains_p (neg_zero));
  r0 = frange_float ("-0.0", "-0.0");
  ASSERT_TRUE (r0.contains_p (neg_zero));
  ASSERT_FALSE (r0.contains_p (zero));

  // [-0, +0] contains both zeros.
  r0 = frange_float ("-0.0", "0.0");
  ASSERT_TRUE (r0.contains_p (neg_zero));
  ASSERT_TRUE (r0.contains_p (zero));

  // A range straddling zero contains both zeros.
  r0 = frange_float ("-3", "5");
  ASSERT_TRUE (r0.contains_p (neg_zero));
  ASSERT_TRUE (r0.contains_p (zero));

  // The intersection of zeros that differ in sign is a NAN (or
  // undefined if not honoring NANs).
  r0 = frange_float ("-0.0", "-0.0");
  r1 = frange_float ("0.0", "0.0");
  r0.intersect (r1);
  if (HONOR_NANS (float_type_node))
    ASSERT_TRUE (r0.known_isnan ());
  else
    ASSERT_TRUE (r0.undefined_p ());

  // The union of zeros that differ in sign is a zero with unknown sign.
  r0 = frange_float ("0.0", "0.0");
  r1 = frange_float ("-0.0", "-0.0");
  r0.union_ (r1);
  ASSERT_TRUE (r0.zero_p () && !r0.signbit_p (signbit));

  // [-0, +0] has an unknown sign.
  r0 = frange_float ("-0.0", "0.0");
  ASSERT_TRUE (r0.zero_p () && !r0.signbit_p (signbit));

  // [-0, +0] ^ [0, 0] is [0, 0]
  r0 = frange_float ("-0.0", "0.0");
  r1 = frange_float ("0.0", "0.0");
  r0.intersect (r1);
  ASSERT_TRUE (r0.zero_p ());

  // [+0, 5] with no NAN has a known, clear sign bit.
  r0 = frange_float ("+0", "5");
  r0.clear_nan ();
  ASSERT_TRUE (r0.signbit_p (signbit) && !signbit);

  // [-0, 5] may contain -0, so its sign bit is unknown.
  r0 = frange_float ("-0", "5");
  r0.clear_nan ();
  ASSERT_TRUE (!r0.signbit_p (signbit));

  // [-0, 10] ^ [+0, 5] drops -0: the lower bound becomes +0.
  r0 = frange_float ("-0", "10");
  r1 = frange_float ("0", "5");
  r0.intersect (r1);
  ASSERT_TRUE (real_iszero (&r0.lower_bound (), false));

  // [-0, 5] U [+0, 5] keeps -0 as the lower bound.
  r0 = frange_float ("-0", "5");
  r1 = frange_float ("0", "5");
  r0.union_ (r1);
  ASSERT_TRUE (real_iszero (&r0.lower_bound (), true));

  // [-5, -0] NAN ^ [+0, +0] NAN: the numeric parts do not overlap
  // (-0 vs +0), leaving only the possible NAN.
  r0 = frange_float ("-5", "-0");
  r0.update_nan ();
  r1 = frange_float ("0", "0");
  r1.update_nan ();
  r0.intersect (r1);
  if (HONOR_NANS (float_type_node))
    ASSERT_TRUE (r0.known_isnan ());
  else
    ASSERT_TRUE (r0.undefined_p ());

  // A nonnegative range still includes a possible NAN.
  r0.set_nonnegative (float_type_node);
  if (HONOR_NANS (float_type_node))
    ASSERT_TRUE (r0.maybe_isnan ());

  // [+0, 10] with no NAN has a known, clear SIGNBIT, since -0 is not
  // in the range.
  r0 = frange_float ("0", "10");
  r0.clear_nan ();
  ASSERT_TRUE (r0.signbit_p (signbit) && !signbit);
}
3269
3270 static void
3271 range_tests_signbit ()
3272 {
3273 frange r0, r1;
3274 bool signbit;
3275
3276 // Negative numbers should have the SIGNBIT set.
3277 r0 = frange_float ("-5", "-1");
3278 r0.clear_nan ();
3279 ASSERT_TRUE (r0.signbit_p (signbit) && signbit);
3280 // Positive numbers should have the SIGNBIT clear.
3281 r0 = frange_float ("1", "10");
3282 r0.clear_nan ();
3283 ASSERT_TRUE (r0.signbit_p (signbit) && !signbit);
3284 // Numbers spanning both positive and negative should have an
3285 // unknown SIGNBIT.
3286 r0 = frange_float ("-10", "10");
3287 r0.clear_nan ();
3288 ASSERT_TRUE (!r0.signbit_p (signbit));
3289 r0.set_varying (float_type_node);
3290 ASSERT_TRUE (!r0.signbit_p (signbit));
3291 }
3292
// Exercise basic frange union/intersection, plus the NAN, signbit,
// and signed-zero subtests when the target honors those properties.

static void
range_tests_floats ()
{
  frange r0, r1;

  if (HONOR_NANS (float_type_node))
    range_tests_nan ();
  range_tests_signbit ();

  if (HONOR_SIGNED_ZEROS (float_type_node))
    range_tests_signed_zeros ();

  // A range of [-INF,+INF] is actually VARYING if no other properties
  // are set.
  r0 = frange_float ("-Inf", "+Inf");
  ASSERT_TRUE (r0.varying_p ());
  // ...unless it has some special property...
  if (HONOR_NANS (r0.type ()))
    {
      r0.clear_nan ();
      ASSERT_FALSE (r0.varying_p ());
    }

  // For most architectures, where float and double are different
  // sizes, having the same endpoints does not necessarily mean the
  // ranges are equal.
  if (!types_compatible_p (float_type_node, double_type_node))
    {
      r0 = frange_float ("3.0", "3.0", float_type_node);
      r1 = frange_float ("3.0", "3.0", double_type_node);
      ASSERT_NE (r0, r1);
    }

  // [3,5] U [10,12] = [3,12].
  r0 = frange_float ("3", "5");
  r1 = frange_float ("10", "12");
  r0.union_ (r1);
  ASSERT_EQ (r0, frange_float ("3", "12"));

  // [5,10] U [4,8] = [4,10]
  r0 = frange_float ("5", "10");
  r1 = frange_float ("4", "8");
  r0.union_ (r1);
  ASSERT_EQ (r0, frange_float ("4", "10"));

  // [3,5] U [4,10] = [3,10]
  r0 = frange_float ("3", "5");
  r1 = frange_float ("4", "10");
  r0.union_ (r1);
  ASSERT_EQ (r0, frange_float ("3", "10"));

  // [4,10] U [5,11] = [4,11]
  r0 = frange_float ("4", "10");
  r1 = frange_float ("5", "11");
  r0.union_ (r1);
  ASSERT_EQ (r0, frange_float ("4", "11"));

  // [3,12] ^ [10,12] = [10,12].
  r0 = frange_float ("3", "12");
  r1 = frange_float ("10", "12");
  r0.intersect (r1);
  ASSERT_EQ (r0, frange_float ("10", "12"));

  // [10,12] ^ [11,11] = [11,11]
  r0 = frange_float ("10", "12");
  r1 = frange_float ("11", "11");
  r0.intersect (r1);
  ASSERT_EQ (r0, frange_float ("11", "11"));

  // [10,20] ^ [5,15] = [10,15]
  r0 = frange_float ("10", "20");
  r1 = frange_float ("5", "15");
  r0.intersect (r1);
  ASSERT_EQ (r0, frange_float ("10", "15"));

  // [10,20] ^ [15,25] = [15,20]
  r0 = frange_float ("10", "20");
  r1 = frange_float ("15", "25");
  r0.intersect (r1);
  ASSERT_EQ (r0, frange_float ("15", "20"));

  // [10,20] ^ [21,25] = []
  r0 = frange_float ("10", "20");
  r0.clear_nan ();
  r1 = frange_float ("21", "25");
  r1.clear_nan ();
  r0.intersect (r1);
  ASSERT_TRUE (r0.undefined_p ());

  if (HONOR_INFINITIES (float_type_node))
    {
      // Make sure [-Inf, -Inf] doesn't get normalized.
      r0 = frange_float ("-Inf", "-Inf");
      ASSERT_TRUE (real_isinf (&r0.lower_bound (), true));
      ASSERT_TRUE (real_isinf (&r0.upper_bound (), true));
    }

  // Test that reading back a global range yields the same result as
  // what we wrote into it.
  tree ssa = make_temp_ssa_name (float_type_node, NULL, "blah");
  r0.set_varying (float_type_node);
  r0.clear_nan ();
  set_range_info (ssa, r0);
  get_global_range_query ()->range_of_expr (r1, ssa);
  ASSERT_EQ (r0, r1);
}
3399
3400 // Run floating range tests for various combinations of NAN and INF
3401 // support.
3402
3403 static void
3404 range_tests_floats_various ()
3405 {
3406 int save_finite_math_only = flag_finite_math_only;
3407
3408 // Test -ffinite-math-only.
3409 flag_finite_math_only = 1;
3410 range_tests_floats ();
3411 // Test -fno-finite-math-only.
3412 flag_finite_math_only = 0;
3413 range_tests_floats ();
3414
3415 flag_finite_math_only = save_finite_math_only;
3416 }
3417
// Entry point for the value-range selftests, invoked by the selftest
// framework.

void
range_tests ()
{
  range_tests_irange3 ();
  range_tests_int_range_max ();
  range_tests_strict_enum ();
  range_tests_nonzero_bits ();
  range_tests_floats_various ();
  range_tests_misc ();
}
3428
3429 } // namespace selftest
3430
3431 #endif // CHECKING_P