/* Support routines for Value Range Propagation (VRP).
   Copyright (C) 2005-2019 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "cfganal.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "intl.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-propagate.h"
#include "tree-chrec.h"
#include "tree-ssa-threadupdate.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "omp-general.h"
#include "target.h"
#include "case-cfn-macros.h"
#include "params.h"
#include "alloc-pool.h"
#include "domwalk.h"
#include "tree-cfgcleanup.h"
#include "stringpool.h"
#include "attribs.h"
#include "vr-values.h"
#include "builtins.h"
#include "wide-int-range.h"

static bool
ranges_from_anti_range (const value_range_base *ar,
                        value_range_base *vr0, value_range_base *vr1,
                        bool handle_pointers = false);

/* Set of SSA names found live during the RPO traversal of the function
   for still active basic-blocks.  */
static sbitmap *live;

void
value_range::set_equiv (bitmap equiv)
{
  if (undefined_p () || varying_p ())
    equiv = NULL;
  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.

     All equivalence bitmaps are allocated from the same obstack.  So
     we can use the obstack associated with EQUIV to allocate vr->equiv.  */
  if (m_equiv == NULL
      && equiv != NULL)
    m_equiv = BITMAP_ALLOC (equiv->obstack);

  if (equiv != m_equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
        bitmap_copy (m_equiv, equiv);
      else
        bitmap_clear (m_equiv);
    }
}

/* Initialize value_range.  */

void
value_range::set (enum value_range_kind kind, tree min, tree max,
                  bitmap equiv)
{
  value_range_base::set (kind, min, max);
  set_equiv (equiv);
  if (flag_checking)
    check ();
}

value_range_base::value_range_base (value_range_kind kind, tree min, tree max)
{
  set (kind, min, max);
}

value_range::value_range (value_range_kind kind, tree min, tree max,
                          bitmap equiv)
{
  m_equiv = NULL;
  set (kind, min, max, equiv);
}

value_range::value_range (const value_range_base &other)
{
  m_equiv = NULL;
  set (other.kind (), other.min (), other.max (), NULL);
}

/* Like set, but keep the equivalences in place.  */

void
value_range::update (value_range_kind kind, tree min, tree max)
{
  set (kind, min, max,
       (kind != VR_UNDEFINED && kind != VR_VARYING) ? m_equiv : NULL);
}

/* Copy value_range in FROM into THIS while avoiding bitmap sharing.

   Note: The code that avoids the bitmap sharing looks at the existing
   this->m_equiv, so this function cannot be used to initialize an
   object.  Use the constructors for initialization.  */

void
value_range::deep_copy (const value_range *from)
{
  set (from->m_kind, from->min (), from->max (), from->m_equiv);
}

void
value_range::move (value_range *from)
{
  set (from->m_kind, from->min (), from->max ());
  m_equiv = from->m_equiv;
  from->m_equiv = NULL;
}

/* Check the validity of the range.  */

void
value_range_base::check ()
{
  switch (m_kind)
    {
    case VR_RANGE:
    case VR_ANTI_RANGE:
      {
        int cmp;

        gcc_assert (m_min && m_max);

        gcc_assert (!TREE_OVERFLOW_P (m_min) && !TREE_OVERFLOW_P (m_max));

        /* Creating ~[-MIN, +MAX] is stupid because that would be
           the empty set.  */
        if (INTEGRAL_TYPE_P (TREE_TYPE (m_min)) && m_kind == VR_ANTI_RANGE)
          gcc_assert (!vrp_val_is_min (m_min) || !vrp_val_is_max (m_max));

        cmp = compare_values (m_min, m_max);
        gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
        break;
      }
    case VR_UNDEFINED:
      gcc_assert (!min () && !max ());
      break;
    case VR_VARYING:
      gcc_assert (m_min && m_max);
      break;
    default:
      gcc_unreachable ();
    }
}

void
value_range::check ()
{
  value_range_base::check ();
  switch (m_kind)
    {
    case VR_UNDEFINED:
    case VR_VARYING:
      gcc_assert (!m_equiv || bitmap_empty_p (m_equiv));
    default:;
    }
}

/* Equality operator.  We purposely do not overload ==, to avoid
   confusion with the equivalence bitmap in the derived value_range
   class.  */

bool
value_range_base::equal_p (const value_range_base &other) const
{
  /* Ignore types for undefined.  All undefines are equal.  */
  if (undefined_p ())
    return m_kind == other.m_kind;

  return (m_kind == other.m_kind
          && vrp_operand_equal_p (m_min, other.m_min)
          && vrp_operand_equal_p (m_max, other.m_max));
}

/* Returns TRUE if THIS == OTHER.  Ignores the equivalence bitmap if
   IGNORE_EQUIVS is TRUE.  */

bool
value_range::equal_p (const value_range &other, bool ignore_equivs) const
{
  return (value_range_base::equal_p (other)
          && (ignore_equivs
              || vrp_bitmap_equal_p (m_equiv, other.m_equiv)));
}

/* Return TRUE if this is a symbolic range.  */

bool
value_range_base::symbolic_p () const
{
  return (!varying_p ()
          && !undefined_p ()
          && (!is_gimple_min_invariant (m_min)
              || !is_gimple_min_invariant (m_max)));
}

/* NOTE: This is not the inverse of symbolic_p, because the range
   could also be varying or undefined.  Ideally the two would be
   inverses of each other, with varying only applying to symbolics.
   Varying of constants would be represented as [-MIN, +MAX].  */

bool
value_range_base::constant_p () const
{
  return (!varying_p ()
          && !undefined_p ()
          && TREE_CODE (m_min) == INTEGER_CST
          && TREE_CODE (m_max) == INTEGER_CST);
}

void
value_range_base::set_undefined ()
{
  m_kind = VR_UNDEFINED;
  m_min = m_max = NULL;
}

void
value_range::set_undefined ()
{
  set (VR_UNDEFINED, NULL, NULL, NULL);
}

void
value_range_base::set_varying (tree type)
{
  m_kind = VR_VARYING;
  if (supports_type_p (type))
    {
      m_min = vrp_val_min (type, true);
      m_max = vrp_val_max (type, true);
    }
  else
    /* We can't do anything range-wise with these types.  */
    m_min = m_max = error_mark_node;
}

void
value_range::set_varying (tree type)
{
  value_range_base::set_varying (type);
  equiv_clear ();
}

/* Return TRUE if it is possible that range contains VAL.  */

bool
value_range_base::may_contain_p (tree val) const
{
  return value_inside_range (val) != 0;
}

void
value_range::equiv_clear ()
{
  if (m_equiv)
    bitmap_clear (m_equiv);
}

/* Add VAR and VAR's equivalence set (VAR_VR) to the equivalence
   bitmap.  If no equivalence table has been created, OBSTACK is the
   obstack to use (NULL for the default obstack).

   This is the central point where equivalence processing can be
   turned on/off.  */

void
value_range::equiv_add (const_tree var,
                        const value_range *var_vr,
                        bitmap_obstack *obstack)
{
  if (!m_equiv)
    m_equiv = BITMAP_ALLOC (obstack);
  unsigned ver = SSA_NAME_VERSION (var);
  bitmap_set_bit (m_equiv, ver);
  if (var_vr && var_vr->m_equiv)
    bitmap_ior_into (m_equiv, var_vr->m_equiv);
}

/* If range is a singleton, place it in RESULT and return TRUE.
   Note: A singleton can be any gimple invariant, not just constants.
   So, [&x, &x] counts as a singleton.  */

bool
value_range_base::singleton_p (tree *result) const
{
  if (m_kind == VR_ANTI_RANGE)
    {
      if (nonzero_p ())
        {
          if (TYPE_PRECISION (type ()) == 1)
            {
              if (result)
                *result = m_max;
              return true;
            }
          return false;
        }

      value_range_base vr0, vr1;
      return (ranges_from_anti_range (this, &vr0, &vr1, true)
              && vr1.undefined_p ()
              && vr0.singleton_p (result));
    }
  if (m_kind == VR_RANGE
      && vrp_operand_equal_p (min (), max ())
      && is_gimple_min_invariant (min ()))
    {
      if (result)
        *result = min ();
      return true;
    }
  return false;
}
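
/* A minimal usage sketch (FIVE stands for an INTEGER_CST of value 5
   in some integral type):

     value_range_base vr (VR_RANGE, five, five);
     tree val;
     if (vr.singleton_p (&val))
       gcc_assert (val == five);   // [5, 5] is a singleton

   The one-bit special case above also makes the boolean anti-range
   ~[0, 0] a singleton, yielding its only other value, 1.  */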

tree
value_range_base::type () const
{
  gcc_assert (m_min || undefined_p ());
  return TREE_TYPE (min ());
}

void
value_range_base::dump (FILE *file) const
{
  if (undefined_p ())
    fprintf (file, "UNDEFINED");
  else if (m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
    {
      tree ttype = type ();

      print_generic_expr (file, ttype);
      fprintf (file, " ");

      fprintf (file, "%s[", (m_kind == VR_ANTI_RANGE) ? "~" : "");

      if (INTEGRAL_TYPE_P (ttype)
          && !TYPE_UNSIGNED (ttype)
          && vrp_val_is_min (min ())
          && TYPE_PRECISION (ttype) != 1)
        fprintf (file, "-INF");
      else
        print_generic_expr (file, min ());

      fprintf (file, ", ");

      if (INTEGRAL_TYPE_P (ttype)
          && vrp_val_is_max (max ())
          && TYPE_PRECISION (ttype) != 1)
        fprintf (file, "+INF");
      else
        print_generic_expr (file, max ());

      fprintf (file, "]");
    }
  else if (varying_p ())
    {
      print_generic_expr (file, type ());
      fprintf (file, " VARYING");
    }
  else
    gcc_unreachable ();
}

void
value_range_base::dump () const
{
  dump (stderr);
}

void
value_range::dump (FILE *file) const
{
  value_range_base::dump (file);
  if ((m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
      && m_equiv)
    {
      bitmap_iterator bi;
      unsigned i, c = 0;

      fprintf (file, "  EQUIVALENCES: { ");

      EXECUTE_IF_SET_IN_BITMAP (m_equiv, 0, i, bi)
        {
          print_generic_expr (file, ssa_name (i));
          fprintf (file, " ");
          c++;
        }

      fprintf (file, "} (%u elements)", c);
    }
}

void
value_range::dump () const
{
  dump (stderr);
}

void
dump_value_range (FILE *file, const value_range *vr)
{
  if (!vr)
    fprintf (file, "[]");
  else
    vr->dump (file);
}

void
dump_value_range (FILE *file, const value_range_base *vr)
{
  if (!vr)
    fprintf (file, "[]");
  else
    vr->dump (file);
}

DEBUG_FUNCTION void
debug (const value_range_base *vr)
{
  dump_value_range (stderr, vr);
}

DEBUG_FUNCTION void
debug (const value_range_base &vr)
{
  dump_value_range (stderr, &vr);
}

DEBUG_FUNCTION void
debug (const value_range *vr)
{
  dump_value_range (stderr, vr);
}

DEBUG_FUNCTION void
debug (const value_range &vr)
{
  dump_value_range (stderr, &vr);
}

/* Return true if the SSA name NAME is live on the edge E.  */

static bool
live_on_edge (edge e, tree name)
{
  return (live[e->dest->index]
          && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
}

/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */
struct assert_locus
{
  /* Basic block where the assertion would be inserted.  */
  basic_block bb;

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
  edge e;

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */
  tree val;

  /* Expression to compare.  */
  tree expr;

  /* Next node in the linked list.  */
  assert_locus *next;
};
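
/* For example, given the test 'if (x_1 > 5)', the assert_locus
   recorded for the true edge has EXPR = x_1, COMP_CODE = GT_EXPR and
   VAL = 5; when the assertions are materialized this becomes

     x_2 = ASSERT_EXPR <x_1, x_1 > 5>;

   so uses dominated by the assertion can be rewritten in terms of x_2
   and pick up the narrowed range.  */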

/* If bit I is present, it means that SSA name N_i has a list of
   assertions that should be inserted in the IL.  */
static bitmap need_assert_for;

/* Array of locations lists where to insert assertions.  ASSERTS_FOR[I]
   holds a list of ASSERT_LOCUS_T nodes that describe where
   ASSERT_EXPRs for SSA name N_I should be inserted.  */
static assert_locus **asserts_for;

/* Return the maximum value for TYPE.  */

tree
vrp_val_max (const_tree type, bool handle_pointers)
{
  if (INTEGRAL_TYPE_P (type))
    return TYPE_MAX_VALUE (type);
  if (POINTER_TYPE_P (type) && handle_pointers)
    {
      wide_int max = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
      return wide_int_to_tree (const_cast<tree> (type), max);
    }
  return NULL_TREE;
}

/* Return the minimum value for TYPE.  */

tree
vrp_val_min (const_tree type, bool handle_pointers)
{
  if (INTEGRAL_TYPE_P (type))
    return TYPE_MIN_VALUE (type);
  if (POINTER_TYPE_P (type) && handle_pointers)
    return build_zero_cst (const_cast<tree> (type));
  return NULL_TREE;
}
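
/* A minimal usage sketch (PTR_TYPE stands for some POINTER_TYPE node):

     tree lo = vrp_val_min (ptr_type, true);   // (void *) 0
     tree hi = vrp_val_max (ptr_type, true);   // all-ones address
     value_range_base full (VR_RANGE, lo, hi); // canonicalized to VARYING

   With HANDLE_POINTERS left false, both calls return NULL_TREE for
   pointer types.  */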

/* Return whether VAL is equal to the maximum value of its type.
   We can't do a simple equality comparison with TYPE_MAX_VALUE because
   C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
   is not == to the integer constant with the same value in the type.  */

bool
vrp_val_is_max (const_tree val)
{
  tree type_max = vrp_val_max (TREE_TYPE (val));
  return (val == type_max
          || (type_max != NULL_TREE
              && operand_equal_p (val, type_max, 0)));
}

/* Return whether VAL is equal to the minimum value of its type.  */

bool
vrp_val_is_min (const_tree val)
{
  tree type_min = vrp_val_min (TREE_TYPE (val));
  return (val == type_min
          || (type_min != NULL_TREE
              && operand_equal_p (val, type_min, 0)));
}

/* VR_TYPE describes a range with minimum value *MIN and maximum
   value *MAX.  Restrict the range to the set of values that have
   no bits set outside NONZERO_BITS.  Update *MIN and *MAX and
   return the new range type.

   SGN gives the sign of the values described by the range.  */

enum value_range_kind
intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
                                   wide_int *min, wide_int *max,
                                   const wide_int &nonzero_bits,
                                   signop sgn)
{
  if (vr_type == VR_ANTI_RANGE)
    {
      /* The VR_ANTI_RANGE is equivalent to the union of the ranges
         A: [-INF, *MIN) and B: (*MAX, +INF].  First use NONZERO_BITS
         to create an inclusive upper bound for A and an inclusive lower
         bound for B.  */
      wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
      wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);

      /* If the calculation of A_MAX wrapped, A is effectively empty
         and A_MAX is the highest value that satisfies NONZERO_BITS.
         Likewise if the calculation of B_MIN wrapped, B is effectively
         empty and B_MIN is the lowest value that satisfies NONZERO_BITS.  */
      bool a_empty = wi::ge_p (a_max, *min, sgn);
      bool b_empty = wi::le_p (b_min, *max, sgn);

      /* If both A and B are empty, there are no valid values.  */
      if (a_empty && b_empty)
        return VR_UNDEFINED;

      /* If exactly one of A or B is empty, return a VR_RANGE for the
         other one.  */
      if (a_empty || b_empty)
        {
          *min = b_min;
          *max = a_max;
          gcc_checking_assert (wi::le_p (*min, *max, sgn));
          return VR_RANGE;
        }

      /* Update the VR_ANTI_RANGE bounds.  */
      *min = a_max + 1;
      *max = b_min - 1;
      gcc_checking_assert (wi::le_p (*min, *max, sgn));

      /* Now check whether the excluded range includes any values that
         satisfy NONZERO_BITS.  If not, switch to a full VR_RANGE.  */
      if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
        {
          unsigned int precision = min->get_precision ();
          *min = wi::min_value (precision, sgn);
          *max = wi::max_value (precision, sgn);
          vr_type = VR_RANGE;
        }
    }
  if (vr_type == VR_RANGE)
    {
      *max = wi::round_down_for_mask (*max, nonzero_bits);

      /* Check that the range contains at least one valid value.  */
      if (wi::gt_p (*min, *max, sgn))
        return VR_UNDEFINED;

      *min = wi::round_up_for_mask (*min, nonzero_bits);
      gcc_checking_assert (wi::le_p (*min, *max, sgn));
    }
  return vr_type;
}
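
/* A worked example: with NONZERO_BITS == 0b1100, the only
   representable values are {0, 4, 8, 12}.  For the range [1, 20] the
   code above computes *MIN = round_up_for_mask (1) = 4 and
   *MAX = round_down_for_mask (20) = 12, giving [4, 12].  For the
   anti-range ~[3, 9] it computes A_MAX = 0 and B_MIN = 12, so the
   exclusion tightens to ~[1, 11].  */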
/* Set value range to the canonical form of {VRTYPE, MIN, MAX, EQUIV}.
   This means adjusting VRTYPE, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */

void
value_range_base::set (enum value_range_kind kind, tree min, tree max)
{
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (kind == VR_UNDEFINED)
    {
      set_undefined ();
      return;
    }
  else if (kind == VR_VARYING)
    {
      gcc_assert (TREE_TYPE (min) == TREE_TYPE (max));
      tree typ = TREE_TYPE (min);
      if (supports_type_p (typ))
        {
          gcc_assert (vrp_val_min (typ, true));
          gcc_assert (vrp_val_max (typ, true));
        }
      set_varying (typ);
      return;
    }

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      m_kind = kind;
      m_min = min;
      m_max = max;
      return;
    }

  /* If MIN and MAX are in the wrong order, swap them and invert the
     range kind, adjusting the bounds accordingly.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one, tmp;

      /* For one bit precision if max < min, then the swapped
         range covers all values, so for VR_RANGE it is varying and
         for VR_ANTI_RANGE empty range, so drop to varying as well.  */
      if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
        {
          set_varying (TREE_TYPE (min));
          return;
        }

      one = build_int_cst (TREE_TYPE (min), 1);
      tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
         that again.  But this represents an empty value range, so drop
         to varying in this case.  */
      if (tree_int_cst_lt (max, min))
        {
          set_varying (TREE_TYPE (min));
          return;
        }

      kind = kind == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  tree type = TREE_TYPE (min);

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (kind == VR_ANTI_RANGE)
    {
      /* For -fstrict-enums we may receive out-of-range ranges so consider
         values < -INF and values > INF as -INF/INF as well.  */
      bool is_min = (INTEGRAL_TYPE_P (type)
                     && tree_int_cst_compare (min, TYPE_MIN_VALUE (type)) <= 0);
      bool is_max = (INTEGRAL_TYPE_P (type)
                     && tree_int_cst_compare (max, TYPE_MAX_VALUE (type)) >= 0);

      if (is_min && is_max)
        {
          /* We cannot deal with empty ranges, drop to varying.
             ??? This could be VR_UNDEFINED instead.  */
          set_varying (type);
          return;
        }
      else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
               && (is_min || is_max))
        {
          /* Non-empty boolean ranges can always be represented
             as a singleton range.  */
          if (is_min)
            min = max = vrp_val_max (TREE_TYPE (min));
          else
            min = max = vrp_val_min (TREE_TYPE (min));
          kind = VR_RANGE;
        }
      else if (is_min
               /* As a special exception preserve non-null ranges.  */
               && !(TYPE_UNSIGNED (TREE_TYPE (min))
                    && integer_zerop (max)))
        {
          tree one = build_int_cst (TREE_TYPE (max), 1);
          min = int_const_binop (PLUS_EXPR, max, one);
          max = vrp_val_max (TREE_TYPE (max));
          kind = VR_RANGE;
        }
      else if (is_max)
        {
          tree one = build_int_cst (TREE_TYPE (min), 1);
          max = int_const_binop (MINUS_EXPR, min, one);
          min = vrp_val_min (TREE_TYPE (min));
          kind = VR_RANGE;
        }
    }

  /* Normalize [MIN, MAX] into VARYING and ~[MIN, MAX] into UNDEFINED.

     Avoid using TYPE_{MIN,MAX}_VALUE because -fstrict-enums can
     restrict those to a subset of what actually fits in the type.
     Instead use the extremes of the type precision which will allow
     compare_range_with_value() to check if a value is inside a range,
     whereas if we used TYPE_*_VAL, said function would just punt
     upon seeing a VARYING.  */
  unsigned prec = TYPE_PRECISION (type);
  signop sign = TYPE_SIGN (type);
  if (wi::eq_p (wi::to_wide (min), wi::min_value (prec, sign))
      && wi::eq_p (wi::to_wide (max), wi::max_value (prec, sign)))
    {
      if (kind == VR_RANGE)
        set_varying (type);
      else if (kind == VR_ANTI_RANGE)
        set_undefined ();
      else
        gcc_unreachable ();
      return;
    }

  /* Do not drop [-INF(OVF), +INF(OVF)] to varying.  (OVF) has to be sticky
     to make sure VRP iteration terminates, otherwise we can get into
     oscillations.  */

  m_kind = kind;
  m_min = min;
  m_max = max;
  if (flag_checking)
    check ();
}
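
/* Canonicalization examples, assuming an unsigned 8-bit type (the
   numeric arguments stand for INTEGER_CSTs of that type):

     set (VR_RANGE, 250, 5)       -> ~[6, 249]  (wrapped range)
     set (VR_ANTI_RANGE, 0, 99)   -> [100, 255]
     set (VR_RANGE, 0, 255)       -> VARYING
     set (VR_ANTI_RANGE, 5, 4)    -> VARYING    ([C+1, C] corner case)  */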

void
value_range_base::set (tree val)
{
  gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set (VR_RANGE, val, val);
}

void
value_range::set (tree val)
{
  gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set (VR_RANGE, val, val, NULL);
}

/* Set value range VR to a nonzero range of type TYPE.  */

void
value_range_base::set_nonzero (tree type)
{
  tree zero = build_int_cst (type, 0);
  set (VR_ANTI_RANGE, zero, zero);
}

/* Set value range VR to a ZERO range of type TYPE.  */

void
value_range_base::set_zero (tree type)
{
  set (build_int_cst (type, 0));
}

/* Return true if VAL1 and VAL2 are equal values for VRP purposes.  */

bool
vrp_operand_equal_p (const_tree val1, const_tree val2)
{
  if (val1 == val2)
    return true;
  if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
    return false;
  return true;
}

/* Return true if the bitmaps B1 and B2 are equal.  */

bool
vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
{
  return (b1 == b2
          || ((!b1 || bitmap_empty_p (b1))
              && (!b2 || bitmap_empty_p (b2)))
          || (b1 && b2
              && bitmap_equal_p (b1, b2)));
}

/* Return true if max and min of VR are INTEGER_CST.  It need not be
   a singleton.  */

bool
range_int_cst_p (const value_range_base *vr)
{
  return (vr->kind () == VR_RANGE
          && TREE_CODE (vr->min ()) == INTEGER_CST
          && TREE_CODE (vr->max ()) == INTEGER_CST);
}

/* Return true if VR is an INTEGER_CST singleton.  */

bool
range_int_cst_singleton_p (const value_range_base *vr)
{
  return (range_int_cst_p (vr)
          && tree_int_cst_equal (vr->min (), vr->max ()));
}

/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
   otherwise.  We only handle additive operations and set NEG to true if the
   symbol is negated and INV to the invariant part, if any.  */

tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
  bool neg_;
  tree inv_;

  *inv = NULL_TREE;
  *neg = false;

  if (TREE_CODE (t) == PLUS_EXPR
      || TREE_CODE (t) == POINTER_PLUS_EXPR
      || TREE_CODE (t) == MINUS_EXPR)
    {
      if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
        {
          neg_ = (TREE_CODE (t) == MINUS_EXPR);
          inv_ = TREE_OPERAND (t, 0);
          t = TREE_OPERAND (t, 1);
        }
      else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
        {
          neg_ = false;
          inv_ = TREE_OPERAND (t, 1);
          t = TREE_OPERAND (t, 0);
        }
      else
        return NULL_TREE;
    }
  else
    {
      neg_ = false;
      inv_ = NULL_TREE;
    }

  if (TREE_CODE (t) == NEGATE_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      neg_ = !neg_;
    }

  if (TREE_CODE (t) != SSA_NAME)
    return NULL_TREE;

  if (inv_ && TREE_OVERFLOW_P (inv_))
    inv_ = drop_tree_overflow (inv_);

  *neg = neg_;
  *inv = inv_;
  return t;
}
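
/* Examples of the decomposition, where x_1 and y_2 are SSA names and
   the constants are INTEGER_CSTs:

     x_1 + 16  -> returns x_1, *NEG = false, *INV = 16
     5 - x_1   -> returns x_1, *NEG = true,  *INV = 5
     -x_1      -> returns x_1, *NEG = true,  *INV = NULL_TREE
     x_1 + y_2 -> returns NULL_TREE (two symbols)  */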

/* The reverse operation: build a symbolic expression with TYPE
   from symbol SYM, negated according to NEG, and invariant INV.  */

static tree
build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
{
  const bool pointer_p = POINTER_TYPE_P (type);
  tree t = sym;

  if (neg)
    t = build1 (NEGATE_EXPR, type, t);

  if (integer_zerop (inv))
    return t;

  return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
}

/* Return
   1 if VAL < VAL2
   0 if !(VAL < VAL2)
   -2 if those are incomparable.  */
int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    return tree_int_cst_lt (val, val2);
  else if (TREE_CODE (val) == SSA_NAME && TREE_CODE (val2) == SSA_NAME)
    return val == val2 ? 0 : -2;
  else
    {
      int cmp = compare_values (val, val2);
      if (cmp == -1)
        return 1;
      else if (cmp == 0 || cmp == 1)
        return 0;
      else
        return -2;
    }

  return 0;
}

/* Compare two values VAL1 and VAL2.  Return

     -2 if VAL1 and VAL2 cannot be compared at compile-time,
     -1 if VAL1 < VAL2,
      0 if VAL1 == VAL2,
     +1 if VAL1 > VAL2, and
     +2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
              == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  if (!useless_type_conversion_p (TREE_TYPE (val1), TREE_TYPE (val2)))
    val2 = fold_convert (TREE_TYPE (val1), val2);

  const bool overflow_undefined
    = INTEGRAL_TYPE_P (TREE_TYPE (val1))
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
  tree inv1, inv2;
  bool neg1, neg2;
  tree sym1 = get_single_symbol (val1, &neg1, &inv1);
  tree sym2 = get_single_symbol (val2, &neg2, &inv2);

  /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
     accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
  if (sym1 && sym2)
    {
      /* Both values must use the same name with the same sign.  */
      if (sym1 != sym2 || neg1 != neg2)
        return -2;

      /* [-]NAME + CST == [-]NAME + CST.  */
      if (inv1 == inv2)
        return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!overflow_undefined)
        return -2;

      if (strict_overflow_p != NULL
          /* Symbolic range building sets TREE_NO_WARNING to declare
             that overflow doesn't happen.  */
          && (!inv1 || !TREE_NO_WARNING (val1))
          && (!inv2 || !TREE_NO_WARNING (val2)))
        *strict_overflow_p = true;

      if (!inv1)
        inv1 = build_int_cst (TREE_TYPE (val1), 0);
      if (!inv2)
        inv2 = build_int_cst (TREE_TYPE (val2), 0);

      return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
                      TYPE_SIGN (TREE_TYPE (val1)));
    }

  const bool cst1 = is_gimple_min_invariant (val1);
  const bool cst2 = is_gimple_min_invariant (val2);

  /* If one is of the form '[-]NAME + CST' and the other is constant, then
     it might be possible to say something depending on the constants.  */
  if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
    {
      if (!overflow_undefined)
        return -2;

      if (strict_overflow_p != NULL
          /* Symbolic range building sets TREE_NO_WARNING to declare
             that overflow doesn't happen.  */
          && (!sym1 || !TREE_NO_WARNING (val1))
          && (!sym2 || !TREE_NO_WARNING (val2)))
        *strict_overflow_p = true;

      const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
      tree cst = cst1 ? val1 : val2;
      tree inv = cst1 ? inv2 : inv1;

      /* Compute the difference between the constants.  If it overflows or
         underflows, this means that we can trivially compare the NAME with
         it and, consequently, the two values with each other.  */
      wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
      if (wi::cmp (0, wi::to_wide (inv), sgn)
          != wi::cmp (diff, wi::to_wide (cst), sgn))
        {
          const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
          return cst1 ? res : -res;
        }

      return -2;
    }

  /* We cannot say anything more for non-constants.  */
  if (!cst1 || !cst2)
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
        return -2;

      if (TREE_CODE (val1) == INTEGER_CST
          && TREE_CODE (val2) == INTEGER_CST)
        return tree_int_cst_compare (val1, val2);

      if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
        {
          if (known_eq (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return 0;
          if (known_lt (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return -1;
          if (known_gt (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return 1;
        }

      return -2;
    }
  else
    {
      if (TREE_CODE (val1) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
        {
          /* We cannot compare overflowed values.  */
          if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
            return -2;

          return tree_int_cst_compare (val1, val2);
        }

      /* First check whether VAL1 and VAL2 are the same.  */
      if (operand_equal_p (val1, val2, 0))
        return 0;

      fold_defer_overflow_warnings ();

      /* If VAL1 is a lower address than VAL2, return -1.  */
      tree t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val1, val2);
      if (t && integer_onep (t))
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return -1;
        }

      /* If VAL1 is a higher address than VAL2, return +1.  */
      t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val2, val1);
      if (t && integer_onep (t))
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return 1;
        }

      /* If VAL1 is different than VAL2, return +2.  */
      t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
      fold_undefer_and_ignore_overflow_warnings ();
      if (t && integer_onep (t))
        return 2;

      return -2;
    }
}

/* Compare values like compare_values_warnv.  */

int
compare_values (tree val1, tree val2)
{
  bool sop;
  return compare_values_warnv (val1, val2, &sop);
}
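
/* For instance, with undefined signed overflow, comparing the
   symbolic values x_1 + 1 and x_1 + 3 reduces to comparing the
   invariant parts, so compare_values returns -1 (and
   compare_values_warnv sets *STRICT_OVERFLOW_P); comparing x_1 + 1
   with y_2 + 3 returns -2, since different names cannot be ordered
   at compile time.  */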

/* Return 1 if VAL is inside value range.
   0 if VAL is not inside value range.
   -2 if we cannot tell either way.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

int
value_range_base::value_inside_range (tree val) const
{
  int cmp1, cmp2;

  if (varying_p ())
    return 1;

  if (undefined_p ())
    return 0;

  cmp1 = operand_less_p (val, m_min);
  if (cmp1 == -2)
    return -2;
  if (cmp1 == 1)
    return m_kind != VR_RANGE;

  cmp2 = operand_less_p (m_max, val);
  if (cmp2 == -2)
    return -2;

  if (m_kind == VR_RANGE)
    return !cmp2;
  else
    return !!cmp2;
}
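
/* E.g., for the range [3, 10], value_inside_range (5) is 1 and
   value_inside_range (11) is 0; for the anti-range ~[3, 10] the
   answers are inverted.  For a symbolic bound, as in [x_1, 10]
   queried with 5, the result is -2 because 5 and x_1 cannot be
   ordered at compile time.  */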

/* Value range wrapper for wide_int_range_set_zero_nonzero_bits.

   Compute MAY_BE_NONZERO and MUST_BE_NONZERO bit masks for range in VR.

   Return TRUE if VR was a constant range and we were able to compute
   the bit masks.  */

bool
vrp_set_zero_nonzero_bits (const tree expr_type,
                           const value_range_base *vr,
                           wide_int *may_be_nonzero,
                           wide_int *must_be_nonzero)
{
  if (!range_int_cst_p (vr))
    {
      *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
      *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
      return false;
    }
  wide_int_range_set_zero_nonzero_bits (TYPE_SIGN (expr_type),
                                        wi::to_wide (vr->min ()),
                                        wi::to_wide (vr->max ()),
                                        *may_be_nonzero, *must_be_nonzero);
  return true;
}

/* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
   so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
   false otherwise.  If *AR can be represented with a single range
   *VR1 will be VR_UNDEFINED.  */

static bool
ranges_from_anti_range (const value_range_base *ar,
                        value_range_base *vr0, value_range_base *vr1,
                        bool handle_pointers)
{
  tree type = ar->type ();

  vr0->set_undefined ();
  vr1->set_undefined ();

  /* As a future improvement, we could handle ~[0, A] as: [-INF, -1] U
     [A+1, +INF].  Not sure if this helps in practice, though.  */

  if (ar->kind () != VR_ANTI_RANGE
      || TREE_CODE (ar->min ()) != INTEGER_CST
      || TREE_CODE (ar->max ()) != INTEGER_CST
      || !vrp_val_min (type, handle_pointers)
      || !vrp_val_max (type, handle_pointers))
    return false;

  if (tree_int_cst_lt (vrp_val_min (type, handle_pointers), ar->min ()))
    vr0->set (VR_RANGE,
              vrp_val_min (type, handle_pointers),
              wide_int_to_tree (type, wi::to_wide (ar->min ()) - 1));
  if (tree_int_cst_lt (ar->max (), vrp_val_max (type, handle_pointers)))
    vr1->set (VR_RANGE,
              wide_int_to_tree (type, wi::to_wide (ar->max ()) + 1),
              vrp_val_max (type, handle_pointers));
  if (vr0->undefined_p ())
    {
      *vr0 = *vr1;
      vr1->set_undefined ();
    }

  return !vr0->undefined_p ();
}
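
/* E.g., for a signed 32-bit type, ~[3, 5] decomposes into
   *VR0 = [INT_MIN, 2] and *VR1 = [6, INT_MAX], while ~[INT_MIN, 5]
   is representable as the single range *VR0 = [6, INT_MAX], with
   *VR1 left VR_UNDEFINED.  */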

/* Extract the components of a value range into a pair of wide ints in
   [WMIN, WMAX], after having normalized any symbolics from the input.  */

static void inline
extract_range_into_wide_ints (const value_range_base *vr_,
                              tree type, wide_int &wmin, wide_int &wmax)
{
  signop sign = TYPE_SIGN (type);
  unsigned int prec = TYPE_PRECISION (type);
  gcc_assert (vr_->kind () != VR_ANTI_RANGE || vr_->symbolic_p ());
  value_range vr = vr_->normalize_symbolics ();
  if (range_int_cst_p (&vr))
    {
      wmin = wi::to_wide (vr.min ());
      wmax = wi::to_wide (vr.max ());
    }
  else
    {
      wmin = wi::min_value (prec, sign);
      wmax = wi::max_value (prec, sign);
    }
}

/* Value range wrapper for wide_int_range_multiplicative_op:

     *VR = *VR0 .CODE. *VR1.  */

static void
extract_range_from_multiplicative_op (value_range_base *vr,
                                      enum tree_code code, tree type,
                                      const value_range_base *vr0,
                                      const value_range_base *vr1)
{
  gcc_assert (code == MULT_EXPR
              || code == TRUNC_DIV_EXPR
              || code == FLOOR_DIV_EXPR
              || code == CEIL_DIV_EXPR
              || code == EXACT_DIV_EXPR
              || code == ROUND_DIV_EXPR
              || code == RSHIFT_EXPR
              || code == LSHIFT_EXPR);
  if (!range_int_cst_p (vr1))
    {
      vr->set_varying (type);
      return;
    }

  /* Even if vr0 is VARYING or otherwise not usable, we can derive
     useful ranges just from the shift count.  E.g.
     x >> 63 for signed 64-bit x is always [-1, 0].  */
  value_range_base tem = vr0->normalize_symbolics ();
  tree vr0_min, vr0_max;
  if (tem.kind () == VR_RANGE)
    {
      vr0_min = tem.min ();
      vr0_max = tem.max ();
    }
  else
    {
      vr0_min = vrp_val_min (type);
      vr0_max = vrp_val_max (type);
    }

  wide_int res_lb, res_ub;
  wide_int vr0_lb = wi::to_wide (vr0_min);
  wide_int vr0_ub = wi::to_wide (vr0_max);
  wide_int vr1_lb = wi::to_wide (vr1->min ());
  wide_int vr1_ub = wi::to_wide (vr1->max ());
  bool overflow_undefined = TYPE_OVERFLOW_UNDEFINED (type);
  unsigned prec = TYPE_PRECISION (type);

  if (wide_int_range_multiplicative_op (res_lb, res_ub,
                                        code, TYPE_SIGN (type), prec,
                                        vr0_lb, vr0_ub, vr1_lb, vr1_ub,
                                        overflow_undefined))
    vr->set (VR_RANGE, wide_int_to_tree (type, res_lb),
             wide_int_to_tree (type, res_ub));
  else
    vr->set_varying (type);
}
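
/* E.g., for a signed 32-bit type, [2, 3] * [10, 11] yields the range
   [20, 33], and, per the comment above, VARYING >> [63, 63] on a
   signed 64-bit type yields [-1, 0] because only the shift count
   needs to be a constant range.  */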

/* If BOUND will include a symbolic bound, adjust it accordingly,
   otherwise leave it as is.

   CODE is the original operation that combined the bounds (PLUS_EXPR
   or MINUS_EXPR).

   TYPE is the type of the original operation.

   SYM_OPn is the symbolic for OPn if it has a symbolic.

   NEG_OPn is TRUE if the OPn was negated.  */

static void
adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
                       tree sym_op0, tree sym_op1,
                       bool neg_op0, bool neg_op1)
{
  bool minus_p = (code == MINUS_EXPR);
  /* If the result bound is constant, we're done; otherwise, build the
     symbolic lower bound.  */
  if (sym_op0 == sym_op1)
    ;
  else if (sym_op0)
    bound = build_symbolic_expr (type, sym_op0,
                                 neg_op0, bound);
  else if (sym_op1)
    {
      /* We may not negate if that might introduce
         undefined overflow.  */
      if (!minus_p
          || neg_op1
          || TYPE_OVERFLOW_WRAPS (type))
        bound = build_symbolic_expr (type, sym_op1,
                                     neg_op1 ^ minus_p, bound);
      else
        bound = NULL_TREE;
    }
}

/* Combine OP0 and OP1, which are two parts of a bound, into one wide
   int bound according to CODE.  CODE is the operation combining the
   bound (either a PLUS_EXPR or a MINUS_EXPR).

   TYPE is the type of the combine operation.

   WI is the wide int to store the result.

   OVF is -1 if an underflow occurred, +1 if an overflow occurred and
   0 if no overflow or underflow occurred.  */

static void
combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
               tree type, tree op0, tree op1)
{
  bool minus_p = (code == MINUS_EXPR);
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  /* Combine the bounds, if any.  */
  if (op0 && op1)
    {
      if (minus_p)
        wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
      else
        wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
    }
  else if (op0)
    wi = wi::to_wide (op0);
  else if (op1)
    {
      if (minus_p)
        wi = wi::neg (wi::to_wide (op1), &ovf);
      else
        wi = wi::to_wide (op1);
    }
  else
    wi = wi::shwi (0, prec);
}

/* Given a range in [WMIN, WMAX], adjust it for possible overflow and
   put the result in VR.

   TYPE is the type of the range.

   MIN_OVF and MAX_OVF indicate what type of overflow, if any,
   occurred while originally calculating WMIN or WMAX.  -1 indicates
   underflow.  +1 indicates overflow.  0 indicates neither.  */

static void
set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max,
                               tree type,
                               const wide_int &wmin, const wide_int &wmax,
                               wi::overflow_type min_ovf,
                               wi::overflow_type max_ovf)
{
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  /* For one bit precision if max < min, then the swapped
     range covers all values.  */
  if (prec == 1 && wi::lt_p (wmax, wmin, sgn))
    {
      kind = VR_VARYING;
      return;
    }

  if (TYPE_OVERFLOW_WRAPS (type))
    {
      /* If overflow wraps, truncate the values and adjust the
         range kind and bounds appropriately.  */
      wide_int tmin = wide_int::from (wmin, prec, sgn);
      wide_int tmax = wide_int::from (wmax, prec, sgn);
      if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
        {
          /* If the limits are swapped, we wrapped around and cover
             the entire range.  We have a similar check at the end of
             extract_range_from_binary_expr.  */
          if (wi::gt_p (tmin, tmax, sgn))
            kind = VR_VARYING;
          else
            {
              kind = VR_RANGE;
              /* No overflow or both overflow or underflow.  The
                 range kind stays VR_RANGE.  */
              min = wide_int_to_tree (type, tmin);
              max = wide_int_to_tree (type, tmax);
            }
          return;
        }
      else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
               || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
        {
          /* Min underflow or max overflow.  The range kind
             changes to VR_ANTI_RANGE.  */
          bool covers = false;
          wide_int tem = tmin;
          tmin = tmax + 1;
          if (wi::cmp (tmin, tmax, sgn) < 0)
            covers = true;
          tmax = tem - 1;
          if (wi::cmp (tmax, tem, sgn) > 0)
            covers = true;
          /* If the anti-range would cover nothing, drop to varying.
             Likewise if the anti-range bounds are outside of the
             type's values.  */
          if (covers || wi::cmp (tmin, tmax, sgn) > 0)
            {
              kind = VR_VARYING;
              return;
            }
          kind = VR_ANTI_RANGE;
          min = wide_int_to_tree (type, tmin);
          max = wide_int_to_tree (type, tmax);
          return;
        }
      else
        {
          /* Other underflow and/or overflow, drop to VR_VARYING.  */
          kind = VR_VARYING;
          return;
        }
    }
  else
    {
      /* If overflow does not wrap, saturate to the type's min/max
         value.  */
      wide_int type_min = wi::min_value (prec, sgn);
      wide_int type_max = wi::max_value (prec, sgn);
      kind = VR_RANGE;
      if (min_ovf == wi::OVF_UNDERFLOW)
        min = wide_int_to_tree (type, type_min);
      else if (min_ovf == wi::OVF_OVERFLOW)
        min = wide_int_to_tree (type, type_max);
      else
        min = wide_int_to_tree (type, wmin);

      if (max_ovf == wi::OVF_UNDERFLOW)
        max = wide_int_to_tree (type, type_min);
      else if (max_ovf == wi::OVF_OVERFLOW)
        max = wide_int_to_tree (type, type_max);
      else
        max = wide_int_to_tree (type, wmax);
    }
}
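
/* Worked examples: for an unsigned 8-bit type, [250, 250] + [10, 10]
   computes WMIN = WMAX = 260 with both MIN_OVF and MAX_OVF set to
   wi::OVF_OVERFLOW; since the type wraps, both bounds truncate to 4
   and the result is [4, 4].  For a signed 8-bit type, which does not
   wrap, [120, 120] + [10, 10] saturates both bounds to the type
   maximum and yields [127, 127].  */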

/* Extract range information from a binary operation CODE based on
   the ranges of each of its operands *VR0 and *VR1 with resulting
   type EXPR_TYPE.  The resulting range is stored in *VR.  */

void
extract_range_from_binary_expr (value_range_base *vr,
                                enum tree_code code, tree expr_type,
                                const value_range_base *vr0_,
                                const value_range_base *vr1_)
{
  signop sign = TYPE_SIGN (expr_type);
  unsigned int prec = TYPE_PRECISION (expr_type);
  value_range_base vr0 = *vr0_, vr1 = *vr1_;
  value_range_base vrtem0, vrtem1;
  enum value_range_kind type;
  tree min = NULL_TREE, max = NULL_TREE;
  int cmp;

  if (!INTEGRAL_TYPE_P (expr_type)
      && !POINTER_TYPE_P (expr_type))
    {
      vr->set_varying (expr_type);
      return;
    }

  /* Not all binary expressions can be applied to ranges in a
     meaningful way.  Handle only arithmetic operations.  */
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR
      && code != MULT_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != RSHIFT_EXPR
      && code != LSHIFT_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR)
    {
      vr->set_varying (expr_type);
      return;
    }

  /* If both ranges are UNDEFINED, so is the result.  */
  if (vr0.undefined_p () && vr1.undefined_p ())
    {
      vr->set_undefined ();
      return;
    }
  /* If one of the ranges is UNDEFINED drop it to VARYING for the following
     code.  At some point we may want to special-case operations that
     have UNDEFINED result for all or some value-ranges of the not UNDEFINED
     operand.  */
  else if (vr0.undefined_p ())
    vr0.set_varying (expr_type);
  else if (vr1.undefined_p ())
    vr1.set_varying (expr_type);

  /* We get imprecise results from ranges_from_anti_range when
     code is EXACT_DIV_EXPR.  We could mask out bits in the resulting
     range, but then we also need to hack up vrp_union.  It's just
     easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR.  */
  if (code == EXACT_DIV_EXPR && vr0.nonzero_p ())
    {
      vr->set_nonzero (expr_type);
      return;
    }

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express ~[] op X as ([]' op X) U ([]'' op X).  */
  if (vr0.kind () == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr (vr, code, expr_type, &vrtem0, vr1_);
      if (!vrtem1.undefined_p ())
        {
          value_range_base vrres;
          extract_range_from_binary_expr (&vrres, code, expr_type,
                                          &vrtem1, vr1_);
          vr->union_ (&vrres);
        }
      return;
    }
  /* Likewise for X op ~[].  */
  if (vr1.kind () == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr (vr, code, expr_type, vr0_, &vrtem0);
      if (!vrtem1.undefined_p ())
        {
          value_range_base vrres;
          extract_range_from_binary_expr (&vrres, code, expr_type,
                                          vr0_, &vrtem1);
          vr->union_ (&vrres);
        }
      return;
    }

  /* The type of the resulting value range defaults to VR0.TYPE.  */
  type = vr0.kind ();

  /* Refuse to operate on VARYING ranges, ranges of different kinds
     and symbolic ranges.  As an exception, we allow BIT_{AND,IOR}
     because we may be able to derive a useful range even if one of
     the operands is VR_VARYING or symbolic range.  Similarly for
     divisions, MIN/MAX and PLUS/MINUS.

     TODO, we may be able to derive anti-ranges in some cases.  */
  if (code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != RSHIFT_EXPR
      && code != POINTER_PLUS_EXPR
      && (vr0.varying_p ()
          || vr1.varying_p ()
          || vr0.kind () != vr1.kind ()
          || vr0.symbolic_p ()
          || vr1.symbolic_p ()))
    {
      vr->set_varying (expr_type);
      return;
    }

  /* Now evaluate the expression to determine the new range.  */
  if (POINTER_TYPE_P (expr_type))
    {
      if (code == MIN_EXPR || code == MAX_EXPR)
        {
          /* For MIN/MAX expressions with pointers, we only care about
             null-ness: if both operands are non-null the result is
             non-null, if both are null the result is null, and
             otherwise the result is varying.  */
1704 if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
1705 vr->set_nonzero (expr_type);
1706 else if (vr0.zero_p () && vr1.zero_p ())
1707 vr->set_zero (expr_type);
1708 else
1709 vr->set_varying (expr_type);
1710 }
1711 else if (code == POINTER_PLUS_EXPR)
1712 {
1713 /* For pointer types, we are really only interested in asserting
1714 whether the expression evaluates to non-NULL.
1715 With -fno-delete-null-pointer-checks we need to be more
1716 conservative. As some object might reside at address 0,
1717 then some offset could be added to it and the same offset
1718 subtracted again and the result would be NULL.
1719 E.g.
1720 static int a[12]; where &a[0] is NULL and
1721 ptr = &a[6];
1722 ptr -= 6;
1723 ptr will be NULL here, even when there is POINTER_PLUS_EXPR
1724 where the first range doesn't include zero and the second one
1725 doesn't either. As the second operand is sizetype (unsigned),
1726 consider all ranges where the MSB could be set as possible
1727 subtractions where the result might be NULL. */
1728 if ((!range_includes_zero_p (&vr0)
1729 || !range_includes_zero_p (&vr1))
1730 && !TYPE_OVERFLOW_WRAPS (expr_type)
1731 && (flag_delete_null_pointer_checks
1732 || (range_int_cst_p (&vr1)
1733 && !tree_int_cst_sign_bit (vr1.max ()))))
1734 vr->set_nonzero (expr_type);
1735 else if (vr0.zero_p () && vr1.zero_p ())
1736 vr->set_zero (expr_type);
1737 else
1738 vr->set_varying (expr_type);
1739 }
1740 else if (code == BIT_AND_EXPR)
1741 {
1742 /* For pointer types, we are really only interested in asserting
1743 whether the expression evaluates to non-NULL. */
1744 if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
1745 vr->set_nonzero (expr_type);
1746 else if (vr0.zero_p () || vr1.zero_p ())
1747 vr->set_zero (expr_type);
1748 else
1749 vr->set_varying (expr_type);
1750 }
1751 else
1752 vr->set_varying (expr_type);
1753
1754 return;
1755 }
1756
1757 /* For integer ranges, apply the operation to each end of the
1758 range and see what we end up with. */
1759 if (code == PLUS_EXPR || code == MINUS_EXPR)
1760 {
1761 value_range_kind vr0_kind = vr0.kind (), vr1_kind = vr1.kind ();
1762 tree vr0_min = vr0.min (), vr0_max = vr0.max ();
1763 tree vr1_min = vr1.min (), vr1_max = vr1.max ();
1764 /* This will normalize things such that calculating
1765 [0,0] - VR_VARYING is not dropped to varying, but is
1766 calculated as [MIN+1, MAX]. */
1767 if (vr0.varying_p ())
1768 {
1769 vr0_kind = VR_RANGE;
1770 vr0_min = vrp_val_min (expr_type);
1771 vr0_max = vrp_val_max (expr_type);
1772 }
1773 if (vr1.varying_p ())
1774 {
1775 vr1_kind = VR_RANGE;
1776 vr1_min = vrp_val_min (expr_type);
1777 vr1_max = vrp_val_max (expr_type);
1778 }
1779
1780 const bool minus_p = (code == MINUS_EXPR);
1781 tree min_op0 = vr0_min;
1782 tree min_op1 = minus_p ? vr1_max : vr1_min;
1783 tree max_op0 = vr0_max;
1784 tree max_op1 = minus_p ? vr1_min : vr1_max;
1785 tree sym_min_op0 = NULL_TREE;
1786 tree sym_min_op1 = NULL_TREE;
1787 tree sym_max_op0 = NULL_TREE;
1788 tree sym_max_op1 = NULL_TREE;
1789 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
1790
1791 neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;
1792
1793 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
1794 single-symbolic ranges, try to compute the precise resulting range,
1795 but only if we know that this resulting range will also be constant
1796 or single-symbolic. */
1797 if (vr0_kind == VR_RANGE && vr1_kind == VR_RANGE
1798 && (TREE_CODE (min_op0) == INTEGER_CST
1799 || (sym_min_op0
1800 = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
1801 && (TREE_CODE (min_op1) == INTEGER_CST
1802 || (sym_min_op1
1803 = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
1804 && (!(sym_min_op0 && sym_min_op1)
1805 || (sym_min_op0 == sym_min_op1
1806 && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
1807 && (TREE_CODE (max_op0) == INTEGER_CST
1808 || (sym_max_op0
1809 = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
1810 && (TREE_CODE (max_op1) == INTEGER_CST
1811 || (sym_max_op1
1812 = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
1813 && (!(sym_max_op0 && sym_max_op1)
1814 || (sym_max_op0 == sym_max_op1
1815 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
1816 {
1817 wide_int wmin, wmax;
1818 wi::overflow_type min_ovf = wi::OVF_NONE;
1819 wi::overflow_type max_ovf = wi::OVF_NONE;
1820
1821 /* Build the bounds. */
1822 combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
1823 combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);
1824
1825 /* If we have overflow for the constant part and the resulting
1826 range will be symbolic, drop to VR_VARYING. */
1827 if (((bool)min_ovf && sym_min_op0 != sym_min_op1)
1828 || ((bool)max_ovf && sym_max_op0 != sym_max_op1))
1829 {
1830 vr->set_varying (expr_type);
1831 return;
1832 }
1833
1834 /* Adjust the range for possible overflow. */
1835 min = NULL_TREE;
1836 max = NULL_TREE;
1837 set_value_range_with_overflow (type, min, max, expr_type,
1838 wmin, wmax, min_ovf, max_ovf);
1839 if (type == VR_VARYING)
1840 {
1841 vr->set_varying (expr_type);
1842 return;
1843 }
1844
1845 /* Build the symbolic bounds if needed. */
1846 adjust_symbolic_bound (min, code, expr_type,
1847 sym_min_op0, sym_min_op1,
1848 neg_min_op0, neg_min_op1);
1849 adjust_symbolic_bound (max, code, expr_type,
1850 sym_max_op0, sym_max_op1,
1851 neg_max_op0, neg_max_op1);
1852 }
1853 else
1854 {
1855 /* For other cases, for example if we have a PLUS_EXPR with two
1856 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
1857 to compute a precise range for such a case.
1858 ??? General even mixed range kind operations can be expressed
1859 by for example transforming ~[3, 5] + [1, 2] to range-only
1860 operations and a union primitive:
1861 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
1862 [-INF+1, 4] U [6, +INF(OVF)]
1863 though usually the union is not exactly representable with
1864 a single range or anti-range as the above is
1865 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
1866 but one could use a scheme similar to equivalences for this. */
1867 vr->set_varying (expr_type);
1868 return;
1869 }
1870 }
1871 else if (code == MIN_EXPR
1872 || code == MAX_EXPR)
1873 {
1874 wide_int wmin, wmax;
1875 wide_int vr0_min, vr0_max;
1876 wide_int vr1_min, vr1_max;
1877 extract_range_into_wide_ints (&vr0, expr_type, vr0_min, vr0_max);
1878 extract_range_into_wide_ints (&vr1, expr_type, vr1_min, vr1_max);
1879 if (wide_int_range_min_max (wmin, wmax, code, sign, prec,
1880 vr0_min, vr0_max, vr1_min, vr1_max))
1881 vr->set (VR_RANGE, wide_int_to_tree (expr_type, wmin),
1882 wide_int_to_tree (expr_type, wmax));
1883 else
1884 vr->set_varying (expr_type);
1885 return;
1886 }
1887 else if (code == MULT_EXPR)
1888 {
1889 if (!range_int_cst_p (&vr0)
1890 || !range_int_cst_p (&vr1))
1891 {
1892 vr->set_varying (expr_type);
1893 return;
1894 }
1895 extract_range_from_multiplicative_op (vr, code, expr_type, &vr0, &vr1);
1896 return;
1897 }
1898 else if (code == RSHIFT_EXPR
1899 || code == LSHIFT_EXPR)
1900 {
1901 if (range_int_cst_p (&vr1)
1902 && !wide_int_range_shift_undefined_p
1903 (TYPE_SIGN (TREE_TYPE (vr1.min ())),
1904 prec,
1905 wi::to_wide (vr1.min ()),
1906 wi::to_wide (vr1.max ())))
1907 {
1908 if (code == RSHIFT_EXPR)
1909 {
1910 extract_range_from_multiplicative_op (vr, code, expr_type,
1911 &vr0, &vr1);
1912 return;
1913 }
1914 else if (code == LSHIFT_EXPR
1915 && range_int_cst_p (&vr0))
1916 {
1917 wide_int res_lb, res_ub;
1918 if (wide_int_range_lshift (res_lb, res_ub, sign, prec,
1919 wi::to_wide (vr0.min ()),
1920 wi::to_wide (vr0.max ()),
1921 wi::to_wide (vr1.min ()),
1922 wi::to_wide (vr1.max ()),
1923 TYPE_OVERFLOW_UNDEFINED (expr_type)))
1924 {
1925 min = wide_int_to_tree (expr_type, res_lb);
1926 max = wide_int_to_tree (expr_type, res_ub);
1927 vr->set (VR_RANGE, min, max);
1928 return;
1929 }
1930 }
1931 }
1932 vr->set_varying (expr_type);
1933 return;
1934 }
1935 else if (code == TRUNC_DIV_EXPR
1936 || code == FLOOR_DIV_EXPR
1937 || code == CEIL_DIV_EXPR
1938 || code == EXACT_DIV_EXPR
1939 || code == ROUND_DIV_EXPR)
1940 {
1941 wide_int dividend_min, dividend_max, divisor_min, divisor_max;
1942 wide_int wmin, wmax, extra_min, extra_max;
1943 bool extra_range_p;
1944
1945 /* Special case explicit division by zero as undefined. */
1946 if (vr1.zero_p ())
1947 {
1948 vr->set_undefined ();
1949 return;
1950 }
1951
1952 /* First, normalize ranges into constants we can handle. Note
1953 that VR_ANTI_RANGEs of constants were already normalized
1954 before arriving here.
1955
1956 NOTE: As a future improvement, we may be able to do better
1957 with mixed symbolic (anti-)ranges like [0, A]. See note in
1958 ranges_from_anti_range. */
1959 extract_range_into_wide_ints (&vr0, expr_type,
1960 dividend_min, dividend_max);
1961 extract_range_into_wide_ints (&vr1, expr_type,
1962 divisor_min, divisor_max);
1963 if (!wide_int_range_div (wmin, wmax, code, sign, prec,
1964 dividend_min, dividend_max,
1965 divisor_min, divisor_max,
1966 TYPE_OVERFLOW_UNDEFINED (expr_type),
1967 extra_range_p, extra_min, extra_max))
1968 {
1969 vr->set_varying (expr_type);
1970 return;
1971 }
1972 vr->set (VR_RANGE, wide_int_to_tree (expr_type, wmin),
1973 wide_int_to_tree (expr_type, wmax));
1974 if (extra_range_p)
1975 {
1976 value_range_base
1977 extra_range (VR_RANGE, wide_int_to_tree (expr_type, extra_min),
1978 wide_int_to_tree (expr_type, extra_max));
1979 vr->union_ (&extra_range);
1980 }
1981 return;
1982 }
1983 else if (code == TRUNC_MOD_EXPR)
1984 {
1985 if (vr1.zero_p ())
1986 {
1987 vr->set_undefined ();
1988 return;
1989 }
1990 wide_int wmin, wmax, tmp;
1991 wide_int vr0_min, vr0_max, vr1_min, vr1_max;
1992 extract_range_into_wide_ints (&vr0, expr_type, vr0_min, vr0_max);
1993 extract_range_into_wide_ints (&vr1, expr_type, vr1_min, vr1_max);
1994 wide_int_range_trunc_mod (wmin, wmax, sign, prec,
1995 vr0_min, vr0_max, vr1_min, vr1_max);
1996 min = wide_int_to_tree (expr_type, wmin);
1997 max = wide_int_to_tree (expr_type, wmax);
1998 vr->set (VR_RANGE, min, max);
1999 return;
2000 }
2001 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
2002 {
2003 wide_int may_be_nonzero0, may_be_nonzero1;
2004 wide_int must_be_nonzero0, must_be_nonzero1;
2005 wide_int wmin, wmax;
2006 wide_int vr0_min, vr0_max, vr1_min, vr1_max;
2007 vrp_set_zero_nonzero_bits (expr_type, &vr0,
2008 &may_be_nonzero0, &must_be_nonzero0);
2009 vrp_set_zero_nonzero_bits (expr_type, &vr1,
2010 &may_be_nonzero1, &must_be_nonzero1);
2011 extract_range_into_wide_ints (&vr0, expr_type, vr0_min, vr0_max);
2012 extract_range_into_wide_ints (&vr1, expr_type, vr1_min, vr1_max);
2013 if (code == BIT_AND_EXPR)
2014 {
2015 if (wide_int_range_bit_and (wmin, wmax, sign, prec,
2016 vr0_min, vr0_max,
2017 vr1_min, vr1_max,
2018 must_be_nonzero0,
2019 may_be_nonzero0,
2020 must_be_nonzero1,
2021 may_be_nonzero1))
2022 {
2023 min = wide_int_to_tree (expr_type, wmin);
2024 max = wide_int_to_tree (expr_type, wmax);
2025 vr->set (VR_RANGE, min, max);
2026 }
2027 else
2028 vr->set_varying (expr_type);
2029 return;
2030 }
2031 else if (code == BIT_IOR_EXPR)
2032 {
2033 if (wide_int_range_bit_ior (wmin, wmax, sign,
2034 vr0_min, vr0_max,
2035 vr1_min, vr1_max,
2036 must_be_nonzero0,
2037 may_be_nonzero0,
2038 must_be_nonzero1,
2039 may_be_nonzero1))
2040 {
2041 min = wide_int_to_tree (expr_type, wmin);
2042 max = wide_int_to_tree (expr_type, wmax);
2043 vr->set (VR_RANGE, min, max);
2044 }
2045 else
2046 vr->set_varying (expr_type);
2047 return;
2048 }
2049 else if (code == BIT_XOR_EXPR)
2050 {
2051 if (wide_int_range_bit_xor (wmin, wmax, sign, prec,
2052 must_be_nonzero0,
2053 may_be_nonzero0,
2054 must_be_nonzero1,
2055 may_be_nonzero1))
2056 {
2057 min = wide_int_to_tree (expr_type, wmin);
2058 max = wide_int_to_tree (expr_type, wmax);
2059 vr->set (VR_RANGE, min, max);
2060 }
2061 else
2062 vr->set_varying (expr_type);
2063 return;
2064 }
2065 }
2066 else
2067 gcc_unreachable ();
2068
2069 /* If either MIN or MAX overflowed, then set the resulting range to
2070 VARYING. */
2071 if (min == NULL_TREE
2072 || TREE_OVERFLOW_P (min)
2073 || max == NULL_TREE
2074 || TREE_OVERFLOW_P (max))
2075 {
2076 vr->set_varying (expr_type);
2077 return;
2078 }
2079
2080 /* We punt for [-INF, +INF].
2081 We learn nothing when we have INF on both sides.
2082 Note that we do accept [-INF, -INF] and [+INF, +INF]. */
2083 if (vrp_val_is_min (min) && vrp_val_is_max (max))
2084 {
2085 vr->set_varying (expr_type);
2086 return;
2087 }
2088
2089 cmp = compare_values (min, max);
2090 if (cmp == -2 || cmp == 1)
2091 {
2092 /* If the new range has its limits swapped around (MIN > MAX),
2093 then the operation caused one of them to wrap around; mark
2094 the new range VARYING. */
2095 vr->set_varying (expr_type);
2096 }
2097 else
2098 vr->set (type, min, max);
2099 }
2100
2101 /* Extract range information from a unary operation CODE based on
2102 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
2103 The resulting range is stored in *VR. */
2104
2105 void
2106 extract_range_from_unary_expr (value_range_base *vr,
2107 enum tree_code code, tree type,
2108 const value_range_base *vr0_, tree op0_type)
2109 {
2110 signop sign = TYPE_SIGN (type);
2111 unsigned int prec = TYPE_PRECISION (type);
2112 value_range_base vr0 = *vr0_;
2113 value_range_base vrtem0, vrtem1;
2114
2115 /* VRP only operates on integral and pointer types. */
2116 if (!(INTEGRAL_TYPE_P (op0_type)
2117 || POINTER_TYPE_P (op0_type))
2118 || !(INTEGRAL_TYPE_P (type)
2119 || POINTER_TYPE_P (type)))
2120 {
2121 vr->set_varying (type);
2122 return;
2123 }
2124
2125 /* If VR0 is UNDEFINED, so is the result. */
2126 if (vr0.undefined_p ())
2127 {
2128 vr->set_undefined ();
2129 return;
2130 }
2131
2132 /* Handle operations that we express in terms of others. */
2133 if (code == PAREN_EXPR)
2134 {
2135 /* PAREN_EXPR and OBJ_TYPE_REF are simple copies. */
2136 *vr = vr0;
2137 return;
2138 }
2139 else if (code == NEGATE_EXPR)
2140 {
2141 /* -X is simply 0 - X, so re-use existing code that also handles
2142 anti-ranges fine. */
2143 value_range_base zero;
2144 zero.set (build_int_cst (type, 0));
2145 extract_range_from_binary_expr (vr, MINUS_EXPR, type, &zero, &vr0);
2146 return;
2147 }
2148 else if (code == BIT_NOT_EXPR)
2149 {
2150 /* ~X is simply -1 - X, so re-use existing code that also handles
2151 anti-ranges fine. */
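/* A worked example (illustrative, not from the original sources): with
32-bit int, ~[3, 5] is computed as [-1, -1] - [3, 5]
= [-1 - 5, -1 - 3] = [-6, -4]. */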
2152 value_range_base minusone;
2153 minusone.set (build_int_cst (type, -1));
2154 extract_range_from_binary_expr (vr, MINUS_EXPR, type, &minusone, &vr0);
2155 return;
2156 }
2157
2158 /* Now canonicalize anti-ranges to ranges when they are not symbolic
2159 and express op ~[] as (op []') U (op []''). */
2160 if (vr0.kind () == VR_ANTI_RANGE
2161 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
2162 {
2163 extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type);
2164 if (!vrtem1.undefined_p ())
2165 {
2166 value_range_base vrres;
2167 extract_range_from_unary_expr (&vrres, code, type,
2168 &vrtem1, op0_type);
2169 vr->union_ (&vrres);
2170 }
2171 return;
2172 }
2173
2174 if (CONVERT_EXPR_CODE_P (code))
2175 {
2176 tree inner_type = op0_type;
2177 tree outer_type = type;
2178
2179 /* If the expression involves a pointer, we are only interested in
2180 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]).
2181
2182 This may lose precision when converting (char *)~[0,2] to
2183 int, because we'll forget that the pointer can also not be 1
2184 or 2. In practice we don't care, as this is some idiot
2185 storing a magic constant to a pointer. */
2186 if (POINTER_TYPE_P (type) || POINTER_TYPE_P (op0_type))
2187 {
2188 if (!range_includes_zero_p (&vr0))
2189 vr->set_nonzero (type);
2190 else if (vr0.zero_p ())
2191 vr->set_zero (type);
2192 else
2193 vr->set_varying (type);
2194 return;
2195 }
2196
2197 /* The POINTER_TYPE_P code above will have dealt with all
2198 pointer anti-ranges. Any remaining anti-ranges at this point
2199 will be integer conversions from SSA names that will be
2200 normalized into VARYING. For instance: ~[x_55, x_55]. */
2201 gcc_assert (vr0.kind () != VR_ANTI_RANGE
2202 || TREE_CODE (vr0.min ()) != INTEGER_CST);
2203
2204 /* NOTES: Previously we were returning VARYING for all symbolics, but
2205 we can do better by treating them as [-MIN, +MAX]. For
2206 example, converting [SYM, SYM] from INT to LONG UNSIGNED,
2207 we can return: ~[0x80000000, 0xffffffff7fffffff].
2208
2209 We were also failing to convert ~[0,0] from char* to unsigned,
2210 instead choosing to return VR_VARYING. Now we return ~[0,0]. */
2211 wide_int vr0_min, vr0_max, wmin, wmax;
2212 signop inner_sign = TYPE_SIGN (inner_type);
2213 signop outer_sign = TYPE_SIGN (outer_type);
2214 unsigned inner_prec = TYPE_PRECISION (inner_type);
2215 unsigned outer_prec = TYPE_PRECISION (outer_type);
2216 extract_range_into_wide_ints (&vr0, inner_type, vr0_min, vr0_max);
2217 if (wide_int_range_convert (wmin, wmax,
2218 inner_sign, inner_prec,
2219 outer_sign, outer_prec,
2220 vr0_min, vr0_max))
2221 {
2222 tree min = wide_int_to_tree (outer_type, wmin);
2223 tree max = wide_int_to_tree (outer_type, wmax);
2224 vr->set (VR_RANGE, min, max);
2225 }
2226 else
2227 vr->set_varying (outer_type);
2228 return;
2229 }
2230 else if (code == ABS_EXPR)
2231 {
2232 wide_int wmin, wmax;
2233 wide_int vr0_min, vr0_max;
2234 extract_range_into_wide_ints (&vr0, type, vr0_min, vr0_max);
2235 if (wide_int_range_abs (wmin, wmax, sign, prec, vr0_min, vr0_max,
2236 TYPE_OVERFLOW_UNDEFINED (type)))
2237 vr->set (VR_RANGE, wide_int_to_tree (type, wmin),
2238 wide_int_to_tree (type, wmax));
2239 else
2240 vr->set_varying (type);
2241 return;
2242 }
2243 else if (code == ABSU_EXPR)
2244 {
2245 wide_int wmin, wmax;
2246 wide_int vr0_min, vr0_max;
2247 tree signed_type = make_signed_type (TYPE_PRECISION (type));
2248 extract_range_into_wide_ints (&vr0, signed_type, vr0_min, vr0_max);
2249 wide_int_range_absu (wmin, wmax, prec, vr0_min, vr0_max);
2250 vr->set (VR_RANGE, wide_int_to_tree (type, wmin),
2251 wide_int_to_tree (type, wmax));
2252 return;
2253 }
2254
2255 /* For unhandled operations fall back to varying. */
2256 vr->set_varying (type);
2257 return;
2258 }
2259
2260 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2261 create a new SSA name N and return the assertion assignment
2262 'N = ASSERT_EXPR <V, V OP W>'. */
2263
2264 static gimple *
2265 build_assert_expr_for (tree cond, tree v)
2266 {
2267 tree a;
2268 gassign *assertion;
2269
2270 gcc_assert (TREE_CODE (v) == SSA_NAME
2271 && COMPARISON_CLASS_P (cond));
2272
2273 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
2274 assertion = gimple_build_assign (NULL_TREE, a);
2275
2276 /* The new ASSERT_EXPR creates a new SSA name that replaces the
2277 operand of the ASSERT_EXPR. Create it so the new name and the old one
2278 are registered in the replacement table so that we can fix the SSA web
2279 after adding all the ASSERT_EXPRs. */
2280 tree new_def = create_new_def_for (v, assertion, NULL);
2281 /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
2282 given we have to be able to fully propagate those out to re-create
2283 valid SSA when removing the asserts. */
2284 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
2285 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
2286
2287 return assertion;
2288 }
2289
2290
2291 /* Return true if STMT is a predicate expression involving floating
2292 point values. */
2293
2294 static inline bool
2295 fp_predicate (gimple *stmt)
2296 {
2297 GIMPLE_CHECK (stmt, GIMPLE_COND);
2298
2299 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
2300 }
2301
2302 /* If the range of values taken by OP can be inferred after STMT executes,
2303 return the comparison code (COMP_CODE_P) and value (VAL_P) that
2304 describes the inferred range. Return true if a range could be
2305 inferred. */
2306
2307 bool
2308 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
2309 {
2310 *val_p = NULL_TREE;
2311 *comp_code_p = ERROR_MARK;
2312
2313 /* Do not attempt to infer anything in names that flow through
2314 abnormal edges. */
2315 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
2316 return false;
2317
2318 /* If STMT is the last statement of a basic block with no normal
2319 successors, there is no point inferring anything about any of its
2320 operands. We would not be able to find a proper insertion point
2321 for the assertion, anyway. */
2322 if (stmt_ends_bb_p (stmt))
2323 {
2324 edge_iterator ei;
2325 edge e;
2326
2327 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
2328 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
2329 break;
2330 if (e == NULL)
2331 return false;
2332 }
2333
2334 if (infer_nonnull_range (stmt, op))
2335 {
2336 *val_p = build_int_cst (TREE_TYPE (op), 0);
2337 *comp_code_p = NE_EXPR;
2338 return true;
2339 }
2340
2341 return false;
2342 }
2343
2344
2345 void dump_asserts_for (FILE *, tree);
2346 void debug_asserts_for (tree);
2347 void dump_all_asserts (FILE *);
2348 void debug_all_asserts (void);
2349
2350 /* Dump all the registered assertions for NAME to FILE. */
2351
2352 void
2353 dump_asserts_for (FILE *file, tree name)
2354 {
2355 assert_locus *loc;
2356
2357 fprintf (file, "Assertions to be inserted for ");
2358 print_generic_expr (file, name);
2359 fprintf (file, "\n");
2360
2361 loc = asserts_for[SSA_NAME_VERSION (name)];
2362 while (loc)
2363 {
2364 fprintf (file, "\t");
2365 print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2366 fprintf (file, "\n\tBB #%d", loc->bb->index);
2367 if (loc->e)
2368 {
2369 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2370 loc->e->dest->index);
2371 dump_edge_info (file, loc->e, dump_flags, 0);
2372 }
2373 fprintf (file, "\n\tPREDICATE: ");
2374 print_generic_expr (file, loc->expr);
2375 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2376 print_generic_expr (file, loc->val);
2377 fprintf (file, "\n\n");
2378 loc = loc->next;
2379 }
2380
2381 fprintf (file, "\n");
2382 }
2383
2384
2385 /* Dump all the registered assertions for NAME to stderr. */
2386
2387 DEBUG_FUNCTION void
2388 debug_asserts_for (tree name)
2389 {
2390 dump_asserts_for (stderr, name);
2391 }
2392
2393
2394 /* Dump all the registered assertions for all the names to FILE. */
2395
2396 void
2397 dump_all_asserts (FILE *file)
2398 {
2399 unsigned i;
2400 bitmap_iterator bi;
2401
2402 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2403 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2404 dump_asserts_for (file, ssa_name (i));
2405 fprintf (file, "\n");
2406 }
2407
2408
2409 /* Dump all the registered assertions for all the names to stderr. */
2410
2411 DEBUG_FUNCTION void
2412 debug_all_asserts (void)
2413 {
2414 dump_all_asserts (stderr);
2415 }
2416
2417 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */
2418
2419 static void
2420 add_assert_info (vec<assert_info> &asserts,
2421 tree name, tree expr, enum tree_code comp_code, tree val)
2422 {
2423 assert_info info;
2424 info.comp_code = comp_code;
2425 info.name = name;
2426 if (TREE_OVERFLOW_P (val))
2427 val = drop_tree_overflow (val);
2428 info.val = val;
2429 info.expr = expr;
2430 asserts.safe_push (info);
2431 if (dump_enabled_p ())
2432 dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
2433 "Adding assert for %T from %T %s %T\n",
2434 name, expr, op_symbol_code (comp_code), val);
2435 }
2436
2437 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2438 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2439 E->DEST, then register this location as a possible insertion point
2440 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2441
2442 BB, E and SI provide the exact insertion point for the new
2443 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2444 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2445 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2446 must not be NULL. */
2447
2448 static void
2449 register_new_assert_for (tree name, tree expr,
2450 enum tree_code comp_code,
2451 tree val,
2452 basic_block bb,
2453 edge e,
2454 gimple_stmt_iterator si)
2455 {
2456 assert_locus *n, *loc, *last_loc;
2457 basic_block dest_bb;
2458
2459 gcc_checking_assert (bb == NULL || e == NULL);
2460
2461 if (e == NULL)
2462 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2463 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2464
2465 /* Never build an assert comparing against an integer constant with
2466 TREE_OVERFLOW set. This confuses our undefined overflow warning
2467 machinery. */
2468 if (TREE_OVERFLOW_P (val))
2469 val = drop_tree_overflow (val);
2470
2471 /* The new assertion A will be inserted at BB or E. We need to
2472 determine if the new location is dominated by a previously
2473 registered location for A. If we are doing an edge insertion,
2474 assume that A will be inserted at E->DEST. Note that this is not
2475 necessarily true.
2476
2477 If E is a critical edge, it will be split. But even if E is
2478 split, the new block will dominate the same set of blocks that
2479 E->DEST dominates.
2480
2481 The reverse, however, is not true: blocks dominated by E->DEST
2482 will not be dominated by the new block created to split E. So,
2483 if the insertion location is on a critical edge, we will not use
2484 the new location to move another assertion previously registered
2485 at a block dominated by E->DEST. */
2486 dest_bb = (bb) ? bb : e->dest;
2487
2488 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2489 VAL at a block dominating DEST_BB, then we don't need to insert a new
2490 one. Similarly, if the same assertion already exists at a block
2491 dominated by DEST_BB and the new location is not on a critical
2492 edge, then update the existing location for the assertion (i.e.,
2493 move the assertion up in the dominance tree).
2494
2495 Note, this is implemented as a simple linked list because there
2496 should not be more than a handful of assertions registered per
2497 name. If this becomes a performance problem, a table hashed by
2498 COMP_CODE and VAL could be implemented. */
2499 loc = asserts_for[SSA_NAME_VERSION (name)];
2500 last_loc = loc;
2501 while (loc)
2502 {
2503 if (loc->comp_code == comp_code
2504 && (loc->val == val
2505 || operand_equal_p (loc->val, val, 0))
2506 && (loc->expr == expr
2507 || operand_equal_p (loc->expr, expr, 0)))
2508 {
2509 /* If E is not a critical edge and DEST_BB
2510 dominates the existing location for the assertion, move
2511 the assertion up in the dominance tree by updating its
2512 location information. */
2513 if ((e == NULL || !EDGE_CRITICAL_P (e))
2514 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2515 {
2516 loc->bb = dest_bb;
2517 loc->e = e;
2518 loc->si = si;
2519 return;
2520 }
2521 }
2522
2523 /* Update the last node of the list and move to the next one. */
2524 last_loc = loc;
2525 loc = loc->next;
2526 }
2527
2528 /* If we didn't find an assertion already registered for
2529 NAME COMP_CODE VAL, add a new one at the end of the list of
2530 assertions associated with NAME. */
2531 n = XNEW (struct assert_locus);
2532 n->bb = dest_bb;
2533 n->e = e;
2534 n->si = si;
2535 n->comp_code = comp_code;
2536 n->val = val;
2537 n->expr = expr;
2538 n->next = NULL;
2539
2540 if (last_loc)
2541 last_loc->next = n;
2542 else
2543 asserts_for[SSA_NAME_VERSION (name)] = n;
2544
2545 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2546 }
2547
2548 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
2549 Extract a suitable test code and value and store them into *CODE_P and
2550 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
2551
2552 If no extraction was possible, return FALSE, otherwise return TRUE.
2553
2554 If INVERT is true, then we invert the result stored into *CODE_P. */
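/* E.g. (illustrative): for the predicate 10 < x with NAME = x, the
comparison is flipped and, with INVERT false, we return GT_EXPR in
*CODE_P and 10 in *VAL_P, i.e. x > 10. */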
2555
2556 static bool
2557 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
2558 tree cond_op0, tree cond_op1,
2559 bool invert, enum tree_code *code_p,
2560 tree *val_p)
2561 {
2562 enum tree_code comp_code;
2563 tree val;
2564
2565 /* We have a comparison of the form NAME COMP VAL
2566 or VAL COMP NAME. */
2567 if (name == cond_op1)
2568 {
2569 /* If the predicate is of the form VAL COMP NAME, flip
2570 COMP around because we need to register NAME as the
2571 first operand in the predicate. */
2572 comp_code = swap_tree_comparison (cond_code);
2573 val = cond_op0;
2574 }
2575 else if (name == cond_op0)
2576 {
2577 /* The comparison is of the form NAME COMP VAL, so the
2578 comparison code remains unchanged. */
2579 comp_code = cond_code;
2580 val = cond_op1;
2581 }
2582 else
2583 gcc_unreachable ();
2584
2585 /* Invert the comparison code as necessary. */
2586 if (invert)
2587 comp_code = invert_tree_comparison (comp_code, 0);
2588
2589 /* VRP only handles integral and pointer types. */
2590 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
2591 && ! POINTER_TYPE_P (TREE_TYPE (val)))
2592 return false;
2593
2594 /* Do not register always-false predicates.
2595 FIXME: this works around a limitation in fold() when dealing with
2596 enumerations. Given 'enum { N1, N2 } x;', fold will not
2597 fold 'if (x > N2)' to 'if (0)'. */
2598 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
2599 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
2600 {
2601 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
2602 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
2603
2604 if (comp_code == GT_EXPR
2605 && (!max
2606 || compare_values (val, max) == 0))
2607 return false;
2608
2609 if (comp_code == LT_EXPR
2610 && (!min
2611 || compare_values (val, min) == 0))
2612 return false;
2613 }
2614 *code_p = comp_code;
2615 *val_p = val;
2616 return true;
2617 }
2618
2619 /* Find the smallest RES such that RES > VAL && (RES & MASK) == RES, if any
2620 (otherwise return VAL). VAL and MASK must be zero-extended for
2621 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
2622 (to transform signed values into unsigned) and at the end xor
2623 SGNBIT back. */
2624
2625 static wide_int
2626 masked_increment (const wide_int &val_in, const wide_int &mask,
2627 const wide_int &sgnbit, unsigned int prec)
2628 {
2629 wide_int bit = wi::one (prec), res;
2630 unsigned int i;
2631
2632 wide_int val = val_in ^ sgnbit;
2633 for (i = 0; i < prec; i++, bit += bit)
2634 {
2635 res = mask;
2636 if ((res & bit) == 0)
2637 continue;
2638 res = bit - 1;
2639 res = wi::bit_and_not (val + bit, res);
2640 res &= mask;
2641 if (wi::gtu_p (res, val))
2642 return res ^ sgnbit;
2643 }
2644 return val ^ sgnbit;
2645 }
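/* A worked example (illustrative, not from the original sources): for
PREC = 4, VAL = 5 (0101), MASK = 12 (1100) and SGNBIT = 0, the first
set mask bit tried is BIT = 4; RES = (VAL + BIT) & ~(BIT - 1) & MASK
= 9 & ~3 & 12 = 8, and 8 > 5, so 8 (1000) is returned: the smallest
value above VAL that is covered by MASK. */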
2646
2647 /* Helper for overflow_comparison_p
2648
2649 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2650 OP1's defining statement to see if it ultimately has the form
2651 OP0 CODE (OP0 PLUS INTEGER_CST)
2652
2653 If so, return TRUE indicating this is an overflow test and store into
2654 *NEW_CST an updated constant that can be used in a narrowed range test.
2655
2656 REVERSED indicates if the comparison was originally:
2657
2658 OP1 CODE' OP0.
2659
2660 This affects how we build the updated constant. */
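/* As an illustration (hypothetical values): with unsigned char X, the
test X > X + 10 holds exactly when X + 10 wraps, i.e. when X > 245,
so *NEW_CST would become 245 (MAX - 10) for the narrowed test. */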
2661
2662 static bool
2663 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
2664 bool follow_assert_exprs, bool reversed, tree *new_cst)
2665 {
2666 /* See if this is a relational operation between two SSA_NAMEs with
2667 unsigned, overflow wrapping values. If so, check it more deeply. */
2668 if ((code == LT_EXPR || code == LE_EXPR
2669 || code == GE_EXPR || code == GT_EXPR)
2670 && TREE_CODE (op0) == SSA_NAME
2671 && TREE_CODE (op1) == SSA_NAME
2672 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
2673 && TYPE_UNSIGNED (TREE_TYPE (op0))
2674 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
2675 {
2676 gimple *op1_def = SSA_NAME_DEF_STMT (op1);
2677
2678 /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
2679 if (follow_assert_exprs)
2680 {
2681 while (gimple_assign_single_p (op1_def)
2682 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
2683 {
2684 op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
2685 if (TREE_CODE (op1) != SSA_NAME)
2686 break;
2687 op1_def = SSA_NAME_DEF_STMT (op1);
2688 }
2689 }
2690
2691 /* Now look at the defining statement of OP1 to see if it adds
2692 or subtracts a nonzero constant from another operand. */
2693 if (op1_def
2694 && is_gimple_assign (op1_def)
2695 && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
2696 && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
2697 && !integer_zerop (gimple_assign_rhs2 (op1_def)))
2698 {
2699 tree target = gimple_assign_rhs1 (op1_def);
2700
2701 /* If requested, follow ASSERT_EXPRs backwards for op0 looking
2702 for one where TARGET appears on the RHS. */
2703 if (follow_assert_exprs)
2704 {
2705 /* Now see if that "other operand" is op0, following the chain
2706 of ASSERT_EXPRs if necessary. */
2707 gimple *op0_def = SSA_NAME_DEF_STMT (op0);
2708 while (op0 != target
2709 && gimple_assign_single_p (op0_def)
2710 && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
2711 {
2712 op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
2713 if (TREE_CODE (op0) != SSA_NAME)
2714 break;
2715 op0_def = SSA_NAME_DEF_STMT (op0);
2716 }
2717 }
2718
2719 /* If we did not find our target SSA_NAME, then this is not
2720 an overflow test. */
2721 if (op0 != target)
2722 return false;
2723
2724 tree type = TREE_TYPE (op0);
2725 wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
2726 tree inc = gimple_assign_rhs2 (op1_def);
2727 if (reversed)
2728 *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
2729 else
2730 *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
2731 return true;
2732 }
2733 }
2734 return false;
2735 }
2736
2737 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2738 OP1's defining statement to see if it ultimately has the form
2739 OP0 CODE (OP0 PLUS INTEGER_CST)
2740
2741 If so, return TRUE indicating this is an overflow test and store into
2742 *NEW_CST an updated constant that can be used in a narrowed range test.
2743
2744 These statements are left as-is in the IL to facilitate discovery of
2745 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
2746 the alternate range representation is often useful within VRP. */
2747
2748 bool
2749 overflow_comparison_p (tree_code code, tree name, tree val,
2750 bool use_equiv_p, tree *new_cst)
2751 {
2752 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
2753 return true;
2754 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
2755 use_equiv_p, true, new_cst);
2756 }
2757
2758
2759 /* Try to register an edge assertion for SSA name NAME on edge E for
2760 the condition COND_OP0 COND_CODE COND_OP1 contributing to the enclosing
2761 conditional jump. Invert the condition if INVERT is true. */
2762
2763 static void
2764 register_edge_assert_for_2 (tree name, edge e,
2765 enum tree_code cond_code,
2766 tree cond_op0, tree cond_op1, bool invert,
2767 vec<assert_info> &asserts)
2768 {
2769 tree val;
2770 enum tree_code comp_code;
2771
2772 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2773 cond_op0,
2774 cond_op1,
2775 invert, &comp_code, &val))
2776 return;
2777
2778 /* Queue the assert. */
2779 tree x;
2780 if (overflow_comparison_p (comp_code, name, val, false, &x))
2781 {
2782 enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
2783 ? GT_EXPR : LE_EXPR);
2784 add_assert_info (asserts, name, name, new_code, x);
2785 }
2786 add_assert_info (asserts, name, name, comp_code, val);
2787
2788 /* In the case of NAME <= CST and NAME being defined as
2789 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
2790 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
2791 This catches range and anti-range tests. */
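/* For example (illustrative): for unsigned NAME = NAME2 + 10 and the
test NAME <= 20, the code below registers the assertion
NAME2 + 10 <= 20, which constrains NAME2 to the wrapped range
[-10, 10]. */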
2792 if ((comp_code == LE_EXPR
2793 || comp_code == GT_EXPR)
2794 && TREE_CODE (val) == INTEGER_CST
2795 && TYPE_UNSIGNED (TREE_TYPE (val)))
2796 {
2797 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2798 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
2799
2800 /* Extract CST2 from the (optional) addition. */
2801 if (is_gimple_assign (def_stmt)
2802 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
2803 {
2804 name2 = gimple_assign_rhs1 (def_stmt);
2805 cst2 = gimple_assign_rhs2 (def_stmt);
2806 if (TREE_CODE (name2) == SSA_NAME
2807 && TREE_CODE (cst2) == INTEGER_CST)
2808 def_stmt = SSA_NAME_DEF_STMT (name2);
2809 }
2810
2811 /* Extract NAME2 from the (optional) sign-changing cast. */
2812 if (gimple_assign_cast_p (def_stmt))
2813 {
2814 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
2815 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2816 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
2817 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
2818 name3 = gimple_assign_rhs1 (def_stmt);
2819 }
2820
2821 /* If name3 is used later, create an ASSERT_EXPR for it. */
2822 if (name3 != NULL_TREE
2823 && TREE_CODE (name3) == SSA_NAME
2824 && (cst2 == NULL_TREE
2825 || TREE_CODE (cst2) == INTEGER_CST)
2826 && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
2827 {
2828 tree tmp;
2829
2830 /* Build an expression for the range test. */
2831 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
2832 if (cst2 != NULL_TREE)
2833 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2834 add_assert_info (asserts, name3, tmp, comp_code, val);
2835 }
2836
2837 /* If name2 is used later, create an ASSERT_EXPR for it. */
2838 if (name2 != NULL_TREE
2839 && TREE_CODE (name2) == SSA_NAME
2840 && TREE_CODE (cst2) == INTEGER_CST
2841 && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
2842 {
2843 tree tmp;
2844
2845 /* Build an expression for the range test. */
2846 tmp = name2;
2847 if (TREE_TYPE (name) != TREE_TYPE (name2))
2848 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
2849 if (cst2 != NULL_TREE)
2850 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2851 add_assert_info (asserts, name2, tmp, comp_code, val);
2852 }
2853 }
2854
2855 /* In the case of post-in/decrement tests like if (i++) ... and uses
2856 of the in/decremented value on the edge, the extra name we want to
2857 assert for is not on the def chain of the name compared. Instead
2858 it is in the set of use stmts.
2859 Similar cases happen for conversions that were simplified through
2860 fold_{sign_changed,widened}_comparison. */
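/* E.g. (illustrative): for if (i != 0) where a dominating use computes
i2 = i + 1, the loop below also registers i2 != 1 on the edge, since
i != 0 implies i + 1 != 1. */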
2861 if ((comp_code == NE_EXPR
2862 || comp_code == EQ_EXPR)
2863 && TREE_CODE (val) == INTEGER_CST)
2864 {
2865 imm_use_iterator ui;
2866 gimple *use_stmt;
2867 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
2868 {
2869 if (!is_gimple_assign (use_stmt))
2870 continue;
2871
2872 /* Only consider use stmts whose block dominates the predecessor. */
2873 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
2874 continue;
2875
2876 tree name2 = gimple_assign_lhs (use_stmt);
2877 if (TREE_CODE (name2) != SSA_NAME)
2878 continue;
2879
2880 enum tree_code code = gimple_assign_rhs_code (use_stmt);
2881 tree cst;
2882 if (code == PLUS_EXPR
2883 || code == MINUS_EXPR)
2884 {
2885 cst = gimple_assign_rhs2 (use_stmt);
2886 if (TREE_CODE (cst) != INTEGER_CST)
2887 continue;
2888 cst = int_const_binop (code, val, cst);
2889 }
2890 else if (CONVERT_EXPR_CODE_P (code))
2891 {
2892 /* For truncating conversions we cannot record
2893 an inequality. */
2894 if (comp_code == NE_EXPR
2895 && (TYPE_PRECISION (TREE_TYPE (name2))
2896 < TYPE_PRECISION (TREE_TYPE (name))))
2897 continue;
2898 cst = fold_convert (TREE_TYPE (name2), val);
2899 }
2900 else
2901 continue;
2902
2903 if (TREE_OVERFLOW_P (cst))
2904 cst = drop_tree_overflow (cst);
2905 add_assert_info (asserts, name2, name2, comp_code, cst);
2906 }
2907 }
2908
2909 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
2910 && TREE_CODE (val) == INTEGER_CST)
2911 {
2912 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2913 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
2914 tree val2 = NULL_TREE;
2915 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
2916 wide_int mask = wi::zero (prec);
2917 unsigned int nprec = prec;
2918 enum tree_code rhs_code = ERROR_MARK;
2919
2920 if (is_gimple_assign (def_stmt))
2921 rhs_code = gimple_assign_rhs_code (def_stmt);
2922
2923 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
2924 assert that A != CST1 -+ CST2. */
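/* E.g. (illustrative): for NAME = A + 3 and the test NAME != 10,
we register the assertion A != 7. */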
2925 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2926 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
2927 {
2928 tree op0 = gimple_assign_rhs1 (def_stmt);
2929 tree op1 = gimple_assign_rhs2 (def_stmt);
2930 if (TREE_CODE (op0) == SSA_NAME
2931 && TREE_CODE (op1) == INTEGER_CST)
2932 {
2933 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
2934 ? MINUS_EXPR : PLUS_EXPR);
2935 op1 = int_const_binop (reverse_op, val, op1);
2936 if (TREE_OVERFLOW (op1))
2937 op1 = drop_tree_overflow (op1);
2938 add_assert_info (asserts, op0, op0, comp_code, op1);
2939 }
2940 }
2941
2942 /* Add asserts for NAME cmp CST and NAME being defined
2943 as NAME = (int) NAME2. */
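/* Sketch of the transform (illustrative, 32-bit): for
NAME = (int) NAME2 with unsigned NAME2 and the test NAME < 0, the
code below registers NAME2 + 0x80000000 <= 0x7fffffff; adding the
bias 0x80000000 turns the signed test into an equivalent unsigned
one. */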
2944 if (!TYPE_UNSIGNED (TREE_TYPE (val))
2945 && (comp_code == LE_EXPR || comp_code == LT_EXPR
2946 || comp_code == GT_EXPR || comp_code == GE_EXPR)
2947 && gimple_assign_cast_p (def_stmt))
2948 {
2949 name2 = gimple_assign_rhs1 (def_stmt);
2950 if (CONVERT_EXPR_CODE_P (rhs_code)
2951 && TREE_CODE (name2) == SSA_NAME
2952 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2953 && TYPE_UNSIGNED (TREE_TYPE (name2))
2954 && prec == TYPE_PRECISION (TREE_TYPE (name2))
2955 && (comp_code == LE_EXPR || comp_code == GT_EXPR
2956 || !tree_int_cst_equal (val,
2957 TYPE_MIN_VALUE (TREE_TYPE (val)))))
2958 {
2959 tree tmp, cst;
2960 enum tree_code new_comp_code = comp_code;
2961
2962 cst = fold_convert (TREE_TYPE (name2),
2963 TYPE_MIN_VALUE (TREE_TYPE (val)));
2964 /* Build an expression for the range test. */
2965 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
2966 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
2967 fold_convert (TREE_TYPE (name2), val));
2968 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2969 {
2970 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
2971 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
2972 build_int_cst (TREE_TYPE (name2), 1));
2973 }
2974 add_assert_info (asserts, name2, tmp, new_comp_code, cst);
2975 }
2976 }
2977
2978 /* Add asserts for NAME cmp CST and NAME being defined as
2979 NAME = NAME2 >> CST2.
2980
2981 Extract CST2 from the right shift. */
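/* For instance (illustrative): for NAME = NAME2 >> 4 and the test
NAME == 3, VAL2 becomes 3 << 4 = 0x30 and MASK becomes 0xf, so the
code below registers NAME2 - 0x30 <= 0xf in the unsigned domain,
i.e. NAME2 in [0x30, 0x3f]. */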
2982 if (rhs_code == RSHIFT_EXPR)
2983 {
2984 name2 = gimple_assign_rhs1 (def_stmt);
2985 cst2 = gimple_assign_rhs2 (def_stmt);
2986 if (TREE_CODE (name2) == SSA_NAME
2987 && tree_fits_uhwi_p (cst2)
2988 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2989 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
2990 && type_has_mode_precision_p (TREE_TYPE (val)))
2991 {
2992 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
2993 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
2994 }
2995 }
2996 if (val2 != NULL_TREE
2997 && TREE_CODE (val2) == INTEGER_CST
2998 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
2999 TREE_TYPE (val),
3000 val2, cst2), val))
3001 {
3002 enum tree_code new_comp_code = comp_code;
3003 tree tmp, new_val;
3004
3005 tmp = name2;
3006 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
3007 {
3008 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
3009 {
3010 tree type = build_nonstandard_integer_type (prec, 1);
3011 tmp = build1 (NOP_EXPR, type, name2);
3012 val2 = fold_convert (type, val2);
3013 }
3014 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
3015 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
3016 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
3017 }
3018 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
3019 {
3020 wide_int minval
3021 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
3022 new_val = val2;
3023 if (minval == wi::to_wide (new_val))
3024 new_val = NULL_TREE;
3025 }
3026 else
3027 {
3028 wide_int maxval
3029 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
3030 mask |= wi::to_wide (val2);
3031 if (wi::eq_p (mask, maxval))
3032 new_val = NULL_TREE;
3033 else
3034 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
3035 }
3036
3037 if (new_val)
3038 add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
3039 }
3040
3041 /* If we have a conversion that doesn't change the value of the source,
3042 simply register the same assert for it. */
3043 if (CONVERT_EXPR_CODE_P (rhs_code))
3044 {
3045 wide_int rmin, rmax;
3046 tree rhs1 = gimple_assign_rhs1 (def_stmt);
3047 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
3048 && TREE_CODE (rhs1) == SSA_NAME
3049 /* Make sure the relation preserves the upper/lower boundary of
3050 the range conservatively. */
3051 && (comp_code == NE_EXPR
3052 || comp_code == EQ_EXPR
3053 || (TYPE_SIGN (TREE_TYPE (name))
3054 == TYPE_SIGN (TREE_TYPE (rhs1)))
3055 || ((comp_code == LE_EXPR
3056 || comp_code == LT_EXPR)
3057 && !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
3058 || ((comp_code == GE_EXPR
3059 || comp_code == GT_EXPR)
3060 && TYPE_UNSIGNED (TREE_TYPE (rhs1))))
3061 /* And the conversion does not alter the value we compare
3062 against and all values in rhs1 can be represented in
3063 the converted to type. */
3064 && int_fits_type_p (val, TREE_TYPE (rhs1))
3065 && ((TYPE_PRECISION (TREE_TYPE (name))
3066 > TYPE_PRECISION (TREE_TYPE (rhs1)))
3067 || (get_range_info (rhs1, &rmin, &rmax) == VR_RANGE
3068 && wi::fits_to_tree_p (rmin, TREE_TYPE (name))
3069 && wi::fits_to_tree_p (rmax, TREE_TYPE (name)))))
3070 add_assert_info (asserts, rhs1, rhs1,
3071 comp_code, fold_convert (TREE_TYPE (rhs1), val));
3072 }
3073
3074 /* Add asserts for NAME cmp CST and NAME being defined as
3075 NAME = NAME2 & CST2.
3076
3077 Extract CST2 from the and.
3078
3079 Also handle
3080 NAME = (unsigned) NAME2;
3081 casts where NAME's type is unsigned and has smaller precision
3082 than NAME2's type as if it was NAME = NAME2 & MASK. */
3083 names[0] = NULL_TREE;
3084 names[1] = NULL_TREE;
3085 cst2 = NULL_TREE;
3086 if (rhs_code == BIT_AND_EXPR
3087 || (CONVERT_EXPR_CODE_P (rhs_code)
3088 && INTEGRAL_TYPE_P (TREE_TYPE (val))
3089 && TYPE_UNSIGNED (TREE_TYPE (val))
3090 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
3091 > prec))
3092 {
3093 name2 = gimple_assign_rhs1 (def_stmt);
3094 if (rhs_code == BIT_AND_EXPR)
3095 cst2 = gimple_assign_rhs2 (def_stmt);
3096 else
3097 {
3098 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
3099 nprec = TYPE_PRECISION (TREE_TYPE (name2));
3100 }
3101 if (TREE_CODE (name2) == SSA_NAME
3102 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
3103 && TREE_CODE (cst2) == INTEGER_CST
3104 && !integer_zerop (cst2)
3105 && (nprec > 1
3106 || TYPE_UNSIGNED (TREE_TYPE (val))))
3107 {
3108 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
3109 if (gimple_assign_cast_p (def_stmt2))
3110 {
3111 names[1] = gimple_assign_rhs1 (def_stmt2);
3112 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
3113 || TREE_CODE (names[1]) != SSA_NAME
3114 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
3115 || (TYPE_PRECISION (TREE_TYPE (name2))
3116 != TYPE_PRECISION (TREE_TYPE (names[1]))))
3117 names[1] = NULL_TREE;
3118 }
3119 names[0] = name2;
3120 }
3121 }
3122 if (names[0] || names[1])
3123 {
3124 wide_int minv, maxv, valv, cst2v;
3125 wide_int tem, sgnbit;
3126 bool valid_p = false, valn, cst2n;
3127 enum tree_code ccode = comp_code;
3128
3129 valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
3130 cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
3131 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
3132 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
3133 /* If CST2 doesn't have the most significant bit set,
3134 but VAL is negative, we have a comparison like
3135 if ((x & 0x123) > -4) (always true). Just give up. */
3136 if (!cst2n && valn)
3137 ccode = ERROR_MARK;
3138 if (cst2n)
3139 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3140 else
3141 sgnbit = wi::zero (nprec);
3142 minv = valv & cst2v;
3143 switch (ccode)
3144 {
3145 case EQ_EXPR:
3146 /* Minimum unsigned value for equality is VAL & CST2
3147 (should be equal to VAL, otherwise we probably should
3148 have folded the comparison into false) and
3149 maximum unsigned value is VAL | ~CST2. */
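/* E.g. (illustrative, 8 bits): for (x & 0xc) == 4 this yields the
unsigned bounds [4, 4 | ~0xc] = [4, 0xf7]. */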
3150 maxv = valv | ~cst2v;
3151 valid_p = true;
3152 break;
3153
3154 case NE_EXPR:
3155 tem = valv | ~cst2v;
3156 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
3157 if (valv == 0)
3158 {
3159 cst2n = false;
3160 sgnbit = wi::zero (nprec);
3161 goto gt_expr;
3162 }
3163 /* If (VAL | ~CST2) is all ones, handle it as
3164 (X & CST2) < VAL. */
3165 if (tem == -1)
3166 {
3167 cst2n = false;
3168 valn = false;
3169 sgnbit = wi::zero (nprec);
3170 goto lt_expr;
3171 }
3172 if (!cst2n && wi::neg_p (cst2v))
3173 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3174 if (sgnbit != 0)
3175 {
3176 if (valv == sgnbit)
3177 {
3178 cst2n = true;
3179 valn = true;
3180 goto gt_expr;
3181 }
3182 if (tem == wi::mask (nprec - 1, false, nprec))
3183 {
3184 cst2n = true;
3185 goto lt_expr;
3186 }
3187 if (!cst2n)
3188 sgnbit = wi::zero (nprec);
3189 }
3190 break;
3191
3192 case GE_EXPR:
3193 /* If (VAL & CST2) == VAL, the minimum unsigned value for >=
3194 is VAL and the maximum unsigned value is ~0. For a signed
3195 comparison, if CST2 doesn't have the most significant bit
3196 set, handle it similarly. If CST2 has the MSB set,
3197 the minimum is the same, and the maximum is ~0U/2. */
3198 if (minv != valv)
3199 {
3200 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
3201 VAL. */
3202 minv = masked_increment (valv, cst2v, sgnbit, nprec);
3203 if (minv == valv)
3204 break;
3205 }
3206 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3207 valid_p = true;
3208 break;
3209
3210 case GT_EXPR:
3211 gt_expr:
3212 /* Find the smallest MINV such that MINV > VAL
3213 && (MINV & CST2) == MINV, if any. If VAL is signed and
3214 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
3215 minv = masked_increment (valv, cst2v, sgnbit, nprec);
3216 if (minv == valv)
3217 break;
3218 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3219 valid_p = true;
3220 break;
3221
3222 case LE_EXPR:
3223 /* Minimum unsigned value for <= is 0 and maximum
3224 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
3225 Otherwise, find the smallest VAL2 where VAL2 > VAL
3226 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3227 as maximum.
3228 For a signed comparison, if CST2 doesn't have the most
3229 significant bit set, handle it similarly. If CST2 has the
3230 MSB set, the maximum is the same and the minimum is INT_MIN. */
3231 if (minv == valv)
3232 maxv = valv;
3233 else
3234 {
3235 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3236 if (maxv == valv)
3237 break;
3238 maxv -= 1;
3239 }
3240 maxv |= ~cst2v;
3241 minv = sgnbit;
3242 valid_p = true;
3243 break;
3244
3245 case LT_EXPR:
3246 lt_expr:
3247 /* Minimum unsigned value for < is 0 and maximum
3248 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
3249 Otherwise, find the smallest VAL2 where VAL2 > VAL
3250 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3251 as maximum.
3252 For a signed comparison, if CST2 doesn't have the most
3253 significant bit set, handle it similarly. If CST2 has the
3254 MSB set, the maximum is the same and the minimum is INT_MIN. */
3255 if (minv == valv)
3256 {
3257 if (valv == sgnbit)
3258 break;
3259 maxv = valv;
3260 }
3261 else
3262 {
3263 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3264 if (maxv == valv)
3265 break;
3266 }
3267 maxv -= 1;
3268 maxv |= ~cst2v;
3269 minv = sgnbit;
3270 valid_p = true;
3271 break;
3272
3273 default:
3274 break;
3275 }
3276 if (valid_p
3277 && (maxv - minv) != -1)
3278 {
3279 tree tmp, new_val, type;
3280 int i;
3281
3282 for (i = 0; i < 2; i++)
3283 if (names[i])
3284 {
3285 wide_int maxv2 = maxv;
3286 tmp = names[i];
3287 type = TREE_TYPE (names[i]);
3288 if (!TYPE_UNSIGNED (type))
3289 {
3290 type = build_nonstandard_integer_type (nprec, 1);
3291 tmp = build1 (NOP_EXPR, type, names[i]);
3292 }
3293 if (minv != 0)
3294 {
3295 tmp = build2 (PLUS_EXPR, type, tmp,
3296 wide_int_to_tree (type, -minv));
3297 maxv2 = maxv - minv;
3298 }
3299 new_val = wide_int_to_tree (type, maxv2);
3300 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
3301 }
3302 }
3303 }
3304 }
3305 }
3306
3307 /* OP is an operand of a truth value expression which is known to have
3308 a particular value. Register any asserts for OP and for any
3309 operands in OP's defining statement.
3310
3311 If CODE is EQ_EXPR, then we want to register that OP is zero (false);
3312 if CODE is NE_EXPR, then we want to register that OP is nonzero (true). */
3313
3314 static void
3315 register_edge_assert_for_1 (tree op, enum tree_code code,
3316 edge e, vec<assert_info> &asserts)
3317 {
3318 gimple *op_def;
3319 tree val;
3320 enum tree_code rhs_code;
3321
3322 /* We only care about SSA_NAMEs. */
3323 if (TREE_CODE (op) != SSA_NAME)
3324 return;
3325
3326 /* We know that OP will have a zero or nonzero value. */
3327 val = build_int_cst (TREE_TYPE (op), 0);
3328 add_assert_info (asserts, op, op, code, val);
3329
3330 /* Now look at how OP is set. If it's set from a comparison,
3331 a truth operation or some bit operations, then we may be able
3332 to register information about the operands of that assignment. */
3333 op_def = SSA_NAME_DEF_STMT (op);
3334 if (gimple_code (op_def) != GIMPLE_ASSIGN)
3335 return;
3336
3337 rhs_code = gimple_assign_rhs_code (op_def);
3338
3339 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
3340 {
3341 bool invert = (code == EQ_EXPR);
3342 tree op0 = gimple_assign_rhs1 (op_def);
3343 tree op1 = gimple_assign_rhs2 (op_def);
3344
3345 if (TREE_CODE (op0) == SSA_NAME)
3346 register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
3347 if (TREE_CODE (op1) == SSA_NAME)
3348 register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
3349 }
3350 else if ((code == NE_EXPR
3351 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
3352 || (code == EQ_EXPR
3353 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
3354 {
3355 /* Recurse on each operand. */
3356 tree op0 = gimple_assign_rhs1 (op_def);
3357 tree op1 = gimple_assign_rhs2 (op_def);
3358 if (TREE_CODE (op0) == SSA_NAME
3359 && has_single_use (op0))
3360 register_edge_assert_for_1 (op0, code, e, asserts);
3361 if (TREE_CODE (op1) == SSA_NAME
3362 && has_single_use (op1))
3363 register_edge_assert_for_1 (op1, code, e, asserts);
3364 }
3365 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
3366 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
3367 {
3368 /* Recurse, flipping CODE. */
3369 code = invert_tree_comparison (code, false);
3370 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3371 }
3372 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
3373 {
3374 /* Recurse through the copy. */
3375 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3376 }
3377 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
3378 {
3379 /* Recurse through the type conversion, unless it is a narrowing
3380 conversion or conversion from non-integral type. */
3381 tree rhs = gimple_assign_rhs1 (op_def);
3382 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
3383 && (TYPE_PRECISION (TREE_TYPE (rhs))
3384 <= TYPE_PRECISION (TREE_TYPE (op))))
3385 register_edge_assert_for_1 (rhs, code, e, asserts);
3386 }
3387 }
3388
3389 /* Check if comparison
3390 NAME COND_OP INTEGER_CST
3391 has a form of
3392 (X & 11...100..0) COND_OP XX...X00...0
3393 Such comparison can yield assertions like
3394 X >= XX...X00...0
3395 X <= XX...X11...1
3396 in case of COND_OP being EQ_EXPR or
3397 X < XX...X00...0
3398 X > XX...X11...1
3399 in case of NE_EXPR. */
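/* A concrete instance (illustrative): for (x & 0xf0) == 0x30 we get
MASK = 0xf0 and INV_MASK = 0x0f, so *NEW_NAME = x, *LOW = 0x30 with
GE_EXPR and *HIGH = 0x3f with LE_EXPR, i.e. 0x30 <= x <= 0x3f. */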
3400
3401 static bool
3402 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
3403 tree *new_name, tree *low, enum tree_code *low_code,
3404 tree *high, enum tree_code *high_code)
3405 {
3406 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3407
3408 if (!is_gimple_assign (def_stmt)
3409 || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
3410 return false;
3411
3412 tree t = gimple_assign_rhs1 (def_stmt);
3413 tree maskt = gimple_assign_rhs2 (def_stmt);
3414 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
3415 return false;
3416
3417 wi::tree_to_wide_ref mask = wi::to_wide (maskt);
3418 wide_int inv_mask = ~mask;
3419 /* Such degenerate masks must have been folded away by now, so don't
bother optimizing. */
3420 if (mask == 0 || inv_mask == 0)
3421 return false;
3422
3423 /* Assume VALT is INTEGER_CST. */
3424 wi::tree_to_wide_ref val = wi::to_wide (valt);
3425
3426 if ((inv_mask & (inv_mask + 1)) != 0
3427 || (val & mask) != val)
3428 return false;
3429
3430 bool is_range = cond_code == EQ_EXPR;
3431
3432 tree type = TREE_TYPE (t);
3433 wide_int min = wi::min_value (type),
3434 max = wi::max_value (type);
3435
3436 if (is_range)
3437 {
3438 *low_code = val == min ? ERROR_MARK : GE_EXPR;
3439 *high_code = val == max ? ERROR_MARK : LE_EXPR;
3440 }
3441 else
3442 {
3443 /* We can still generate an assertion if one of the alternatives
3444 is known to always be false. */
3445 if (val == min)
3446 {
3447 *low_code = (enum tree_code) 0;
3448 *high_code = GT_EXPR;
3449 }
3450 else if ((val | inv_mask) == max)
3451 {
3452 *low_code = LT_EXPR;
3453 *high_code = (enum tree_code) 0;
3454 }
3455 else
3456 return false;
3457 }
3458
3459 *new_name = t;
3460 *low = wide_int_to_tree (type, val);
3461 *high = wide_int_to_tree (type, val | inv_mask);
3462
3463 return true;
3464 }
3465
3466 /* Try to register an edge assertion for SSA name NAME on edge E for
3467 the condition COND_OP0 COND_CODE COND_OP1 contributing to the
3468 conditional jump at the end of E->SRC. */
3469
3470 void
3471 register_edge_assert_for (tree name, edge e,
3472 enum tree_code cond_code, tree cond_op0,
3473 tree cond_op1, vec<assert_info> &asserts)
3474 {
3475 tree val;
3476 enum tree_code comp_code;
3477 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
3478
3479 /* Do not attempt to infer anything in names that flow through
3480 abnormal edges. */
3481 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
3482 return;
3483
3484 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
3485 cond_op0, cond_op1,
3486 is_else_edge,
3487 &comp_code, &val))
3488 return;
3489
3490 /* Register ASSERT_EXPRs for name. */
3491 register_edge_assert_for_2 (name, e, cond_code, cond_op0,
3492 cond_op1, is_else_edge, asserts);
3493
3494
3495 /* If COND is effectively an equality test of an SSA_NAME against
3496 the value zero or one, then we may be able to assert values
3497 for SSA_NAMEs which flow into COND. */
3498
3499 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
3500 statement of NAME we can assert both operands of the BIT_AND_EXPR
3501 have nonzero value. */
3502 if (((comp_code == EQ_EXPR && integer_onep (val))
3503 || (comp_code == NE_EXPR && integer_zerop (val))))
3504 {
3505 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3506
3507 if (is_gimple_assign (def_stmt)
3508 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
3509 {
3510 tree op0 = gimple_assign_rhs1 (def_stmt);
3511 tree op1 = gimple_assign_rhs2 (def_stmt);
3512 register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
3513 register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
3514 }
3515 }
3516
3517 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
3518 statement of NAME we can assert both operands of the BIT_IOR_EXPR
3519 have zero value. */
3520 if (((comp_code == EQ_EXPR && integer_zerop (val))
3521 || (comp_code == NE_EXPR && integer_onep (val))))
3522 {
3523 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3524
3525 /* For a BIT_IOR_EXPR, both operands necessarily have a zero
3526 value only if NAME == 0, or if the type precision is one. */
3527 if (is_gimple_assign (def_stmt)
3528 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
3529 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
3530 || comp_code == EQ_EXPR)))
3531 {
3532 tree op0 = gimple_assign_rhs1 (def_stmt);
3533 tree op1 = gimple_assign_rhs2 (def_stmt);
3534 register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
3535 register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
3536 }
3537 }
3538
3539 /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
3540 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
3541 && TREE_CODE (val) == INTEGER_CST)
3542 {
3543 enum tree_code low_code, high_code;
3544 tree low, high;
3545 if (is_masked_range_test (name, val, comp_code, &name, &low,
3546 &low_code, &high, &high_code))
3547 {
3548 if (low_code != ERROR_MARK)
3549 register_edge_assert_for_2 (name, e, low_code, name,
3550 low, /*invert*/false, asserts);
3551 if (high_code != ERROR_MARK)
3552 register_edge_assert_for_2 (name, e, high_code, name,
3553 high, /*invert*/false, asserts);
3554 }
3555 }
3556 }
3557
3558 /* Finish found ASSERTS for E and register them at GSI. */
3559
3560 static void
3561 finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
3562 vec<assert_info> &asserts)
3563 {
3564 for (unsigned i = 0; i < asserts.length (); ++i)
3565 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
3566 reachable from E. */
3567 if (live_on_edge (e, asserts[i].name))
3568 register_new_assert_for (asserts[i].name, asserts[i].expr,
3569 asserts[i].comp_code, asserts[i].val,
3570 NULL, e, gsi);
3571 }
3572
3573
3574
3575 /* Determine whether the outgoing edges of BB should receive an
3576 ASSERT_EXPR for each of the operands of BB's LAST statement.
3577 The last statement of BB must be a COND_EXPR.
3578
3579 If any of the sub-graphs rooted at BB have an interesting use of
3580 the predicate operands, an assert location node is added to the
3581 list of assertions for the corresponding operands. */
3582
3583 static void
3584 find_conditional_asserts (basic_block bb, gcond *last)
3585 {
3586 gimple_stmt_iterator bsi;
3587 tree op;
3588 edge_iterator ei;
3589 edge e;
3590 ssa_op_iter iter;
3591
3592 bsi = gsi_for_stmt (last);
3593
3594 /* Look for uses of the operands in each of the sub-graphs
3595 rooted at BB. We need to check each of the outgoing edges
3596 separately, so that we know what kind of ASSERT_EXPR to
3597 insert. */
3598 FOR_EACH_EDGE (e, ei, bb->succs)
3599 {
3600 if (e->dest == bb)
3601 continue;
3602
3603 /* Register the necessary assertions for each operand in the
3604 conditional predicate. */
3605 auto_vec<assert_info, 8> asserts;
3606 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
3607 register_edge_assert_for (op, e,
3608 gimple_cond_code (last),
3609 gimple_cond_lhs (last),
3610 gimple_cond_rhs (last), asserts);
3611 finish_register_edge_assert_for (e, bsi, asserts);
3612 }
3613 }
3614
3615 struct case_info
3616 {
3617 tree expr;
3618 basic_block bb;
3619 };
3620
3621 /* Compare two case labels sorting first by the destination bb index
3622 and then by the case value. */
3623
3624 static int
3625 compare_case_labels (const void *p1, const void *p2)
3626 {
3627 const struct case_info *ci1 = (const struct case_info *) p1;
3628 const struct case_info *ci2 = (const struct case_info *) p2;
3629 int idx1 = ci1->bb->index;
3630 int idx2 = ci2->bb->index;
3631
3632 if (idx1 < idx2)
3633 return -1;
3634 else if (idx1 == idx2)
3635 {
3636 /* Make sure the default label is first in a group. */
3637 if (!CASE_LOW (ci1->expr))
3638 return -1;
3639 else if (!CASE_LOW (ci2->expr))
3640 return 1;
3641 else
3642 return tree_int_cst_compare (CASE_LOW (ci1->expr),
3643 CASE_LOW (ci2->expr));
3644 }
3645 else
3646 return 1;
3647 }
3648
3649 /* Determine whether the outgoing edges of BB should receive an
3650 ASSERT_EXPR for each of the operands of BB's LAST statement.
3651 The last statement of BB must be a SWITCH_EXPR.
3652
3653 If any of the sub-graphs rooted at BB have an interesting use of
3654 the predicate operands, an assert location node is added to the
3655 list of assertions for the corresponding operands. */
3656
3657 static void
3658 find_switch_asserts (basic_block bb, gswitch *last)
3659 {
3660 gimple_stmt_iterator bsi;
3661 tree op;
3662 edge e;
3663 struct case_info *ci;
3664 size_t n = gimple_switch_num_labels (last);
3665 #if GCC_VERSION >= 4000
3666 unsigned int idx;
3667 #else
3668 /* Work around GCC 3.4 bug (PR 37086). */
3669 volatile unsigned int idx;
3670 #endif
3671
3672 bsi = gsi_for_stmt (last);
3673 op = gimple_switch_index (last);
3674 if (TREE_CODE (op) != SSA_NAME)
3675 return;
3676
3677 /* Build a vector of case labels sorted by destination label. */
3678 ci = XNEWVEC (struct case_info, n);
3679 for (idx = 0; idx < n; ++idx)
3680 {
3681 ci[idx].expr = gimple_switch_label (last, idx);
3682 ci[idx].bb = label_to_block (cfun, CASE_LABEL (ci[idx].expr));
3683 }
3684 edge default_edge = find_edge (bb, ci[0].bb);
3685 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
3686
3687 for (idx = 0; idx < n; ++idx)
3688 {
3689 tree min, max;
3690 tree cl = ci[idx].expr;
3691 basic_block cbb = ci[idx].bb;
3692
3693 min = CASE_LOW (cl);
3694 max = CASE_HIGH (cl);
3695
3696 /* If there are multiple case labels with the same destination
3697 we need to combine them to a single value range for the edge. */
3698 if (idx + 1 < n && cbb == ci[idx + 1].bb)
3699 {
3700 /* Skip labels until the last of the group. */
3701 do {
3702 ++idx;
3703 } while (idx < n && cbb == ci[idx].bb);
3704 --idx;
3705
3706 /* Pick up the maximum of the case label range. */
3707 if (CASE_HIGH (ci[idx].expr))
3708 max = CASE_HIGH (ci[idx].expr);
3709 else
3710 max = CASE_LOW (ci[idx].expr);
3711 }
3712
3713 /* Can't extract a useful assertion out of a range that includes the
3714 default label. */
3715 if (min == NULL_TREE)
3716 continue;
3717
3718 /* Find the edge to register the assert expr on. */
3719 e = find_edge (bb, cbb);
3720
3721 /* Register the necessary assertions for the operand in the
3722 SWITCH_EXPR. */
3723 auto_vec<assert_info, 8> asserts;
3724 register_edge_assert_for (op, e,
3725 max ? GE_EXPR : EQ_EXPR,
3726 op, fold_convert (TREE_TYPE (op), min),
3727 asserts);
3728 if (max)
3729 register_edge_assert_for (op, e, LE_EXPR, op,
3730 fold_convert (TREE_TYPE (op), max),
3731 asserts);
3732 finish_register_edge_assert_for (e, bsi, asserts);
3733 }
3734
3735 XDELETEVEC (ci);
3736
3737 if (!live_on_edge (default_edge, op))
3738 return;
3739
3740 /* Now register, along the default edge, assertions that correspond to the
3741 anti-range of each case label. */
3742 int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
3743 if (insertion_limit == 0)
3744 return;
3745
3746 /* We can't do this if the default case shares a label with another case. */
3747 tree default_cl = gimple_switch_default_label (last);
3748 for (idx = 1; idx < n; idx++)
3749 {
3750 tree min, max;
3751 tree cl = gimple_switch_label (last, idx);
3752 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
3753 continue;
3754
3755 min = CASE_LOW (cl);
3756 max = CASE_HIGH (cl);
3757
3758 /* Combine contiguous case ranges to reduce the number of assertions
3759 to insert. */
3760 for (idx = idx + 1; idx < n; idx++)
3761 {
3762 tree next_min, next_max;
3763 tree next_cl = gimple_switch_label (last, idx);
3764 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
3765 break;
3766
3767 next_min = CASE_LOW (next_cl);
3768 next_max = CASE_HIGH (next_cl);
3769
3770 wide_int difference = (wi::to_wide (next_min)
3771 - wi::to_wide (max ? max : min));
3772 if (wi::eq_p (difference, 1))
3773 max = next_max ? next_max : next_min;
3774 else
3775 break;
3776 }
3777 idx--;
3778
3779 if (max == NULL_TREE)
3780 {
3781 /* Register the assertion OP != MIN. */
3782 auto_vec<assert_info, 8> asserts;
3783 min = fold_convert (TREE_TYPE (op), min);
3784 register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
3785 asserts);
3786 finish_register_edge_assert_for (default_edge, bsi, asserts);
3787 }
3788 else
3789 {
3790 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3791 which will give OP the anti-range ~[MIN,MAX]. */
3792 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
3793 min = fold_convert (TREE_TYPE (uop), min);
3794 max = fold_convert (TREE_TYPE (uop), max);
3795
3796 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
3797 tree rhs = int_const_binop (MINUS_EXPR, max, min);
3798 register_new_assert_for (op, lhs, GT_EXPR, rhs,
3799 NULL, default_edge, bsi);
3800 }
3801
3802 if (--insertion_limit == 0)
3803 break;
3804 }
3805 }
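/* As a rough illustration of the default-edge assertions above, given
   a hypothetical

     switch (x_3)
       {
       case 1:
       case 2: ...;
       case 5: ...;
       default: ...;
       }

   the contiguous labels 1 and 2 are combined, so along the default
   edge we register (unsigned)x_3 - 1 > 1, giving x_3 the anti-range
   ~[1, 2], and x_3 != 5 for the singleton label, subject to the
   PARAM_MAX_VRP_SWITCH_ASSERTIONS insertion limit.  */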
3806
3807
3808 /* Traverse all the statements in block BB looking for statements that
3809 may generate useful assertions for the SSA names in their operands.
3810 If a statement produces a useful assertion A for name N_i, then the
3811 list of assertions already generated for N_i is scanned to
3812 determine if A is actually needed.
3813
3814 If N_i already had the assertion A at a location dominating the
3815 current location, then nothing needs to be done. Otherwise, the
3816 new location for A is recorded instead.
3817
3818 1- For every statement S in BB, all the variables used by S are
3819 added to bitmap FOUND_IN_SUBGRAPH.
3820
3821 2- If statement S uses an operand N in a way that exposes a known
3822 value range for N, then if N was not already generated by an
3823 ASSERT_EXPR, create a new assert location for N. For instance,
3824 if N is a pointer and the statement dereferences it, we can
3825 assume that N is not NULL.
3826
3827 3- COND_EXPRs are a special case of #2. We can derive range
3828 information from the predicate but need to insert different
3829 ASSERT_EXPRs for each of the sub-graphs rooted at the
3830 conditional block. If the last statement of BB is a conditional
3831 expression of the form 'X op Y', then
3832
3833 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3834
3835 b) If the conditional is the only entry point to the sub-graph
3836 corresponding to the THEN_CLAUSE, recurse into it. On
3837 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3838 an ASSERT_EXPR is added for the corresponding variable.
3839
3840 c) Repeat step (b) on the ELSE_CLAUSE.
3841
3842 d) Mark X and Y in FOUND_IN_SUBGRAPH.
3843
3844 For instance,
3845
3846 if (a == 9)
3847 b = a;
3848 else
3849 b = c + 1;
3850
3851 In this case, an assertion on the THEN clause is useful to
3852 determine that 'a' is always 9 on that edge. However, an assertion
3853 on the ELSE clause would be unnecessary.
3854
3855 4- If BB does not end in a conditional expression, then we recurse
3856 into BB's dominator children.
3857
3858 At the end of the recursive traversal, every SSA name will have a
3859 list of locations where ASSERT_EXPRs should be added. When a new
3860 location for name N is found, it is registered by calling
3861 register_new_assert_for. That function keeps track of all the
3862 registered assertions to prevent adding unnecessary assertions.
3863 For instance, if a pointer P_4 is dereferenced more than once in a
3864 dominator tree, only the location dominating all the dereferences of
3865 P_4 will receive an ASSERT_EXPR. */
3866
3867 static void
3868 find_assert_locations_1 (basic_block bb, sbitmap live)
3869 {
3870 gimple *last;
3871
3872 last = last_stmt (bb);
3873
3874 /* If BB's last statement is a conditional statement involving integer
3875 operands, determine if we need to add ASSERT_EXPRs. */
3876 if (last
3877 && gimple_code (last) == GIMPLE_COND
3878 && !fp_predicate (last)
3879 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3880 find_conditional_asserts (bb, as_a <gcond *> (last));
3881
3882 /* If BB's last statement is a switch statement involving integer
3883 operands, determine if we need to add ASSERT_EXPRs. */
3884 if (last
3885 && gimple_code (last) == GIMPLE_SWITCH
3886 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3887 find_switch_asserts (bb, as_a <gswitch *> (last));
3888
3889 /* Traverse all the statements in BB marking used names and looking
3890 for statements that may infer assertions for their used operands. */
3891 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
3892 gsi_prev (&si))
3893 {
3894 gimple *stmt;
3895 tree op;
3896 ssa_op_iter i;
3897
3898 stmt = gsi_stmt (si);
3899
3900 if (is_gimple_debug (stmt))
3901 continue;
3902
3903 /* See if we can derive an assertion for any of STMT's operands. */
3904 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3905 {
3906 tree value;
3907 enum tree_code comp_code;
3908
3909 /* If op is not live beyond this stmt, do not bother to insert
3910 asserts for it. */
3911 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
3912 continue;
3913
3914 /* If OP is used in such a way that we can infer a value
3915 range for it, and we don't find a previous assertion for
3916 it, create a new assertion location node for OP. */
3917 if (infer_value_range (stmt, op, &comp_code, &value))
3918 {
3919 /* If we are able to infer a nonzero value range for OP,
3920 then walk backwards through the use-def chain to see if OP
3921 was set via a typecast.
3922
3923 If so, then we can also infer a nonzero value range
3924 for the operand of the NOP_EXPR. */
3925 if (comp_code == NE_EXPR && integer_zerop (value))
3926 {
3927 tree t = op;
3928 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
3929
3930 while (is_gimple_assign (def_stmt)
3931 && CONVERT_EXPR_CODE_P
3932 (gimple_assign_rhs_code (def_stmt))
3933 && TREE_CODE
3934 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
3935 && POINTER_TYPE_P
3936 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
3937 {
3938 t = gimple_assign_rhs1 (def_stmt);
3939 def_stmt = SSA_NAME_DEF_STMT (t);
3940
3941 /* Note we want to register the assert for the
3942 operand of the NOP_EXPR after SI, not after the
3943 conversion. */
3944 if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
3945 register_new_assert_for (t, t, comp_code, value,
3946 bb, NULL, si);
3947 }
3948 }
3949
3950 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3951 }
3952 }
3953
3954 /* Update live. */
3955 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3956 bitmap_set_bit (live, SSA_NAME_VERSION (op));
3957 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3958 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
3959 }
3960
3961 /* Traverse all PHI nodes in BB, updating live. */
3962 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3963 gsi_next (&si))
3964 {
3965 use_operand_p arg_p;
3966 ssa_op_iter i;
3967 gphi *phi = si.phi ();
3968 tree res = gimple_phi_result (phi);
3969
3970 if (virtual_operand_p (res))
3971 continue;
3972
3973 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3974 {
3975 tree arg = USE_FROM_PTR (arg_p);
3976 if (TREE_CODE (arg) == SSA_NAME)
3977 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
3978 }
3979
3980 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
3981 }
3982 }
3983
3984 /* Do an RPO walk over the function computing SSA name liveness
3985 on-the-fly and deciding on assert expressions to insert. */
3986
3987 static void
3988 find_assert_locations (void)
3989 {
3990 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3991 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3992 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3993 int rpo_cnt, i;
3994
3995 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
3996 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3997 for (i = 0; i < rpo_cnt; ++i)
3998 bb_rpo[rpo[i]] = i;
3999
4000 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
4001 the order we compute liveness and insert asserts we otherwise
4002 fail to insert asserts into the loop latch. */
4003 loop_p loop;
4004 FOR_EACH_LOOP (loop, 0)
4005 {
4006 i = loop->latch->index;
4007 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
4008 for (gphi_iterator gsi = gsi_start_phis (loop->header);
4009 !gsi_end_p (gsi); gsi_next (&gsi))
4010 {
4011 gphi *phi = gsi.phi ();
4012 if (virtual_operand_p (gimple_phi_result (phi)))
4013 continue;
4014 tree arg = gimple_phi_arg_def (phi, j);
4015 if (TREE_CODE (arg) == SSA_NAME)
4016 {
4017 if (live[i] == NULL)
4018 {
4019 live[i] = sbitmap_alloc (num_ssa_names);
4020 bitmap_clear (live[i]);
4021 }
4022 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
4023 }
4024 }
4025 }
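  /* For instance, for a hypothetical loop header PHI

       x_1 = PHI <x_0 (preheader), x_2 (latch)>

     the pre-seeding above marks x_2 live in the latch block, so that a
     statement inside the loop body that implies a range for x_2 still
     gets an ASSERT_EXPR registered on the path to the latch.  */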
4026
4027 for (i = rpo_cnt - 1; i >= 0; --i)
4028 {
4029 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
4030 edge e;
4031 edge_iterator ei;
4032
4033 if (!live[rpo[i]])
4034 {
4035 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
4036 bitmap_clear (live[rpo[i]]);
4037 }
4038
4039 /* Process BB and update the live information with uses in
4040 this block. */
4041 find_assert_locations_1 (bb, live[rpo[i]]);
4042
4043 /* Merge liveness into the predecessor blocks and free it. */
4044 if (!bitmap_empty_p (live[rpo[i]]))
4045 {
4046 int pred_rpo = i;
4047 FOR_EACH_EDGE (e, ei, bb->preds)
4048 {
4049 int pred = e->src->index;
4050 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
4051 continue;
4052
4053 if (!live[pred])
4054 {
4055 live[pred] = sbitmap_alloc (num_ssa_names);
4056 bitmap_clear (live[pred]);
4057 }
4058 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
4059
4060 if (bb_rpo[pred] < pred_rpo)
4061 pred_rpo = bb_rpo[pred];
4062 }
4063
4064 /* Record the RPO number of the last visited block that needs
4065 live information from this block. */
4066 last_rpo[rpo[i]] = pred_rpo;
4067 }
4068 else
4069 {
4070 sbitmap_free (live[rpo[i]]);
4071 live[rpo[i]] = NULL;
4072 }
4073
4074 /* We can free all successors live bitmaps if all their
4075 predecessors have been visited already. */
4076 FOR_EACH_EDGE (e, ei, bb->succs)
4077 if (last_rpo[e->dest->index] == i
4078 && live[e->dest->index])
4079 {
4080 sbitmap_free (live[e->dest->index]);
4081 live[e->dest->index] = NULL;
4082 }
4083 }
4084
4085 XDELETEVEC (rpo);
4086 XDELETEVEC (bb_rpo);
4087 XDELETEVEC (last_rpo);
4088 for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
4089 if (live[i])
4090 sbitmap_free (live[i]);
4091 XDELETEVEC (live);
4092 }
4093
4094 /* Create an ASSERT_EXPR for NAME and insert it in the location
4095 indicated by LOC. Return true if we made any edge insertions. */
4096
4097 static bool
4098 process_assert_insertions_for (tree name, assert_locus *loc)
4099 {
4100 /* Build the comparison expression NAME_i COMP_CODE VAL. */
4101 gimple *stmt;
4102 tree cond;
4103 gimple *assert_stmt;
4104 edge_iterator ei;
4105 edge e;
4106
4107 /* If we have X <=> X do not insert an assert expr for that. */
4108 if (loc->expr == loc->val)
4109 return false;
4110
4111 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
4112 assert_stmt = build_assert_expr_for (cond, name);
4113 if (loc->e)
4114 {
4115 /* We have been asked to insert the assertion on an edge. This
4116 is used only by COND_EXPR and SWITCH_EXPR assertions. */
4117 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
4118 || (gimple_code (gsi_stmt (loc->si))
4119 == GIMPLE_SWITCH));
4120
4121 gsi_insert_on_edge (loc->e, assert_stmt);
4122 return true;
4123 }
4124
4125 /* If the stmt iterator points at the end then this is an insertion
4126 at the beginning of a block. */
4127 if (gsi_end_p (loc->si))
4128 {
4129 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
4130 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
4131 return false;
4133 }
4134 /* Otherwise, we can insert right after LOC->SI, provided the
4135 statement does not end its basic block. */
4136 stmt = gsi_stmt (loc->si);
4137 if (!stmt_ends_bb_p (stmt))
4138 {
4139 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
4140 return false;
4141 }
4142
4143 /* If STMT must be the last statement in BB, we can only insert new
4144 assertions on the non-abnormal edge out of BB. Note that since
4145 STMT is not control flow, there may only be one non-abnormal/eh edge
4146 out of BB. */
4147 FOR_EACH_EDGE (e, ei, loc->bb->succs)
4148 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
4149 {
4150 gsi_insert_on_edge (e, assert_stmt);
4151 return true;
4152 }
4153
4154 gcc_unreachable ();
4155 }
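/* For example (illustrative names): an assertion registered for a
   pointer dereference in the middle of a block is inserted right
   after that statement, as in

     *p_3 = 1;
     p_4 = ASSERT_EXPR <p_3, p_3 != 0B>;

   while assertions registered on edges (for COND_EXPR and SWITCH_EXPR
   predicates) are queued and committed later by
   gsi_commit_edge_inserts.  */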
4156
4157 /* Qsort helper for sorting assert locations. If stable is true, don't
4158 use iterative_hash_expr because it can be unstable for -fcompare-debug;
4159 on the other hand, some pointers might be NULL. */
4160
4161 template <bool stable>
4162 static int
4163 compare_assert_loc (const void *pa, const void *pb)
4164 {
4165 assert_locus * const a = *(assert_locus * const *)pa;
4166 assert_locus * const b = *(assert_locus * const *)pb;
4167
4168 /* If stable, some asserts might be optimized away already, sort
4169 them last. */
4170 if (stable)
4171 {
4172 if (a == NULL)
4173 return b != NULL;
4174 else if (b == NULL)
4175 return -1;
4176 }
4177
4178 if (a->e == NULL && b->e != NULL)
4179 return 1;
4180 else if (a->e != NULL && b->e == NULL)
4181 return -1;
4182
4183 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
4184 no need to test both a->e and b->e. */
4185
4186 /* Sort after destination index. */
4187 if (a->e == NULL)
4188 ;
4189 else if (a->e->dest->index > b->e->dest->index)
4190 return 1;
4191 else if (a->e->dest->index < b->e->dest->index)
4192 return -1;
4193
4194 /* Sort after comp_code. */
4195 if (a->comp_code > b->comp_code)
4196 return 1;
4197 else if (a->comp_code < b->comp_code)
4198 return -1;
4199
4200 hashval_t ha, hb;
4201
4202 /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
4203 uses DECL_UID of the VAR_DECL, so sorting might differ between
4204 -g and -g0. When doing the removal of redundant assert exprs
4205 and commonization to successors, this does not matter, but for
4206 the final sort needs to be stable. */
4207 if (stable)
4208 {
4209 ha = 0;
4210 hb = 0;
4211 }
4212 else
4213 {
4214 ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
4215 hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
4216 }
4217
4218 /* Break the tie using hashing and source/bb index. */
4219 if (ha == hb)
4220 return (a->e != NULL
4221 ? a->e->src->index - b->e->src->index
4222 : a->bb->index - b->bb->index);
4223 return ha > hb ? 1 : -1;
4224 }
4225
4226 /* Process all the insertions registered for every name N_i registered
4227 in NEED_ASSERT_FOR. The list of assertions to be inserted are
4228 found in ASSERTS_FOR[i]. */
4229
4230 static void
4231 process_assert_insertions (void)
4232 {
4233 unsigned i;
4234 bitmap_iterator bi;
4235 bool update_edges_p = false;
4236 int num_asserts = 0;
4237
4238 if (dump_file && (dump_flags & TDF_DETAILS))
4239 dump_all_asserts (dump_file);
4240
4241 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4242 {
4243 assert_locus *loc = asserts_for[i];
4244 gcc_assert (loc);
4245
4246 auto_vec<assert_locus *, 16> asserts;
4247 for (; loc; loc = loc->next)
4248 asserts.safe_push (loc);
4249 asserts.qsort (compare_assert_loc<false>);
4250
4251 /* Push down common asserts to successors and remove redundant ones. */
4252 unsigned ecnt = 0;
4253 assert_locus *common = NULL;
4254 unsigned commonj = 0;
4255 for (unsigned j = 0; j < asserts.length (); ++j)
4256 {
4257 loc = asserts[j];
4258 if (! loc->e)
4259 common = NULL;
4260 else if (! common
4261 || loc->e->dest != common->e->dest
4262 || loc->comp_code != common->comp_code
4263 || ! operand_equal_p (loc->val, common->val, 0)
4264 || ! operand_equal_p (loc->expr, common->expr, 0))
4265 {
4266 commonj = j;
4267 common = loc;
4268 ecnt = 1;
4269 }
4270 else if (loc->e == asserts[j-1]->e)
4271 {
4272 /* Remove duplicate asserts. */
4273 if (commonj == j - 1)
4274 {
4275 commonj = j;
4276 common = loc;
4277 }
4278 free (asserts[j-1]);
4279 asserts[j-1] = NULL;
4280 }
4281 else
4282 {
4283 ecnt++;
4284 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
4285 {
4286 /* We have the same assertion on all incoming edges of a BB.
4287 Insert it at the beginning of that block. */
4288 loc->bb = loc->e->dest;
4289 loc->e = NULL;
4290 loc->si = gsi_none ();
4291 common = NULL;
4292 /* Clear asserts commoned. */
4293 for (; commonj != j; ++commonj)
4294 if (asserts[commonj])
4295 {
4296 free (asserts[commonj]);
4297 asserts[commonj] = NULL;
4298 }
4299 }
4300 }
4301 }
4302
4303 /* The asserts vector sorting above might be unstable for
4304 -fcompare-debug, sort again to ensure a stable sort. */
4305 asserts.qsort (compare_assert_loc<true>);
4306 for (unsigned j = 0; j < asserts.length (); ++j)
4307 {
4308 loc = asserts[j];
4309 if (! loc)
4310 break;
4311 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4312 num_asserts++;
4313 free (loc);
4314 }
4315 }
4316
4317 if (update_edges_p)
4318 gsi_commit_edge_inserts ();
4319
4320 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
4321 num_asserts);
4322 }
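/* For instance, if the same assertion x_3 != 0 has been registered on
   both incoming edges of a two-predecessor block <bb 7>, ECNT reaches
   EDGE_COUNT (preds) in the commoning loop above and the per-edge
   copies are replaced by a single

     x_4 = ASSERT_EXPR <x_3, x_3 != 0>;

   at the beginning of <bb 7>.  */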
4323
4324
4325 /* Traverse the flowgraph looking for conditional jumps to insert range
4326 expressions. These range expressions are meant to provide information
4327 to optimizations that need to reason in terms of value ranges. They
4328 will not be expanded into RTL. For instance, given:
4329
4330 x = ...
4331 y = ...
4332 if (x < y)
4333 y = x - 2;
4334 else
4335 x = y + 3;
4336
4337 this pass will transform the code into:
4338
4339 x = ...
4340 y = ...
4341 if (x < y)
4342 {
4343 x = ASSERT_EXPR <x, x < y>
4344 y = x - 2
4345 }
4346 else
4347 {
4348 y = ASSERT_EXPR <y, x >= y>
4349 x = y + 3
4350 }
4351
4352 The idea is that once copy and constant propagation have run, other
4353 optimizations will be able to determine what ranges of values can 'x'
4354 take in different paths of the code, simply by checking the reaching
4355 definition of 'x'. */
4356
4357 static void
4358 insert_range_assertions (void)
4359 {
4360 need_assert_for = BITMAP_ALLOC (NULL);
4361 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
4362
4363 calculate_dominance_info (CDI_DOMINATORS);
4364
4365 find_assert_locations ();
4366 if (!bitmap_empty_p (need_assert_for))
4367 {
4368 process_assert_insertions ();
4369 update_ssa (TODO_update_ssa_no_phi);
4370 }
4371
4372 if (dump_file && (dump_flags & TDF_DETAILS))
4373 {
4374 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
4375 dump_function_to_file (current_function_decl, dump_file, dump_flags);
4376 }
4377
4378 free (asserts_for);
4379 BITMAP_FREE (need_assert_for);
4380 }
4381
4382 class vrp_prop : public ssa_propagation_engine
4383 {
4384 public:
4385 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
4386 enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
4387
4388 void vrp_initialize (void);
4389 void vrp_finalize (bool);
4390 void check_all_array_refs (void);
4391 void check_array_ref (location_t, tree, bool);
4392 void check_mem_ref (location_t, tree, bool);
4393 void search_for_addr_array (tree, location_t);
4394
4395 class vr_values vr_values;
4396 /* Temporary delegator to minimize code churn. */
4397 const value_range *get_value_range (const_tree op)
4398 { return vr_values.get_value_range (op); }
4399 void set_def_to_varying (const_tree def)
4400 { vr_values.set_def_to_varying (def); }
4401 void set_defs_to_varying (gimple *stmt)
4402 { vr_values.set_defs_to_varying (stmt); }
4403 void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
4404 tree *output_p, value_range *vr)
4405 { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
4406 bool update_value_range (const_tree op, value_range *vr)
4407 { return vr_values.update_value_range (op, vr); }
4408 void extract_range_basic (value_range *vr, gimple *stmt)
4409 { vr_values.extract_range_basic (vr, stmt); }
4410 void extract_range_from_phi_node (gphi *phi, value_range *vr)
4411 { vr_values.extract_range_from_phi_node (phi, vr); }
4412 };

4413 /* Checks one ARRAY_REF in REF, located at LOCATION. Ignores flexible arrays
4414 and "struct" hacks. If VRP can determine that the
4415 array subscript is a constant, check if it is outside valid
4416 range. If the array subscript is a RANGE, warn if it is
4417 non-overlapping with valid range.
4418 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
4419
4420 void
4421 vrp_prop::check_array_ref (location_t location, tree ref,
4422 bool ignore_off_by_one)
4423 {
4424 const value_range *vr = NULL;
4425 tree low_sub, up_sub;
4426 tree low_bound, up_bound, up_bound_p1;
4427
4428 if (TREE_NO_WARNING (ref))
4429 return;
4430
4431 low_sub = up_sub = TREE_OPERAND (ref, 1);
4432 up_bound = array_ref_up_bound (ref);
4433
4434 if (!up_bound
4435 || TREE_CODE (up_bound) != INTEGER_CST
4436 || (warn_array_bounds < 2
4437 && array_at_struct_end_p (ref)))
4438 {
4439 /* Accesses to trailing arrays via pointers may access storage
4440 beyond the type's array bounds. For such arrays, or for flexible
4441 array members, as well as for other arrays of an unknown size,
4442 replace the upper bound with a more permissive one that assumes
4443 the size of the largest object is PTRDIFF_MAX. */
4444 tree eltsize = array_ref_element_size (ref);
4445
4446 if (TREE_CODE (eltsize) != INTEGER_CST
4447 || integer_zerop (eltsize))
4448 {
4449 up_bound = NULL_TREE;
4450 up_bound_p1 = NULL_TREE;
4451 }
4452 else
4453 {
4454 tree maxbound = TYPE_MAX_VALUE (ptrdiff_type_node);
4455 tree arg = TREE_OPERAND (ref, 0);
4456 poly_int64 off;
4457
4458 if (get_addr_base_and_unit_offset (arg, &off) && known_gt (off, 0))
4459 maxbound = wide_int_to_tree (sizetype,
4460 wi::sub (wi::to_wide (maxbound),
4461 off));
4462 else
4463 maxbound = fold_convert (sizetype, maxbound);
4464
4465 up_bound_p1 = int_const_binop (TRUNC_DIV_EXPR, maxbound, eltsize);
4466
4467 up_bound = int_const_binop (MINUS_EXPR, up_bound_p1,
4468 build_int_cst (ptrdiff_type_node, 1));
4469 }
4470 }
4471 else
4472 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
4473 build_int_cst (TREE_TYPE (up_bound), 1));
4474
4475 low_bound = array_ref_low_bound (ref);
4476
4477 tree artype = TREE_TYPE (TREE_OPERAND (ref, 0));
4478
4479 bool warned = false;
4480
4481 /* Empty array. */
4482 if (up_bound && tree_int_cst_equal (low_bound, up_bound_p1))
4483 warned = warning_at (location, OPT_Warray_bounds,
4484 "array subscript %E is above array bounds of %qT",
4485 low_bound, artype);
4486
4487 if (TREE_CODE (low_sub) == SSA_NAME)
4488 {
4489 vr = get_value_range (low_sub);
4490 if (!vr->undefined_p () && !vr->varying_p ())
4491 {
4492 low_sub = vr->kind () == VR_RANGE ? vr->max () : vr->min ();
4493 up_sub = vr->kind () == VR_RANGE ? vr->min () : vr->max ();
4494 }
4495 }
4496
4497 if (vr && vr->kind () == VR_ANTI_RANGE)
4498 {
4499 if (up_bound
4500 && TREE_CODE (up_sub) == INTEGER_CST
4501 && (ignore_off_by_one
4502 ? tree_int_cst_lt (up_bound, up_sub)
4503 : tree_int_cst_le (up_bound, up_sub))
4504 && TREE_CODE (low_sub) == INTEGER_CST
4505 && tree_int_cst_le (low_sub, low_bound))
4506 warned = warning_at (location, OPT_Warray_bounds,
4507 "array subscript [%E, %E] is outside "
4508 "array bounds of %qT",
4509 low_sub, up_sub, artype);
4510 }
4511 else if (up_bound
4512 && TREE_CODE (up_sub) == INTEGER_CST
4513 && (ignore_off_by_one
4514 ? !tree_int_cst_le (up_sub, up_bound_p1)
4515 : !tree_int_cst_le (up_sub, up_bound)))
4516 {
4517 if (dump_file && (dump_flags & TDF_DETAILS))
4518 {
4519 fprintf (dump_file, "Array bound warning for ");
4520 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4521 fprintf (dump_file, "\n");
4522 }
4523 warned = warning_at (location, OPT_Warray_bounds,
4524 "array subscript %E is above array bounds of %qT",
4525 up_sub, artype);
4526 }
4527 else if (TREE_CODE (low_sub) == INTEGER_CST
4528 && tree_int_cst_lt (low_sub, low_bound))
4529 {
4530 if (dump_file && (dump_flags & TDF_DETAILS))
4531 {
4532 fprintf (dump_file, "Array bound warning for ");
4533 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4534 fprintf (dump_file, "\n");
4535 }
4536 warned = warning_at (location, OPT_Warray_bounds,
4537 "array subscript %E is below array bounds of %qT",
4538 low_sub, artype);
4539 }
4540
4541 if (warned)
4542 {
4543 ref = TREE_OPERAND (ref, 0);
4544
4545 if (DECL_P (ref))
4546 inform (DECL_SOURCE_LOCATION (ref), "while referencing %qD", ref);
4547
4548 TREE_NO_WARNING (ref) = 1;
4549 }
4550 }
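/* For instance, given a hypothetical

     int a[10];
     ... = a[10];

   the subscript 10 is compared above against the upper bound 9
   (bound-plus-one 10) and "array subscript 10 is above array bounds"
   is diagnosed; with IGNORE_OFF_BY_ONE set, as for the address
   computation &a[10], a subscript equal to bound-plus-one is still
   accepted.  */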
4551
4552 /* Checks one MEM_REF in REF, located at LOCATION, for out-of-bounds
4553 references to string constants. If VRP can determine that the array
4554 subscript is a constant, check if it is outside valid range.
4555 If the array subscript is a RANGE, warn if it is non-overlapping
4556 with valid range.
4557 IGNORE_OFF_BY_ONE is true if the MEM_REF is inside an ADDR_EXPR
4558 (used to allow one-past-the-end indices for code that takes
4559 the address of the just-past-the-end element of an array). */
4560
4561 void
4562 vrp_prop::check_mem_ref (location_t location, tree ref,
4563 bool ignore_off_by_one)
4564 {
4565 if (TREE_NO_WARNING (ref))
4566 return;
4567
4568 tree arg = TREE_OPERAND (ref, 0);
4569 /* The constant and variable offset of the reference. */
4570 tree cstoff = TREE_OPERAND (ref, 1);
4571 tree varoff = NULL_TREE;
4572
4573 const offset_int maxobjsize = tree_to_shwi (max_object_size ());
4574
4575 /* The array or string constant bounds in bytes. Initially set
4576 to [-MAXOBJSIZE - 1, MAXOBJSIZE] until a tighter bound is
4577 determined. */
4578 offset_int arrbounds[2] = { -maxobjsize - 1, maxobjsize };
4579
4580 /* The minimum and maximum intermediate offset. For a reference
4581 to be valid, not only must the final offset/subscript be
4582 in bounds but all intermediate offsets should be as well.
4583 GCC may be able to deal gracefully with such out-of-bounds
4584 offsets so the checking is only enabled at -Warray-bounds=2
4585 where it may help detect bugs in uses of the intermediate
4586 offsets that would otherwise not be detectable. */
4587 offset_int ioff = wi::to_offset (fold_convert (ptrdiff_type_node, cstoff));
4588 offset_int extrema[2] = { 0, wi::abs (ioff) };
4589
4590 /* The range of the byte offset into the reference. */
4591 offset_int offrange[2] = { 0, 0 };
4592
4593 const value_range *vr = NULL;
4594
4595 /* Determine the offsets and increment OFFRANGE for the bounds of each.
4596 The loop computes the range of the final offset for expressions such
4597 as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs in
4598 some range. */
4599 const unsigned limit = PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT);
4600 for (unsigned n = 0; TREE_CODE (arg) == SSA_NAME && n < limit; ++n)
4601 {
4602 gimple *def = SSA_NAME_DEF_STMT (arg);
4603 if (!is_gimple_assign (def))
4604 break;
4605
4606 tree_code code = gimple_assign_rhs_code (def);
4607 if (code == POINTER_PLUS_EXPR)
4608 {
4609 arg = gimple_assign_rhs1 (def);
4610 varoff = gimple_assign_rhs2 (def);
4611 }
4612 else if (code == ASSERT_EXPR)
4613 {
4614 arg = TREE_OPERAND (gimple_assign_rhs1 (def), 0);
4615 continue;
4616 }
4617 else
4618 return;
4619
4620 /* VAROFF should always be an SSA_NAME here (and not even
4621 INTEGER_CST) but there's no point in taking chances. */
4622 if (TREE_CODE (varoff) != SSA_NAME)
4623 break;
4624
4625 vr = get_value_range (varoff);
4626 if (!vr || vr->undefined_p () || vr->varying_p ())
4627 break;
4628
4629 if (!vr->constant_p ())
4630 break;
4631
4632 if (vr->kind () == VR_RANGE)
4633 {
4634 offset_int min
4635 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->min ()));
4636 offset_int max
4637 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->max ()));
4638 if (min < max)
4639 {
4640 offrange[0] += min;
4641 offrange[1] += max;
4642 }
4643 else
4644 {
4645 /* When MIN >= MAX, the offset is effectively in a union
4646 of two ranges: [-MAXOBJSIZE -1, MAX] and [MIN, MAXOBJSIZE].
4647 Since there is no way to represent such a range across
4648 additions, conservatively add [-MAXOBJSIZE -1, MAXOBJSIZE]
4649 to OFFRANGE. */
4650 offrange[0] += arrbounds[0];
4651 offrange[1] += arrbounds[1];
4652 }
4653 }
4654 else
4655 {
4656 /* For an anti-range, analogously to the above, conservatively
4657 add [-MAXOBJSIZE -1, MAXOBJSIZE] to OFFRANGE. */
4658 offrange[0] += arrbounds[0];
4659 offrange[1] += arrbounds[1];
4660 }
4661
4662 /* Keep track of the minimum and maximum offset. */
4663 if (offrange[1] < 0 && offrange[1] < extrema[0])
4664 extrema[0] = offrange[1];
4665 if (offrange[0] > 0 && offrange[0] > extrema[1])
4666 extrema[1] = offrange[0];
4667
4668 if (offrange[0] < arrbounds[0])
4669 offrange[0] = arrbounds[0];
4670
4671 if (offrange[1] > arrbounds[1])
4672 offrange[1] = arrbounds[1];
4673 }
4674
4675 if (TREE_CODE (arg) == ADDR_EXPR)
4676 {
4677 arg = TREE_OPERAND (arg, 0);
4678 if (TREE_CODE (arg) != STRING_CST
4679 && TREE_CODE (arg) != VAR_DECL)
4680 return;
4681 }
4682 else
4683 return;
4684
4685 /* The type of the object being referred to. It can be an array,
4686 string literal, or a non-array type when the MEM_REF represents
4687 a reference/subscript via a pointer to an object that is not
4688 an element of an array. References to members of structs and
4689 unions are excluded because MEM_REF doesn't make it possible
4690 to identify the member where the reference originated.
4691 Incomplete types are excluded as well because their size is
4692 not known. */
4693 tree reftype = TREE_TYPE (arg);
4694 if (POINTER_TYPE_P (reftype)
4695 || !COMPLETE_TYPE_P (reftype)
4696 || TREE_CODE (TYPE_SIZE_UNIT (reftype)) != INTEGER_CST
4697 || RECORD_OR_UNION_TYPE_P (reftype))
4698 return;
4699
4700 offset_int eltsize;
4701 if (TREE_CODE (reftype) == ARRAY_TYPE)
4702 {
4703 eltsize = wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (reftype)));
4704
4705 if (tree dom = TYPE_DOMAIN (reftype))
4706 {
4707 tree bnds[] = { TYPE_MIN_VALUE (dom), TYPE_MAX_VALUE (dom) };
4708 if (array_at_struct_end_p (arg)
4709 || !bnds[0] || !bnds[1])
4710 {
4711 arrbounds[0] = 0;
4712 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4713 }
4714 else
4715 {
4716 arrbounds[0] = wi::to_offset (bnds[0]) * eltsize;
4717 arrbounds[1] = (wi::to_offset (bnds[1]) + 1) * eltsize;
4718 }
4719 }
4720 else
4721 {
4722 arrbounds[0] = 0;
4723 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4724 }
4725
4726 if (TREE_CODE (ref) == MEM_REF)
4727 {
4728 /* For MEM_REF determine a tighter bound of the non-array
4729 element type. */
4730 tree eltype = TREE_TYPE (reftype);
4731 while (TREE_CODE (eltype) == ARRAY_TYPE)
4732 eltype = TREE_TYPE (eltype);
4733 eltsize = wi::to_offset (TYPE_SIZE_UNIT (eltype));
4734 }
4735 }
4736 else
4737 {
4738 eltsize = 1;
4739 arrbounds[0] = 0;
4740 arrbounds[1] = wi::to_offset (TYPE_SIZE_UNIT (reftype));
4741 }
4742
4743 offrange[0] += ioff;
4744 offrange[1] += ioff;
4745
4746 /* Compute the more permissive upper bound when IGNORE_OFF_BY_ONE
4747 is set (when taking the address of the one-past-last element
4748 of an array) but always use the stricter bound in diagnostics. */
4749 offset_int ubound = arrbounds[1];
4750 if (ignore_off_by_one)
4751 ubound += 1;
4752
4753 if (offrange[0] >= ubound || offrange[1] < arrbounds[0])
4754 {
4755 /* Treat a reference to a non-array object as one to an array
4756 of a single element. */
4757 if (TREE_CODE (reftype) != ARRAY_TYPE)
4758 reftype = build_array_type_nelts (reftype, 1);
4759
4760 if (TREE_CODE (ref) == MEM_REF)
4761 {
4762 /* Extract the element type out of MEM_REF and use its size
4763 to compute the index to print in the diagnostic; arrays
4764 in MEM_REF don't mean anything. A type with no size like
4765 void is as good as having a size of 1. */
4766 tree type = TREE_TYPE (ref);
4767 while (TREE_CODE (type) == ARRAY_TYPE)
4768 type = TREE_TYPE (type);
4769 if (tree size = TYPE_SIZE_UNIT (type))
4770 {
4771 offrange[0] = offrange[0] / wi::to_offset (size);
4772 offrange[1] = offrange[1] / wi::to_offset (size);
4773 }
4774 }
4775 else
4776 {
4777 /* For anything other than MEM_REF, compute the index to
4778 print in the diagnostic as the offset over element size. */
4779 offrange[0] = offrange[0] / eltsize;
4780 offrange[1] = offrange[1] / eltsize;
4781 }
4782
4783 bool warned;
4784 if (offrange[0] == offrange[1])
4785 warned = warning_at (location, OPT_Warray_bounds,
4786 "array subscript %wi is outside array bounds "
4787 "of %qT",
4788 offrange[0].to_shwi (), reftype);
4789 else
4790 warned = warning_at (location, OPT_Warray_bounds,
4791 "array subscript [%wi, %wi] is outside "
4792 "array bounds of %qT",
4793 offrange[0].to_shwi (),
4794 offrange[1].to_shwi (), reftype);
4795 if (warned && DECL_P (arg))
4796 inform (DECL_SOURCE_LOCATION (arg), "while referencing %qD", arg);
4797
4798 if (warned)
4799 TREE_NO_WARNING (ref) = 1;
4800 return;
4801 }
4802
4803 if (warn_array_bounds < 2)
4804 return;
4805
4806 /* At level 2 check also intermediate offsets. */
4807 int i = 0;
4808 if (extrema[i] < -arrbounds[1] || extrema[i = 1] > ubound)
4809 {
4810 HOST_WIDE_INT tmpidx = extrema[i].to_shwi () / eltsize.to_shwi ();
4811
4812 if (warning_at (location, OPT_Warray_bounds,
4813 "intermediate array offset %wi is outside array bounds "
4814 "of %qT", tmpidx, reftype))
4815 TREE_NO_WARNING (ref) = 1;
4816 }
4817 }
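/* A rough sketch of the offset tracking above, with hypothetical
   names and ranges:

     char buf[8];
     p_2 = &buf + i_1;            <-- i_1 in [8, 12]
     ... = MEM_REF <p_2 + 2>;

   OFFRANGE picks up [8, 12] from the POINTER_PLUS_EXPR, the constant
   offset 2 widens it to [10, 14], and since the valid byte offsets
   into BUF are [0, 8), the whole range is out of bounds and "array
   subscript [10, 14] is outside array bounds" is diagnosed.  */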
4818
4819 /* Check whether the expression T, located at LOCATION, computes the
4820 address of an ARRAY_REF, and call check_array_ref on it. */
4821
4822 void
4823 vrp_prop::search_for_addr_array (tree t, location_t location)
4824 {
4825 /* Check each ARRAY_REF and MEM_REF in the reference chain. */
4826 do
4827 {
4828 if (TREE_CODE (t) == ARRAY_REF)
4829 check_array_ref (location, t, true /*ignore_off_by_one*/);
4830 else if (TREE_CODE (t) == MEM_REF)
4831 check_mem_ref (location, t, true /*ignore_off_by_one*/);
4832
4833 t = TREE_OPERAND (t, 0);
4834 }
4835 while (handled_component_p (t) || TREE_CODE (t) == MEM_REF);
4836
4837 if (TREE_CODE (t) != MEM_REF
4838 || TREE_CODE (TREE_OPERAND (t, 0)) != ADDR_EXPR
4839 || TREE_NO_WARNING (t))
4840 return;
4841
4842 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
4843 tree low_bound, up_bound, el_sz;
4844 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
4845 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
4846 || !TYPE_DOMAIN (TREE_TYPE (tem)))
4847 return;
4848
4849 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4850 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4851 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
4852 if (!low_bound
4853 || TREE_CODE (low_bound) != INTEGER_CST
4854 || !up_bound
4855 || TREE_CODE (up_bound) != INTEGER_CST
4856 || !el_sz
4857 || TREE_CODE (el_sz) != INTEGER_CST)
4858 return;
4859
4860 offset_int idx;
4861 if (!mem_ref_offset (t).is_constant (&idx))
4862 return;
4863
4864 bool warned = false;
4865 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
4866 if (idx < 0)
4867 {
4868 if (dump_file && (dump_flags & TDF_DETAILS))
4869 {
4870 fprintf (dump_file, "Array bound warning for ");
4871 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4872 fprintf (dump_file, "\n");
4873 }
4874 warned = warning_at (location, OPT_Warray_bounds,
4875 "array subscript %wi is below "
4876 "array bounds of %qT",
4877 idx.to_shwi (), TREE_TYPE (tem));
4878 }
4879 else if (idx > (wi::to_offset (up_bound)
4880 - wi::to_offset (low_bound) + 1))
4881 {
4882 if (dump_file && (dump_flags & TDF_DETAILS))
4883 {
4884 fprintf (dump_file, "Array bound warning for ");
4885 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4886 fprintf (dump_file, "\n");
4887 }
4888 warned = warning_at (location, OPT_Warray_bounds,
4889 "array subscript %wu is above "
4890 "array bounds of %qT",
4891 idx.to_uhwi (), TREE_TYPE (tem));
4892 }
4893
4894 if (warned)
4895 {
4896 if (DECL_P (t))
4897 inform (DECL_SOURCE_LOCATION (t), "while referencing %qD", t);
4898
4899 TREE_NO_WARNING (t) = 1;
4900 }
4901 }
4902
4903 /* walk_tree() callback that checks if *TP is
4904 an ARRAY_REF inside an ADDR_EXPR (in which case an array
4905 subscript one past the valid range is allowed). Call
4906 check_array_ref for each ARRAY_REF found. The location is
4907 passed in DATA. */
4908
4909 static tree
4910 check_array_bounds (tree *tp, int *walk_subtree, void *data)
4911 {
4912 tree t = *tp;
4913 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
4914 location_t location;
4915
4916 if (EXPR_HAS_LOCATION (t))
4917 location = EXPR_LOCATION (t);
4918 else
4919 location = gimple_location (wi->stmt);
4920
4921 *walk_subtree = TRUE;
4922
4923 vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
4924 if (TREE_CODE (t) == ARRAY_REF)
4925 vrp_prop->check_array_ref (location, t, false /*ignore_off_by_one*/);
4926 else if (TREE_CODE (t) == MEM_REF)
4927 vrp_prop->check_mem_ref (location, t, false /*ignore_off_by_one*/);
4928 else if (TREE_CODE (t) == ADDR_EXPR)
4929 {
4930 vrp_prop->search_for_addr_array (t, location);
4931 *walk_subtree = FALSE;
4932 }
4933
4934 return NULL_TREE;
4935 }
4936
4937 /* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
4938 to walk over all statements of all reachable BBs and call
4939 check_array_bounds on them. */
4940
4941 class check_array_bounds_dom_walker : public dom_walker
4942 {
4943 public:
4944 check_array_bounds_dom_walker (vrp_prop *prop)
4945 : dom_walker (CDI_DOMINATORS,
4946 /* Discover non-executable edges, preserving EDGE_EXECUTABLE
4947 flags, so that we can merge in information on
4948 non-executable edges from vrp_folder. */
4949 REACHABLE_BLOCKS_PRESERVING_FLAGS),
4950 m_prop (prop) {}
4951 ~check_array_bounds_dom_walker () {}
4952
4953 edge before_dom_children (basic_block) FINAL OVERRIDE;
4954
4955 private:
4956 vrp_prop *m_prop;
4957 };
4958
4959 /* Implementation of dom_walker::before_dom_children.
4960
4961 Walk over all statements of BB and call check_array_bounds on them,
4962 and determine if there's a unique successor edge. */
4963
4964 edge
4965 check_array_bounds_dom_walker::before_dom_children (basic_block bb)
4966 {
4967 gimple_stmt_iterator si;
4968 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4969 {
4970 gimple *stmt = gsi_stmt (si);
4971 struct walk_stmt_info wi;
4972 if (!gimple_has_location (stmt)
4973 || is_gimple_debug (stmt))
4974 continue;
4975
4976 memset (&wi, 0, sizeof (wi));
4977
4978 wi.info = m_prop;
4979
4980 walk_gimple_op (stmt, check_array_bounds, &wi);
4981 }
4982
4983 /* Determine if there's a unique successor edge, and if so, return
4984 that back to dom_walker, ensuring that we don't visit blocks that
4985 became unreachable during the VRP propagation
4986 (PR tree-optimization/83312). */
4987 return find_taken_edge (bb, NULL_TREE);
4988 }
4989
4990 /* Walk over all statements of all reachable BBs and call check_array_bounds
4991 on them. */
4992
4993 void
4994 vrp_prop::check_all_array_refs ()
4995 {
4996 check_array_bounds_dom_walker w (this);
4997 w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
4998 }
4999
5000 /* Return true if all imm uses of VAR are either in STMT, or
5001 feed (optionally through a chain of single imm uses) GIMPLE_COND
5002 in basic block COND_BB. */
5003
5004 static bool
5005 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
5006 {
5007 use_operand_p use_p, use2_p;
5008 imm_use_iterator iter;
5009
5010 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
5011 if (USE_STMT (use_p) != stmt)
5012 {
5013 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
5014 if (is_gimple_debug (use_stmt))
5015 continue;
5016 while (is_gimple_assign (use_stmt)
5017 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
5018 && single_imm_use (gimple_assign_lhs (use_stmt),
5019 &use2_p, &use_stmt2))
5020 use_stmt = use_stmt2;
5021 if (gimple_code (use_stmt) != GIMPLE_COND
5022 || gimple_bb (use_stmt) != cond_bb)
5023 return false;
5024 }
5025 return true;
5026 }
5027
5028 /* Handle
5029 _4 = x_3 & 31;
5030 if (_4 != 0)
5031 goto <bb 6>;
5032 else
5033 goto <bb 7>;
5034 <bb 6>:
5035 __builtin_unreachable ();
5036 <bb 7>:
5037 x_5 = ASSERT_EXPR <x_3, ...>;
5038 If x_3 has no other immediate uses (checked by the caller) and
5039 VAR is the x_3 from the ASSERT_EXPR, we can clear the low 5 bits
5040 from the non-zero bitmask of x_3. */
5041
5042 void
5043 maybe_set_nonzero_bits (edge e, tree var)
5044 {
5045 basic_block cond_bb = e->src;
5046 gimple *stmt = last_stmt (cond_bb);
5047 tree cst;
5048
5049 if (stmt == NULL
5050 || gimple_code (stmt) != GIMPLE_COND
5051 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
5052 ? EQ_EXPR : NE_EXPR)
5053 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
5054 || !integer_zerop (gimple_cond_rhs (stmt)))
5055 return;
5056
5057 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
5058 if (!is_gimple_assign (stmt)
5059 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
5060 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
5061 return;
5062 if (gimple_assign_rhs1 (stmt) != var)
5063 {
5064 gimple *stmt2;
5065
5066 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5067 return;
5068 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
5069 if (!gimple_assign_cast_p (stmt2)
5070 || gimple_assign_rhs1 (stmt2) != var
5071 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
5072 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
5073 != TYPE_PRECISION (TREE_TYPE (var))))
5074 return;
5075 }
5076 cst = gimple_assign_rhs2 (stmt);
5077 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
5078 wi::to_wide (cst)));
5079 }
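/* Continuing the example above: if the nonzero bits recorded for x_3
   were 0xff before the test, then on the edge where _4 = x_3 & 31 is
   known to be zero the mask 31 is cleared and the recorded nonzero
   bits of x_3 become 0xe0.  */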
5080
5081 /* Convert range assertion expressions into the implied copies and
5082 copy propagate away the copies. Doing the trivial copy propagation
5083 here avoids the need to run the full copy propagation pass after
5084 VRP.
5085
5086 FIXME, this will eventually lead to copy propagation removing the
5087 names that had useful range information attached to them. For
5088 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5089 then N_i will have the range [4, +INF].
5090
5091 However, by converting the assertion into the implied copy
5092 operation N_i = N_j, we will then copy-propagate N_j into the uses
5093 of N_i and lose the range information. We may want to hold on to
5094 ASSERT_EXPRs a little while longer as the ranges could be used in
5095 things like jump threading.
5096
5097 The problem with keeping ASSERT_EXPRs around is that passes after
5098 VRP need to handle them appropriately.
5099
5100 Another approach would be to make the range information a first
5101 class property of the SSA_NAME so that it can be queried from
5102 any pass. This is made somewhat more complex by the need for
5103 multiple ranges to be associated with one SSA_NAME. */
5104
5105 static void
5106 remove_range_assertions (void)
5107 {
5108 basic_block bb;
5109 gimple_stmt_iterator si;
5110 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
5111 a basic block preceded by a GIMPLE_COND whose other edge leads to
5112 __builtin_unreachable, -1 if not yet checked, 0 otherwise. */
5113 int is_unreachable;
5114
5115 /* Note that the BSI iterator bump happens at the bottom of the
5116 loop and no bump is necessary if we're removing the statement
5117 referenced by the current BSI. */
5118 FOR_EACH_BB_FN (bb, cfun)
5119 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
5120 {
5121 gimple *stmt = gsi_stmt (si);
5122
5123 if (is_gimple_assign (stmt)
5124 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
5125 {
5126 tree lhs = gimple_assign_lhs (stmt);
5127 tree rhs = gimple_assign_rhs1 (stmt);
5128 tree var;
5129
5130 var = ASSERT_EXPR_VAR (rhs);
5131
5132 if (TREE_CODE (var) == SSA_NAME
5133 && !POINTER_TYPE_P (TREE_TYPE (lhs))
5134 && SSA_NAME_RANGE_INFO (lhs))
5135 {
5136 if (is_unreachable == -1)
5137 {
5138 is_unreachable = 0;
5139 if (single_pred_p (bb)
5140 && assert_unreachable_fallthru_edge_p
5141 (single_pred_edge (bb)))
5142 is_unreachable = 1;
5143 }
5144 /* Handle
5145 if (x_7 >= 10 && x_7 < 20)
5146 __builtin_unreachable ();
5147 x_8 = ASSERT_EXPR <x_7, ...>;
5148 if the only uses of x_7 are in the ASSERT_EXPR and
5149 in the condition. In that case, we can copy the
5150 range info from x_8 computed in this pass also
5151 for x_7. */
5152 if (is_unreachable
5153 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
5154 single_pred (bb)))
5155 {
5156 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
5157 SSA_NAME_RANGE_INFO (lhs)->get_min (),
5158 SSA_NAME_RANGE_INFO (lhs)->get_max ());
5159 maybe_set_nonzero_bits (single_pred_edge (bb), var);
5160 }
5161 }
5162
5163 /* Propagate the RHS into every use of the LHS. For SSA names
5164 also propagate abnormals as it merely restores the original
5165 IL in this case (a replace_uses_by would assert). */
5166 if (TREE_CODE (var) == SSA_NAME)
5167 {
5168 imm_use_iterator iter;
5169 use_operand_p use_p;
5170 gimple *use_stmt;
5171 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
5172 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
5173 SET_USE (use_p, var);
5174 }
5175 else
5176 replace_uses_by (lhs, var);
5177
5178 /* And finally, remove the copy, it is not needed. */
5179 gsi_remove (&si, true);
5180 release_defs (stmt);
5181 }
5182 else
5183 {
5184 if (!is_gimple_debug (gsi_stmt (si)))
5185 is_unreachable = 0;
5186 gsi_next (&si);
5187 }
5188 }
5189 }
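/* For instance, a hypothetical

     x_8 = ASSERT_EXPR <x_7, x_7 > 3>;
     ... uses of x_8 ...

   is rewritten so the uses refer to x_7 directly and the copy is
   removed; in the __builtin_unreachable case handled above, the
   range [4, +INF] computed for x_8 is first copied back to x_7.  */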
5190
5191 /* Return true if STMT is interesting for VRP. */
5192
5193 bool
5194 stmt_interesting_for_vrp (gimple *stmt)
5195 {
5196 if (gimple_code (stmt) == GIMPLE_PHI)
5197 {
5198 tree res = gimple_phi_result (stmt);
5199 return (!virtual_operand_p (res)
5200 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
5201 || POINTER_TYPE_P (TREE_TYPE (res))));
5202 }
5203 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5204 {
5205 tree lhs = gimple_get_lhs (stmt);
5206
5207 /* In general, assignments with virtual operands are not useful
5208 for deriving ranges, with the obvious exception of calls to
5209 builtin functions. */
5210 if (lhs && TREE_CODE (lhs) == SSA_NAME
5211 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5212 || POINTER_TYPE_P (TREE_TYPE (lhs)))
5213 && (is_gimple_call (stmt)
5214 || !gimple_vuse (stmt)))
5215 return true;
5216 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5217 switch (gimple_call_internal_fn (stmt))
5218 {
5219 case IFN_ADD_OVERFLOW:
5220 case IFN_SUB_OVERFLOW:
5221 case IFN_MUL_OVERFLOW:
5222 case IFN_ATOMIC_COMPARE_EXCHANGE:
5223 /* These internal calls return _Complex integer type,
5224 but are interesting to VRP nevertheless. */
5225 if (lhs && TREE_CODE (lhs) == SSA_NAME)
5226 return true;
5227 break;
5228 default:
5229 break;
5230 }
5231 }
5232 else if (gimple_code (stmt) == GIMPLE_COND
5233 || gimple_code (stmt) == GIMPLE_SWITCH)
5234 return true;
5235
5236 return false;
5237 }
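/* For instance, i_2 = j_1 + 1 with integral i_2 is interesting to
   VRP; f_2 = g_1 * 2.0e0 is not (no integral or pointer result); and
   a load i_4 = *p_3 is not either, since it carries a virtual use and
   is not a call.  */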
5238
5239 /* Initialization required by ssa_propagate engine. */
5240
5241 void
5242 vrp_prop::vrp_initialize ()
5243 {
5244 basic_block bb;
5245
5246 FOR_EACH_BB_FN (bb, cfun)
5247 {
5248 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
5249 gsi_next (&si))
5250 {
5251 gphi *phi = si.phi ();
5252 if (!stmt_interesting_for_vrp (phi))
5253 {
5254 tree lhs = PHI_RESULT (phi);
5255 set_def_to_varying (lhs);
5256 prop_set_simulate_again (phi, false);
5257 }
5258 else
5259 prop_set_simulate_again (phi, true);
5260 }
5261
5262 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
5263 gsi_next (&si))
5264 {
5265 gimple *stmt = gsi_stmt (si);
5266
5267 /* If the statement is a control insn, we must simulate it at
5268 least once; failing to do so means that its outgoing edges
5269 will never get added. */
5270 if (stmt_ends_bb_p (stmt))
5271 prop_set_simulate_again (stmt, true);
5272 else if (!stmt_interesting_for_vrp (stmt))
5273 {
5274 set_defs_to_varying (stmt);
5275 prop_set_simulate_again (stmt, false);
5276 }
5277 else
5278 prop_set_simulate_again (stmt, true);
5279 }
5280 }
5281 }
5282
5283 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
5284 that includes the value VAL. The search is restricted to the range
5285 [START_IDX, n - 1] where n is the size of VEC.
5286
5287 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
5288 returned.
5289
5290 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
5291 it is placed in IDX and false is returned.
5292
5293 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
5294 returned. */
5295
5296 bool
5297 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
5298 {
5299 size_t n = gimple_switch_num_labels (stmt);
5300 size_t low, high;
5301
5302 /* Find case label for minimum of the value range or the next one.
5303 At each iteration we are searching in [low, high - 1]. */
5304
5305 for (low = start_idx, high = n; high != low; )
5306 {
5307 tree t;
5308 int cmp;
5309 /* Note that i != high, so we never ask for n. */
5310 size_t i = (high + low) / 2;
5311 t = gimple_switch_label (stmt, i);
5312
5313 /* Cache the result of comparing CASE_LOW and val. */
5314 cmp = tree_int_cst_compare (CASE_LOW (t), val);
5315
5316 if (cmp == 0)
5317 {
5318 /* Ranges cannot be empty. */
5319 *idx = i;
5320 return true;
5321 }
5322 else if (cmp > 0)
5323 high = i;
5324 else
5325 {
5326 low = i + 1;
5327 if (CASE_HIGH (t) != NULL
5328 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
5329 {
5330 *idx = i;
5331 return true;
5332 }
5333 }
5334 }
5335
5336 *idx = high;
5337 return false;
5338 }
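/* For example, for a hypothetical sorted label vector

     idx 1: case 3   idx 2: case 5 ... 7   idx 3: case 10

   searching for VAL == 6 stores 2 in *IDX and returns true, searching
   for VAL == 4 stores 2 in *IDX and returns false, and searching for
   VAL == 11 stores n == 4 in *IDX and returns false.  */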
5339
5340 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
5341 for values between MIN and MAX. The first index is placed in MIN_IDX. The
5342 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
5343 then MAX_IDX < MIN_IDX.
5344 Returns true if the default label is not needed. */
5345
5346 bool
5347 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
5348 size_t *max_idx)
5349 {
5350 size_t i, j;
5351 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
5352 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
5353
5354 if (i == j
5355 && min_take_default
5356 && max_take_default)
5357 {
5358 /* Only the default case label reached.
5359 Return an empty range. */
5360 *min_idx = 1;
5361 *max_idx = 0;
5362 return false;
5363 }
5364 else
5365 {
5366 bool take_default = min_take_default || max_take_default;
5367 tree low, high;
5368 size_t k;
5369
5370 if (max_take_default)
5371 j--;
5372
5373 /* If the case label range is continuous, we do not need
5374 the default case label. Verify that. */
5375 high = CASE_LOW (gimple_switch_label (stmt, i));
5376 if (CASE_HIGH (gimple_switch_label (stmt, i)))
5377 high = CASE_HIGH (gimple_switch_label (stmt, i));
5378 for (k = i + 1; k <= j; ++k)
5379 {
5380 low = CASE_LOW (gimple_switch_label (stmt, k));
5381 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
5382 {
5383 take_default = true;
5384 break;
5385 }
5386 high = low;
5387 if (CASE_HIGH (gimple_switch_label (stmt, k)))
5388 high = CASE_HIGH (gimple_switch_label (stmt, k));
5389 }
5390
5391 *min_idx = i;
5392 *max_idx = j;
5393 return !take_default;
5394 }
5395 }
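/* For instance, with the labels { case 1, case 2, case 3 ... 5 } at
   indices 1, 2 and 3, the query [MIN, MAX] = [2, 5] sets *MIN_IDX to
   2 and *MAX_IDX to 3; the covered labels are contiguous and both
   endpoints hit a label, so true is returned and the default label is
   not needed for that range.  */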
5396
5397 /* Evaluate statement STMT. If the statement produces a useful range,
5398 return SSA_PROP_INTERESTING and record the SSA name with the
5399 interesting range into *OUTPUT_P.
5400
5401 If STMT is a conditional branch and we can determine its truth
5402 value, the taken edge is recorded in *TAKEN_EDGE_P.
5403
5404 If STMT produces a varying value, return SSA_PROP_VARYING. */
5405
5406 enum ssa_prop_result
5407 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
5408 {
5409 tree lhs = gimple_get_lhs (stmt);
5410 value_range vr;
5411 extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
5412
5413 if (*output_p)
5414 {
5415 if (update_value_range (*output_p, &vr))
5416 {
5417 if (dump_file && (dump_flags & TDF_DETAILS))
5418 {
5419 fprintf (dump_file, "Found new range for ");
5420 print_generic_expr (dump_file, *output_p);
5421 fprintf (dump_file, ": ");
5422 dump_value_range (dump_file, &vr);
5423 fprintf (dump_file, "\n");
5424 }
5425
5426 if (vr.varying_p ())
5427 return SSA_PROP_VARYING;
5428
5429 return SSA_PROP_INTERESTING;
5430 }
5431 return SSA_PROP_NOT_INTERESTING;
5432 }
5433
5434 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5435 switch (gimple_call_internal_fn (stmt))
5436 {
5437 case IFN_ADD_OVERFLOW:
5438 case IFN_SUB_OVERFLOW:
5439 case IFN_MUL_OVERFLOW:
5440 case IFN_ATOMIC_COMPARE_EXCHANGE:
5441 /* These internal calls return _Complex integer type,
5442 which VRP does not track, but the immediate uses
5443 thereof might be interesting. */
5444 if (lhs && TREE_CODE (lhs) == SSA_NAME)
5445 {
5446 imm_use_iterator iter;
5447 use_operand_p use_p;
5448 enum ssa_prop_result res = SSA_PROP_VARYING;
5449
5450 set_def_to_varying (lhs);
5451
5452 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
5453 {
5454 gimple *use_stmt = USE_STMT (use_p);
5455 if (!is_gimple_assign (use_stmt))
5456 continue;
5457 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
5458 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
5459 continue;
5460 tree rhs1 = gimple_assign_rhs1 (use_stmt);
5461 tree use_lhs = gimple_assign_lhs (use_stmt);
5462 if (TREE_CODE (rhs1) != rhs_code
5463 || TREE_OPERAND (rhs1, 0) != lhs
5464 || TREE_CODE (use_lhs) != SSA_NAME
5465 || !stmt_interesting_for_vrp (use_stmt)
5466 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
5467 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
5468 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
5469 continue;
5470
5471 /* If there is a change in the value range for any of the
5472 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
5473 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
5474 or IMAGPART_EXPR immediate uses, but none of them have
5475 a change in their value ranges, return
5476 SSA_PROP_NOT_INTERESTING. If there are no
5477 {REAL,IMAG}PART_EXPR uses at all,
5478 return SSA_PROP_VARYING. */
5479 value_range new_vr;
5480 extract_range_basic (&new_vr, use_stmt);
5481 const value_range *old_vr = get_value_range (use_lhs);
5482 if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
5483 res = SSA_PROP_INTERESTING;
5484 else
5485 res = SSA_PROP_NOT_INTERESTING;
5486 new_vr.equiv_clear ();
5487 if (res == SSA_PROP_INTERESTING)
5488 {
5489 *output_p = lhs;
5490 return res;
5491 }
5492 }
5493
5494 return res;
5495 }
5496 break;
5497 default:
5498 break;
5499 }
5500
5501 /* All other statements produce nothing of interest for VRP, so mark
5502 their outputs varying and prevent further simulation. */
5503 set_defs_to_varying (stmt);
5504
5505 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5506 }
5507
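/* For instance (a sketch with made-up SSA names), for

     _1 = .ADD_OVERFLOW (x_2, y_3);
     sum_4 = REALPART_EXPR <_1>;
     ovf_5 = IMAGPART_EXPR <_1>;

   _1 itself has _Complex int type and is set to varying, but the
   ranges of sum_4 and of the overflow flag ovf_5 are re-evaluated,
   and SSA_PROP_INTERESTING is returned as soon as one of them
   changes.  */
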
5508 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5509 { VR1TYPE, VR1MIN, VR1MAX } and store the result
5510 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5511 possible such range. The resulting range is not canonicalized. */
5512
5513 static void
5514 union_ranges (enum value_range_kind *vr0type,
5515 tree *vr0min, tree *vr0max,
5516 enum value_range_kind vr1type,
5517 tree vr1min, tree vr1max)
5518 {
5519 int cmpmin = compare_values (*vr0min, vr1min);
5520 int cmpmax = compare_values (*vr0max, vr1max);
5521 bool mineq = cmpmin == 0;
5522 bool maxeq = cmpmax == 0;
5523
5524 /* [] is vr0, () is vr1 in the following classification comments. */
5525 if (mineq && maxeq)
5526 {
5527 /* [( )] */
5528 if (*vr0type == vr1type)
5529 /* Nothing to do for equal ranges. */
5530 ;
5531 else if ((*vr0type == VR_RANGE
5532 && vr1type == VR_ANTI_RANGE)
5533 || (*vr0type == VR_ANTI_RANGE
5534 && vr1type == VR_RANGE))
5535 {
5536 /* For anti-range with range union the result is varying. */
5537 goto give_up;
5538 }
5539 else
5540 gcc_unreachable ();
5541 }
5542 else if (operand_less_p (*vr0max, vr1min) == 1
5543 || operand_less_p (vr1max, *vr0min) == 1)
5544 {
5545 /* [ ] ( ) or ( ) [ ]
5546 If the ranges have an empty intersection, the result of the
5547 union operation is the anti-range, or, if both are anti-ranges,
5548 it covers everything. */
5549 if (*vr0type == VR_ANTI_RANGE
5550 && vr1type == VR_ANTI_RANGE)
5551 goto give_up;
5552 else if (*vr0type == VR_ANTI_RANGE
5553 && vr1type == VR_RANGE)
5554 ;
5555 else if (*vr0type == VR_RANGE
5556 && vr1type == VR_ANTI_RANGE)
5557 {
5558 *vr0type = vr1type;
5559 *vr0min = vr1min;
5560 *vr0max = vr1max;
5561 }
5562 else if (*vr0type == VR_RANGE
5563 && vr1type == VR_RANGE)
5564 {
5565 /* The result is the convex hull of both ranges. */
5566 if (operand_less_p (*vr0max, vr1min) == 1)
5567 {
5568 /* If the result can be an anti-range, create one. */
5569 if (TREE_CODE (*vr0max) == INTEGER_CST
5570 && TREE_CODE (vr1min) == INTEGER_CST
5571 && vrp_val_is_min (*vr0min)
5572 && vrp_val_is_max (vr1max))
5573 {
5574 tree min = int_const_binop (PLUS_EXPR,
5575 *vr0max,
5576 build_int_cst (TREE_TYPE (*vr0max), 1));
5577 tree max = int_const_binop (MINUS_EXPR,
5578 vr1min,
5579 build_int_cst (TREE_TYPE (vr1min), 1));
5580 if (!operand_less_p (max, min))
5581 {
5582 *vr0type = VR_ANTI_RANGE;
5583 *vr0min = min;
5584 *vr0max = max;
5585 }
5586 else
5587 *vr0max = vr1max;
5588 }
5589 else
5590 *vr0max = vr1max;
5591 }
5592 else
5593 {
5594 /* If the result can be an anti-range, create one. */
5595 if (TREE_CODE (vr1max) == INTEGER_CST
5596 && TREE_CODE (*vr0min) == INTEGER_CST
5597 && vrp_val_is_min (vr1min)
5598 && vrp_val_is_max (*vr0max))
5599 {
5600 tree min = int_const_binop (PLUS_EXPR,
5601 vr1max,
5602 build_int_cst (TREE_TYPE (vr1max), 1));
5603 tree max = int_const_binop (MINUS_EXPR,
5604 *vr0min,
5605 build_int_cst (TREE_TYPE (*vr0min), 1));
5606 if (!operand_less_p (max, min))
5607 {
5608 *vr0type = VR_ANTI_RANGE;
5609 *vr0min = min;
5610 *vr0max = max;
5611 }
5612 else
5613 *vr0min = vr1min;
5614 }
5615 else
5616 *vr0min = vr1min;
5617 }
5618 }
5619 else
5620 gcc_unreachable ();
5621 }
5622 else if ((maxeq || cmpmax == 1)
5623 && (mineq || cmpmin == -1))
5624 {
5625 /* [ ( ) ] or [( ) ] or [ ( )] */
5626 if (*vr0type == VR_RANGE
5627 && vr1type == VR_RANGE)
5628 ;
5629 else if (*vr0type == VR_ANTI_RANGE
5630 && vr1type == VR_ANTI_RANGE)
5631 {
5632 *vr0type = vr1type;
5633 *vr0min = vr1min;
5634 *vr0max = vr1max;
5635 }
5636 else if (*vr0type == VR_ANTI_RANGE
5637 && vr1type == VR_RANGE)
5638 {
5639 /* Arbitrarily choose the right or left gap. */
5640 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
5641 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5642 build_int_cst (TREE_TYPE (vr1min), 1));
5643 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
5644 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5645 build_int_cst (TREE_TYPE (vr1max), 1));
5646 else
5647 goto give_up;
5648 }
5649 else if (*vr0type == VR_RANGE
5650 && vr1type == VR_ANTI_RANGE)
5651 /* The result covers everything. */
5652 goto give_up;
5653 else
5654 gcc_unreachable ();
5655 }
5656 else if ((maxeq || cmpmax == -1)
5657 && (mineq || cmpmin == 1))
5658 {
5659 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5660 if (*vr0type == VR_RANGE
5661 && vr1type == VR_RANGE)
5662 {
5663 *vr0type = vr1type;
5664 *vr0min = vr1min;
5665 *vr0max = vr1max;
5666 }
5667 else if (*vr0type == VR_ANTI_RANGE
5668 && vr1type == VR_ANTI_RANGE)
5669 ;
5670 else if (*vr0type == VR_RANGE
5671 && vr1type == VR_ANTI_RANGE)
5672 {
5673 *vr0type = VR_ANTI_RANGE;
5674 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
5675 {
5676 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5677 build_int_cst (TREE_TYPE (*vr0min), 1));
5678 *vr0min = vr1min;
5679 }
5680 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
5681 {
5682 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5683 build_int_cst (TREE_TYPE (*vr0max), 1));
5684 *vr0max = vr1max;
5685 }
5686 else
5687 goto give_up;
5688 }
5689 else if (*vr0type == VR_ANTI_RANGE
5690 && vr1type == VR_RANGE)
5691 /* The result covers everything. */
5692 goto give_up;
5693 else
5694 gcc_unreachable ();
5695 }
5696 else if (cmpmin == -1
5697 && cmpmax == -1
5698 && (operand_less_p (vr1min, *vr0max) == 1
5699 || operand_equal_p (vr1min, *vr0max, 0)))
5700 {
5701 /* [ ( ] ) or [ ]( ) */
5702 if (*vr0type == VR_RANGE
5703 && vr1type == VR_RANGE)
5704 *vr0max = vr1max;
5705 else if (*vr0type == VR_ANTI_RANGE
5706 && vr1type == VR_ANTI_RANGE)
5707 *vr0min = vr1min;
5708 else if (*vr0type == VR_ANTI_RANGE
5709 && vr1type == VR_RANGE)
5710 {
5711 if (TREE_CODE (vr1min) == INTEGER_CST)
5712 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5713 build_int_cst (TREE_TYPE (vr1min), 1));
5714 else
5715 goto give_up;
5716 }
5717 else if (*vr0type == VR_RANGE
5718 && vr1type == VR_ANTI_RANGE)
5719 {
5720 if (TREE_CODE (*vr0max) == INTEGER_CST)
5721 {
5722 *vr0type = vr1type;
5723 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5724 build_int_cst (TREE_TYPE (*vr0max), 1));
5725 *vr0max = vr1max;
5726 }
5727 else
5728 goto give_up;
5729 }
5730 else
5731 gcc_unreachable ();
5732 }
5733 else if (cmpmin == 1
5734 && cmpmax == 1
5735 && (operand_less_p (*vr0min, vr1max) == 1
5736 || operand_equal_p (*vr0min, vr1max, 0)))
5737 {
5738 /* ( [ ) ] or ( )[ ] */
5739 if (*vr0type == VR_RANGE
5740 && vr1type == VR_RANGE)
5741 *vr0min = vr1min;
5742 else if (*vr0type == VR_ANTI_RANGE
5743 && vr1type == VR_ANTI_RANGE)
5744 *vr0max = vr1max;
5745 else if (*vr0type == VR_ANTI_RANGE
5746 && vr1type == VR_RANGE)
5747 {
5748 if (TREE_CODE (vr1max) == INTEGER_CST)
5749 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5750 build_int_cst (TREE_TYPE (vr1max), 1));
5751 else
5752 goto give_up;
5753 }
5754 else if (*vr0type == VR_RANGE
5755 && vr1type == VR_ANTI_RANGE)
5756 {
5757 if (TREE_CODE (*vr0min) == INTEGER_CST)
5758 {
5759 *vr0type = vr1type;
5760 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5761 build_int_cst (TREE_TYPE (*vr0min), 1));
5762 *vr0min = vr1min;
5763 }
5764 else
5765 goto give_up;
5766 }
5767 else
5768 gcc_unreachable ();
5769 }
5770 else
5771 goto give_up;
5772
5773 return;
5774
5775 give_up:
5776 *vr0type = VR_VARYING;
5777 *vr0min = NULL_TREE;
5778 *vr0max = NULL_TREE;
5779 }
5780
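/* For instance, with int bounds (illustrative values only),
   union_ranges combines [1, 5] and [8, 10] into the convex hull
   [1, 10], while [INT_MIN, 5] and [8, INT_MAX] become the
   anti-range ~[6, 7]: the gap can only be represented exactly when
   the outer bounds already span the whole type.  */
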
5781 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5782 { VR1TYPE, VR1MIN, VR1MAX } and store the result
5783 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5784 possible such range. The resulting range is not canonicalized. */
5785
5786 static void
5787 intersect_ranges (enum value_range_kind *vr0type,
5788 tree *vr0min, tree *vr0max,
5789 enum value_range_kind vr1type,
5790 tree vr1min, tree vr1max)
5791 {
5792 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5793 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5794
5795 /* [] is vr0, () is vr1 in the following classification comments. */
5796 if (mineq && maxeq)
5797 {
5798 /* [( )] */
5799 if (*vr0type == vr1type)
5800 /* Nothing to do for equal ranges. */
5801 ;
5802 else if ((*vr0type == VR_RANGE
5803 && vr1type == VR_ANTI_RANGE)
5804 || (*vr0type == VR_ANTI_RANGE
5805 && vr1type == VR_RANGE))
5806 {
5807 /* For anti-range with range intersection the result is empty. */
5808 *vr0type = VR_UNDEFINED;
5809 *vr0min = NULL_TREE;
5810 *vr0max = NULL_TREE;
5811 }
5812 else
5813 gcc_unreachable ();
5814 }
5815 else if (operand_less_p (*vr0max, vr1min) == 1
5816 || operand_less_p (vr1max, *vr0min) == 1)
5817 {
5818 /* [ ] ( ) or ( ) [ ]
5819 If the ranges have an empty intersection, the result of the
5820 intersect operation is the plain range when intersecting an
5821 anti-range with a range, or empty when intersecting two ranges. */
5822 if (*vr0type == VR_RANGE
5823 && vr1type == VR_ANTI_RANGE)
5824 ;
5825 else if (*vr0type == VR_ANTI_RANGE
5826 && vr1type == VR_RANGE)
5827 {
5828 *vr0type = vr1type;
5829 *vr0min = vr1min;
5830 *vr0max = vr1max;
5831 }
5832 else if (*vr0type == VR_RANGE
5833 && vr1type == VR_RANGE)
5834 {
5835 *vr0type = VR_UNDEFINED;
5836 *vr0min = NULL_TREE;
5837 *vr0max = NULL_TREE;
5838 }
5839 else if (*vr0type == VR_ANTI_RANGE
5840 && vr1type == VR_ANTI_RANGE)
5841 {
5842 /* If the anti-ranges are adjacent to each other merge them. */
5843 if (TREE_CODE (*vr0max) == INTEGER_CST
5844 && TREE_CODE (vr1min) == INTEGER_CST
5845 && operand_less_p (*vr0max, vr1min) == 1
5846 && integer_onep (int_const_binop (MINUS_EXPR,
5847 vr1min, *vr0max)))
5848 *vr0max = vr1max;
5849 else if (TREE_CODE (vr1max) == INTEGER_CST
5850 && TREE_CODE (*vr0min) == INTEGER_CST
5851 && operand_less_p (vr1max, *vr0min) == 1
5852 && integer_onep (int_const_binop (MINUS_EXPR,
5853 *vr0min, vr1max)))
5854 *vr0min = vr1min;
5855 /* Else arbitrarily take VR0. */
5856 }
5857 }
5858 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5859 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5860 {
5861 /* [ ( ) ] or [( ) ] or [ ( )] */
5862 if (*vr0type == VR_RANGE
5863 && vr1type == VR_RANGE)
5864 {
5865 /* If both are ranges the result is the inner one. */
5866 *vr0type = vr1type;
5867 *vr0min = vr1min;
5868 *vr0max = vr1max;
5869 }
5870 else if (*vr0type == VR_RANGE
5871 && vr1type == VR_ANTI_RANGE)
5872 {
5873 /* Choose the right gap if the left one is empty. */
5874 if (mineq)
5875 {
5876 if (TREE_CODE (vr1max) != INTEGER_CST)
5877 *vr0min = vr1max;
5878 else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
5879 && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
5880 *vr0min
5881 = int_const_binop (MINUS_EXPR, vr1max,
5882 build_int_cst (TREE_TYPE (vr1max), -1));
5883 else
5884 *vr0min
5885 = int_const_binop (PLUS_EXPR, vr1max,
5886 build_int_cst (TREE_TYPE (vr1max), 1));
5887 }
5888 /* Choose the left gap if the right one is empty. */
5889 else if (maxeq)
5890 {
5891 if (TREE_CODE (vr1min) != INTEGER_CST)
5892 *vr0max = vr1min;
5893 else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
5894 && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
5895 *vr0max
5896 = int_const_binop (PLUS_EXPR, vr1min,
5897 build_int_cst (TREE_TYPE (vr1min), -1));
5898 else
5899 *vr0max
5900 = int_const_binop (MINUS_EXPR, vr1min,
5901 build_int_cst (TREE_TYPE (vr1min), 1));
5902 }
5903 /* Choose the anti-range if the range is effectively varying. */
5904 else if (vrp_val_is_min (*vr0min)
5905 && vrp_val_is_max (*vr0max))
5906 {
5907 *vr0type = vr1type;
5908 *vr0min = vr1min;
5909 *vr0max = vr1max;
5910 }
5911 /* Else choose the range. */
5912 }
5913 else if (*vr0type == VR_ANTI_RANGE
5914 && vr1type == VR_ANTI_RANGE)
5915 /* If both are anti-ranges the result is the outer one. */
5916 ;
5917 else if (*vr0type == VR_ANTI_RANGE
5918 && vr1type == VR_RANGE)
5919 {
5920 /* The intersection is empty. */
5921 *vr0type = VR_UNDEFINED;
5922 *vr0min = NULL_TREE;
5923 *vr0max = NULL_TREE;
5924 }
5925 else
5926 gcc_unreachable ();
5927 }
5928 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5929 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5930 {
5931 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5932 if (*vr0type == VR_RANGE
5933 && vr1type == VR_RANGE)
5934 /* Choose the inner range. */
5935 ;
5936 else if (*vr0type == VR_ANTI_RANGE
5937 && vr1type == VR_RANGE)
5938 {
5939 /* Choose the right gap if the left is empty. */
5940 if (mineq)
5941 {
5942 *vr0type = VR_RANGE;
5943 if (TREE_CODE (*vr0max) != INTEGER_CST)
5944 *vr0min = *vr0max;
5945 else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
5946 && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
5947 *vr0min
5948 = int_const_binop (MINUS_EXPR, *vr0max,
5949 build_int_cst (TREE_TYPE (*vr0max), -1));
5950 else
5951 *vr0min
5952 = int_const_binop (PLUS_EXPR, *vr0max,
5953 build_int_cst (TREE_TYPE (*vr0max), 1));
5954 *vr0max = vr1max;
5955 }
5956 /* Choose the left gap if the right is empty. */
5957 else if (maxeq)
5958 {
5959 *vr0type = VR_RANGE;
5960 if (TREE_CODE (*vr0min) != INTEGER_CST)
5961 *vr0max = *vr0min;
5962 else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
5963 && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
5964 *vr0max
5965 = int_const_binop (PLUS_EXPR, *vr0min,
5966 build_int_cst (TREE_TYPE (*vr0min), -1));
5967 else
5968 *vr0max
5969 = int_const_binop (MINUS_EXPR, *vr0min,
5970 build_int_cst (TREE_TYPE (*vr0min), 1));
5971 *vr0min = vr1min;
5972 }
5973 /* Choose the anti-range if the range is effectively varying. */
5974 else if (vrp_val_is_min (vr1min)
5975 && vrp_val_is_max (vr1max))
5976 ;
5977 /* Choose the anti-range if it is ~[0,0]; that range is special
5978 enough to warrant a special case when vr1's range is relatively
5979 wide, at least for types as wide as int - this covers pointers
5980 and arguments to functions like ctz. */
5981 else if (*vr0min == *vr0max
5982 && integer_zerop (*vr0min)
5983 && ((TYPE_PRECISION (TREE_TYPE (*vr0min))
5984 >= TYPE_PRECISION (integer_type_node))
5985 || POINTER_TYPE_P (TREE_TYPE (*vr0min)))
5986 && TREE_CODE (vr1max) == INTEGER_CST
5987 && TREE_CODE (vr1min) == INTEGER_CST
5988 && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
5989 < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
5990 ;
5991 /* Else choose the range. */
5992 else
5993 {
5994 *vr0type = vr1type;
5995 *vr0min = vr1min;
5996 *vr0max = vr1max;
5997 }
5998 }
5999 else if (*vr0type == VR_ANTI_RANGE
6000 && vr1type == VR_ANTI_RANGE)
6001 {
6002 /* If both are anti-ranges the result is the outer one. */
6003 *vr0type = vr1type;
6004 *vr0min = vr1min;
6005 *vr0max = vr1max;
6006 }
6007 else if (vr1type == VR_ANTI_RANGE
6008 && *vr0type == VR_RANGE)
6009 {
6010 /* The intersection is empty. */
6011 *vr0type = VR_UNDEFINED;
6012 *vr0min = NULL_TREE;
6013 *vr0max = NULL_TREE;
6014 }
6015 else
6016 gcc_unreachable ();
6017 }
6018 else if ((operand_less_p (vr1min, *vr0max) == 1
6019 || operand_equal_p (vr1min, *vr0max, 0))
6020 && operand_less_p (*vr0min, vr1min) == 1)
6021 {
6022 /* [ ( ] ) or [ ]( ) */
6023 if (*vr0type == VR_ANTI_RANGE
6024 && vr1type == VR_ANTI_RANGE)
6025 *vr0max = vr1max;
6026 else if (*vr0type == VR_RANGE
6027 && vr1type == VR_RANGE)
6028 *vr0min = vr1min;
6029 else if (*vr0type == VR_RANGE
6030 && vr1type == VR_ANTI_RANGE)
6031 {
6032 if (TREE_CODE (vr1min) == INTEGER_CST)
6033 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
6034 build_int_cst (TREE_TYPE (vr1min), 1));
6035 else
6036 *vr0max = vr1min;
6037 }
6038 else if (*vr0type == VR_ANTI_RANGE
6039 && vr1type == VR_RANGE)
6040 {
6041 *vr0type = VR_RANGE;
6042 if (TREE_CODE (*vr0max) == INTEGER_CST)
6043 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
6044 build_int_cst (TREE_TYPE (*vr0max), 1));
6045 else
6046 *vr0min = *vr0max;
6047 *vr0max = vr1max;
6048 }
6049 else
6050 gcc_unreachable ();
6051 }
6052 else if ((operand_less_p (*vr0min, vr1max) == 1
6053 || operand_equal_p (*vr0min, vr1max, 0))
6054 && operand_less_p (vr1min, *vr0min) == 1)
6055 {
6056 /* ( [ ) ] or ( )[ ] */
6057 if (*vr0type == VR_ANTI_RANGE
6058 && vr1type == VR_ANTI_RANGE)
6059 *vr0min = vr1min;
6060 else if (*vr0type == VR_RANGE
6061 && vr1type == VR_RANGE)
6062 *vr0max = vr1max;
6063 else if (*vr0type == VR_RANGE
6064 && vr1type == VR_ANTI_RANGE)
6065 {
6066 if (TREE_CODE (vr1max) == INTEGER_CST)
6067 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
6068 build_int_cst (TREE_TYPE (vr1max), 1));
6069 else
6070 *vr0min = vr1max;
6071 }
6072 else if (*vr0type == VR_ANTI_RANGE
6073 && vr1type == VR_RANGE)
6074 {
6075 *vr0type = VR_RANGE;
6076 if (TREE_CODE (*vr0min) == INTEGER_CST)
6077 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
6078 build_int_cst (TREE_TYPE (*vr0min), 1));
6079 else
6080 *vr0max = *vr0min;
6081 *vr0min = vr1min;
6082 }
6083 else
6084 gcc_unreachable ();
6085 }
6086
6087 /* If we know the intersection is empty, there's no need to
6088 conservatively add anything else to the set. */
6089 if (*vr0type == VR_UNDEFINED)
6090 return;
6091
6092 /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
6093 the result for the intersection. That's always a conservatively
6094 correct estimate unless VR1 is a constant singleton range,
6095 in which case we choose that. */
6096 if (vr1type == VR_RANGE
6097 && is_gimple_min_invariant (vr1min)
6098 && vrp_operand_equal_p (vr1min, vr1max))
6099 {
6100 *vr0type = vr1type;
6101 *vr0min = vr1min;
6102 *vr0max = vr1max;
6103 }
6104 }
6105
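/* For instance (illustrative values), intersecting the range
   [0, 100] with the anti-range ~[0, 40] hits the "[( ) ]" case
   above and narrows the range to [41, 100], while intersecting
   [0, 100] with ~[30, 40] keeps [0, 100]: the exact result
   [0, 29] union [41, 100] is not representable, so the range
   operand is kept conservatively.  */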
6106
6107 /* Helper for the intersection operation for value ranges. Given two
6108 value ranges VR0 and VR1, return the intersection of the two
6109 ranges. This may not be the smallest possible such range. */
6110
6111 value_range_base
6112 value_range_base::intersect_helper (const value_range_base *vr0,
6113 const value_range_base *vr1)
6114 {
6115 /* If either range is VR_VARYING the other one wins. */
6116 if (vr1->varying_p ())
6117 return *vr0;
6118 if (vr0->varying_p ())
6119 return *vr1;
6120
6121 /* When either range is VR_UNDEFINED the resulting range is
6122 VR_UNDEFINED, too. */
6123 if (vr0->undefined_p ())
6124 return *vr0;
6125 if (vr1->undefined_p ())
6126 return *vr1;
6127
6128 value_range_kind vr0type = vr0->kind ();
6129 tree vr0min = vr0->min ();
6130 tree vr0max = vr0->max ();
6131 intersect_ranges (&vr0type, &vr0min, &vr0max,
6132 vr1->kind (), vr1->min (), vr1->max ());
6133 /* Make sure to canonicalize the result though as the inversion of a
6134 VR_RANGE can still be a VR_RANGE. Work on a temporary so we can
6135 fall back to vr0 when this turns things to varying. */
6136 value_range_base tem;
6137 if (vr0type == VR_UNDEFINED)
6138 tem.set_undefined ();
6139 else if (vr0type == VR_VARYING)
6140 tem.set_varying (vr0->type ());
6141 else
6142 tem.set (vr0type, vr0min, vr0max);
6143 /* If that failed, use the saved original VR0. */
6144 if (tem.varying_p ())
6145 return *vr0;
6146
6147 return tem;
6148 }
6149
6150 void
6151 value_range_base::intersect (const value_range_base *other)
6152 {
6153 if (dump_file && (dump_flags & TDF_DETAILS))
6154 {
6155 fprintf (dump_file, "Intersecting\n ");
6156 dump_value_range (dump_file, this);
6157 fprintf (dump_file, "\nand\n ");
6158 dump_value_range (dump_file, other);
6159 fprintf (dump_file, "\n");
6160 }
6161
6162 *this = intersect_helper (this, other);
6163
6164 if (dump_file && (dump_flags & TDF_DETAILS))
6165 {
6166 fprintf (dump_file, "to\n ");
6167 dump_value_range (dump_file, this);
6168 fprintf (dump_file, "\n");
6169 }
6170 }
6171
6172 void
6173 value_range::intersect (const value_range *other)
6174 {
6175 if (dump_file && (dump_flags & TDF_DETAILS))
6176 {
6177 fprintf (dump_file, "Intersecting\n ");
6178 dump_value_range (dump_file, this);
6179 fprintf (dump_file, "\nand\n ");
6180 dump_value_range (dump_file, other);
6181 fprintf (dump_file, "\n");
6182 }
6183
6184 /* If THIS is varying we want to pick up equivalences from OTHER.
6185 Just special-case this here rather than trying to fixup after the
6186 fact. */
6187 if (this->varying_p ())
6188 this->deep_copy (other);
6189 else
6190 {
6191 value_range_base tem = intersect_helper (this, other);
6192 this->update (tem.kind (), tem.min (), tem.max ());
6193
6194 /* If the result is VR_UNDEFINED there is no need to mess with
6195 equivalencies. */
6196 if (!undefined_p ())
6197 {
6198 /* The resulting set of equivalences for range intersection
6199 is the union of the two sets. */
6200 if (m_equiv && other->m_equiv && m_equiv != other->m_equiv)
6201 bitmap_ior_into (m_equiv, other->m_equiv);
6202 else if (other->m_equiv && !m_equiv)
6203 {
6204 /* All equivalence bitmaps are allocated from the same
6205 obstack. So we can use the obstack associated with
6206 VR to allocate this->m_equiv. */
6207 m_equiv = BITMAP_ALLOC (other->m_equiv->obstack);
6208 bitmap_copy (m_equiv, other->m_equiv);
6209 }
6210 }
6211 }
6212
6213 if (dump_file && (dump_flags & TDF_DETAILS))
6214 {
6215 fprintf (dump_file, "to\n ");
6216 dump_value_range (dump_file, this);
6217 fprintf (dump_file, "\n");
6218 }
6219 }
6220
6221 /* Helper for meet operation for value ranges. Given two value ranges VR0 and
6222 VR1, return a range that contains both VR0 and VR1. This may not be the
6223 smallest possible such range. */
6224
6225 value_range_base
6226 value_range_base::union_helper (const value_range_base *vr0,
6227 const value_range_base *vr1)
6228 {
6229 /* VR0 has the resulting range if VR1 is undefined or VR0 is varying. */
6230 if (vr1->undefined_p ()
6231 || vr0->varying_p ())
6232 return *vr0;
6233
6234 /* VR1 has the resulting range if VR0 is undefined or VR1 is varying. */
6235 if (vr0->undefined_p ()
6236 || vr1->varying_p ())
6237 return *vr1;
6238
6239 value_range_kind vr0type = vr0->kind ();
6240 tree vr0min = vr0->min ();
6241 tree vr0max = vr0->max ();
6242 union_ranges (&vr0type, &vr0min, &vr0max,
6243 vr1->kind (), vr1->min (), vr1->max ());
6244
6245 /* Work on a temporary so we can still use vr0 when union returns varying. */
6246 value_range_base tem;
6247 if (vr0type == VR_UNDEFINED)
6248 tem.set_undefined ();
6249 else if (vr0type == VR_VARYING)
6250 tem.set_varying (vr0->type ());
6251 else
6252 tem.set (vr0type, vr0min, vr0max);
6253
6254 /* Failed to find an efficient meet. Before giving up and setting
6255 the result to VARYING, see if we can at least derive a useful
6256 anti-range. */
6257 if (tem.varying_p ()
6258 && range_includes_zero_p (vr0) == 0
6259 && range_includes_zero_p (vr1) == 0)
6260 {
6261 tem.set_nonzero (vr0->type ());
6262 return tem;
6263 }
6264
6265 return tem;
6266 }
6267
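/* For instance (illustrative values), ~[0, 10] union ~[5, 20]
   yields ~[5, 10]: for two anti-ranges the union excludes exactly
   the intersection of the two excluded intervals.  And when
   union_ranges has to give up on two operands that both exclude
   zero, the result is degraded to the nonzero range ~[0, 0]
   instead of all the way to VARYING.  */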
6268
6269 /* Meet operation for value ranges. Given two value ranges VR0 and
6270 VR1, store in VR0 a range that contains both VR0 and VR1. This
6271 may not be the smallest possible such range. */
6272
6273 void
6274 value_range_base::union_ (const value_range_base *other)
6275 {
6276 if (dump_file && (dump_flags & TDF_DETAILS))
6277 {
6278 fprintf (dump_file, "Meeting\n ");
6279 dump_value_range (dump_file, this);
6280 fprintf (dump_file, "\nand\n ");
6281 dump_value_range (dump_file, other);
6282 fprintf (dump_file, "\n");
6283 }
6284
6285 *this = union_helper (this, other);
6286
6287 if (dump_file && (dump_flags & TDF_DETAILS))
6288 {
6289 fprintf (dump_file, "to\n ");
6290 dump_value_range (dump_file, this);
6291 fprintf (dump_file, "\n");
6292 }
6293 }
6294
6295 void
6296 value_range::union_ (const value_range *other)
6297 {
6298 if (dump_file && (dump_flags & TDF_DETAILS))
6299 {
6300 fprintf (dump_file, "Meeting\n ");
6301 dump_value_range (dump_file, this);
6302 fprintf (dump_file, "\nand\n ");
6303 dump_value_range (dump_file, other);
6304 fprintf (dump_file, "\n");
6305 }
6306
6307 /* If THIS is undefined we want to pick up equivalences from OTHER.
6308 Just special-case this here rather than trying to fixup after the fact. */
6309 if (this->undefined_p ())
6310 this->deep_copy (other);
6311 else
6312 {
6313 value_range_base tem = union_helper (this, other);
6314 this->update (tem.kind (), tem.min (), tem.max ());
6315
6316 /* The resulting set of equivalences is always the intersection of
6317 the two sets. */
6318 if (this->m_equiv && other->m_equiv && this->m_equiv != other->m_equiv)
6319 bitmap_and_into (this->m_equiv, other->m_equiv);
6320 else if (this->m_equiv && !other->m_equiv)
6321 bitmap_clear (this->m_equiv);
6322 }
6323
6324 if (dump_file && (dump_flags & TDF_DETAILS))
6325 {
6326 fprintf (dump_file, "to\n ");
6327 dump_value_range (dump_file, this);
6328 fprintf (dump_file, "\n");
6329 }
6330 }
6331
6332 /* Normalize a range with symbolic bounds into one with constant bounds. */
6333
6334 value_range_base
6335 value_range_base::normalize_symbolics () const
6336 {
6337 if (varying_p () || undefined_p ())
6338 return *this;
6339 tree ttype = type ();
6340 bool min_symbolic = !is_gimple_min_invariant (min ());
6341 bool max_symbolic = !is_gimple_min_invariant (max ());
6342 if (!min_symbolic && !max_symbolic)
6343 return *this;
6344
6345 // [SYM, SYM] -> VARYING
6346 if (min_symbolic && max_symbolic)
6347 {
6348 value_range_base var;
6349 var.set_varying (ttype);
6350 return var;
6351 }
6352 if (kind () == VR_RANGE)
6353 {
6354 // [SYM, NUM] -> [-MIN, NUM]
6355 if (min_symbolic)
6356 return value_range_base (VR_RANGE, vrp_val_min (ttype), max ());
6357 // [NUM, SYM] -> [NUM, +MAX]
6358 return value_range_base (VR_RANGE, min (), vrp_val_max (ttype));
6359 }
6360 gcc_assert (kind () == VR_ANTI_RANGE);
6361 // ~[SYM, NUM] -> [NUM + 1, +MAX]
6362 if (min_symbolic)
6363 {
6364 if (!vrp_val_is_max (max ()))
6365 {
6366 tree n = wide_int_to_tree (ttype, wi::to_wide (max ()) + 1);
6367 return value_range_base (VR_RANGE, n, vrp_val_max (ttype));
6368 }
6369 value_range_base var;
6370 var.set_varying (ttype);
6371 return var;
6372 }
6373 // ~[NUM, SYM] -> [-MIN, NUM - 1]
6374 if (!vrp_val_is_min (min ()))
6375 {
6376 tree n = wide_int_to_tree (ttype, wi::to_wide (min ()) - 1);
6377 return value_range_base (VR_RANGE, vrp_val_min (ttype), n);
6378 }
6379 value_range_base var;
6380 var.set_varying (ttype);
6381 return var;
6382 }
6383
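/* For instance, for int and a symbolic bound x_1 (made-up name):
   [x_1, 10] normalizes to [INT_MIN, 10], ~[x_1, 10] to
   [11, INT_MAX], and [x_1, y_2] to VARYING.  */
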
6384 /* Visit all arguments for PHI node PHI that flow through executable
6385 edges. If a valid value range can be derived from all the incoming
6386 value ranges, set a new range for the LHS of PHI. */
6387
6388 enum ssa_prop_result
6389 vrp_prop::visit_phi (gphi *phi)
6390 {
6391 tree lhs = PHI_RESULT (phi);
6392 value_range vr_result;
6393 extract_range_from_phi_node (phi, &vr_result);
6394 if (update_value_range (lhs, &vr_result))
6395 {
6396 if (dump_file && (dump_flags & TDF_DETAILS))
6397 {
6398 fprintf (dump_file, "Found new range for ");
6399 print_generic_expr (dump_file, lhs);
6400 fprintf (dump_file, ": ");
6401 dump_value_range (dump_file, &vr_result);
6402 fprintf (dump_file, "\n");
6403 }
6404
6405 if (vr_result.varying_p ())
6406 return SSA_PROP_VARYING;
6407
6408 return SSA_PROP_INTERESTING;
6409 }
6410
6411 /* Nothing changed, don't add outgoing edges. */
6412 return SSA_PROP_NOT_INTERESTING;
6413 }
6414
6415 class vrp_folder : public substitute_and_fold_engine
6416 {
6417 public:
6418 tree get_value (tree) FINAL OVERRIDE;
6419 bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
6420 bool fold_predicate_in (gimple_stmt_iterator *);
6421
6422 class vr_values *vr_values;
6423
6424 /* Delegators. */
6425 tree vrp_evaluate_conditional (tree_code code, tree op0,
6426 tree op1, gimple *stmt)
6427 { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
6428 bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
6429 { return vr_values->simplify_stmt_using_ranges (gsi); }
6430 tree op_with_constant_singleton_value_range (tree op)
6431 { return vr_values->op_with_constant_singleton_value_range (op); }
6432 };
6433
6434 /* If the statement pointed to by SI has a predicate whose value can be
6435 computed using the value range information computed by VRP, compute
6436 its value and return true. Otherwise, return false. */
6437
6438 bool
6439 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
6440 {
6441 bool assignment_p = false;
6442 tree val;
6443 gimple *stmt = gsi_stmt (*si);
6444
6445 if (is_gimple_assign (stmt)
6446 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
6447 {
6448 assignment_p = true;
6449 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
6450 gimple_assign_rhs1 (stmt),
6451 gimple_assign_rhs2 (stmt),
6452 stmt);
6453 }
6454 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6455 val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6456 gimple_cond_lhs (cond_stmt),
6457 gimple_cond_rhs (cond_stmt),
6458 stmt);
6459 else
6460 return false;
6461
6462 if (val)
6463 {
6464 if (assignment_p)
6465 val = fold_convert (gimple_expr_type (stmt), val);
6466
6467 if (dump_file)
6468 {
6469 fprintf (dump_file, "Folding predicate ");
6470 print_gimple_expr (dump_file, stmt, 0);
6471 fprintf (dump_file, " to ");
6472 print_generic_expr (dump_file, val);
6473 fprintf (dump_file, "\n");
6474 }
6475
6476 if (is_gimple_assign (stmt))
6477 gimple_assign_set_rhs_from_tree (si, val);
6478 else
6479 {
6480 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
6481 gcond *cond_stmt = as_a <gcond *> (stmt);
6482 if (integer_zerop (val))
6483 gimple_cond_make_false (cond_stmt);
6484 else if (integer_onep (val))
6485 gimple_cond_make_true (cond_stmt);
6486 else
6487 gcc_unreachable ();
6488 }
6489
6490 return true;
6491 }
6492
6493 return false;
6494 }
6495
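/* For instance (made-up names), if x_5 is known to lie in [1, 10],
   the predicate of "if (x_5 > 20)" folds to false and the
   GIMPLE_COND is rewritten with gimple_cond_make_false, while for
   an assignment "b_6 = x_5 > 20;" the RHS is replaced by the folded
   value converted to the type of b_6.  */
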
6496 /* Callback for substitute_and_fold folding the stmt at *SI. */
6497
6498 bool
6499 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
6500 {
6501 if (fold_predicate_in (si))
6502 return true;
6503
6504 return simplify_stmt_using_ranges (si);
6505 }
6506
6507 /* If OP has a value range with a single constant value, return that;
6508 otherwise return NULL_TREE. This returns OP itself if OP is a
6509 constant.
6510
6511 Implemented as a pure wrapper right now, but this will change. */
6512
6513 tree
6514 vrp_folder::get_value (tree op)
6515 {
6516 return op_with_constant_singleton_value_range (op);
6517 }
6518
6519 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
6520 argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
6521 BB. If no such ASSERT_EXPR is found, return OP. */
6522
6523 static tree
6524 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
6525 {
6526 imm_use_iterator imm_iter;
6527 gimple *use_stmt;
6528 use_operand_p use_p;
6529
6530 if (TREE_CODE (op) == SSA_NAME)
6531 {
6532 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
6533 {
6534 use_stmt = USE_STMT (use_p);
6535 if (use_stmt != stmt
6536 && gimple_assign_single_p (use_stmt)
6537 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
6538 && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
6539 && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
6540 return gimple_assign_lhs (use_stmt);
6541 }
6542 }
6543 return op;
6544 }
6545
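/* For instance (made-up names), given

     x_5 = ASSERT_EXPR <x_3, x_3 > 0>;

   in a block dominating BB, lhs_of_dominating_assert (x_3, bb, stmt)
   returns x_5, whose range reflects the asserted condition.  */
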
6546 /* A hack. */
6547 static class vr_values *x_vr_values;
6548
6549 /* A trivial wrapper so that we can present the generic jump threading
6550 code with a simple API for simplifying statements. STMT is the
6551 statement we want to simplify, WITHIN_STMT provides the location
6552 for any overflow warnings. */
6553
6554 static tree
6555 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
6556 class avail_exprs_stack *avail_exprs_stack,
6557 basic_block bb)
6558 {
6559 /* First see if the conditional is in the hash table. */
6560 tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
6561 if (cached_lhs && is_gimple_min_invariant (cached_lhs))
6562 return cached_lhs;
6563
6564 vr_values *vr_values = x_vr_values;
6565 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6566 {
6567 tree op0 = gimple_cond_lhs (cond_stmt);
6568 op0 = lhs_of_dominating_assert (op0, bb, stmt);
6569
6570 tree op1 = gimple_cond_rhs (cond_stmt);
6571 op1 = lhs_of_dominating_assert (op1, bb, stmt);
6572
6573 return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6574 op0, op1, within_stmt);
6575 }
6576
6577 /* We simplify a switch statement by trying to determine which case label
6578 will be taken. If we are successful then we return the corresponding
6579 CASE_LABEL_EXPR. */
6580 if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
6581 {
6582 tree op = gimple_switch_index (switch_stmt);
6583 if (TREE_CODE (op) != SSA_NAME)
6584 return NULL_TREE;
6585
6586 op = lhs_of_dominating_assert (op, bb, stmt);
6587
6588 const value_range *vr = vr_values->get_value_range (op);
6589 if (vr->undefined_p ()
6590 || vr->varying_p ()
6591 || vr->symbolic_p ())
6592 return NULL_TREE;
6593
6594 if (vr->kind () == VR_RANGE)
6595 {
6596 size_t i, j;
6597 /* Get the range of labels that contain a part of the operand's
6598 value range. */
6599 find_case_label_range (switch_stmt, vr->min (), vr->max (), &i, &j);
6600
6601 /* Is there only one such label? */
6602 if (i == j)
6603 {
6604 tree label = gimple_switch_label (switch_stmt, i);
6605
6606 /* The i'th label will be taken only if the value range of the
6607 operand is entirely within the bounds of this label. */
6608 if (CASE_HIGH (label) != NULL_TREE
6609 ? (tree_int_cst_compare (CASE_LOW (label), vr->min ()) <= 0
6610 && tree_int_cst_compare (CASE_HIGH (label),
6611 vr->max ()) >= 0)
6612 : (tree_int_cst_equal (CASE_LOW (label), vr->min ())
6613 && tree_int_cst_equal (vr->min (), vr->max ())))
6614 return label;
6615 }
6616
6617 /* If there are no such labels then the default label will be
6618 taken. */
6619 if (i > j)
6620 return gimple_switch_label (switch_stmt, 0);
6621 }
6622
6623 if (vr->kind () == VR_ANTI_RANGE)
6624 {
6625 unsigned n = gimple_switch_num_labels (switch_stmt);
6626 tree min_label = gimple_switch_label (switch_stmt, 1);
6627 tree max_label = gimple_switch_label (switch_stmt, n - 1);
6628
6629 /* The default label will be taken only if the anti-range of the
6630 operand is entirely outside the bounds of all the (non-default)
6631 case labels. */
6632 if (tree_int_cst_compare (vr->min (), CASE_LOW (min_label)) <= 0
6633 && (CASE_HIGH (max_label) != NULL_TREE
6634 ? tree_int_cst_compare (vr->max (),
6635 CASE_HIGH (max_label)) >= 0
6636 : tree_int_cst_compare (vr->max (),
6637 CASE_LOW (max_label)) >= 0))
6638 return gimple_switch_label (switch_stmt, 0);
6639 }
6640
6641 return NULL_TREE;
6642 }
6643
6644 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
6645 {
6646 tree lhs = gimple_assign_lhs (assign_stmt);
6647 if (TREE_CODE (lhs) == SSA_NAME
6648 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6649 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6650 && stmt_interesting_for_vrp (stmt))
6651 {
6652 edge dummy_e;
6653 tree dummy_tree;
6654 value_range new_vr;
6655 vr_values->extract_range_from_stmt (stmt, &dummy_e,
6656 &dummy_tree, &new_vr);
6657 tree singleton;
6658 if (new_vr.singleton_p (&singleton))
6659 return singleton;
6660 }
6661 }
6662
6663 return NULL_TREE;
6664 }
6665
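/* For instance (illustrative), for "switch (i_2)" with case labels
   3 and 5 ... 7: if i_2 has range [5, 6], the only candidate label
   is 5 ... 7 and, since [5, 6] lies entirely within its bounds, its
   CASE_LABEL_EXPR is returned; with range [8, 9] no label matches
   and the default label is returned instead.  */
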
6666 class vrp_dom_walker : public dom_walker
6667 {
6668 public:
6669 vrp_dom_walker (cdi_direction direction,
6670 class const_and_copies *const_and_copies,
6671 class avail_exprs_stack *avail_exprs_stack)
6672 : dom_walker (direction, REACHABLE_BLOCKS),
6673 m_const_and_copies (const_and_copies),
6674 m_avail_exprs_stack (avail_exprs_stack),
6675 m_dummy_cond (NULL) {}
6676
6677 virtual edge before_dom_children (basic_block);
6678 virtual void after_dom_children (basic_block);
6679
6680 class vr_values *vr_values;
6681
6682 private:
6683 class const_and_copies *m_const_and_copies;
6684 class avail_exprs_stack *m_avail_exprs_stack;
6685
6686 gcond *m_dummy_cond;
6687
6688 };
6689
6690 /* Called before processing dominator children of BB. We want to look
6691 at ASSERT_EXPRs and record information from them in the appropriate
6692 tables.
6693
6694 We could look at other statements here, but doing so is unlikely
6695 to significantly increase the number of jump threads we discover. */
6696
6697 edge
6698 vrp_dom_walker::before_dom_children (basic_block bb)
6699 {
6700 gimple_stmt_iterator gsi;
6701
6702 m_avail_exprs_stack->push_marker ();
6703 m_const_and_copies->push_marker ();
6704 for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
6705 {
6706 gimple *stmt = gsi_stmt (gsi);
6707 if (gimple_assign_single_p (stmt)
6708 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
6709 {
6710 tree rhs1 = gimple_assign_rhs1 (stmt);
6711 tree cond = TREE_OPERAND (rhs1, 1);
6712 tree inverted = invert_truthvalue (cond);
6713 vec<cond_equivalence> p;
6714 p.create (3);
6715 record_conditions (&p, cond, inverted);
6716 for (unsigned int i = 0; i < p.length (); i++)
6717 m_avail_exprs_stack->record_cond (&p[i]);
6718
6719 tree lhs = gimple_assign_lhs (stmt);
6720 m_const_and_copies->record_const_or_copy (lhs,
6721 TREE_OPERAND (rhs1, 0));
6722 p.release ();
6723 continue;
6724 }
6725 break;
6726 }
6727 return NULL;
6728 }
6729
6730 /* Called after processing dominator children of BB. This is where we
6731 actually call into the threader. */
6732 void
6733 vrp_dom_walker::after_dom_children (basic_block bb)
6734 {
6735 if (!m_dummy_cond)
6736 m_dummy_cond = gimple_build_cond (NE_EXPR,
6737 integer_zero_node, integer_zero_node,
6738 NULL, NULL);
6739
6740 x_vr_values = vr_values;
6741 thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
6742 m_avail_exprs_stack, NULL,
6743 simplify_stmt_for_jump_threading);
6744 x_vr_values = NULL;
6745
6746 m_avail_exprs_stack->pop_to_marker ();
6747 m_const_and_copies->pop_to_marker ();
6748 }
6749
6750 /* Blocks which have more than one predecessor and more than
6751 one successor present jump threading opportunities, i.e.,
6752 when the block is reached from a specific predecessor, we
6753 may be able to determine which of the outgoing edges will
6754 be traversed. When this optimization applies, we are able
6755 to avoid conditionals at runtime and we may expose secondary
6756 optimization opportunities.
6757
6758 This routine is effectively a driver for the generic jump
6759 threading code. It basically just presents the generic code
6760 with edges that may be suitable for jump threading.
6761
6762 Unlike DOM, we do not iterate VRP if jump threading was successful.
6763 While iterating may expose new opportunities for VRP, it is expected
6764 those opportunities would be very limited and the compile-time
6765 cost of exposing them would be significant.
6766
6767 As jump threading opportunities are discovered, they are registered
6768 for later realization. */
6769
6770 static void
6771 identify_jump_threads (class vr_values *vr_values)
6772 {
6773 /* Ugh. When substituting values earlier in this pass we can
6774 wipe the dominance information. So rebuild the dominator
6775 information as we need it within the jump threading code. */
6776 calculate_dominance_info (CDI_DOMINATORS);
6777
6778 /* We do not allow VRP information to be used for jump threading
6779 across a back edge in the CFG. Otherwise it becomes too
6780 difficult to avoid eliminating loop exit tests. Of course
6781 EDGE_DFS_BACK is not accurate at this time so we have to
6782 recompute it. */
6783 mark_dfs_back_edges ();
6784
6785 /* Allocate our unwinder stack to unwind any temporary equivalences
6786 that might be recorded. */
6787 const_and_copies *equiv_stack = new const_and_copies ();
6788
6789 hash_table<expr_elt_hasher> *avail_exprs
6790 = new hash_table<expr_elt_hasher> (1024);
6791 avail_exprs_stack *avail_exprs_stack
6792 = new class avail_exprs_stack (avail_exprs);
6793
6794 vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
6795 walker.vr_values = vr_values;
6796 walker.walk (cfun->cfg->x_entry_block_ptr);
6797
6798 /* We do not actually update the CFG or SSA graphs at this point as
6799 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
6800 handle ASSERT_EXPRs gracefully. */
6801 delete equiv_stack;
6802 delete avail_exprs;
6803 delete avail_exprs_stack;
6804 }
6805
6806 /* Traverse all the blocks folding conditionals with known ranges. */
6807
6808 void
6809 vrp_prop::vrp_finalize (bool warn_array_bounds_p)
6810 {
6811 size_t i;
6812
6813 /* We have completed propagating through the lattice. */
6814 vr_values.set_lattice_propagation_complete ();
6815
6816 if (dump_file)
6817 {
6818 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
6819 vr_values.dump_all_value_ranges (dump_file);
6820 fprintf (dump_file, "\n");
6821 }
6822
6823 /* Record the computed ranges: mark non-NULL pointers and set range info on non-pointer SSA_NAMEs. */
6824 for (i = 0; i < num_ssa_names; i++)
6825 {
6826 tree name = ssa_name (i);
6827 if (!name)
6828 continue;
6829
6830 const value_range *vr = get_value_range (name);
6831 if (!vr->constant_p ())
6832 continue;
6833
6834 if (POINTER_TYPE_P (TREE_TYPE (name))
6835 && range_includes_zero_p (vr) == 0)
6836 set_ptr_nonnull (name);
6837 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
6838 set_range_info (name, *vr);
6839 }
6840
6841 /* If we're checking array refs, we want to merge information on
6842 the executability of each edge between vrp_folder and the
6843 check_array_bounds_dom_walker: each can clear the
6844 EDGE_EXECUTABLE flag on edges, in different ways.
6845
6846 Hence, if we're going to call check_all_array_refs, set
6847 the flag on every edge now, rather than in
6848 check_array_bounds_dom_walker's ctor; vrp_folder may clear
6849 it from some edges. */
6850 if (warn_array_bounds && warn_array_bounds_p)
6851 set_all_edges_as_executable (cfun);
6852
6853 class vrp_folder vrp_folder;
6854 vrp_folder.vr_values = &vr_values;
6855 vrp_folder.substitute_and_fold ();
6856
6857 if (warn_array_bounds && warn_array_bounds_p)
6858 check_all_array_refs ();
6859 }
6860
6861 /* Main entry point to VRP (Value Range Propagation). This pass is
6862 loosely based on J. R. C. Patterson, ``Accurate Static Branch
6863 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
6864 Programming Language Design and Implementation, pp. 67-78, 1995.
6865 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
6866
6867 This is essentially an SSA-CCP pass modified to deal with ranges
6868 instead of constants.
6869
6870 While propagating ranges, we may find that two or more SSA names
6871 have equivalent, though distinct, ranges. For instance,
6872
6873 1 x_9 = p_3->a;
6874 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
6875 3 if (p_4 == q_2)
6876 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
6877 5 endif
6878 6 if (q_2)
6879
6880 In the code above, pointer p_5 has range [q_2, q_2], but from the
6881 code we can also determine that p_5 cannot be NULL and, if q_2 had
6882 a non-varying range, p_5's range should also be compatible with it.
6883
6884 These equivalences are created by two expressions: ASSERT_EXPR and
6885 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
6886 result of another assertion, then we can use the fact that p_5 and
6887 p_4 are equivalent when evaluating p_5's range.
6888
6889 Together with value ranges, we also propagate these equivalences
6890 between names so that we can take advantage of information from
6891 multiple ranges when doing final replacement. Note that this
6892 equivalency relation is transitive but not symmetric.
6893
6894 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
6895 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
6896 in contexts where that assertion does not hold (e.g., in line 6).
6897
6898 TODO: the main difference between this pass and Patterson's is that
6899 we do not propagate edge probabilities. We only compute whether
6900 edges can be taken or not. That is, instead of having a spectrum
6901 of jump probabilities between 0 and 1, we only deal with 0, 1 and
6902 DON'T KNOW. In the future, it may be worthwhile to propagate
6903 probabilities to aid branch prediction. */
6904
6905 static unsigned int
6906 execute_vrp (bool warn_array_bounds_p)
6907 {
6908
6909 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
6910 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
6911 scev_initialize ();
6912
6913 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
6914 Inserting assertions may split edges which will invalidate
6915 EDGE_DFS_BACK. */
6916 insert_range_assertions ();
6917
6918 threadedge_initialize_values ();
6919
6920 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
6921 mark_dfs_back_edges ();
6922
6923 class vrp_prop vrp_prop;
6924 vrp_prop.vrp_initialize ();
6925 vrp_prop.ssa_propagate ();
6926 vrp_prop.vrp_finalize (warn_array_bounds_p);
6927
6928 /* We must identify jump threading opportunities before we release
6929 the data structures built by VRP. */
6930 identify_jump_threads (&vrp_prop.vr_values);
6931
6932 /* A comparison of an SSA_NAME against a constant where the SSA_NAME
6933 was set by a type conversion can often be rewritten to use the
6934 RHS of the type conversion.
6935
6936 However, doing so inhibits jump threading through the comparison.
6937 So that transformation is not performed until after jump threading
6938 is complete. */
6939 basic_block bb;
6940 FOR_EACH_BB_FN (bb, cfun)
6941 {
6942 gimple *last = last_stmt (bb);
6943 if (last && gimple_code (last) == GIMPLE_COND)
6944 vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
6945 }
6946
6947 free_numbers_of_iterations_estimates (cfun);
6948
6949 /* ASSERT_EXPRs must be removed before finalizing jump threads
6950 as finalizing jump threads calls the CFG cleanup code which
6951 does not properly handle ASSERT_EXPRs. */
6952 remove_range_assertions ();
6953
6954 /* If we exposed any new variables, go ahead and put them into
6955 SSA form now, before we handle jump threading. This simplifies
6956 interactions between rewriting of _DECL nodes into SSA form
6957 and rewriting SSA_NAME nodes into SSA form after block
6958 duplication and CFG manipulation. */
6959 update_ssa (TODO_update_ssa);
6960
6961 /* We identified all the jump threading opportunities earlier, but could
6962 not transform the CFG at that time. This routine transforms the
6963 CFG and arranges for the dominator tree to be rebuilt if necessary.
6964
6965 Note the SSA graph update will occur during the normal TODO
6966 processing by the pass manager. */
6967 thread_through_all_blocks (false);
6968
6969 vrp_prop.vr_values.cleanup_edges_and_switches ();
6970 threadedge_finalize_values ();
6971
6972 scev_finalize ();
6973 loop_optimizer_finalize ();
6974 return 0;
6975 }
6976
6977 namespace {
6978
6979 const pass_data pass_data_vrp =
6980 {
6981 GIMPLE_PASS, /* type */
6982 "vrp", /* name */
6983 OPTGROUP_NONE, /* optinfo_flags */
6984 TV_TREE_VRP, /* tv_id */
6985 PROP_ssa, /* properties_required */
6986 0, /* properties_provided */
6987 0, /* properties_destroyed */
6988 0, /* todo_flags_start */
6989 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
6990 };
6991
6992 class pass_vrp : public gimple_opt_pass
6993 {
6994 public:
6995 pass_vrp (gcc::context *ctxt)
6996 : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
6997 {}
6998
6999 /* opt_pass methods: */
7000 opt_pass * clone () { return new pass_vrp (m_ctxt); }
7001 void set_pass_param (unsigned int n, bool param)
7002 {
7003 gcc_assert (n == 0);
7004 warn_array_bounds_p = param;
7005 }
7006 virtual bool gate (function *) { return flag_tree_vrp != 0; }
7007 virtual unsigned int execute (function *)
7008 { return execute_vrp (warn_array_bounds_p); }
7009
7010 private:
7011 bool warn_array_bounds_p;
7012 }; // class pass_vrp
7013
7014 } // anon namespace
7015
7016 gimple_opt_pass *
7017 make_pass_vrp (gcc::context *ctxt)
7018 {
7019 return new pass_vrp (ctxt);
7020 }
7021
7022
7023 /* Worker for determine_value_range. */
7024
7025 static void
7026 determine_value_range_1 (value_range_base *vr, tree expr)
7027 {
7028 if (BINARY_CLASS_P (expr))
7029 {
7030 value_range_base vr0, vr1;
7031 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
7032 determine_value_range_1 (&vr1, TREE_OPERAND (expr, 1));
7033 extract_range_from_binary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
7034 &vr0, &vr1);
7035 }
7036 else if (UNARY_CLASS_P (expr))
7037 {
7038 value_range_base vr0;
7039 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
7040 extract_range_from_unary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
7041 &vr0, TREE_TYPE (TREE_OPERAND (expr, 0)));
7042 }
7043 else if (TREE_CODE (expr) == INTEGER_CST)
7044 vr->set (expr);
7045 else
7046 {
7047 value_range_kind kind;
7048 wide_int min, max;
7049 /* For SSA names try to extract range info computed by VRP. Otherwise
7050 fall back to varying. */
7051 if (TREE_CODE (expr) == SSA_NAME
7052 && INTEGRAL_TYPE_P (TREE_TYPE (expr))
7053 && (kind = get_range_info (expr, &min, &max)) != VR_VARYING)
7054 vr->set (kind, wide_int_to_tree (TREE_TYPE (expr), min),
7055 wide_int_to_tree (TREE_TYPE (expr), max));
7056 else
7057 vr->set_varying (TREE_TYPE (expr));
7058 }
7059 }
7060
7061 /* Compute a value-range for EXPR and store its bounds in *MIN and *MAX.
7062 Return the determined range kind. */
7063
7064 value_range_kind
7065 determine_value_range (tree expr, wide_int *min, wide_int *max)
7066 {
7067 value_range_base vr;
7068 determine_value_range_1 (&vr, expr);
7069 if (vr.constant_p ())
7070 {
7071 *min = wi::to_wide (vr.min ());
7072 *max = wi::to_wide (vr.max ());
7073 return vr.kind ();
7074 }
7075
7076 return VR_VARYING;
7077 }
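
/* For instance (made-up names), if range info [0, 15] was recorded
   for n_7, then for the GENERIC expression n_7 + 1
   determine_value_range recurses into the PLUS_EXPR, combines
   [0, 15] with the singleton [1, 1], and returns VR_RANGE with
   *MIN = 1 and *MAX = 16.  */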