/* Support routines for Value Range Propagation (VRP).
   Copyright (C) 2005-2019 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "insn-codes.h"
#include "tree-pass.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-propagate.h"
#include "tree-chrec.h"
#include "tree-ssa-threadupdate.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "omp-general.h"
#include "case-cfn-macros.h"
#include "alloc-pool.h"
#include "tree-cfgcleanup.h"
#include "stringpool.h"
#include "vr-values.h"
#include "wide-int-range.h"

static bool
ranges_from_anti_range (const value_range_base *ar,
                        value_range_base *vr0, value_range_base *vr1,
                        bool handle_pointers = false);

/* Set of SSA names found live during the RPO traversal of the function
   for still active basic-blocks.  */
static sbitmap *live;

void
value_range::set_equiv (bitmap equiv)
{
  if (undefined_p () || varying_p ())
    equiv = NULL;
  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.

     All equivalence bitmaps are allocated from the same obstack.  So
     we can use the obstack associated with EQUIV to allocate vr->equiv.  */
  if (m_equiv == NULL
      && equiv != NULL)
    m_equiv = BITMAP_ALLOC (equiv->obstack);

  if (equiv != m_equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
        bitmap_copy (m_equiv, equiv);
      else
        bitmap_clear (m_equiv);
    }
}

/* Initialize value_range.  */

void
value_range::set (enum value_range_kind kind, tree min, tree max,
                  bitmap equiv)
{
  value_range_base::set (kind, min, max);
  set_equiv (equiv);
  if (flag_checking)
    check ();
}

value_range_base::value_range_base (value_range_kind kind, tree min, tree max)
{
  set (kind, min, max);
}

value_range::value_range (value_range_kind kind, tree min, tree max,
                          bitmap equiv)
{
  set (kind, min, max, equiv);
}

value_range::value_range (const value_range_base &other)
{
  set (other.kind (), other.min (), other.max (), NULL);
}

/* Like set, but keep the equivalences in place.  */

void
value_range::update (value_range_kind kind, tree min, tree max)
{
  set (kind, min, max,
       (kind != VR_UNDEFINED && kind != VR_VARYING) ? m_equiv : NULL);
}

/* Copy value_range in FROM into THIS while avoiding bitmap sharing.

   Note: The code that avoids the bitmap sharing looks at the existing
   this->m_equiv, so this function cannot be used to initialize an
   object.  Use the constructors for initialization.  */

void
value_range::deep_copy (const value_range *from)
{
  set (from->m_kind, from->min (), from->max (), from->m_equiv);
}

void
value_range::move (value_range *from)
{
  set (from->m_kind, from->min (), from->max ());
  m_equiv = from->m_equiv;
  from->m_equiv = NULL;
}

/* Check the validity of the range.  */

void
value_range_base::check ()
{
  switch (m_kind)
    {
    case VR_RANGE:
    case VR_ANTI_RANGE:
      {
        int cmp;

        gcc_assert (m_min && m_max);

        gcc_assert (!TREE_OVERFLOW_P (m_min) && !TREE_OVERFLOW_P (m_max));

        /* Creating ~[-MIN, +MAX] is stupid because that would be
           the empty set.  */
        if (INTEGRAL_TYPE_P (TREE_TYPE (m_min)) && m_kind == VR_ANTI_RANGE)
          gcc_assert (!vrp_val_is_min (m_min) || !vrp_val_is_max (m_max));

        cmp = compare_values (m_min, m_max);
        gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
        break;
      }
    case VR_UNDEFINED:
      gcc_assert (!min () && !max ());
      break;
    case VR_VARYING:
      gcc_assert (m_min && m_max);
      break;
    default:
      gcc_unreachable ();
    }
}

void
value_range::check ()
{
  value_range_base::check ();
  switch (m_kind)
    {
    case VR_UNDEFINED:
    case VR_VARYING:
      gcc_assert (!m_equiv || bitmap_empty_p (m_equiv));
      break;
    default:;
    }
}

/* Equality operator.  We purposely do not overload ==, to avoid
   confusion with the equality bitmap in the derived value_range
   class.  */

bool
value_range_base::equal_p (const value_range_base &other) const
{
  /* Ignore types for undefined.  All undefines are equal.  */
  if (undefined_p ())
    return m_kind == other.m_kind;

  return (m_kind == other.m_kind
          && vrp_operand_equal_p (m_min, other.m_min)
          && vrp_operand_equal_p (m_max, other.m_max));
}

/* Returns TRUE if THIS == OTHER.  Ignores the equivalence bitmap if
   IGNORE_EQUIVS is TRUE.  */

bool
value_range::equal_p (const value_range &other, bool ignore_equivs) const
{
  return (value_range_base::equal_p (other)
          && (ignore_equivs
              || vrp_bitmap_equal_p (m_equiv, other.m_equiv)));
}

/* Return TRUE if this is a symbolic range.  */

bool
value_range_base::symbolic_p () const
{
  return (!varying_p ()
          && !undefined_p ()
          && (!is_gimple_min_invariant (m_min)
              || !is_gimple_min_invariant (m_max)));
}

/* NOTE: This is not the inverse of symbolic_p because the range
   could also be varying or undefined.  Ideally they should be inverse
   of each other, with varying only applying to symbolics.  Varying of
   constants would be represented as [-MIN, +MAX].  */

bool
value_range_base::constant_p () const
{
  return (!varying_p ()
          && !undefined_p ()
          && TREE_CODE (m_min) == INTEGER_CST
          && TREE_CODE (m_max) == INTEGER_CST);
}

void
value_range_base::set_undefined ()
{
  m_kind = VR_UNDEFINED;
  m_min = m_max = NULL;
}

void
value_range::set_undefined ()
{
  set (VR_UNDEFINED, NULL, NULL, NULL);
}

void
value_range_base::set_varying (tree type)
{
  m_kind = VR_VARYING;
  if (supports_type_p (type))
    {
      m_min = vrp_val_min (type, true);
      m_max = vrp_val_max (type, true);
    }
  else
    /* We can't do anything range-wise with these types.  */
    m_min = m_max = error_mark_node;
}

void
value_range::set_varying (tree type)
{
  value_range_base::set_varying (type);
  equiv_clear ();
}

/* Return TRUE if it is possible that range contains VAL.  */

bool
value_range_base::may_contain_p (tree val) const
{
  return value_inside_range (val) != 0;
}

void
value_range::equiv_clear ()
{
  if (m_equiv)
    bitmap_clear (m_equiv);
}

/* Add VAR and VAR's equivalence set (VAR_VR) to the equivalence
   bitmap.  If no equivalence table has been created, OBSTACK is the
   obstack to use (NULL for the default obstack).

   This is the central point where equivalence processing can be
   turned on/off.  */

void
value_range::equiv_add (const_tree var,
                        const value_range *var_vr,
                        bitmap_obstack *obstack)
{
  if (!m_equiv)
    m_equiv = BITMAP_ALLOC (obstack);
  unsigned ver = SSA_NAME_VERSION (var);
  bitmap_set_bit (m_equiv, ver);
  if (var_vr && var_vr->m_equiv)
    bitmap_ior_into (m_equiv, var_vr->m_equiv);
}

/* If range is a singleton, place it in RESULT and return TRUE.
   Note: A singleton can be any gimple invariant, not just constants.
   So, [&x, &x] counts as a singleton.  */
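
/* For example, the range [42, 42] stores 42 in *RESULT and returns true;
   for a 1-bit unsigned type, the anti-range ~[0, 0] leaves only the value
   1 and is therefore also treated as a singleton.  */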

bool
value_range_base::singleton_p (tree *result) const
{
  if (m_kind == VR_ANTI_RANGE)
    {
      /* A non-empty anti-range over a 1-bit type necessarily leaves a
         single value, so it can be treated as a singleton.  */
      if (TYPE_PRECISION (type ()) == 1)
        return true;

      value_range_base vr0, vr1;
      return (ranges_from_anti_range (this, &vr0, &vr1, true)
              && vr1.undefined_p ()
              && vr0.singleton_p (result));
    }
  if (m_kind == VR_RANGE
      && vrp_operand_equal_p (min (), max ())
      && is_gimple_min_invariant (min ()))
    {
      if (result)
        *result = min ();
      return true;
    }
  return false;
}

tree
value_range_base::type () const
{
  gcc_assert (m_min || undefined_p ());
  return TREE_TYPE (min ());
}

void
value_range_base::dump (FILE *file) const
{
  if (undefined_p ())
    fprintf (file, "UNDEFINED");
  else if (m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
    {
      tree ttype = type ();

      print_generic_expr (file, ttype);
      fprintf (file, " ");

      fprintf (file, "%s[", (m_kind == VR_ANTI_RANGE) ? "~" : "");

      if (INTEGRAL_TYPE_P (ttype)
          && !TYPE_UNSIGNED (ttype)
          && vrp_val_is_min (min ())
          && TYPE_PRECISION (ttype) != 1)
        fprintf (file, "-INF");
      else
        print_generic_expr (file, min ());

      fprintf (file, ", ");

      if (INTEGRAL_TYPE_P (ttype)
          && vrp_val_is_max (max ())
          && TYPE_PRECISION (ttype) != 1)
        fprintf (file, "+INF");
      else
        print_generic_expr (file, max ());

      fprintf (file, "]");
    }
  else if (varying_p ())
    {
      print_generic_expr (file, type ());
      fprintf (file, " VARYING");
    }
  else
    gcc_unreachable ();
}

void
value_range_base::dump () const
{
  dump (stderr);
}

void
value_range::dump (FILE *file) const
{
  value_range_base::dump (file);
  if ((m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
      && m_equiv)
    {
      bitmap_iterator bi;
      unsigned i, c = 0;

      fprintf (file, "  EQUIVALENCES: { ");

      EXECUTE_IF_SET_IN_BITMAP (m_equiv, 0, i, bi)
        {
          print_generic_expr (file, ssa_name (i));
          fprintf (file, " ");
          c++;
        }

      fprintf (file, "} (%u elements)", c);
    }
}

void
value_range::dump () const
{
  dump (stderr);
}

void
dump_value_range (FILE *file, const value_range *vr)
{
  if (!vr)
    fprintf (file, "[]");
  else
    vr->dump (file);
}

void
dump_value_range (FILE *file, const value_range_base *vr)
{
  if (!vr)
    fprintf (file, "[]");
  else
    vr->dump (file);
}

DEBUG_FUNCTION void
debug (const value_range_base *vr)
{
  dump_value_range (stderr, vr);
}

DEBUG_FUNCTION void
debug (const value_range_base &vr)
{
  dump_value_range (stderr, &vr);
}

DEBUG_FUNCTION void
debug (const value_range *vr)
{
  dump_value_range (stderr, vr);
}

DEBUG_FUNCTION void
debug (const value_range &vr)
{
  dump_value_range (stderr, &vr);
}

/* Return true if the SSA name NAME is live on the edge E.  */

static bool
live_on_edge (edge e, tree name)
{
  return (live[e->dest->index]
          && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
}

/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */
struct assert_locus
{
  /* Basic block where the assertion would be inserted.  */
  basic_block bb;

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
  edge e;

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */
  tree val;

  /* Expression to compare.  */
  tree expr;

  /* Next node in the linked list.  */
  assert_locus *next;
};

/* If bit I is present, it means that SSA name N_i has a list of
   assertions that should be inserted in the IL.  */
static bitmap need_assert_for;

/* Array of locations lists where to insert assertions.  ASSERTS_FOR[I]
   holds a list of ASSERT_LOCUS_T nodes that describe where
   ASSERT_EXPRs for SSA name N_I should be inserted.  */
static assert_locus **asserts_for;

/* Return the maximum value for TYPE.  */

tree
vrp_val_max (const_tree type, bool handle_pointers)
{
  if (INTEGRAL_TYPE_P (type))
    return TYPE_MAX_VALUE (type);
  if (POINTER_TYPE_P (type) && handle_pointers)
    {
      wide_int max = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
      return wide_int_to_tree (const_cast<tree> (type), max);
    }
  return NULL_TREE;
}

/* Return the minimum value for TYPE.  */

tree
vrp_val_min (const_tree type, bool handle_pointers)
{
  if (INTEGRAL_TYPE_P (type))
    return TYPE_MIN_VALUE (type);
  if (POINTER_TYPE_P (type) && handle_pointers)
    return build_zero_cst (const_cast<tree> (type));
  return NULL_TREE;
}

/* Return whether VAL is equal to the maximum value of its type.
   We can't do a simple equality comparison with TYPE_MAX_VALUE because
   C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
   is not == to the integer constant with the same value in the type.  */

bool
vrp_val_is_max (const_tree val)
{
  tree type_max = vrp_val_max (TREE_TYPE (val));
  return (val == type_max
          || (type_max != NULL_TREE
              && operand_equal_p (val, type_max, 0)));
}

/* Return whether VAL is equal to the minimum value of its type.  */

bool
vrp_val_is_min (const_tree val)
{
  tree type_min = vrp_val_min (TREE_TYPE (val));
  return (val == type_min
          || (type_min != NULL_TREE
              && operand_equal_p (val, type_min, 0)));
}

/* VR_TYPE describes a range with minimum value *MIN and maximum
   value *MAX.  Restrict the range to the set of values that have
   no bits set outside NONZERO_BITS.  Update *MIN and *MAX and
   return the new range type.

   SGN gives the sign of the values described by the range.  */
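
/* As a concrete illustration: for an 8-bit unsigned VR_RANGE [1, 32]
   with NONZERO_BITS == 0x0c, the only values in the range with no bits
   outside the mask are 4, 8 and 12, so *MIN is rounded up to 4, *MAX is
   rounded down to 12, and VR_RANGE is returned.  */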

enum value_range_kind
intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
                                   wide_int *min, wide_int *max,
                                   const wide_int &nonzero_bits,
                                   signop sgn)
{
  if (vr_type == VR_ANTI_RANGE)
    {
      /* The VR_ANTI_RANGE is equivalent to the union of the ranges
         A: [-INF, *MIN) and B: (*MAX, +INF].  First use NONZERO_BITS
         to create an inclusive upper bound for A and an inclusive lower
         bound for B.  */
      wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
      wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);

      /* If the calculation of A_MAX wrapped, A is effectively empty
         and A_MAX is the highest value that satisfies NONZERO_BITS.
         Likewise if the calculation of B_MIN wrapped, B is effectively
         empty and B_MIN is the lowest value that satisfies NONZERO_BITS.  */
      bool a_empty = wi::ge_p (a_max, *min, sgn);
      bool b_empty = wi::le_p (b_min, *max, sgn);

      /* If both A and B are empty, there are no valid values.  */
      if (a_empty && b_empty)
        return VR_UNDEFINED;

      /* If exactly one of A or B is empty, return a VR_RANGE for the
         other one.  */
      if (a_empty || b_empty)
        {
          *min = b_min;
          *max = a_max;
          gcc_checking_assert (wi::le_p (*min, *max, sgn));
          return VR_RANGE;
        }

      /* Update the VR_ANTI_RANGE bounds.  */
      *min = a_max + 1;
      *max = b_min - 1;
      gcc_checking_assert (wi::le_p (*min, *max, sgn));

      /* Now check whether the excluded range includes any values that
         satisfy NONZERO_BITS.  If not, switch to a full VR_RANGE.  */
      if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
        {
          unsigned int precision = min->get_precision ();
          *min = wi::min_value (precision, sgn);
          *max = wi::max_value (precision, sgn);
          vr_type = VR_RANGE;
        }
    }
  if (vr_type == VR_RANGE)
    {
      *max = wi::round_down_for_mask (*max, nonzero_bits);

      /* Check that the range contains at least one valid value.  */
      if (wi::gt_p (*min, *max, sgn))
        return VR_UNDEFINED;

      *min = wi::round_up_for_mask (*min, nonzero_bits);
      gcc_checking_assert (wi::le_p (*min, *max, sgn));
    }
  return vr_type;
}

/* Set value range to the canonical form of {VRTYPE, MIN, MAX, EQUIV}.
   This means adjusting VRTYPE, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */

void
value_range_base::set (enum value_range_kind kind, tree min, tree max)
{
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (kind == VR_UNDEFINED)
    {
      set_undefined ();
      return;
    }
  else if (kind == VR_VARYING)
    {
      gcc_assert (TREE_TYPE (min) == TREE_TYPE (max));
      tree typ = TREE_TYPE (min);
      if (supports_type_p (typ))
        {
          gcc_assert (vrp_val_min (typ, true));
          gcc_assert (vrp_val_max (typ, true));
        }
      set_varying (typ);
      return;
    }

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      m_kind = kind;
      m_min = min;
      m_max = max;
      return;
    }

  /* Wrong order for min and max, to swap them and the VR type we need
     to adjust them.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one, tmp;

      /* For one bit precision if max < min, then the swapped
         range covers all values, so for VR_RANGE it is varying and
         for VR_ANTI_RANGE empty range, so drop to varying as well.  */
      if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
        {
          set_varying (TREE_TYPE (min));
          return;
        }

      one = build_int_cst (TREE_TYPE (min), 1);
      tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
         that again.  But this represents an empty value range, so drop
         to varying in this case.  */
      if (tree_int_cst_lt (max, min))
        {
          set_varying (TREE_TYPE (min));
          return;
        }

      kind = kind == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  tree type = TREE_TYPE (min);

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (kind == VR_ANTI_RANGE)
    {
      /* For -fstrict-enums we may receive out-of-range ranges so consider
         values < -INF and values > INF as -INF/INF as well.  */
      bool is_min = (INTEGRAL_TYPE_P (type)
                     && tree_int_cst_compare (min, TYPE_MIN_VALUE (type)) <= 0);
      bool is_max = (INTEGRAL_TYPE_P (type)
                     && tree_int_cst_compare (max, TYPE_MAX_VALUE (type)) >= 0);

      if (is_min && is_max)
        {
          /* We cannot deal with empty ranges, drop to varying.
             ??? This could be VR_UNDEFINED instead.  */
          set_varying (type);
          return;
        }
      else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
               && (is_min || is_max))
        {
          /* Non-empty boolean ranges can always be represented
             as a singleton range.  */
          if (is_min)
            min = max = vrp_val_max (TREE_TYPE (min));
          else
            min = max = vrp_val_min (TREE_TYPE (min));
          kind = VR_RANGE;
        }
      else if (is_min
               /* As a special exception preserve non-null ranges.  */
               && !(TYPE_UNSIGNED (TREE_TYPE (min))
                    && integer_zerop (max)))
        {
          tree one = build_int_cst (TREE_TYPE (max), 1);
          min = int_const_binop (PLUS_EXPR, max, one);
          max = vrp_val_max (TREE_TYPE (max));
          kind = VR_RANGE;
        }
      else if (is_max)
        {
          tree one = build_int_cst (TREE_TYPE (min), 1);
          max = int_const_binop (MINUS_EXPR, min, one);
          min = vrp_val_min (TREE_TYPE (min));
          kind = VR_RANGE;
        }
    }

  /* Normalize [MIN, MAX] into VARYING and ~[MIN, MAX] into UNDEFINED.

     Avoid using TYPE_{MIN,MAX}_VALUE because -fstrict-enums can
     restrict those to a subset of what actually fits in the type.
     Instead use the extremes of the type precision which will allow
     compare_range_with_value() to check if a value is inside a range,
     whereas if we used TYPE_*_VAL, said function would just punt
     upon seeing a VARYING.  */
  unsigned prec = TYPE_PRECISION (type);
  signop sign = TYPE_SIGN (type);
  if (wi::eq_p (wi::to_wide (min), wi::min_value (prec, sign))
      && wi::eq_p (wi::to_wide (max), wi::max_value (prec, sign)))
    {
      if (kind == VR_RANGE)
        {
          set_varying (type);
          return;
        }
      else if (kind == VR_ANTI_RANGE)
        {
          set_undefined ();
          return;
        }
    }

  /* Do not drop [-INF(OVF), +INF(OVF)] to varying.  (OVF) has to be sticky
     to make sure VRP iteration terminates, otherwise we can get into
     oscillations.  */

  m_kind = kind;
  m_min = min;
  m_max = max;
  if (flag_checking)
    check ();
}

void
value_range_base::set (tree val)
{
  gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set (VR_RANGE, val, val);
}

void
value_range::set (tree val)
{
  gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set (VR_RANGE, val, val, NULL);
}

/* Set value range VR to a nonzero range of type TYPE.  */

void
value_range_base::set_nonzero (tree type)
{
  tree zero = build_int_cst (type, 0);
  set (VR_ANTI_RANGE, zero, zero);
}

/* Set value range VR to a ZERO range of type TYPE.  */

void
value_range_base::set_zero (tree type)
{
  set (build_int_cst (type, 0));
}

/* Return true, if VAL1 and VAL2 are equal values for VRP purposes.  */

bool
vrp_operand_equal_p (const_tree val1, const_tree val2)
{
  if (val1 == val2)
    return true;
  if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
    return false;
  return true;
}

/* Return true, if the bitmaps B1 and B2 are equal.  */

bool
vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
{
  return (b1 == b2
          || ((!b1 || bitmap_empty_p (b1))
              && (!b2 || bitmap_empty_p (b2)))
          || (b1 && b2
              && bitmap_equal_p (b1, b2)));
}

/* Return true if max and min of VR are INTEGER_CST.  It's not necessarily
   a singleton.  */

bool
range_int_cst_p (const value_range_base *vr)
{
  return (vr->kind () == VR_RANGE
          && TREE_CODE (vr->min ()) == INTEGER_CST
          && TREE_CODE (vr->max ()) == INTEGER_CST);
}

/* Return true if VR is an INTEGER_CST singleton.  */

bool
range_int_cst_singleton_p (const value_range_base *vr)
{
  return (range_int_cst_p (vr)
          && tree_int_cst_equal (vr->min (), vr->max ()));
}

/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
   otherwise.  We only handle additive operations and set NEG to true if the
   symbol is negated and INV to the invariant part, if any.  */
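
/* For example, for T == x_3 + 16 this returns x_3 with *NEG == false and
   *INV == 16; for T == 16 - x_3 it returns x_3 with *NEG == true and
   *INV == 16; and for T == -x_3 it returns x_3 with *NEG == true and
   *INV left NULL_TREE.  (x_3 is just an arbitrary SSA name used for
   illustration.)  */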

tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
  bool neg_;
  tree inv_;

  *inv = NULL_TREE;
  *neg = false;

  if (TREE_CODE (t) == PLUS_EXPR
      || TREE_CODE (t) == POINTER_PLUS_EXPR
      || TREE_CODE (t) == MINUS_EXPR)
    {
      if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
        {
          neg_ = (TREE_CODE (t) == MINUS_EXPR);
          inv_ = TREE_OPERAND (t, 0);
          t = TREE_OPERAND (t, 1);
        }
      else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
        {
          neg_ = false;
          inv_ = TREE_OPERAND (t, 1);
          t = TREE_OPERAND (t, 0);
        }
      else
        return NULL_TREE;
    }
  else
    {
      neg_ = false;
      inv_ = NULL_TREE;
    }

  if (TREE_CODE (t) == NEGATE_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      neg_ = !neg_;
    }

  if (TREE_CODE (t) != SSA_NAME)
    return NULL_TREE;

  if (inv_ && TREE_OVERFLOW_P (inv_))
    inv_ = drop_tree_overflow (inv_);

  *neg = neg_;
  *inv = inv_;
  return t;
}

/* The reverse operation: build a symbolic expression with TYPE
   from symbol SYM, negated according to NEG, and invariant INV.  */

static tree
build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
{
  const bool pointer_p = POINTER_TYPE_P (type);
  tree t = sym;

  if (neg)
    t = build1 (NEGATE_EXPR, type, t);

  if (integer_zerop (inv))
    return t;

  return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
}

/* Return
   1 if VAL < VAL2,
   0 if !(VAL < VAL2),
   -2 if those are incomparable.  */

int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    return tree_int_cst_lt (val, val2);
  else if (TREE_CODE (val) == SSA_NAME && TREE_CODE (val2) == SSA_NAME)
    return val == val2 ? 0 : -2;
  else
    {
      int cmp = compare_values (val, val2);
      if (cmp == -1)
        return 1;
      else if (cmp == 0 || cmp == 1)
        return 0;
      else
        return -2;
    }
}

/* Compare two values VAL1 and VAL2.  Return

        -2 if VAL1 and VAL2 cannot be compared at compile-time,
        -1 if VAL1 < VAL2,
         0 if VAL1 == VAL2,
        +1 if VAL1 > VAL2, and
        +2 if VAL1 != VAL2.

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */
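
/* For example, assuming undefined signed overflow, comparing x_5 + 10
   with x_5 + 20 yields -1 (and may set *STRICT_OVERFLOW_P), whereas
   comparing two unrelated names x_5 and y_7 yields -2 because they
   cannot be ordered at compile time.  */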

int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
              == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  if (!useless_type_conversion_p (TREE_TYPE (val1), TREE_TYPE (val2)))
    val2 = fold_convert (TREE_TYPE (val1), val2);

  const bool overflow_undefined
    = INTEGRAL_TYPE_P (TREE_TYPE (val1))
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
  tree inv1, inv2;
  bool neg1, neg2;
  tree sym1 = get_single_symbol (val1, &neg1, &inv1);
  tree sym2 = get_single_symbol (val2, &neg2, &inv2);

  /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
     accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
  if (sym1 && sym2)
    {
      /* Both values must use the same name with the same sign.  */
      if (sym1 != sym2 || neg1 != neg2)
        return -2;

      /* [-]NAME + CST == [-]NAME + CST.  */
      if (inv1 == inv2)
        return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!overflow_undefined)
        return -2;

      if (strict_overflow_p != NULL
          /* Symbolic range building sets TREE_NO_WARNING to declare
             that overflow doesn't happen.  */
          && (!inv1 || !TREE_NO_WARNING (val1))
          && (!inv2 || !TREE_NO_WARNING (val2)))
        *strict_overflow_p = true;

      if (!inv1)
        inv1 = build_int_cst (TREE_TYPE (val1), 0);
      if (!inv2)
        inv2 = build_int_cst (TREE_TYPE (val2), 0);

      return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
                      TYPE_SIGN (TREE_TYPE (val1)));
    }

  const bool cst1 = is_gimple_min_invariant (val1);
  const bool cst2 = is_gimple_min_invariant (val2);

  /* If one is of the form '[-]NAME + CST' and the other is constant, then
     it might be possible to say something depending on the constants.  */
  if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
    {
      if (!overflow_undefined)
        return -2;

      if (strict_overflow_p != NULL
          /* Symbolic range building sets TREE_NO_WARNING to declare
             that overflow doesn't happen.  */
          && (!sym1 || !TREE_NO_WARNING (val1))
          && (!sym2 || !TREE_NO_WARNING (val2)))
        *strict_overflow_p = true;

      const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
      tree cst = cst1 ? val1 : val2;
      tree inv = cst1 ? inv2 : inv1;

      /* Compute the difference between the constants.  If it overflows or
         underflows, this means that we can trivially compare the NAME with
         it and, consequently, the two values with each other.  */
      wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
      if (wi::cmp (0, wi::to_wide (inv), sgn)
          != wi::cmp (diff, wi::to_wide (cst), sgn))
        {
          const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
          return cst1 ? res : -res;
        }

      return -2;
    }

  /* We cannot say anything more for non-constants.  */
  if (!cst1 || !cst2)
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
        return -2;

      if (TREE_CODE (val1) == INTEGER_CST
          && TREE_CODE (val2) == INTEGER_CST)
        return tree_int_cst_compare (val1, val2);

      if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
        {
          if (known_eq (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return 0;
          if (known_lt (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return -1;
          if (known_gt (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return 1;
        }

      return -2;
    }
  else
    {
      if (TREE_CODE (val1) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
        {
          /* We cannot compare overflowed values.  */
          if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
            return -2;

          return tree_int_cst_compare (val1, val2);
        }

      /* First see if VAL1 and VAL2 are not the same.  */
      if (operand_equal_p (val1, val2, 0))
        return 0;

      fold_defer_overflow_warnings ();

      /* If VAL1 is a lower address than VAL2, return -1.  */
      tree t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val1, val2);
      if (t && integer_onep (t))
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return -1;
        }

      /* If VAL1 is a higher address than VAL2, return +1.  */
      t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val2, val1);
      if (t && integer_onep (t))
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return 1;
        }

      /* If VAL1 is different than VAL2, return +2.  */
      t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
      fold_undefer_and_ignore_overflow_warnings ();
      if (t && integer_onep (t))
        return 2;

      return -2;
    }
}

/* Compare values like compare_values_warnv.  */

int
compare_values (tree val1, tree val2)
{
  bool sop;
  return compare_values_warnv (val1, val2, &sop);
}

/* Return 1 if VAL is inside value range.
          0 if VAL is not inside value range.
         -2 if we cannot tell either way.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

int
value_range_base::value_inside_range (tree val) const
{
  int cmp1, cmp2;

  if (varying_p ())
    return 1;

  if (undefined_p ())
    return 0;

  cmp1 = operand_less_p (val, m_min);
  if (cmp1 == -2)
    return -2;
  if (cmp1 == 1)
    return m_kind != VR_RANGE;

  cmp2 = operand_less_p (m_max, val);
  if (cmp2 == -2)
    return -2;

  if (m_kind == VR_RANGE)
    return !cmp2;
  else
    return !!cmp2;
}

/* Value range wrapper for wide_int_range_set_zero_nonzero_bits.

   Compute MAY_BE_NONZERO and MUST_BE_NONZERO bit masks for range in VR.

   Return TRUE if VR was a constant range and we were able to compute
   the bit masks.  */
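
/* For example, for the constant range [4, 7] the computed masks are
   MAY_BE_NONZERO == 0b111 and MUST_BE_NONZERO == 0b100; for a
   non-constant range the masks are set conservatively (all bits may be
   nonzero, no bit must be nonzero) and FALSE is returned.  */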

static bool
vrp_set_zero_nonzero_bits (const tree expr_type,
                           const value_range_base *vr,
                           wide_int *may_be_nonzero,
                           wide_int *must_be_nonzero)
{
  if (!range_int_cst_p (vr))
    {
      *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
      *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
      return false;
    }
  wide_int_range_set_zero_nonzero_bits (TYPE_SIGN (expr_type),
                                        wi::to_wide (vr->min ()),
                                        wi::to_wide (vr->max ()),
                                        *may_be_nonzero, *must_be_nonzero);
  return true;
}

/* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
   so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
   false otherwise.  If *AR can be represented with a single range
   *VR1 will be VR_UNDEFINED.  */
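
/* For example, for a 32-bit int the anti-range ~[3, 5] is split into
   *VR0 == [INT_MIN, 2] and *VR1 == [6, INT_MAX], while ~[INT_MIN, 5]
   is representable as the single range *VR0 == [6, INT_MAX] with *VR1
   left VR_UNDEFINED.  */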

static bool
ranges_from_anti_range (const value_range_base *ar,
                        value_range_base *vr0, value_range_base *vr1,
                        bool handle_pointers)
{
  tree type = ar->type ();

  vr0->set_undefined ();
  vr1->set_undefined ();

  /* As a future improvement, we could handle ~[0, A] as: [-INF, -1] U
     [A+1, +INF].  Not sure if this helps in practice, though.  */

  if (ar->kind () != VR_ANTI_RANGE
      || TREE_CODE (ar->min ()) != INTEGER_CST
      || TREE_CODE (ar->max ()) != INTEGER_CST
      || !vrp_val_min (type, handle_pointers)
      || !vrp_val_max (type, handle_pointers))
    return false;

  if (tree_int_cst_lt (vrp_val_min (type, handle_pointers), ar->min ()))
    vr0->set (VR_RANGE,
              vrp_val_min (type, handle_pointers),
              wide_int_to_tree (type, wi::to_wide (ar->min ()) - 1));
  if (tree_int_cst_lt (ar->max (), vrp_val_max (type, handle_pointers)))
    vr1->set (VR_RANGE,
              wide_int_to_tree (type, wi::to_wide (ar->max ()) + 1),
              vrp_val_max (type, handle_pointers));
  if (vr0->undefined_p ())
    {
      *vr0 = *vr1;
      vr1->set_undefined ();
    }

  return !vr0->undefined_p ();
}

/* Extract the components of a value range into a pair of wide ints in
   [WMIN, WMAX], after having normalized any symbolics from the input.  */

static void
extract_range_into_wide_ints (const value_range_base *vr_,
                              tree type, wide_int &wmin, wide_int &wmax)
{
  signop sign = TYPE_SIGN (type);
  unsigned int prec = TYPE_PRECISION (type);
  gcc_assert (vr_->kind () != VR_ANTI_RANGE || vr_->symbolic_p ());
  value_range vr = vr_->normalize_symbolics ();
  if (range_int_cst_p (&vr))
    {
      wmin = wi::to_wide (vr.min ());
      wmax = wi::to_wide (vr.max ());
    }
  else
    {
      wmin = wi::min_value (prec, sign);
      wmax = wi::max_value (prec, sign);
    }
}

/* Value range wrapper for wide_int_range_multiplicative_op:

     *VR = *VR0 .CODE. *VR1.  */
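
/* For example, a MULT_EXPR of *VR0 == [2, 3] and *VR1 == [10, 11] yields
   *VR == [20, 33]; when the underlying wide-int computation cannot
   produce a single range (or VR1 is not a constant range), *VR is set
   to VARYING instead.  */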

static void
extract_range_from_multiplicative_op (value_range_base *vr,
                                      enum tree_code code, tree type,
                                      const value_range_base *vr0,
                                      const value_range_base *vr1)
{
  gcc_assert (code == MULT_EXPR
              || code == TRUNC_DIV_EXPR
              || code == FLOOR_DIV_EXPR
              || code == CEIL_DIV_EXPR
              || code == EXACT_DIV_EXPR
              || code == ROUND_DIV_EXPR
              || code == RSHIFT_EXPR
              || code == LSHIFT_EXPR);
  if (!range_int_cst_p (vr1))
    {
      vr->set_varying (type);
      return;
    }

  /* Even if vr0 is VARYING or otherwise not usable, we can derive
     useful ranges just from the shift count.  E.g.
     x >> 63 for signed 64-bit x is always [-1, 0].  */
  value_range_base tem = vr0->normalize_symbolics ();
  tree vr0_min, vr0_max;
  if (tem.kind () == VR_RANGE)
    {
      vr0_min = tem.min ();
      vr0_max = tem.max ();
    }
  else
    {
      vr0_min = vrp_val_min (type);
      vr0_max = vrp_val_max (type);
    }

  wide_int res_lb, res_ub;
  wide_int vr0_lb = wi::to_wide (vr0_min);
  wide_int vr0_ub = wi::to_wide (vr0_max);
  wide_int vr1_lb = wi::to_wide (vr1->min ());
  wide_int vr1_ub = wi::to_wide (vr1->max ());
  bool overflow_undefined = TYPE_OVERFLOW_UNDEFINED (type);
  unsigned prec = TYPE_PRECISION (type);

  if (wide_int_range_multiplicative_op (res_lb, res_ub,
                                        code, TYPE_SIGN (type), prec,
                                        vr0_lb, vr0_ub, vr1_lb, vr1_ub,
                                        overflow_undefined))
    vr->set (VR_RANGE, wide_int_to_tree (type, res_lb),
             wide_int_to_tree (type, res_ub));
  else
    vr->set_varying (type);
}

/* If BOUND will include a symbolic bound, adjust it accordingly,
   otherwise leave it as is.

   CODE is the original operation that combined the bounds (PLUS_EXPR
   or MINUS_EXPR).

   TYPE is the type of the original operation.

   SYM_OPn is the symbolic for OPn if it has a symbolic.

   NEG_OPn is TRUE if the OPn was negated.  */

static void
adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
                       tree sym_op0, tree sym_op1,
                       bool neg_op0, bool neg_op1)
{
  bool minus_p = (code == MINUS_EXPR);
  /* If the result bound is constant, we're done; otherwise, build the
     symbolic lower bound.  */
  if (sym_op0 == sym_op1)
    ;
  else if (sym_op0)
    bound = build_symbolic_expr (type, sym_op0,
                                 neg_op0, bound);
  else if (sym_op1)
    {
      /* We may not negate if that might introduce
         undefined overflow.  */
      if (!minus_p
          || neg_op1
          || TYPE_OVERFLOW_WRAPS (type))
        bound = build_symbolic_expr (type, sym_op1,
                                     neg_op1 ^ minus_p, bound);
      else
        bound = NULL_TREE;
    }
}

/* Combine OP0 and OP1, which are two parts of a bound, into one wide
   int bound according to CODE.  CODE is the operation combining the
   bound (either a PLUS_EXPR or a MINUS_EXPR).

   TYPE is the type of the combine operation.

   WI is the wide int to store the result.

   OVF is -1 if an underflow occurred, +1 if an overflow occurred, and 0
   if neither over- nor underflow occurred.  */
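
/* For example, combine_bound (MINUS_EXPR, wi, ovf, type, op0, op1) with
   constant bounds 5 and 3 stores 2 in WI with OVF == wi::OVF_NONE; if
   only OP1 is present it is negated for a MINUS_EXPR, and with neither
   bound present WI is simply zero.  */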

static void
combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
               tree type, tree op0, tree op1)
{
  bool minus_p = (code == MINUS_EXPR);
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  /* Combine the bounds, if any.  */
  if (op0 && op1)
    {
      if (minus_p)
        wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
      else
        wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
    }
  else if (op0)
    wi = wi::to_wide (op0);
  else if (op1)
    {
      if (minus_p)
        wi = wi::neg (wi::to_wide (op1), &ovf);
      else
        wi = wi::to_wide (op1);
    }
  else
    wi = wi::shwi (0, prec);
}

/* Given a range in [WMIN, WMAX], adjust it for possible overflow and
   put the result in VR.

   TYPE is the type of the range.

   MIN_OVF and MAX_OVF indicate what type of overflow, if any,
   occurred while originally calculating WMIN or WMAX.  -1 indicates
   underflow.  +1 indicates overflow.  0 indicates neither.  */
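
/* For example, for an 8-bit unsigned type with wrapping overflow, if
   only the upper bound overflowed, say WMIN == 250 and WMAX wrapped
   around to 4, the result is canonicalized to the anti-range ~[5, 249],
   i.e. [250, 255] U [0, 4].  */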

static void
set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max,
                               tree type,
                               const wide_int &wmin, const wide_int &wmax,
                               wi::overflow_type min_ovf,
                               wi::overflow_type max_ovf)
{
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  /* For one bit precision if max < min, then the swapped
     range covers all values.  */
  if (prec == 1 && wi::lt_p (wmax, wmin, sgn))
    {
      kind = VR_VARYING;
      return;
    }

  if (TYPE_OVERFLOW_WRAPS (type))
    {
      /* If overflow wraps, truncate the values and adjust the
         range kind and bounds appropriately.  */
      wide_int tmin = wide_int::from (wmin, prec, sgn);
      wide_int tmax = wide_int::from (wmax, prec, sgn);
      if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
        {
          /* If the limits are swapped, we wrapped around and cover
             the entire range.  We have a similar check at the end of
             extract_range_from_binary_expr.  */
          if (wi::gt_p (tmin, tmax, sgn))
            kind = VR_VARYING;
          else
            {
              /* No overflow or both overflow or underflow.  The
                 range kind stays VR_RANGE.  */
              min = wide_int_to_tree (type, tmin);
              max = wide_int_to_tree (type, tmax);
            }
          return;
        }
      else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
               || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
        {
          /* Min underflow or max overflow.  The range kind
             changes to VR_ANTI_RANGE.  */
          bool covers = false;
          wide_int tem = tmin;
          tmin = tmax + 1;
          if (wi::cmp (tmin, tmax, sgn) < 0)
            covers = true;
          tmax = tem - 1;
          if (wi::cmp (tmax, tem, sgn) > 0)
            covers = true;
          /* If the anti-range would cover nothing, drop to varying.
             Likewise if the anti-range bounds are outside of the
             types values.  */
          if (covers || wi::cmp (tmin, tmax, sgn) > 0)
            {
              kind = VR_VARYING;
              return;
            }
          kind = VR_ANTI_RANGE;
          min = wide_int_to_tree (type, tmin);
          max = wide_int_to_tree (type, tmax);
          return;
        }
      else
        {
          /* Other underflow and/or overflow, drop to VR_VARYING.  */
          kind = VR_VARYING;
          return;
        }
    }
  else
    {
      /* If overflow does not wrap, saturate to the types min/max
         value.  */
      wide_int type_min = wi::min_value (prec, sgn);
      wide_int type_max = wi::max_value (prec, sgn);
      if (min_ovf == wi::OVF_UNDERFLOW)
        min = wide_int_to_tree (type, type_min);
      else if (min_ovf == wi::OVF_OVERFLOW)
        min = wide_int_to_tree (type, type_max);
      else
        min = wide_int_to_tree (type, wmin);

      if (max_ovf == wi::OVF_UNDERFLOW)
        max = wide_int_to_tree (type, type_min);
      else if (max_ovf == wi::OVF_OVERFLOW)
        max = wide_int_to_tree (type, type_max);
      else
        max = wide_int_to_tree (type, wmax);
    }
}

/* Extract range information from a binary operation CODE based on
   the ranges of each of its operands *VR0 and *VR1 with resulting
   type EXPR_TYPE.  The resulting range is stored in *VR.  */

void
extract_range_from_binary_expr (value_range_base *vr,
                                enum tree_code code, tree expr_type,
                                const value_range_base *vr0_,
                                const value_range_base *vr1_)
{
  signop sign = TYPE_SIGN (expr_type);
  unsigned int prec = TYPE_PRECISION (expr_type);
  value_range_base vr0 = *vr0_, vr1 = *vr1_;
  value_range_base vrtem0, vrtem1;
  enum value_range_kind type;
  tree min = NULL_TREE, max = NULL_TREE;
  int cmp;

  if (!INTEGRAL_TYPE_P (expr_type)
      && !POINTER_TYPE_P (expr_type))
    {
      vr->set_varying (expr_type);
      return;
    }

  /* Not all binary expressions can be applied to ranges in a
     meaningful way.  Handle only arithmetic operations.  */
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR
      && code != MULT_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != RSHIFT_EXPR
      && code != LSHIFT_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR)
    {
      vr->set_varying (expr_type);
      return;
    }
1607 if (vr0
.undefined_p () && vr1
.undefined_p ())
1609 vr
->set_undefined ();
1612 /* If one of the ranges is UNDEFINED drop it to VARYING for the following
1613 code. At some point we may want to special-case operations that
1614 have UNDEFINED result for all or some value-ranges of the not UNDEFINED
1616 else if (vr0
.undefined_p ())
1617 vr0
.set_varying (expr_type
);
1618 else if (vr1
.undefined_p ())
1619 vr1
.set_varying (expr_type
);
1621 /* We get imprecise results from ranges_from_anti_range when
1622 code is EXACT_DIV_EXPR. We could mask out bits in the resulting
1623 range, but then we also need to hack up vrp_union. It's just
1624 easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR. */
1625 if (code
== EXACT_DIV_EXPR
&& vr0
.nonzero_p ())
1627 vr
->set_nonzero (expr_type
);
1631 /* Now canonicalize anti-ranges to ranges when they are not symbolic
1632 and express ~[] op X as ([]' op X) U ([]'' op X). */
1633 if (vr0
.kind () == VR_ANTI_RANGE
1634 && ranges_from_anti_range (&vr0
, &vrtem0
, &vrtem1
))
1636 extract_range_from_binary_expr (vr
, code
, expr_type
, &vrtem0
, vr1_
);
1637 if (!vrtem1
.undefined_p ())
1639 value_range_base vrres
;
1640 extract_range_from_binary_expr (&vrres
, code
, expr_type
,
1642 vr
->union_ (&vrres
);
1646 /* Likewise for X op ~[]. */
1647 if (vr1
.kind () == VR_ANTI_RANGE
1648 && ranges_from_anti_range (&vr1
, &vrtem0
, &vrtem1
))
1650 extract_range_from_binary_expr (vr
, code
, expr_type
, vr0_
, &vrtem0
);
1651 if (!vrtem1
.undefined_p ())
1653 value_range_base vrres
;
1654 extract_range_from_binary_expr (&vrres
, code
, expr_type
,
1656 vr
->union_ (&vrres
);

  /* The type of the resulting value range defaults to VR0.TYPE.  */
  type = vr0.kind ();

  /* Refuse to operate on VARYING ranges, ranges of different kinds
     and symbolic ranges.  As an exception, we allow BIT_{AND,IOR}
     because we may be able to derive a useful range even if one of
     the operands is VR_VARYING or symbolic range.  Similarly for
     divisions, MIN/MAX and PLUS/MINUS.

     TODO, we may be able to derive anti-ranges in some cases.  */
  if (code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != RSHIFT_EXPR
      && code != POINTER_PLUS_EXPR
      && (vr0.varying_p ()
          || vr1.varying_p ()
          || vr0.kind () != vr1.kind ()
          || vr0.symbolic_p ()
          || vr1.symbolic_p ()))
    {
      vr->set_varying (expr_type);
      return;
    }

  /* Now evaluate the expression to determine the new range.  */
  if (POINTER_TYPE_P (expr_type))
    {
      if (code == MIN_EXPR || code == MAX_EXPR)
        {
          /* For MIN/MAX expressions with pointers, we only care about
             nullness, if both are non null, then the result is nonnull.
             If both are null, then the result is null.  Otherwise they
             are varying.  */
          if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
            vr->set_nonzero (expr_type);
          else if (vr0.zero_p () && vr1.zero_p ())
            vr->set_zero (expr_type);
          else
            vr->set_varying (expr_type);
        }
      else if (code == POINTER_PLUS_EXPR)
        {
          /* For pointer types, we are really only interested in asserting
             whether the expression evaluates to non-NULL.
             With -fno-delete-null-pointer-checks we need to be more
             conservative.  As some object might reside at address 0,
             then some offset could be added to it and the same offset
             subtracted again and the result would be NULL.
             E.g.
             static int a[12]; where &a[0] is NULL and
             ptr = &a[6];
             ptr -= 6;
             ptr will be NULL here, even when there is POINTER_PLUS_EXPR
             where the first range doesn't include zero and the second one
             doesn't either.  As the second operand is sizetype (unsigned),
             consider all ranges where the MSB could be set as possible
             subtractions where the result might be NULL.  */
          if ((!range_includes_zero_p (&vr0)
               || !range_includes_zero_p (&vr1))
              && !TYPE_OVERFLOW_WRAPS (expr_type)
              && (flag_delete_null_pointer_checks
                  || (range_int_cst_p (&vr1)
                      && !tree_int_cst_sign_bit (vr1.max ()))))
            vr->set_nonzero (expr_type);
          else if (vr0.zero_p () && vr1.zero_p ())
            vr->set_zero (expr_type);
          else
            vr->set_varying (expr_type);
        }
      else if (code == BIT_AND_EXPR)
        {
          /* For pointer types, we are really only interested in asserting
             whether the expression evaluates to non-NULL.  */
          if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
            vr->set_nonzero (expr_type);
          else if (vr0.zero_p () || vr1.zero_p ())
            vr->set_zero (expr_type);
          else
            vr->set_varying (expr_type);
        }
      else
        vr->set_varying (expr_type);

      return;
    }

  /* For integer ranges, apply the operation to each end of the
     range and see what we end up with.  */
  if (code == PLUS_EXPR || code == MINUS_EXPR)
    {
      value_range_kind vr0_kind = vr0.kind (), vr1_kind = vr1.kind ();
      tree vr0_min = vr0.min (), vr0_max = vr0.max ();
      tree vr1_min = vr1.min (), vr1_max = vr1.max ();
      /* This will normalize things such that calculating
         [0,0] - VR_VARYING is not dropped to varying, but is
         calculated as [MIN+1, MAX].  */
      if (vr0.varying_p ())
        {
          vr0_kind = VR_RANGE;
          vr0_min = vrp_val_min (expr_type);
          vr0_max = vrp_val_max (expr_type);
        }
      if (vr1.varying_p ())
        {
          vr1_kind = VR_RANGE;
          vr1_min = vrp_val_min (expr_type);
          vr1_max = vrp_val_max (expr_type);
        }

      const bool minus_p = (code == MINUS_EXPR);
      tree min_op0 = vr0_min;
      tree min_op1 = minus_p ? vr1_max : vr1_min;
      tree max_op0 = vr0_max;
      tree max_op1 = minus_p ? vr1_min : vr1_max;
      tree sym_min_op0 = NULL_TREE;
      tree sym_min_op1 = NULL_TREE;
      tree sym_max_op0 = NULL_TREE;
      tree sym_max_op1 = NULL_TREE;
      bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;

      neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;

      /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
         single-symbolic ranges, try to compute the precise resulting range,
         but only if we know that this resulting range will also be constant
         or single-symbolic.  */
      if (vr0_kind == VR_RANGE && vr1_kind == VR_RANGE
          && (TREE_CODE (min_op0) == INTEGER_CST
              || (sym_min_op0
                  = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
          && (TREE_CODE (min_op1) == INTEGER_CST
              || (sym_min_op1
                  = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
          && (!(sym_min_op0 && sym_min_op1)
              || (sym_min_op0 == sym_min_op1
                  && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
          && (TREE_CODE (max_op0) == INTEGER_CST
              || (sym_max_op0
                  = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
          && (TREE_CODE (max_op1) == INTEGER_CST
              || (sym_max_op1
                  = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
          && (!(sym_max_op0 && sym_max_op1)
              || (sym_max_op0 == sym_max_op1
                  && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
        {
          wide_int wmin, wmax;
          wi::overflow_type min_ovf = wi::OVF_NONE;
          wi::overflow_type max_ovf = wi::OVF_NONE;

          /* Build the bounds.  */
          combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
          combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);

          /* If we have overflow for the constant part and the resulting
             range will be symbolic, drop to VR_VARYING.  */
          if (((bool)min_ovf && sym_min_op0 != sym_min_op1)
              || ((bool)max_ovf && sym_max_op0 != sym_max_op1))
            {
              vr->set_varying (expr_type);
              return;
            }

          /* Adjust the range for possible overflow.  */
          set_value_range_with_overflow (type, min, max, expr_type,
                                         wmin, wmax, min_ovf, max_ovf);
          if (type == VR_VARYING)
            {
              vr->set_varying (expr_type);
              return;
            }

          /* Build the symbolic bounds if needed.  */
          adjust_symbolic_bound (min, code, expr_type,
                                 sym_min_op0, sym_min_op1,
                                 neg_min_op0, neg_min_op1);
          adjust_symbolic_bound (max, code, expr_type,
                                 sym_max_op0, sym_max_op1,
                                 neg_max_op0, neg_max_op1);
        }
      else
        {
          /* For other cases, for example if we have a PLUS_EXPR with two
             VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
             to compute a precise range for such a case.
             ??? General even mixed range kind operations can be expressed
             by for example transforming ~[3, 5] + [1, 2] to range-only
             operations and a union primitive:
               [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
                   [-INF+1, 4]     U    [6, +INF(OVF)]
             though usually the union is not exactly representable with
             a single range or anti-range as the above is
             [-INF+1, +INF(OVF)] intersected with ~[5, 5]
             but one could use a scheme similar to equivalences for this.  */
          vr->set_varying (expr_type);
          return;
        }
    }
  else if (code == MIN_EXPR
           || code == MAX_EXPR)
    {
      wide_int wmin, wmax;
      wide_int vr0_min, vr0_max;
      wide_int vr1_min, vr1_max;
      extract_range_into_wide_ints (&vr0, expr_type, vr0_min, vr0_max);
      extract_range_into_wide_ints (&vr1, expr_type, vr1_min, vr1_max);
      if (wide_int_range_min_max (wmin, wmax, code, sign, prec,
                                  vr0_min, vr0_max, vr1_min, vr1_max))
        vr->set (VR_RANGE, wide_int_to_tree (expr_type, wmin),
                 wide_int_to_tree (expr_type, wmax));
      else
        vr->set_varying (expr_type);
      return;
    }
  else if (code == MULT_EXPR)
    {
      if (!range_int_cst_p (&vr0)
          || !range_int_cst_p (&vr1))
        {
          vr->set_varying (expr_type);
          return;
        }
      extract_range_from_multiplicative_op (vr, code, expr_type, &vr0, &vr1);
      return;
    }
  else if (code == RSHIFT_EXPR
           || code == LSHIFT_EXPR)
    {
      if (range_int_cst_p (&vr1)
          && !wide_int_range_shift_undefined_p
                (TYPE_SIGN (TREE_TYPE (vr1.min ())),
                 prec,
                 wi::to_wide (vr1.min ()),
                 wi::to_wide (vr1.max ())))
        {
          if (code == RSHIFT_EXPR)
            {
              extract_range_from_multiplicative_op (vr, code, expr_type,
                                                    &vr0, &vr1);
              return;
            }
          else if (code == LSHIFT_EXPR
                   && range_int_cst_p (&vr0))
            {
              wide_int res_lb, res_ub;
              if (wide_int_range_lshift (res_lb, res_ub, sign, prec,
                                         wi::to_wide (vr0.min ()),
                                         wi::to_wide (vr0.max ()),
                                         wi::to_wide (vr1.min ()),
                                         wi::to_wide (vr1.max ()),
                                         TYPE_OVERFLOW_UNDEFINED (expr_type)))
                {
                  min = wide_int_to_tree (expr_type, res_lb);
                  max = wide_int_to_tree (expr_type, res_ub);
                  vr->set (VR_RANGE, min, max);
                  return;
                }
            }
        }
      vr->set_varying (expr_type);
      return;
    }
  else if (code == TRUNC_DIV_EXPR
           || code == FLOOR_DIV_EXPR
           || code == CEIL_DIV_EXPR
           || code == EXACT_DIV_EXPR
           || code == ROUND_DIV_EXPR)
    {
      wide_int dividend_min, dividend_max, divisor_min, divisor_max;
      wide_int wmin, wmax, extra_min, extra_max;
      bool extra_range_p;

      /* Special case explicit division by zero as undefined.  */
      if (vr1.zero_p ())
        {
          vr->set_undefined ();
          return;
        }

      /* First, normalize ranges into constants we can handle.  Note
         that VR_ANTI_RANGE's of constants were already normalized
         before arriving here.

         NOTE: As a future improvement, we may be able to do better
         with mixed symbolic (anti-)ranges like [0, A].  See note in
         ranges_from_anti_range.  */
      extract_range_into_wide_ints (&vr0, expr_type,
                                    dividend_min, dividend_max);
      extract_range_into_wide_ints (&vr1, expr_type,
                                    divisor_min, divisor_max);
      if (!wide_int_range_div (wmin, wmax, code, sign, prec,
                               dividend_min, dividend_max,
                               divisor_min, divisor_max,
                               TYPE_OVERFLOW_UNDEFINED (expr_type),
                               extra_range_p, extra_min, extra_max))
        {
          vr->set_varying (expr_type);
          return;
        }
      vr->set (VR_RANGE, wide_int_to_tree (expr_type, wmin),
               wide_int_to_tree (expr_type, wmax));
      if (extra_range_p)
        {
          value_range_base
            extra_range (VR_RANGE, wide_int_to_tree (expr_type, extra_min),
                         wide_int_to_tree (expr_type, extra_max));
          vr->union_ (&extra_range);
        }
      return;
    }
  else if (code == TRUNC_MOD_EXPR)
    {
      if (vr1.zero_p ())
        {
          vr->set_undefined ();
          return;
        }
      wide_int wmin, wmax, tmp;
      wide_int vr0_min, vr0_max, vr1_min, vr1_max;
      extract_range_into_wide_ints (&vr0, expr_type, vr0_min, vr0_max);
      extract_range_into_wide_ints (&vr1, expr_type, vr1_min, vr1_max);
      wide_int_range_trunc_mod (wmin, wmax, sign, prec,
                                vr0_min, vr0_max, vr1_min, vr1_max);
      min = wide_int_to_tree (expr_type, wmin);
      max = wide_int_to_tree (expr_type, wmax);
      vr->set (VR_RANGE, min, max);
      return;
    }
  else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
    {
      wide_int may_be_nonzero0, may_be_nonzero1;
      wide_int must_be_nonzero0, must_be_nonzero1;
      wide_int wmin, wmax;
      wide_int vr0_min, vr0_max, vr1_min, vr1_max;
      vrp_set_zero_nonzero_bits (expr_type, &vr0,
                                 &may_be_nonzero0, &must_be_nonzero0);
      vrp_set_zero_nonzero_bits (expr_type, &vr1,
                                 &may_be_nonzero1, &must_be_nonzero1);
      extract_range_into_wide_ints (&vr0, expr_type, vr0_min, vr0_max);
      extract_range_into_wide_ints (&vr1, expr_type, vr1_min, vr1_max);
      if (code == BIT_AND_EXPR)
        {
          if (wide_int_range_bit_and (wmin, wmax, sign, prec,
                                      vr0_min, vr0_max,
                                      vr1_min, vr1_max,
                                      must_be_nonzero0, may_be_nonzero0,
                                      must_be_nonzero1, may_be_nonzero1))
            {
              min = wide_int_to_tree (expr_type, wmin);
              max = wide_int_to_tree (expr_type, wmax);
              vr->set (VR_RANGE, min, max);
            }
          else
            vr->set_varying (expr_type);
          return;
        }
      else if (code == BIT_IOR_EXPR)
        {
          if (wide_int_range_bit_ior (wmin, wmax, sign,
                                      vr0_min, vr0_max,
                                      vr1_min, vr1_max,
                                      must_be_nonzero0, may_be_nonzero0,
                                      must_be_nonzero1, may_be_nonzero1))
            {
              min = wide_int_to_tree (expr_type, wmin);
              max = wide_int_to_tree (expr_type, wmax);
              vr->set (VR_RANGE, min, max);
            }
          else
            vr->set_varying (expr_type);
          return;
        }
      else if (code == BIT_XOR_EXPR)
        {
          if (wide_int_range_bit_xor (wmin, wmax, sign, prec,
                                      must_be_nonzero0, may_be_nonzero0,
                                      must_be_nonzero1, may_be_nonzero1))
            {
              min = wide_int_to_tree (expr_type, wmin);
              max = wide_int_to_tree (expr_type, wmax);
              vr->set (VR_RANGE, min, max);
            }
          else
            vr->set_varying (expr_type);
          return;
        }
    }
  else
    gcc_unreachable ();

  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  */
  if (min == NULL_TREE
      || TREE_OVERFLOW_P (min)
      || max == NULL_TREE
      || TREE_OVERFLOW_P (max))
    {
      vr->set_varying (expr_type);
      return;
    }

  /* We punt for [-INF, +INF].
     We learn nothing when we have INF on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF].  */
  if (vrp_val_is_min (min) && vrp_val_is_max (max))
    {
      vr->set_varying (expr_type);
      return;
    }

  cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
    {
      /* If the new range has its limits swapped around (MIN > MAX),
         then the operation caused one of them to wrap around, mark
         the new range VARYING.  */
      vr->set_varying (expr_type);
    }
  else
    vr->set (type, min, max);
}

/* Extract range information from a unary operation CODE based on
   the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
   The resulting range is stored in *VR.  */

void
extract_range_from_unary_expr (value_range_base *vr,
                               enum tree_code code, tree type,
                               const value_range_base *vr0_, tree op0_type)
{
  signop sign = TYPE_SIGN (type);
  unsigned int prec = TYPE_PRECISION (type);
  value_range_base vr0 = *vr0_;
  value_range_base vrtem0, vrtem1;

  /* VRP only operates on integral and pointer types.  */
  if (!(INTEGRAL_TYPE_P (op0_type)
        || POINTER_TYPE_P (op0_type))
      || !(INTEGRAL_TYPE_P (type)
           || POINTER_TYPE_P (type)))
    {
      vr->set_varying (type);
      return;
    }

  /* If VR0 is UNDEFINED, so is the result.  */
  if (vr0.undefined_p ())
    {
      vr->set_undefined ();
      return;
    }

  /* Handle operations that we express in terms of others.  */
  if (code == PAREN_EXPR)
    {
      /* PAREN_EXPR and OBJ_TYPE_REF are simple copies.  */
      *vr = vr0;
      return;
    }
  else if (code == NEGATE_EXPR)
    {
      /* -X is simply 0 - X, so re-use existing code that also handles
         anti-ranges fine.  */
      value_range_base zero;
      zero.set (build_int_cst (type, 0));
      extract_range_from_binary_expr (vr, MINUS_EXPR, type, &zero, &vr0);
      return;
    }
  else if (code == BIT_NOT_EXPR)
    {
      /* ~X is simply -1 - X, so re-use existing code that also handles
         anti-ranges fine.  */
      value_range_base minusone;
      minusone.set (build_int_cst (type, -1));
      extract_range_from_binary_expr (vr, MINUS_EXPR, type, &minusone, &vr0);
      return;
    }

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express op ~[] as (op []') U (op []'').  */
  if (vr0.kind () == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type);
      if (!vrtem1.undefined_p ())
        {
          value_range_base vrres;
          extract_range_from_unary_expr (&vrres, code, type,
                                         &vrtem1, op0_type);
          vr->union_ (&vrres);
        }
      return;
    }
2174 if (CONVERT_EXPR_CODE_P (code
))
2176 tree inner_type
= op0_type
;
2177 tree outer_type
= type
;
2179 /* If the expression involves a pointer, we are only interested in
2180 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]).
2182 This may lose precision when converting (char *)~[0,2] to
2183 int, because we'll forget that the pointer can also not be 1
2184 or 2. In practice we don't care, as this is some idiot
2185 storing a magic constant to a pointer. */
2186 if (POINTER_TYPE_P (type
) || POINTER_TYPE_P (op0_type
))
2188 if (!range_includes_zero_p (&vr0
))
2189 vr
->set_nonzero (type
);
2190 else if (vr0
.zero_p ())
2191 vr
->set_zero (type
);
2193 vr
->set_varying (type
);
2197 /* The POINTER_TYPE_P code above will have dealt with all
2198 pointer anti-ranges. Any remaining anti-ranges at this point
2199 will be integer conversions from SSA names that will be
2200 normalized into VARYING. For instance: ~[x_55, x_55]. */
2201 gcc_assert (vr0
.kind () != VR_ANTI_RANGE
2202 || TREE_CODE (vr0
.min ()) != INTEGER_CST
);
2204 /* NOTES: Previously we were returning VARYING for all symbolics, but
2205 we can do better by treating them as [-MIN, +MAX]. For
2206 example, converting [SYM, SYM] from INT to LONG UNSIGNED,
2207 we can return: ~[0x8000000, 0xffffffff7fffffff].
2209 We were also failing to convert ~[0,0] from char* to unsigned,
2210 instead choosing to return VR_VARYING. Now we return ~[0,0]. */
2211 wide_int vr0_min
, vr0_max
, wmin
, wmax
;
2212 signop inner_sign
= TYPE_SIGN (inner_type
);
2213 signop outer_sign
= TYPE_SIGN (outer_type
);
2214 unsigned inner_prec
= TYPE_PRECISION (inner_type
);
2215 unsigned outer_prec
= TYPE_PRECISION (outer_type
);
2216 extract_range_into_wide_ints (&vr0
, inner_type
, vr0_min
, vr0_max
);
2217 if (wide_int_range_convert (wmin
, wmax
,
2218 inner_sign
, inner_prec
,
2219 outer_sign
, outer_prec
,
2222 tree min
= wide_int_to_tree (outer_type
, wmin
);
2223 tree max
= wide_int_to_tree (outer_type
, wmax
);
2224 vr
->set (VR_RANGE
, min
, max
);
2227 vr
->set_varying (outer_type
);
  else if (code == ABS_EXPR)
    {
      wide_int wmin, wmax;
      wide_int vr0_min, vr0_max;
      extract_range_into_wide_ints (&vr0, type, vr0_min, vr0_max);
      if (wide_int_range_abs (wmin, wmax, sign, prec, vr0_min, vr0_max,
			      TYPE_OVERFLOW_UNDEFINED (type)))
	vr->set (VR_RANGE, wide_int_to_tree (type, wmin),
		 wide_int_to_tree (type, wmax));
      else
	vr->set_varying (type);
      return;
    }
  else if (code == ABSU_EXPR)
    {
      wide_int wmin, wmax;
      wide_int vr0_min, vr0_max;
      tree signed_type = make_signed_type (TYPE_PRECISION (type));
      extract_range_into_wide_ints (&vr0, signed_type, vr0_min, vr0_max);
      wide_int_range_absu (wmin, wmax, prec, vr0_min, vr0_max);
      vr->set (VR_RANGE, wide_int_to_tree (type, wmin),
	       wide_int_to_tree (type, wmax));
      return;
    }

  /* For unhandled operations fall back to varying.  */
  vr->set_varying (type);
  return;
}
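/* For illustration of the ABS_EXPR handling above: |[-7, 3]| is [0, 7]
   and |[2, 9]| stays [2, 9]; the delicate case is a range containing the
   most negative value, since with wrapping overflow its absolute value
   is itself.  */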
/* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
   create a new SSA name N and return the assertion assignment
   'N = ASSERT_EXPR <V, V OP W>'.  */

static gimple *
build_assert_expr_for (tree cond, tree v)
{
  tree a;
  gassign *assertion;

  gcc_assert (TREE_CODE (v) == SSA_NAME
	      && COMPARISON_CLASS_P (cond));

  a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
  assertion = gimple_build_assign (NULL_TREE, a);

  /* The new ASSERT_EXPR creates a new SSA name that replaces the
     operand of the ASSERT_EXPR.  Create it so the new name and the old
     one are registered in the replacement table so that we can fix the
     SSA web after adding all the ASSERT_EXPRs.  */
  tree new_def = create_new_def_for (v, assertion, NULL);

  /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
     given we have to be able to fully propagate those out to re-create
     valid SSA when removing the asserts.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;

  return assertion;
}
/* Return true if EXPR is a predicate expression involving floating
   point values.  */

static bool
fp_predicate (gimple *stmt)
{
  GIMPLE_CHECK (stmt, GIMPLE_COND);
  return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
}
/* If the range of values taken by OP can be inferred after STMT executes,
   return the comparison code (COMP_CODE_P) and value (VAL_P) that
   describes the inferred range.  Return true if a range could be
   inferred.  */

static bool
infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
{
  *val_p = NULL_TREE;
  *comp_code_p = ERROR_MARK;

  /* Do not attempt to infer anything in names that flow through
     abnormal edges.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
    return false;

  /* If STMT is the last statement of a basic block with no normal
     successors, there is no point inferring anything about any of its
     operands.  We would not be able to find a proper insertion point
     for the assertion, anyway.  */
  if (stmt_ends_bb_p (stmt))
    {
      edge_iterator ei;
      edge e;

      FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
	if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
	  break;
      if (e == NULL)
	return false;
    }

  if (infer_nonnull_range (stmt, op))
    {
      *val_p = build_int_cst (TREE_TYPE (op), 0);
      *comp_code_p = NE_EXPR;
      return true;
    }

  return false;
}
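/* As an illustration of the inference above (made-up SSA name): once a
   statement such as
     *p_7 = 1;
   has executed and control continues normally, p_7 cannot have been
   null, so a NE_EXPR against the zero constant is returned for p_7.  */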
void dump_asserts_for (FILE *, tree);
void debug_asserts_for (tree);
void dump_all_asserts (FILE *);
void debug_all_asserts (void);
/* Dump all the registered assertions for NAME to FILE.  */

void
dump_asserts_for (FILE *file, tree name)
{
  assert_locus *loc;

  fprintf (file, "Assertions to be inserted for ");
  print_generic_expr (file, name);
  fprintf (file, "\n");

  loc = asserts_for[SSA_NAME_VERSION (name)];
  while (loc)
    {
      fprintf (file, "\t");
      print_gimple_stmt (file, gsi_stmt (loc->si), 0);
      fprintf (file, "\n\tBB #%d", loc->bb->index);
      if (loc->e)
	{
	  fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
		   loc->e->dest->index);
	  dump_edge_info (file, loc->e, dump_flags, 0);
	}
      fprintf (file, "\n\tPREDICATE: ");
      print_generic_expr (file, loc->expr);
      fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
      print_generic_expr (file, loc->val);
      fprintf (file, "\n\n");
      loc = loc->next;
    }

  fprintf (file, "\n");
}
/* Dump all the registered assertions for NAME to stderr.  */

DEBUG_FUNCTION void
debug_asserts_for (tree name)
{
  dump_asserts_for (stderr, name);
}

/* Dump all the registered assertions for all the names to FILE.  */

void
dump_all_asserts (FILE *file)
{
  unsigned i;
  bitmap_iterator bi;

  fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
  EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
    dump_asserts_for (file, ssa_name (i));
  fprintf (file, "\n");
}

/* Dump all the registered assertions for all the names to stderr.  */

DEBUG_FUNCTION void
debug_all_asserts (void)
{
  dump_all_asserts (stderr);
}
/* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS.  */

static void
add_assert_info (vec<assert_info> &asserts,
		 tree name, tree expr, enum tree_code comp_code, tree val)
{
  assert_info info;
  info.comp_code = comp_code;
  info.name = name;
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  info.val = val;
  info.expr = expr;
  asserts.safe_push (info);
  if (dump_enabled_p ())
    dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
		 "Adding assert for %T from %T %s %T\n",
		 name, expr, op_symbol_code (comp_code), val);
}
/* If NAME doesn't have an ASSERT_EXPR registered for asserting
   'EXPR COMP_CODE VAL' at a location that dominates block BB or
   E->DEST, then register this location as a possible insertion point
   for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.

   BB, E and SI provide the exact insertion point for the new
   ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
   on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
   BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
   must not be NULL.  */

static void
register_new_assert_for (tree name, tree expr,
			 enum tree_code comp_code,
			 tree val,
			 basic_block bb,
			 edge e,
			 gimple_stmt_iterator si)
{
  assert_locus *n, *loc, *last_loc;
  basic_block dest_bb;

  gcc_checking_assert (bb == NULL || e == NULL);

  if (e == NULL)
    gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);

  /* Never build an assert comparing against an integer constant with
     TREE_OVERFLOW set.  This confuses our undefined overflow warning
     machinery.  */
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);

  /* The new assertion A will be inserted at BB or E.  We need to
     determine if the new location is dominated by a previously
     registered location for A.  If we are doing an edge insertion,
     assume that A will be inserted at E->DEST.  Note that this is not
     necessarily true.

     If E is a critical edge, it will be split.  But even if E is
     split, the new block will dominate the same set of blocks that
     E->DEST dominates.

     The reverse, however, is not true, blocks dominated by E->DEST
     will not be dominated by the new block created to split E.  So,
     if the insertion location is on a critical edge, we will not use
     the new location to move another assertion previously registered
     at a block dominated by E->DEST.  */
  dest_bb = (bb) ? bb : e->dest;

  /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
     VAL at a block dominating DEST_BB, then we don't need to insert a new
     one.  Similarly, if the same assertion already exists at a block
     dominated by DEST_BB and the new location is not on a critical
     edge, then update the existing location for the assertion (i.e.,
     move the assertion up in the dominance tree).

     Note, this is implemented as a simple linked list because there
     should not be more than a handful of assertions registered per
     name.  If this becomes a performance problem, a table hashed by
     COMP_CODE and VAL could be implemented.  */
  loc = asserts_for[SSA_NAME_VERSION (name)];
  last_loc = loc;
  while (loc)
    {
      if (loc->comp_code == comp_code
	  && (loc->val == val
	      || operand_equal_p (loc->val, val, 0))
	  && (loc->expr == expr
	      || operand_equal_p (loc->expr, expr, 0)))
	{
	  /* If E is not a critical edge and DEST_BB
	     dominates the existing location for the assertion, move
	     the assertion up in the dominance tree by updating its
	     location information.  */
	  if ((e == NULL || !EDGE_CRITICAL_P (e))
	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
	    {
	      loc->bb = dest_bb;
	      loc->e = e;
	      loc->si = si;
	      return;
	    }
	}

      /* Update the last node of the list and move to the next one.  */
      last_loc = loc;
      loc = loc->next;
    }

  /* If we didn't find an assertion already registered for
     NAME COMP_CODE VAL, add a new one at the end of the list of
     assertions associated with NAME.  */
  n = XNEW (struct assert_locus);
  n->bb = dest_bb;
  n->e = e;
  n->si = si;
  n->comp_code = comp_code;
  n->val = val;
  n->expr = expr;
  n->next = NULL;

  if (last_loc)
    last_loc->next = n;
  else
    asserts_for[SSA_NAME_VERSION (name)] = n;

  bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
}
/* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
   Extract a suitable test code and value and store them into *CODE_P and
   *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.

   If no extraction was possible, return FALSE, otherwise return TRUE.

   If INVERT is true, then we invert the result stored into *CODE_P.  */

static bool
extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
					 tree cond_op0, tree cond_op1,
					 bool invert, enum tree_code *code_p,
					 tree *val_p)
{
  enum tree_code comp_code;
  tree val;

  /* Otherwise, we have a comparison of the form NAME COMP VAL
     or VAL COMP NAME.  */
  if (name == cond_op1)
    {
      /* If the predicate is of the form VAL COMP NAME, flip
	 COMP around because we need to register NAME as the
	 first operand in the predicate.  */
      comp_code = swap_tree_comparison (cond_code);
      val = cond_op0;
    }
  else if (name == cond_op0)
    {
      /* The comparison is of the form NAME COMP VAL, so the
	 comparison code remains unchanged.  */
      comp_code = cond_code;
      val = cond_op1;
    }
  else
    gcc_unreachable ();

  /* Invert the comparison code as necessary.  */
  if (invert)
    comp_code = invert_tree_comparison (comp_code, 0);

  /* VRP only handles integral and pointer types.  */
  if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
      && ! POINTER_TYPE_P (TREE_TYPE (val)))
    return false;

  /* Do not register always-false predicates.
     FIXME:  this works around a limitation in fold() when dealing with
     enumerations.  Given 'enum { N1, N2 } x;', fold will not
     fold 'if (x > N2)' to 'if (0)'.  */
  if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (val)))
    {
      tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
      tree max = TYPE_MAX_VALUE (TREE_TYPE (val));

      if (comp_code == GT_EXPR
	  && (!max
	      || compare_values (val, max) == 0))
	return false;

      if (comp_code == LT_EXPR
	  && (!min
	      || compare_values (val, min) == 0))
	return false;
    }
  *code_p = comp_code;
  *val_p = val;
  return true;
}
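/* For instance (made-up names): for the predicate 10 < x_5 with
   NAME == x_5, the comparison is flipped and normalized to x_5 > 10;
   if INVERT is true the stored code becomes LE_EXPR, i.e. x_5 <= 10.  */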
/* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
   (otherwise return VAL).  VAL and MASK must be zero-extended for
   precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
   (to transform signed values into unsigned) and at the end xor
   SGNBIT back.  */

static wide_int
masked_increment (const wide_int &val_in, const wide_int &mask,
		  const wide_int &sgnbit, unsigned int prec)
{
  wide_int bit = wi::one (prec), res;
  unsigned int i;

  wide_int val = val_in ^ sgnbit;
  for (i = 0; i < prec; i++, bit += bit)
    {
      res = mask;
      if ((res & bit) == 0)
	continue;
      res = bit - 1;
      res = wi::bit_and_not (val + bit, res);
      res &= mask;
      if (wi::gtu_p (res, val))
	return res ^ sgnbit;
    }
  return val ^ sgnbit;
}
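/* A worked example (values invented for exposition): with PREC 8,
   VAL 0x12 and MASK 0xf0, the smallest RES with RES > VAL and
   (RES & MASK) == RES is 0x20, since 0x10 is not greater than 0x12
   and 0x20 keeps only bits inside MASK.  */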
/* Helper for overflow_comparison_p

   OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
   OP1's defining statement to see if it ultimately has the form
   OP0 CODE (OP0 PLUS INTEGER_CST)

   If so, return TRUE indicating this is an overflow test and store into
   *NEW_CST an updated constant that can be used in a narrowed range test.

   REVERSED indicates if the comparison was originally:

   OP1 CODE' OP0.

   This affects how we build the updated constant.  */

static bool
overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
			 bool follow_assert_exprs, bool reversed, tree *new_cst)
{
  /* See if this is a relational operation between two SSA_NAMES with
     unsigned, overflow wrapping values.  If so, check it more deeply.  */
  if ((code == LT_EXPR || code == LE_EXPR
       || code == GE_EXPR || code == GT_EXPR)
      && TREE_CODE (op0) == SSA_NAME
      && TREE_CODE (op1) == SSA_NAME
      && INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && TYPE_UNSIGNED (TREE_TYPE (op0))
      && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
    {
      gimple *op1_def = SSA_NAME_DEF_STMT (op1);

      /* If requested, follow any ASSERT_EXPRs backwards for OP1.  */
      if (follow_assert_exprs)
	{
	  while (gimple_assign_single_p (op1_def)
		 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
	    {
	      op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
	      if (TREE_CODE (op1) != SSA_NAME)
		break;
	      op1_def = SSA_NAME_DEF_STMT (op1);
	    }
	}

      /* Now look at the defining statement of OP1 to see if it adds
	 or subtracts a nonzero constant from another operand.  */
      if (op1_def
	  && is_gimple_assign (op1_def)
	  && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
	  && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
	  && !integer_zerop (gimple_assign_rhs2 (op1_def)))
	{
	  tree target = gimple_assign_rhs1 (op1_def);

	  /* If requested, follow ASSERT_EXPRs backwards for op0 looking
	     for one where TARGET appears on the RHS.  */
	  if (follow_assert_exprs)
	    {
	      /* Now see if that "other operand" is op0, following the chain
		 of ASSERT_EXPRs if necessary.  */
	      gimple *op0_def = SSA_NAME_DEF_STMT (op0);
	      while (op0 != target
		     && gimple_assign_single_p (op0_def)
		     && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
		{
		  op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
		  if (TREE_CODE (op0) != SSA_NAME)
		    break;
		  op0_def = SSA_NAME_DEF_STMT (op0);
		}
	    }

	  /* If we did not find our target SSA_NAME, then this is not
	     an overflow test.  */
	  if (op0 != target)
	    return false;

	  tree type = TREE_TYPE (op0);
	  wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
	  tree inc = gimple_assign_rhs2 (op1_def);
	  if (reversed)
	    *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
	  else
	    *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
	  return true;
	}
    }
  return false;
}

/* OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
   OP1's defining statement to see if it ultimately has the form
   OP0 CODE (OP0 PLUS INTEGER_CST)

   If so, return TRUE indicating this is an overflow test and store into
   *NEW_CST an updated constant that can be used in a narrowed range test.

   These statements are left as-is in the IL to facilitate discovery of
   {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline.  But
   the alternate range representation is often useful within VRP.  */

static bool
overflow_comparison_p (tree_code code, tree name, tree val,
		       bool use_equiv_p, tree *new_cst)
{
  if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
    return true;
  return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
				  use_equiv_p, true, new_cst);
}
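/* For illustration: in unsigned arithmetic the comparison x_1 < x_1 + 10
   is really a test that x_1 + 10 did not wrap around, so it is
   equivalent to x_1 <= UINT_MAX - 10, which is the kind of narrowed
   constant test these helpers produce.  */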
/* Try to register an edge assertion for SSA name NAME on edge E for
   the condition COND contributing to the conditional jump pointed to by BSI.
   Invert the condition COND if INVERT is true.  */

static void
register_edge_assert_for_2 (tree name, edge e,
			    enum tree_code cond_code,
			    tree cond_op0, tree cond_op1, bool invert,
			    vec<assert_info> &asserts)
{
  tree val;
2770 enum tree_code comp_code
;
2772 if (!extract_code_and_val_from_cond_with_ops (name
, cond_code
,
2775 invert
, &comp_code
, &val
))
2778 /* Queue the assert. */
2780 if (overflow_comparison_p (comp_code
, name
, val
, false, &x
))
2782 enum tree_code new_code
= ((comp_code
== GT_EXPR
|| comp_code
== GE_EXPR
)
2783 ? GT_EXPR
: LE_EXPR
);
2784 add_assert_info (asserts
, name
, name
, new_code
, x
);
2786 add_assert_info (asserts
, name
, name
, comp_code
, val
);
2788 /* In the case of NAME <= CST and NAME being defined as
2789 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
2790 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
2791 This catches range and anti-range tests. */
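  /* For instance (made-up names): with NAME = (unsigned) n_2 + 3,
     asserting NAME <= 10 means n_2 is either in [0, 7] or large enough
     that the addition wrapped, i.e. n_2 lies outside [8, UINT_MAX - 3],
     which is the anti-range case mentioned above.  */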
2792 if ((comp_code
== LE_EXPR
2793 || comp_code
== GT_EXPR
)
2794 && TREE_CODE (val
) == INTEGER_CST
2795 && TYPE_UNSIGNED (TREE_TYPE (val
)))
2797 gimple
*def_stmt
= SSA_NAME_DEF_STMT (name
);
2798 tree cst2
= NULL_TREE
, name2
= NULL_TREE
, name3
= NULL_TREE
;
2800 /* Extract CST2 from the (optional) addition. */
2801 if (is_gimple_assign (def_stmt
)
2802 && gimple_assign_rhs_code (def_stmt
) == PLUS_EXPR
)
2804 name2
= gimple_assign_rhs1 (def_stmt
);
2805 cst2
= gimple_assign_rhs2 (def_stmt
);
2806 if (TREE_CODE (name2
) == SSA_NAME
2807 && TREE_CODE (cst2
) == INTEGER_CST
)
2808 def_stmt
= SSA_NAME_DEF_STMT (name2
);
2811 /* Extract NAME2 from the (optional) sign-changing cast. */
2812 if (gimple_assign_cast_p (def_stmt
))
2814 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt
))
2815 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt
)))
2816 && (TYPE_PRECISION (gimple_expr_type (def_stmt
))
2817 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt
)))))
2818 name3
= gimple_assign_rhs1 (def_stmt
);
2821 /* If name3 is used later, create an ASSERT_EXPR for it. */
2822 if (name3
!= NULL_TREE
2823 && TREE_CODE (name3
) == SSA_NAME
2824 && (cst2
== NULL_TREE
2825 || TREE_CODE (cst2
) == INTEGER_CST
)
2826 && INTEGRAL_TYPE_P (TREE_TYPE (name3
)))
2830 /* Build an expression for the range test. */
2831 tmp
= build1 (NOP_EXPR
, TREE_TYPE (name
), name3
);
2832 if (cst2
!= NULL_TREE
)
2833 tmp
= build2 (PLUS_EXPR
, TREE_TYPE (name
), tmp
, cst2
);
2834 add_assert_info (asserts
, name3
, tmp
, comp_code
, val
);
2837 /* If name2 is used later, create an ASSERT_EXPR for it. */
2838 if (name2
!= NULL_TREE
2839 && TREE_CODE (name2
) == SSA_NAME
2840 && TREE_CODE (cst2
) == INTEGER_CST
2841 && INTEGRAL_TYPE_P (TREE_TYPE (name2
)))
2845 /* Build an expression for the range test. */
2847 if (TREE_TYPE (name
) != TREE_TYPE (name2
))
2848 tmp
= build1 (NOP_EXPR
, TREE_TYPE (name
), tmp
);
2849 if (cst2
!= NULL_TREE
)
2850 tmp
= build2 (PLUS_EXPR
, TREE_TYPE (name
), tmp
, cst2
);
2851 add_assert_info (asserts
, name2
, tmp
, comp_code
, val
);
2855 /* In the case of post-in/decrement tests like if (i++) ... and uses
2856 of the in/decremented value on the edge the extra name we want to
2857 assert for is not on the def chain of the name compared. Instead
2858 it is in the set of use stmts.
2859 Similar cases happen for conversions that were simplified through
2860 fold_{sign_changed,widened}_comparison. */
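  /* For instance (made-up names): given if (i_3 != 0) and a use
     i_4 = i_3 + 1 whose block dominates the edge source, we can
     additionally assert i_4 != 1 on that edge.  */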
2861 if ((comp_code
== NE_EXPR
2862 || comp_code
== EQ_EXPR
)
2863 && TREE_CODE (val
) == INTEGER_CST
)
2865 imm_use_iterator ui
;
2867 FOR_EACH_IMM_USE_STMT (use_stmt
, ui
, name
)
2869 if (!is_gimple_assign (use_stmt
))
2872 /* Cut off to use-stmts that are dominating the predecessor. */
2873 if (!dominated_by_p (CDI_DOMINATORS
, e
->src
, gimple_bb (use_stmt
)))
2876 tree name2
= gimple_assign_lhs (use_stmt
);
2877 if (TREE_CODE (name2
) != SSA_NAME
)
2880 enum tree_code code
= gimple_assign_rhs_code (use_stmt
);
2882 if (code
== PLUS_EXPR
2883 || code
== MINUS_EXPR
)
2885 cst
= gimple_assign_rhs2 (use_stmt
);
2886 if (TREE_CODE (cst
) != INTEGER_CST
)
2888 cst
= int_const_binop (code
, val
, cst
);
2890 else if (CONVERT_EXPR_CODE_P (code
))
2892 /* For truncating conversions we cannot record
2894 if (comp_code
== NE_EXPR
2895 && (TYPE_PRECISION (TREE_TYPE (name2
))
2896 < TYPE_PRECISION (TREE_TYPE (name
))))
2898 cst
= fold_convert (TREE_TYPE (name2
), val
);
2903 if (TREE_OVERFLOW_P (cst
))
2904 cst
= drop_tree_overflow (cst
);
2905 add_assert_info (asserts
, name2
, name2
, comp_code
, cst
);
2909 if (TREE_CODE_CLASS (comp_code
) == tcc_comparison
2910 && TREE_CODE (val
) == INTEGER_CST
)
2912 gimple
*def_stmt
= SSA_NAME_DEF_STMT (name
);
2913 tree name2
= NULL_TREE
, names
[2], cst2
= NULL_TREE
;
2914 tree val2
= NULL_TREE
;
2915 unsigned int prec
= TYPE_PRECISION (TREE_TYPE (val
));
2916 wide_int mask
= wi::zero (prec
);
2917 unsigned int nprec
= prec
;
2918 enum tree_code rhs_code
= ERROR_MARK
;
2920 if (is_gimple_assign (def_stmt
))
2921 rhs_code
= gimple_assign_rhs_code (def_stmt
);
2923 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
2924 assert that A != CST1 -+ CST2. */
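  /* For instance (made-up names): if n_5 = a_3 + 4 and the edge asserts
     n_5 != 10, then a_3 != 6 can be registered as well.  */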
2925 if ((comp_code
== EQ_EXPR
|| comp_code
== NE_EXPR
)
2926 && (rhs_code
== PLUS_EXPR
|| rhs_code
== MINUS_EXPR
))
2928 tree op0
= gimple_assign_rhs1 (def_stmt
);
2929 tree op1
= gimple_assign_rhs2 (def_stmt
);
2930 if (TREE_CODE (op0
) == SSA_NAME
2931 && TREE_CODE (op1
) == INTEGER_CST
)
2933 enum tree_code reverse_op
= (rhs_code
== PLUS_EXPR
2934 ? MINUS_EXPR
: PLUS_EXPR
);
2935 op1
= int_const_binop (reverse_op
, val
, op1
);
2936 if (TREE_OVERFLOW (op1
))
2937 op1
= drop_tree_overflow (op1
);
2938 add_assert_info (asserts
, op0
, op0
, comp_code
, op1
);
2942 /* Add asserts for NAME cmp CST and NAME being defined
2943 as NAME = (int) NAME2. */
2944 if (!TYPE_UNSIGNED (TREE_TYPE (val
))
2945 && (comp_code
== LE_EXPR
|| comp_code
== LT_EXPR
2946 || comp_code
== GT_EXPR
|| comp_code
== GE_EXPR
)
2947 && gimple_assign_cast_p (def_stmt
))
2949 name2
= gimple_assign_rhs1 (def_stmt
);
2950 if (CONVERT_EXPR_CODE_P (rhs_code
)
2951 && TREE_CODE (name2
) == SSA_NAME
2952 && INTEGRAL_TYPE_P (TREE_TYPE (name2
))
2953 && TYPE_UNSIGNED (TREE_TYPE (name2
))
2954 && prec
== TYPE_PRECISION (TREE_TYPE (name2
))
2955 && (comp_code
== LE_EXPR
|| comp_code
== GT_EXPR
2956 || !tree_int_cst_equal (val
,
2957 TYPE_MIN_VALUE (TREE_TYPE (val
)))))
2960 enum tree_code new_comp_code
= comp_code
;
2962 cst
= fold_convert (TREE_TYPE (name2
),
2963 TYPE_MIN_VALUE (TREE_TYPE (val
)));
2964 /* Build an expression for the range test. */
2965 tmp
= build2 (PLUS_EXPR
, TREE_TYPE (name2
), name2
, cst
);
2966 cst
= fold_build2 (PLUS_EXPR
, TREE_TYPE (name2
), cst
,
2967 fold_convert (TREE_TYPE (name2
), val
));
2968 if (comp_code
== LT_EXPR
|| comp_code
== GE_EXPR
)
2970 new_comp_code
= comp_code
== LT_EXPR
? LE_EXPR
: GT_EXPR
;
2971 cst
= fold_build2 (MINUS_EXPR
, TREE_TYPE (name2
), cst
,
2972 build_int_cst (TREE_TYPE (name2
), 1));
2974 add_assert_info (asserts
, name2
, tmp
, new_comp_code
, cst
);
2978 /* Add asserts for NAME cmp CST and NAME being defined as
2979 NAME = NAME2 >> CST2.
2981 Extract CST2 from the right shift. */
2982 if (rhs_code
== RSHIFT_EXPR
)
2984 name2
= gimple_assign_rhs1 (def_stmt
);
2985 cst2
= gimple_assign_rhs2 (def_stmt
);
2986 if (TREE_CODE (name2
) == SSA_NAME
2987 && tree_fits_uhwi_p (cst2
)
2988 && INTEGRAL_TYPE_P (TREE_TYPE (name2
))
2989 && IN_RANGE (tree_to_uhwi (cst2
), 1, prec
- 1)
2990 && type_has_mode_precision_p (TREE_TYPE (val
)))
2992 mask
= wi::mask (tree_to_uhwi (cst2
), false, prec
);
2993 val2
= fold_binary (LSHIFT_EXPR
, TREE_TYPE (val
), val
, cst2
);
2996 if (val2
!= NULL_TREE
2997 && TREE_CODE (val2
) == INTEGER_CST
2998 && simple_cst_equal (fold_build2 (RSHIFT_EXPR
,
3002 enum tree_code new_comp_code
= comp_code
;
3006 if (comp_code
== EQ_EXPR
|| comp_code
== NE_EXPR
)
3008 if (!TYPE_UNSIGNED (TREE_TYPE (val
)))
3010 tree type
= build_nonstandard_integer_type (prec
, 1);
3011 tmp
= build1 (NOP_EXPR
, type
, name2
);
3012 val2
= fold_convert (type
, val2
);
3014 tmp
= fold_build2 (MINUS_EXPR
, TREE_TYPE (tmp
), tmp
, val2
);
3015 new_val
= wide_int_to_tree (TREE_TYPE (tmp
), mask
);
3016 new_comp_code
= comp_code
== EQ_EXPR
? LE_EXPR
: GT_EXPR
;
3018 else if (comp_code
== LT_EXPR
|| comp_code
== GE_EXPR
)
3021 = wi::min_value (prec
, TYPE_SIGN (TREE_TYPE (val
)));
3023 if (minval
== wi::to_wide (new_val
))
3024 new_val
= NULL_TREE
;
3029 = wi::max_value (prec
, TYPE_SIGN (TREE_TYPE (val
)));
3030 mask
|= wi::to_wide (val2
);
3031 if (wi::eq_p (mask
, maxval
))
3032 new_val
= NULL_TREE
;
3034 new_val
= wide_int_to_tree (TREE_TYPE (val2
), mask
);
3038 add_assert_info (asserts
, name2
, tmp
, new_comp_code
, new_val
);
3041 /* If we have a conversion that doesn't change the value of the source
3042 simply register the same assert for it. */
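  /* For instance (made-up names): if s_7 = (long) i_2 and the edge
     asserts s_7 > 5, the assert i_2 > 5 can be registered for i_2,
     since the widening conversion preserves order and 5 is
     representable in the narrower type.  */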
3043 if (CONVERT_EXPR_CODE_P (rhs_code
))
3045 wide_int rmin
, rmax
;
3046 tree rhs1
= gimple_assign_rhs1 (def_stmt
);
3047 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1
))
3048 && TREE_CODE (rhs1
) == SSA_NAME
3049 /* Make sure the relation preserves the upper/lower boundary of
3050 the range conservatively. */
3051 && (comp_code
== NE_EXPR
3052 || comp_code
== EQ_EXPR
3053 || (TYPE_SIGN (TREE_TYPE (name
))
3054 == TYPE_SIGN (TREE_TYPE (rhs1
)))
3055 || ((comp_code
== LE_EXPR
3056 || comp_code
== LT_EXPR
)
3057 && !TYPE_UNSIGNED (TREE_TYPE (rhs1
)))
3058 || ((comp_code
== GE_EXPR
3059 || comp_code
== GT_EXPR
)
3060 && TYPE_UNSIGNED (TREE_TYPE (rhs1
))))
3061 /* And the conversion does not alter the value we compare
3062 against and all values in rhs1 can be represented in
3063 the converted to type. */
3064 && int_fits_type_p (val
, TREE_TYPE (rhs1
))
3065 && ((TYPE_PRECISION (TREE_TYPE (name
))
3066 > TYPE_PRECISION (TREE_TYPE (rhs1
)))
3067 || (get_range_info (rhs1
, &rmin
, &rmax
) == VR_RANGE
3068 && wi::fits_to_tree_p (rmin
, TREE_TYPE (name
))
3069 && wi::fits_to_tree_p (rmax
, TREE_TYPE (name
)))))
3070 add_assert_info (asserts
, rhs1
, rhs1
,
3071 comp_code
, fold_convert (TREE_TYPE (rhs1
), val
));
3074 /* Add asserts for NAME cmp CST and NAME being defined as
3075 NAME = NAME2 & CST2.
3077 Extract CST2 from the and.
3080 NAME = (unsigned) NAME2;
3081 casts where NAME's type is unsigned and has smaller precision
3082 than NAME2's type as if it was NAME = NAME2 & MASK. */
3083 names
[0] = NULL_TREE
;
3084 names
[1] = NULL_TREE
;
3086 if (rhs_code
== BIT_AND_EXPR
3087 || (CONVERT_EXPR_CODE_P (rhs_code
)
3088 && INTEGRAL_TYPE_P (TREE_TYPE (val
))
3089 && TYPE_UNSIGNED (TREE_TYPE (val
))
3090 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt
)))
3093 name2
= gimple_assign_rhs1 (def_stmt
);
3094 if (rhs_code
== BIT_AND_EXPR
)
3095 cst2
= gimple_assign_rhs2 (def_stmt
);
3098 cst2
= TYPE_MAX_VALUE (TREE_TYPE (val
));
3099 nprec
= TYPE_PRECISION (TREE_TYPE (name2
));
3101 if (TREE_CODE (name2
) == SSA_NAME
3102 && INTEGRAL_TYPE_P (TREE_TYPE (name2
))
3103 && TREE_CODE (cst2
) == INTEGER_CST
3104 && !integer_zerop (cst2
)
3106 || TYPE_UNSIGNED (TREE_TYPE (val
))))
3108 gimple
*def_stmt2
= SSA_NAME_DEF_STMT (name2
);
3109 if (gimple_assign_cast_p (def_stmt2
))
3111 names
[1] = gimple_assign_rhs1 (def_stmt2
);
3112 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2
))
3113 || TREE_CODE (names
[1]) != SSA_NAME
3114 || !INTEGRAL_TYPE_P (TREE_TYPE (names
[1]))
3115 || (TYPE_PRECISION (TREE_TYPE (name2
))
3116 != TYPE_PRECISION (TREE_TYPE (names
[1]))))
3117 names
[1] = NULL_TREE
;
3122 if (names
[0] || names
[1])
3124 wide_int minv
, maxv
, valv
, cst2v
;
3125 wide_int tem
, sgnbit
;
3126 bool valid_p
= false, valn
, cst2n
;
3127 enum tree_code ccode
= comp_code
;
3129 valv
= wide_int::from (wi::to_wide (val
), nprec
, UNSIGNED
);
3130 cst2v
= wide_int::from (wi::to_wide (cst2
), nprec
, UNSIGNED
);
3131 valn
= wi::neg_p (valv
, TYPE_SIGN (TREE_TYPE (val
)));
3132 cst2n
= wi::neg_p (cst2v
, TYPE_SIGN (TREE_TYPE (val
)));
3133 /* If CST2 doesn't have most significant bit set,
3134 but VAL is negative, we have comparison like
3135 if ((x & 0x123) > -4) (always true). Just give up. */
3139 sgnbit
= wi::set_bit_in_zero (nprec
- 1, nprec
);
3141 sgnbit
= wi::zero (nprec
);
3142 minv
= valv
& cst2v
;
3146 /* Minimum unsigned value for equality is VAL & CST2
3147 (should be equal to VAL, otherwise we probably should
3148 have folded the comparison into false) and
3149 maximum unsigned value is VAL | ~CST2. */
3150 maxv
= valv
| ~cst2v
;
3155 tem
= valv
| ~cst2v
;
3156 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
3160 sgnbit
= wi::zero (nprec
);
3163 /* If (VAL | ~CST2) is all ones, handle it as
3164 (X & CST2) < VAL. */
3169 sgnbit
= wi::zero (nprec
);
3172 if (!cst2n
&& wi::neg_p (cst2v
))
3173 sgnbit
= wi::set_bit_in_zero (nprec
- 1, nprec
);
3182 if (tem
== wi::mask (nprec
- 1, false, nprec
))
3188 sgnbit
= wi::zero (nprec
);
3193 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
3194 is VAL and maximum unsigned value is ~0. For signed
3195 comparison, if CST2 doesn't have most significant bit
3196 set, handle it similarly. If CST2 has MSB set,
3197 the minimum is the same, and maximum is ~0U/2. */
3200 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
3202 minv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
3206 maxv
= wi::mask (nprec
- (cst2n
? 1 : 0), false, nprec
);
3212 /* Find out smallest MINV where MINV > VAL
3213 && (MINV & CST2) == MINV, if any. If VAL is signed and
3214 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
3215 minv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
3218 maxv
= wi::mask (nprec
- (cst2n
? 1 : 0), false, nprec
);
3223 /* Minimum unsigned value for <= is 0 and maximum
3224 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
3225 Otherwise, find smallest VAL2 where VAL2 > VAL
3226 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3228 For signed comparison, if CST2 doesn't have most
3229 significant bit set, handle it similarly. If CST2 has
3230 MSB set, the maximum is the same and minimum is INT_MIN. */
3235 maxv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
3247 /* Minimum unsigned value for < is 0 and maximum
3248 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
3249 Otherwise, find smallest VAL2 where VAL2 > VAL
3250 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3252 For signed comparison, if CST2 doesn't have most
3253 significant bit set, handle it similarly. If CST2 has
3254 MSB set, the maximum is the same and minimum is INT_MIN. */
3263 maxv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
3277 && (maxv
- minv
) != -1)
3279 tree tmp
, new_val
, type
;
3282 for (i
= 0; i
< 2; i
++)
3285 wide_int maxv2
= maxv
;
3287 type
= TREE_TYPE (names
[i
]);
3288 if (!TYPE_UNSIGNED (type
))
3290 type
= build_nonstandard_integer_type (nprec
, 1);
3291 tmp
= build1 (NOP_EXPR
, type
, names
[i
]);
3295 tmp
= build2 (PLUS_EXPR
, type
, tmp
,
3296 wide_int_to_tree (type
, -minv
));
3297 maxv2
= maxv
- minv
;
3299 new_val
= wide_int_to_tree (type
, maxv2
);
3300 add_assert_info (asserts
, names
[i
], tmp
, LE_EXPR
, new_val
);
/* OP is an operand of a truth value expression which is known to have
   a particular value.  Register any asserts for OP and for any
   operands in OP's defining statement.

   If CODE is EQ_EXPR, then we want to register OP is zero (false),
   if CODE is NE_EXPR, then we want to register OP is nonzero (true).  */

static void
register_edge_assert_for_1 (tree op, enum tree_code code,
			    edge e, vec<assert_info> &asserts)
{
  gimple *op_def;
  tree val;
  enum tree_code rhs_code;

  /* We only care about SSA_NAMEs.  */
  if (TREE_CODE (op) != SSA_NAME)
    return;

  /* We know that OP will have a zero or nonzero value.  */
  val = build_int_cst (TREE_TYPE (op), 0);
  add_assert_info (asserts, op, op, code, val);

  /* Now look at how OP is set.  If it's set from a comparison,
     a truth operation or some bit operations, then we may be able
     to register information about the operands of that assignment.  */
  op_def = SSA_NAME_DEF_STMT (op);
  if (gimple_code (op_def) != GIMPLE_ASSIGN)
    return;

  rhs_code = gimple_assign_rhs_code (op_def);

  if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
    {
      bool invert = (code == EQ_EXPR ? true : false);
      tree op0 = gimple_assign_rhs1 (op_def);
      tree op1 = gimple_assign_rhs2 (op_def);

      if (TREE_CODE (op0) == SSA_NAME)
	register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
      if (TREE_CODE (op1) == SSA_NAME)
	register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
    }
  else if ((code == NE_EXPR
	    && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
	   || (code == EQ_EXPR
	       && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
    {
      /* Recurse on each operand.  */
      tree op0 = gimple_assign_rhs1 (op_def);
      tree op1 = gimple_assign_rhs2 (op_def);
      if (TREE_CODE (op0) == SSA_NAME
	  && has_single_use (op0))
	register_edge_assert_for_1 (op0, code, e, asserts);
      if (TREE_CODE (op1) == SSA_NAME
	  && has_single_use (op1))
	register_edge_assert_for_1 (op1, code, e, asserts);
    }
  else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
	   && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
    {
      /* Recurse, flipping CODE.  */
      code = invert_tree_comparison (code, false);
      register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
    }
  else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
    {
      /* Recurse through the copy.  */
      register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
    }
  else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
    {
      /* Recurse through the type conversion, unless it is a narrowing
	 conversion or conversion from non-integral type.  */
      tree rhs = gimple_assign_rhs1 (op_def);
      if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
	  && (TYPE_PRECISION (TREE_TYPE (rhs))
	      <= TYPE_PRECISION (TREE_TYPE (op))))
	register_edge_assert_for_1 (rhs, code, e, asserts);
    }
}
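/* For illustration (made-up names): on the true edge of if (t_9 != 0),
   where t_9 is the bitwise AND of the two tests a_1 > 5 and b_2 != 0,
   this recursion lets us register asserts for both a_1 > 5 and
   b_2 != 0.  */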
/* Check if comparison
     NAME COND_OP INTEGER_CST
   has a form of
     (X & 11...100..0) COND_OP XX...X00...0
   Such comparison can yield assertions like
     X >= XX...X00...0
     X <= XX...X11...1
   in case of COND_OP being EQ_EXPR or
     X < XX...X00...0
     X > XX...X11...1
   in case of NE_EXPR.  */
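/* For illustration: (x & 0xf0) == 0x30 implies 0x30 <= x && x <= 0x3f,
   while (x & 0xf0) != 0x30 implies x < 0x30 || x > 0x3f.  */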
static bool
is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
		      tree *new_name, tree *low, enum tree_code *low_code,
		      tree *high, enum tree_code *high_code)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (name);

  if (!is_gimple_assign (def_stmt)
      || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
    return false;

  tree t = gimple_assign_rhs1 (def_stmt);
  tree maskt = gimple_assign_rhs2 (def_stmt);
  if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
    return false;

  wi::tree_to_wide_ref mask = wi::to_wide (maskt);
  wide_int inv_mask = ~mask;
  /* Must have been removed by now so don't bother optimizing.  */
  if (mask == 0 || inv_mask == 0)
    return false;

  /* Assume VALT is INTEGER_CST.  */
  wi::tree_to_wide_ref val = wi::to_wide (valt);

  if ((inv_mask & (inv_mask + 1)) != 0
      || (val & mask) != val)
    return false;

  bool is_range = cond_code == EQ_EXPR;

  tree type = TREE_TYPE (t);
  wide_int min = wi::min_value (type),
    max = wi::max_value (type);

  if (is_range)
    {
      *low_code = val == min ? ERROR_MARK : GE_EXPR;
      *high_code = val == max ? ERROR_MARK : LE_EXPR;
    }
  else
    {
      /* We can still generate assertion if one of alternatives
	 is known to always be false.  */
      if (val == min)
	{
	  *low_code = (enum tree_code) 0;
	  *high_code = GT_EXPR;
	}
      else if ((val | inv_mask) == max)
	{
	  *low_code = LT_EXPR;
	  *high_code = (enum tree_code) 0;
	}
      else
	return false;
    }

  *new_name = t;
  *low = wide_int_to_tree (type, val);
  *high = wide_int_to_tree (type, val | inv_mask);

  return true;
}
/* Try to register an edge assertion for SSA name NAME on edge E for
   the condition COND contributing to the conditional jump pointed to by
   SI.  */
3471 register_edge_assert_for (tree name
, edge e
,
3472 enum tree_code cond_code
, tree cond_op0
,
3473 tree cond_op1
, vec
<assert_info
> &asserts
)
3476 enum tree_code comp_code
;
3477 bool is_else_edge
= (e
->flags
& EDGE_FALSE_VALUE
) != 0;
3479 /* Do not attempt to infer anything in names that flow through
3481 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name
))
3484 if (!extract_code_and_val_from_cond_with_ops (name
, cond_code
,
3490 /* Register ASSERT_EXPRs for name. */
3491 register_edge_assert_for_2 (name
, e
, cond_code
, cond_op0
,
3492 cond_op1
, is_else_edge
, asserts
);
3495 /* If COND is effectively an equality test of an SSA_NAME against
3496 the value zero or one, then we may be able to assert values
3497 for SSA_NAMEs which flow into COND. */
3499 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
3500 statement of NAME we can assert both operands of the BIT_AND_EXPR
3501 have nonzero value. */
3502 if (((comp_code
== EQ_EXPR
&& integer_onep (val
))
3503 || (comp_code
== NE_EXPR
&& integer_zerop (val
))))
3505 gimple
*def_stmt
= SSA_NAME_DEF_STMT (name
);
3507 if (is_gimple_assign (def_stmt
)
3508 && gimple_assign_rhs_code (def_stmt
) == BIT_AND_EXPR
)
3510 tree op0
= gimple_assign_rhs1 (def_stmt
);
3511 tree op1
= gimple_assign_rhs2 (def_stmt
);
3512 register_edge_assert_for_1 (op0
, NE_EXPR
, e
, asserts
);
3513 register_edge_assert_for_1 (op1
, NE_EXPR
, e
, asserts
);
3517 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
3518 statement of NAME we can assert both operands of the BIT_IOR_EXPR
3520 if (((comp_code
== EQ_EXPR
&& integer_zerop (val
))
3521 || (comp_code
== NE_EXPR
&& integer_onep (val
))))
3523 gimple
*def_stmt
= SSA_NAME_DEF_STMT (name
);
3525 /* For BIT_IOR_EXPR only if NAME == 0 both operands have
3526 necessarily zero value, or if type-precision is one. */
3527 if (is_gimple_assign (def_stmt
)
3528 && (gimple_assign_rhs_code (def_stmt
) == BIT_IOR_EXPR
3529 && (TYPE_PRECISION (TREE_TYPE (name
)) == 1
3530 || comp_code
== EQ_EXPR
)))
3532 tree op0
= gimple_assign_rhs1 (def_stmt
);
3533 tree op1
= gimple_assign_rhs2 (def_stmt
);
3534 register_edge_assert_for_1 (op0
, EQ_EXPR
, e
, asserts
);
3535 register_edge_assert_for_1 (op1
, EQ_EXPR
, e
, asserts
);
3539 /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
3540 if ((comp_code
== EQ_EXPR
|| comp_code
== NE_EXPR
)
3541 && TREE_CODE (val
) == INTEGER_CST
)
3543 enum tree_code low_code
, high_code
;
3545 if (is_masked_range_test (name
, val
, comp_code
, &name
, &low
,
3546 &low_code
, &high
, &high_code
))
3548 if (low_code
!= ERROR_MARK
)
3549 register_edge_assert_for_2 (name
, e
, low_code
, name
,
3550 low
, /*invert*/false, asserts
);
3551 if (high_code
!= ERROR_MARK
)
3552 register_edge_assert_for_2 (name
, e
, high_code
, name
,
3553 high
, /*invert*/false, asserts
);
/* Finish found ASSERTS for E and register them at GSI.  */

static void
finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
				 vec<assert_info> &asserts)
{
  for (unsigned i = 0; i < asserts.length (); ++i)
    /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
       reachable from E.  */
    if (live_on_edge (e, asserts[i].name))
      register_new_assert_for (asserts[i].name, asserts[i].expr,
			       asserts[i].comp_code, asserts[i].val,
			       NULL, e, gsi);
}
/* Determine whether the outgoing edges of BB should receive an
   ASSERT_EXPR for each of the operands of BB's LAST statement.
   The last statement of BB must be a COND_EXPR.

   If any of the sub-graphs rooted at BB have an interesting use of
   the predicate operands, an assert location node is added to the
   list of assertions for the corresponding operands.  */
3584 find_conditional_asserts (basic_block bb
, gcond
*last
)
3586 gimple_stmt_iterator bsi
;
3592 bsi
= gsi_for_stmt (last
);
3594 /* Look for uses of the operands in each of the sub-graphs
3595 rooted at BB. We need to check each of the outgoing edges
3596 separately, so that we know what kind of ASSERT_EXPR to
3598 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
3603 /* Register the necessary assertions for each operand in the
3604 conditional predicate. */
3605 auto_vec
<assert_info
, 8> asserts
;
3606 FOR_EACH_SSA_TREE_OPERAND (op
, last
, iter
, SSA_OP_USE
)
3607 register_edge_assert_for (op
, e
,
3608 gimple_cond_code (last
),
3609 gimple_cond_lhs (last
),
3610 gimple_cond_rhs (last
), asserts
);
3611 finish_register_edge_assert_for (e
, bsi
, asserts
);
/* Compare two case labels sorting first by the destination bb index
   and then by the case value.  */
3625 compare_case_labels (const void *p1
, const void *p2
)
3627 const struct case_info
*ci1
= (const struct case_info
*) p1
;
3628 const struct case_info
*ci2
= (const struct case_info
*) p2
;
3629 int idx1
= ci1
->bb
->index
;
3630 int idx2
= ci2
->bb
->index
;
3634 else if (idx1
== idx2
)
3636 /* Make sure the default label is first in a group. */
3637 if (!CASE_LOW (ci1
->expr
))
3639 else if (!CASE_LOW (ci2
->expr
))
3642 return tree_int_cst_compare (CASE_LOW (ci1
->expr
),
3643 CASE_LOW (ci2
->expr
));
/* Determine whether the outgoing edges of BB should receive an
   ASSERT_EXPR for each of the operands of BB's LAST statement.
   The last statement of BB must be a SWITCH_EXPR.

   If any of the sub-graphs rooted at BB have an interesting use of
   the predicate operands, an assert location node is added to the
   list of assertions for the corresponding operands.  */
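/* For illustration (made-up switch index i_2): given a label covering
   3 ... 5, the edge to that label allows asserting i_2 >= 3 and
   i_2 <= 5, while the default edge can assert that i_2 lies outside
   each case range.  */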
3658 find_switch_asserts (basic_block bb
, gswitch
*last
)
3660 gimple_stmt_iterator bsi
;
3663 struct case_info
*ci
;
3664 size_t n
= gimple_switch_num_labels (last
);
3665 #if GCC_VERSION >= 4000
3668 /* Work around GCC 3.4 bug (PR 37086). */
3669 volatile unsigned int idx
;
3672 bsi
= gsi_for_stmt (last
);
3673 op
= gimple_switch_index (last
);
3674 if (TREE_CODE (op
) != SSA_NAME
)
3677 /* Build a vector of case labels sorted by destination label. */
3678 ci
= XNEWVEC (struct case_info
, n
);
3679 for (idx
= 0; idx
< n
; ++idx
)
3681 ci
[idx
].expr
= gimple_switch_label (last
, idx
);
3682 ci
[idx
].bb
= label_to_block (cfun
, CASE_LABEL (ci
[idx
].expr
));
3684 edge default_edge
= find_edge (bb
, ci
[0].bb
);
3685 qsort (ci
, n
, sizeof (struct case_info
), compare_case_labels
);
3687 for (idx
= 0; idx
< n
; ++idx
)
3690 tree cl
= ci
[idx
].expr
;
3691 basic_block cbb
= ci
[idx
].bb
;
3693 min
= CASE_LOW (cl
);
3694 max
= CASE_HIGH (cl
);
3696 /* If there are multiple case labels with the same destination
3697 we need to combine them to a single value range for the edge. */
3698 if (idx
+ 1 < n
&& cbb
== ci
[idx
+ 1].bb
)
3700 /* Skip labels until the last of the group. */
3703 } while (idx
< n
&& cbb
== ci
[idx
].bb
);
3706 /* Pick up the maximum of the case label range. */
3707 if (CASE_HIGH (ci
[idx
].expr
))
3708 max
= CASE_HIGH (ci
[idx
].expr
);
3710 max
= CASE_LOW (ci
[idx
].expr
);
3713 /* Can't extract a useful assertion out of a range that includes the
3715 if (min
== NULL_TREE
)
3718 /* Find the edge to register the assert expr on. */
3719 e
= find_edge (bb
, cbb
);
3721 /* Register the necessary assertions for the operand in the
3723 auto_vec
<assert_info
, 8> asserts
;
3724 register_edge_assert_for (op
, e
,
3725 max
? GE_EXPR
: EQ_EXPR
,
3726 op
, fold_convert (TREE_TYPE (op
), min
),
3729 register_edge_assert_for (op
, e
, LE_EXPR
, op
,
3730 fold_convert (TREE_TYPE (op
), max
),
3732 finish_register_edge_assert_for (e
, bsi
, asserts
);
3737 if (!live_on_edge (default_edge
, op
))
3740 /* Now register along the default label assertions that correspond to the
3741 anti-range of each label. */
3742 int insertion_limit
= PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS
);
3743 if (insertion_limit
== 0)
3746 /* We can't do this if the default case shares a label with another case. */
3747 tree default_cl
= gimple_switch_default_label (last
);
3748 for (idx
= 1; idx
< n
; idx
++)
3751 tree cl
= gimple_switch_label (last
, idx
);
3752 if (CASE_LABEL (cl
) == CASE_LABEL (default_cl
))
3755 min
= CASE_LOW (cl
);
3756 max
= CASE_HIGH (cl
);
3758 /* Combine contiguous case ranges to reduce the number of assertions
3760 for (idx
= idx
+ 1; idx
< n
; idx
++)
3762 tree next_min
, next_max
;
3763 tree next_cl
= gimple_switch_label (last
, idx
);
3764 if (CASE_LABEL (next_cl
) == CASE_LABEL (default_cl
))
3767 next_min
= CASE_LOW (next_cl
);
3768 next_max
= CASE_HIGH (next_cl
);
3770 wide_int difference
= (wi::to_wide (next_min
)
3771 - wi::to_wide (max
? max
: min
));
3772 if (wi::eq_p (difference
, 1))
3773 max
= next_max
? next_max
: next_min
;
3779 if (max
== NULL_TREE
)
3781 /* Register the assertion OP != MIN. */
3782 auto_vec
<assert_info
, 8> asserts
;
3783 min
= fold_convert (TREE_TYPE (op
), min
);
3784 register_edge_assert_for (op
, default_edge
, NE_EXPR
, op
, min
,
3786 finish_register_edge_assert_for (default_edge
, bsi
, asserts
);
3790 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3791 which will give OP the anti-range ~[MIN,MAX]. */
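      /* For instance (made-up name i_2 standing for the switch index
	 OP): for a case range 5 ... 9 the default edge gets the
	 assertion (unsigned) i_2 - 5 > 4, which is just another way of
	 saying that i_2 is outside [5, 9].  */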
3792 tree uop
= fold_convert (unsigned_type_for (TREE_TYPE (op
)), op
);
3793 min
= fold_convert (TREE_TYPE (uop
), min
);
3794 max
= fold_convert (TREE_TYPE (uop
), max
);
3796 tree lhs
= fold_build2 (MINUS_EXPR
, TREE_TYPE (uop
), uop
, min
);
3797 tree rhs
= int_const_binop (MINUS_EXPR
, max
, min
);
3798 register_new_assert_for (op
, lhs
, GT_EXPR
, rhs
,
3799 NULL
, default_edge
, bsi
);
3802 if (--insertion_limit
== 0)
/* Traverse all the statements in block BB looking for statements that
   may generate useful assertions for the SSA names in their operand.
   If a statement produces a useful assertion A for name N_i, then the
   list of assertions already generated for N_i is scanned to
   determine if A is actually needed.

   If N_i already had the assertion A at a location dominating the
   current location, then nothing needs to be done.  Otherwise, the
   new location for A is recorded instead.

   1- For every statement S in BB, all the variables used by S are
      added to bitmap FOUND_IN_SUBGRAPH.

   2- If statement S uses an operand N in a way that exposes a known
      value range for N, then if N was not already generated by an
      ASSERT_EXPR, create a new assert location for N.  For instance,
      if N is a pointer and the statement dereferences it, we can
      assume that N is not NULL.

   3- COND_EXPRs are a special case of #2.  We can derive range
      information from the predicate but need to insert different
      ASSERT_EXPRs for each of the sub-graphs rooted at the
      conditional block.  If the last statement of BB is a conditional
      expression of the form 'X op Y', then

      a) Remove X and Y from the set FOUND_IN_SUBGRAPH.

      b) If the conditional is the only entry point to the sub-graph
	 corresponding to the THEN_CLAUSE, recurse into it.  On
	 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
	 an ASSERT_EXPR is added for the corresponding variable.

      c) Repeat step (b) on the ELSE_CLAUSE.

      d) Mark X and Y in FOUND_IN_SUBGRAPH.

      For instance,

      if (a == 9)
	b = a;
      else
	b = c + 1;

      In this case, an assertion on the THEN clause is useful to
      determine that 'a' is always 9 on that edge.  However, an assertion
      on the ELSE clause would be unnecessary.

   4- If BB does not end in a conditional expression, then we recurse
      into BB's dominator children.

   At the end of the recursive traversal, every SSA name will have a
   list of locations where ASSERT_EXPRs should be added.  When a new
   location for name N is found, it is registered by calling
   register_new_assert_for.  That function keeps track of all the
   registered assertions to prevent adding unnecessary assertions.
   For instance, if a pointer P_4 is dereferenced more than once in a
   dominator tree, only the location dominating all the dereferences of
   P_4 will receive an ASSERT_EXPR.  */
3868 find_assert_locations_1 (basic_block bb
, sbitmap live
)
3872 last
= last_stmt (bb
);
3874 /* If BB's last statement is a conditional statement involving integer
3875 operands, determine if we need to add ASSERT_EXPRs. */
3877 && gimple_code (last
) == GIMPLE_COND
3878 && !fp_predicate (last
)
3879 && !ZERO_SSA_OPERANDS (last
, SSA_OP_USE
))
3880 find_conditional_asserts (bb
, as_a
<gcond
*> (last
));
3882 /* If BB's last statement is a switch statement involving integer
3883 operands, determine if we need to add ASSERT_EXPRs. */
3885 && gimple_code (last
) == GIMPLE_SWITCH
3886 && !ZERO_SSA_OPERANDS (last
, SSA_OP_USE
))
3887 find_switch_asserts (bb
, as_a
<gswitch
*> (last
));
3889 /* Traverse all the statements in BB marking used names and looking
3890 for statements that may infer assertions for their used operands. */
3891 for (gimple_stmt_iterator si
= gsi_last_bb (bb
); !gsi_end_p (si
);
3898 stmt
= gsi_stmt (si
);
3900 if (is_gimple_debug (stmt
))
3903 /* See if we can derive an assertion for any of STMT's operands. */
3904 FOR_EACH_SSA_TREE_OPERAND (op
, stmt
, i
, SSA_OP_USE
)
3907 enum tree_code comp_code
;
3909 /* If op is not live beyond this stmt, do not bother to insert
3911 if (!bitmap_bit_p (live
, SSA_NAME_VERSION (op
)))
3914 /* If OP is used in such a way that we can infer a value
3915 range for it, and we don't find a previous assertion for
3916 it, create a new assertion location node for OP. */
3917 if (infer_value_range (stmt
, op
, &comp_code
, &value
))
3919 /* If we are able to infer a nonzero value range for OP,
3920 then walk backwards through the use-def chain to see if OP
3921 was set via a typecast.
3923 If so, then we can also infer a nonzero value range
3924 for the operand of the NOP_EXPR. */
3925 if (comp_code
== NE_EXPR
&& integer_zerop (value
))
3928 gimple
*def_stmt
= SSA_NAME_DEF_STMT (t
);
3930 while (is_gimple_assign (def_stmt
)
3931 && CONVERT_EXPR_CODE_P
3932 (gimple_assign_rhs_code (def_stmt
))
3934 (gimple_assign_rhs1 (def_stmt
)) == SSA_NAME
3936 (TREE_TYPE (gimple_assign_rhs1 (def_stmt
))))
3938 t
= gimple_assign_rhs1 (def_stmt
);
3939 def_stmt
= SSA_NAME_DEF_STMT (t
);
3941 /* Note we want to register the assert for the
3942 operand of the NOP_EXPR after SI, not after the
3944 if (bitmap_bit_p (live
, SSA_NAME_VERSION (t
)))
3945 register_new_assert_for (t
, t
, comp_code
, value
,
3950 register_new_assert_for (op
, op
, comp_code
, value
, bb
, NULL
, si
);
3955 FOR_EACH_SSA_TREE_OPERAND (op
, stmt
, i
, SSA_OP_USE
)
3956 bitmap_set_bit (live
, SSA_NAME_VERSION (op
));
3957 FOR_EACH_SSA_TREE_OPERAND (op
, stmt
, i
, SSA_OP_DEF
)
3958 bitmap_clear_bit (live
, SSA_NAME_VERSION (op
));
3961 /* Traverse all PHI nodes in BB, updating live. */
3962 for (gphi_iterator si
= gsi_start_phis (bb
); !gsi_end_p (si
);
3965 use_operand_p arg_p
;
3967 gphi
*phi
= si
.phi ();
3968 tree res
= gimple_phi_result (phi
);
3970 if (virtual_operand_p (res
))
3973 FOR_EACH_PHI_ARG (arg_p
, phi
, i
, SSA_OP_USE
)
3975 tree arg
= USE_FROM_PTR (arg_p
);
3976 if (TREE_CODE (arg
) == SSA_NAME
)
3977 bitmap_set_bit (live
, SSA_NAME_VERSION (arg
));
3980 bitmap_clear_bit (live
, SSA_NAME_VERSION (res
));
/* Do an RPO walk over the function computing SSA name liveness
   on-the-fly and deciding on assert expressions to insert.  */
3988 find_assert_locations (void)
3990 int *rpo
= XNEWVEC (int, last_basic_block_for_fn (cfun
));
3991 int *bb_rpo
= XNEWVEC (int, last_basic_block_for_fn (cfun
));
3992 int *last_rpo
= XCNEWVEC (int, last_basic_block_for_fn (cfun
));
3995 live
= XCNEWVEC (sbitmap
, last_basic_block_for_fn (cfun
));
3996 rpo_cnt
= pre_and_rev_post_order_compute (NULL
, rpo
, false);
3997 for (i
= 0; i
< rpo_cnt
; ++i
)
4000 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
4001 the order we compute liveness and insert asserts we otherwise
4002 fail to insert asserts into the loop latch. */
4004 FOR_EACH_LOOP (loop
, 0)
4006 i
= loop
->latch
->index
;
4007 unsigned int j
= single_succ_edge (loop
->latch
)->dest_idx
;
4008 for (gphi_iterator gsi
= gsi_start_phis (loop
->header
);
4009 !gsi_end_p (gsi
); gsi_next (&gsi
))
4011 gphi
*phi
= gsi
.phi ();
4012 if (virtual_operand_p (gimple_phi_result (phi
)))
4014 tree arg
= gimple_phi_arg_def (phi
, j
);
4015 if (TREE_CODE (arg
) == SSA_NAME
)
4017 if (live
[i
] == NULL
)
4019 live
[i
] = sbitmap_alloc (num_ssa_names
);
4020 bitmap_clear (live
[i
]);
4022 bitmap_set_bit (live
[i
], SSA_NAME_VERSION (arg
));
4027 for (i
= rpo_cnt
- 1; i
>= 0; --i
)
4029 basic_block bb
= BASIC_BLOCK_FOR_FN (cfun
, rpo
[i
]);
4035 live
[rpo
[i
]] = sbitmap_alloc (num_ssa_names
);
4036 bitmap_clear (live
[rpo
[i
]]);
4039 /* Process BB and update the live information with uses in
4041 find_assert_locations_1 (bb
, live
[rpo
[i
]]);
4043 /* Merge liveness into the predecessor blocks and free it. */
4044 if (!bitmap_empty_p (live
[rpo
[i
]]))
4047 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
4049 int pred
= e
->src
->index
;
4050 if ((e
->flags
& EDGE_DFS_BACK
) || pred
== ENTRY_BLOCK
)
4055 live
[pred
] = sbitmap_alloc (num_ssa_names
);
4056 bitmap_clear (live
[pred
]);
4058 bitmap_ior (live
[pred
], live
[pred
], live
[rpo
[i
]]);
4060 if (bb_rpo
[pred
] < pred_rpo
)
4061 pred_rpo
= bb_rpo
[pred
];
4064 /* Record the RPO number of the last visited block that needs
4065 live information from this block. */
4066 last_rpo
[rpo
[i
]] = pred_rpo
;
4070 sbitmap_free (live
[rpo
[i
]]);
4071 live
[rpo
[i
]] = NULL
;
4074 /* We can free all successors live bitmaps if all their
4075 predecessors have been visited already. */
4076 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
4077 if (last_rpo
[e
->dest
->index
] == i
4078 && live
[e
->dest
->index
])
4080 sbitmap_free (live
[e
->dest
->index
]);
4081 live
[e
->dest
->index
] = NULL
;
4086 XDELETEVEC (bb_rpo
);
4087 XDELETEVEC (last_rpo
);
4088 for (i
= 0; i
< last_basic_block_for_fn (cfun
); ++i
)
4090 sbitmap_free (live
[i
]);
/* Create an ASSERT_EXPR for NAME and insert it in the location
   indicated by LOC.  Return true if we made any edge insertions.  */
4098 process_assert_insertions_for (tree name
, assert_locus
*loc
)
4100 /* Build the comparison expression NAME_i COMP_CODE VAL. */
4103 gimple
*assert_stmt
;
4107 /* If we have X <=> X do not insert an assert expr for that. */
4108 if (loc
->expr
== loc
->val
)
4111 cond
= build2 (loc
->comp_code
, boolean_type_node
, loc
->expr
, loc
->val
);
4112 assert_stmt
= build_assert_expr_for (cond
, name
);
4115 /* We have been asked to insert the assertion on an edge. This
4116 is used only by COND_EXPR and SWITCH_EXPR assertions. */
4117 gcc_checking_assert (gimple_code (gsi_stmt (loc
->si
)) == GIMPLE_COND
4118 || (gimple_code (gsi_stmt (loc
->si
))
4121 gsi_insert_on_edge (loc
->e
, assert_stmt
);
4125 /* If the stmt iterator points at the end then this is an insertion
4126 at the beginning of a block. */
4127 if (gsi_end_p (loc
->si
))
4129 gimple_stmt_iterator si
= gsi_after_labels (loc
->bb
);
4130 gsi_insert_before (&si
, assert_stmt
, GSI_SAME_STMT
);
4134 /* Otherwise, we can insert right after LOC->SI iff the
4135 statement must not be the last statement in the block. */
4136 stmt
= gsi_stmt (loc
->si
);
4137 if (!stmt_ends_bb_p (stmt
))
4139 gsi_insert_after (&loc
->si
, assert_stmt
, GSI_SAME_STMT
);
4143 /* If STMT must be the last statement in BB, we can only insert new
4144 assertions on the non-abnormal edge out of BB. Note that since
4145 STMT is not control flow, there may only be one non-abnormal/eh edge
4147 FOR_EACH_EDGE (e
, ei
, loc
->bb
->succs
)
4148 if (!(e
->flags
& (EDGE_ABNORMAL
|EDGE_EH
)))
4150 gsi_insert_on_edge (e
, assert_stmt
);
/* Qsort helper for sorting assert locations.  If stable is true, don't
   use iterative_hash_expr because it can be unstable for -fcompare-debug;
   on the other hand, some pointers might be NULL.  */
4161 template <bool stable
>
4163 compare_assert_loc (const void *pa
, const void *pb
)
4165 assert_locus
* const a
= *(assert_locus
* const *)pa
;
4166 assert_locus
* const b
= *(assert_locus
* const *)pb
;
4168 /* If stable, some asserts might be optimized away already, sort
4178 if (a
->e
== NULL
&& b
->e
!= NULL
)
4180 else if (a
->e
!= NULL
&& b
->e
== NULL
)
4183 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
4184 no need to test both a->e and b->e. */
4186 /* Sort after destination index. */
4189 else if (a
->e
->dest
->index
> b
->e
->dest
->index
)
4191 else if (a
->e
->dest
->index
< b
->e
->dest
->index
)
4194 /* Sort after comp_code. */
4195 if (a
->comp_code
> b
->comp_code
)
4197 else if (a
->comp_code
< b
->comp_code
)
4202 /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
4203 uses DECL_UID of the VAR_DECL, so sorting might differ between
4204 -g and -g0. When doing the removal of redundant assert exprs
4205 and commonization to successors, this does not matter, but for
4206 the final sort needs to be stable. */
4214 ha
= iterative_hash_expr (a
->expr
, iterative_hash_expr (a
->val
, 0));
4215 hb
= iterative_hash_expr (b
->expr
, iterative_hash_expr (b
->val
, 0));
4218 /* Break the tie using hashing and source/bb index. */
4220 return (a
->e
!= NULL
4221 ? a
->e
->src
->index
- b
->e
->src
->index
4222 : a
->bb
->index
- b
->bb
->index
);
4223 return ha
> hb
? 1 : -1;
/* Process all the insertions registered for every name N_i registered
   in NEED_ASSERT_FOR.  The list of assertions to be inserted is
   found in ASSERTS_FOR[i].  */
4231 process_assert_insertions (void)
4235 bool update_edges_p
= false;
4236 int num_asserts
= 0;
4238 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
4239 dump_all_asserts (dump_file
);
4241 EXECUTE_IF_SET_IN_BITMAP (need_assert_for
, 0, i
, bi
)
4243 assert_locus
*loc
= asserts_for
[i
];
4246 auto_vec
<assert_locus
*, 16> asserts
;
4247 for (; loc
; loc
= loc
->next
)
4248 asserts
.safe_push (loc
);
4249 asserts
.qsort (compare_assert_loc
<false>);
4251 /* Push down common asserts to successors and remove redundant ones. */
4253 assert_locus
*common
= NULL
;
4254 unsigned commonj
= 0;
4255 for (unsigned j
= 0; j
< asserts
.length (); ++j
)
4261 || loc
->e
->dest
!= common
->e
->dest
4262 || loc
->comp_code
!= common
->comp_code
4263 || ! operand_equal_p (loc
->val
, common
->val
, 0)
4264 || ! operand_equal_p (loc
->expr
, common
->expr
, 0))
4270 else if (loc
->e
== asserts
[j
-1]->e
)
4272 /* Remove duplicate asserts. */
4273 if (commonj
== j
- 1)
4278 free (asserts
[j
-1]);
4279 asserts
[j
-1] = NULL
;
4284 if (EDGE_COUNT (common
->e
->dest
->preds
) == ecnt
)
4286 /* We have the same assertion on all incoming edges of a BB.
4287 Insert it at the beginning of that block. */
4288 loc
->bb
= loc
->e
->dest
;
4290 loc
->si
= gsi_none ();
4292 /* Clear asserts commoned. */
4293 for (; commonj
!= j
; ++commonj
)
4294 if (asserts
[commonj
])
4296 free (asserts
[commonj
]);
4297 asserts
[commonj
] = NULL
;
4303 /* The asserts vector sorting above might be unstable for
4304 -fcompare-debug, sort again to ensure a stable sort. */
4305 asserts
.qsort (compare_assert_loc
<true>);
4306 for (unsigned j
= 0; j
< asserts
.length (); ++j
)
4311 update_edges_p
|= process_assert_insertions_for (ssa_name (i
), loc
);
4318 gsi_commit_edge_inserts ();
4320 statistics_counter_event (cfun
, "Number of ASSERT_EXPR expressions inserted",
/* Traverse the flowgraph looking for conditional jumps to insert range
   expressions.  These range expressions are meant to provide information
   to optimizations that need to reason in terms of value ranges.  They
   will not be expanded into RTL.  For instance, given:

   x = ...
   y = ...
   if (x < y)
     y = x - 2;
   else
     x = y + 3;

   this pass will transform the code into:

   x = ...
   y = ...
   if (x < y)
    {
      x = ASSERT_EXPR <x, x < y>
      y = x - 2
    }
   else
    {
      y = ASSERT_EXPR <y, x >= y>
      x = y + 3
    }

   The idea is that once copy and constant propagation have run, other
   optimizations will be able to determine what ranges of values 'x' can
   take in different paths of the code, simply by checking the reaching
   definition of 'x'.  */

static void
insert_range_assertions (void)
{
  need_assert_for = BITMAP_ALLOC (NULL);
  asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);

  calculate_dominance_info (CDI_DOMINATORS);

  find_assert_locations ();
  if (!bitmap_empty_p (need_assert_for))
    {
      process_assert_insertions ();
      update_ssa (TODO_update_ssa_no_phi);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
      dump_function_to_file (current_function_decl, dump_file, dump_flags);
    }

  free (asserts_for);
  BITMAP_FREE (need_assert_for);
}
class vrp_prop : public ssa_propagation_engine
{
 public:
  enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
  enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;

  void vrp_initialize (void);
  void vrp_finalize (bool);
  void check_all_array_refs (void);
  void check_array_ref (location_t, tree, bool);
  void check_mem_ref (location_t, tree, bool);
  void search_for_addr_array (tree, location_t);

  class vr_values vr_values;
  /* Temporary delegators to minimize code churn.  */
  const value_range *get_value_range (const_tree op)
    { return vr_values.get_value_range (op); }
  void set_def_to_varying (const_tree def)
    { vr_values.set_def_to_varying (def); }
  void set_defs_to_varying (gimple *stmt)
    { vr_values.set_defs_to_varying (stmt); }
  void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
				tree *output_p, value_range *vr)
    { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
  bool update_value_range (const_tree op, value_range *vr)
    { return vr_values.update_value_range (op, vr); }
  void extract_range_basic (value_range *vr, gimple *stmt)
    { vr_values.extract_range_basic (vr, stmt); }
  void extract_range_from_phi_node (gphi *phi, value_range *vr)
    { vr_values.extract_range_from_phi_node (phi, vr); }
};
4413 /* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
4414 and "struct" hacks. If VRP can determine that the
4415 array subscript is a constant, check if it is outside valid
4416 range. If the array subscript is a RANGE, warn if it is
4417 non-overlapping with valid range.
4418 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */
4421 vrp_prop::check_array_ref (location_t location
, tree ref
,
4422 bool ignore_off_by_one
)
4424 const value_range
*vr
= NULL
;
4425 tree low_sub
, up_sub
;
4426 tree low_bound
, up_bound
, up_bound_p1
;
4428 if (TREE_NO_WARNING (ref
))
4431 low_sub
= up_sub
= TREE_OPERAND (ref
, 1);
4432 up_bound
= array_ref_up_bound (ref
);
4435 || TREE_CODE (up_bound
) != INTEGER_CST
4436 || (warn_array_bounds
< 2
4437 && array_at_struct_end_p (ref
)))
4439 /* Accesses to trailing arrays via pointers may access storage
4440 beyond the type's array bounds. For such arrays, or for flexible
4441 array members, as well as for other arrays of an unknown size,
4442 replace the upper bound with a more permissive one that assumes
4443 the size of the largest object is PTRDIFF_MAX. */
4444 tree eltsize
= array_ref_element_size (ref
);
4446 if (TREE_CODE (eltsize
) != INTEGER_CST
4447 || integer_zerop (eltsize
))
4449 up_bound
= NULL_TREE
;
4450 up_bound_p1
= NULL_TREE
;
4454 tree maxbound
= TYPE_MAX_VALUE (ptrdiff_type_node
);
4455 tree arg
= TREE_OPERAND (ref
, 0);
4458 if (get_addr_base_and_unit_offset (arg
, &off
) && known_gt (off
, 0))
4459 maxbound
= wide_int_to_tree (sizetype
,
4460 wi::sub (wi::to_wide (maxbound
),
4463 maxbound
= fold_convert (sizetype
, maxbound
);
4465 up_bound_p1
= int_const_binop (TRUNC_DIV_EXPR
, maxbound
, eltsize
);
4467 up_bound
= int_const_binop (MINUS_EXPR
, up_bound_p1
,
4468 build_int_cst (ptrdiff_type_node
, 1));
4472 up_bound_p1
= int_const_binop (PLUS_EXPR
, up_bound
,
4473 build_int_cst (TREE_TYPE (up_bound
), 1));
4475 low_bound
= array_ref_low_bound (ref
);
4477 tree artype
= TREE_TYPE (TREE_OPERAND (ref
, 0));
4479 bool warned
= false;
4482 if (up_bound
&& tree_int_cst_equal (low_bound
, up_bound_p1
))
4483 warned
= warning_at (location
, OPT_Warray_bounds
,
4484 "array subscript %E is above array bounds of %qT",
4487 if (TREE_CODE (low_sub
) == SSA_NAME
)
4489 vr
= get_value_range (low_sub
);
4490 if (!vr
->undefined_p () && !vr
->varying_p ())
4492 low_sub
= vr
->kind () == VR_RANGE
? vr
->max () : vr
->min ();
4493 up_sub
= vr
->kind () == VR_RANGE
? vr
->min () : vr
->max ();
4497 if (vr
&& vr
->kind () == VR_ANTI_RANGE
)
4500 && TREE_CODE (up_sub
) == INTEGER_CST
4501 && (ignore_off_by_one
4502 ? tree_int_cst_lt (up_bound
, up_sub
)
4503 : tree_int_cst_le (up_bound
, up_sub
))
4504 && TREE_CODE (low_sub
) == INTEGER_CST
4505 && tree_int_cst_le (low_sub
, low_bound
))
4506 warned
= warning_at (location
, OPT_Warray_bounds
,
4507 "array subscript [%E, %E] is outside "
4508 "array bounds of %qT",
4509 low_sub
, up_sub
, artype
);
4512 && TREE_CODE (up_sub
) == INTEGER_CST
4513 && (ignore_off_by_one
4514 ? !tree_int_cst_le (up_sub
, up_bound_p1
)
4515 : !tree_int_cst_le (up_sub
, up_bound
)))
4517 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
4519 fprintf (dump_file
, "Array bound warning for ");
4520 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, ref
);
4521 fprintf (dump_file
, "\n");
4523 warned
= warning_at (location
, OPT_Warray_bounds
,
4524 "array subscript %E is above array bounds of %qT",
4527 else if (TREE_CODE (low_sub
) == INTEGER_CST
4528 && tree_int_cst_lt (low_sub
, low_bound
))
4530 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
4532 fprintf (dump_file
, "Array bound warning for ");
4533 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, ref
);
4534 fprintf (dump_file
, "\n");
4536 warned
= warning_at (location
, OPT_Warray_bounds
,
4537 "array subscript %E is below array bounds of %qT",
4543 ref
= TREE_OPERAND (ref
, 0);
4546 inform (DECL_SOURCE_LOCATION (ref
), "while referencing %qD", ref
);
4548 TREE_NO_WARNING (ref
) = 1;
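/* Illustrative example of the kind of access check_array_ref diagnoses;
   this is user-level code added for documentation purposes, not part of
   this file, so it is kept out of the build.  */
#if 0
int a[10];

int
example_oob_read (void)
{
  return a[10];		/* warning: array subscript 10 is above array
			   bounds of 'int[10]' [-Warray-bounds]  */
}
#endif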
4552 /* Checks one MEM_REF in REF, located at LOCATION, for out-of-bounds
4553 references to string constants. If VRP can determine that the array
4554 subscript is a constant, check if it is outside valid range.
4555 If the array subscript is a RANGE, warn if it is non-overlapping
4557 IGNORE_OFF_BY_ONE is true if the MEM_REF is inside an ADDR_EXPR
4558 (used to allow one-past-the-end indices for code that takes
4559 the address of the just-past-the-end element of an array). */
4562 vrp_prop::check_mem_ref (location_t location
, tree ref
,
4563 bool ignore_off_by_one
)
4565 if (TREE_NO_WARNING (ref
))
4568 tree arg
= TREE_OPERAND (ref
, 0);
4569 /* The constant and variable offset of the reference. */
4570 tree cstoff
= TREE_OPERAND (ref
, 1);
4571 tree varoff
= NULL_TREE
;
4573 const offset_int maxobjsize
= tree_to_shwi (max_object_size ());
4575 /* The array or string constant bounds in bytes. Initially set
4576 to [-MAXOBJSIZE - 1, MAXOBJSIZE] until a tighter bound is
4578 offset_int arrbounds
[2] = { -maxobjsize
- 1, maxobjsize
};
4580 /* The minimum and maximum intermediate offset. For a reference
4581 to be valid, not only must the final offset/subscript be
4582 in bounds but all intermediate offsets should be as well.
4583 GCC may be able to deal gracefully with such out-of-bounds
4584 offsets so the checking is only enabled at -Warray-bounds=2
4585 where it may help detect bugs in uses of the intermediate
4586 offsets that could otherwise not be detectable. */
4587 offset_int ioff
= wi::to_offset (fold_convert (ptrdiff_type_node
, cstoff
));
4588 offset_int extrema
[2] = { 0, wi::abs (ioff
) };
4590 /* The range of the byte offset into the reference. */
4591 offset_int offrange
[2] = { 0, 0 };
4593 const value_range
*vr
= NULL
;
4595 /* Determine the offsets and increment OFFRANGE for the bounds of each.
4596 The loop computes the range of the final offset for expressions such
4597 as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs in
4599 const unsigned limit
= PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT
);
4600 for (unsigned n
= 0; TREE_CODE (arg
) == SSA_NAME
&& n
< limit
; ++n
)
4602 gimple
*def
= SSA_NAME_DEF_STMT (arg
);
4603 if (!is_gimple_assign (def
))
4606 tree_code code
= gimple_assign_rhs_code (def
);
4607 if (code
== POINTER_PLUS_EXPR
)
4609 arg
= gimple_assign_rhs1 (def
);
4610 varoff
= gimple_assign_rhs2 (def
);
4612 else if (code
== ASSERT_EXPR
)
4614 arg
= TREE_OPERAND (gimple_assign_rhs1 (def
), 0);
4620 /* VAROFF should always be a SSA_NAME here (and not even
4621 INTEGER_CST) but there's no point in taking chances. */
4622 if (TREE_CODE (varoff
) != SSA_NAME
)
4625 vr
= get_value_range (varoff
);
4626 if (!vr
|| vr
->undefined_p () || vr
->varying_p ())
4629 if (!vr
->constant_p ())
4632 if (vr
->kind () == VR_RANGE
)
4635 = wi::to_offset (fold_convert (ptrdiff_type_node
, vr
->min ()));
4637 = wi::to_offset (fold_convert (ptrdiff_type_node
, vr
->max ()));
4645 /* When MIN >= MAX, the offset is effectively in a union
4646 of two ranges: [-MAXOBJSIZE -1, MAX] and [MIN, MAXOBJSIZE].
4647 Since there is no way to represent such a range across
4648 additions, conservatively add [-MAXOBJSIZE -1, MAXOBJSIZE]
4650 offrange
[0] += arrbounds
[0];
4651 offrange
[1] += arrbounds
[1];
4656 /* For an anti-range, analogously to the above, conservatively
4657 add [-MAXOBJSIZE -1, MAXOBJSIZE] to OFFRANGE. */
4658 offrange
[0] += arrbounds
[0];
4659 offrange
[1] += arrbounds
[1];
4662 /* Keep track of the minimum and maximum offset. */
4663 if (offrange
[1] < 0 && offrange
[1] < extrema
[0])
4664 extrema
[0] = offrange
[1];
4665 if (offrange
[0] > 0 && offrange
[0] > extrema
[1])
4666 extrema
[1] = offrange
[0];
4668 if (offrange
[0] < arrbounds
[0])
4669 offrange
[0] = arrbounds
[0];
4671 if (offrange
[1] > arrbounds
[1])
4672 offrange
[1] = arrbounds
[1];
4675 if (TREE_CODE (arg
) == ADDR_EXPR
)
4677 arg
= TREE_OPERAND (arg
, 0);
4678 if (TREE_CODE (arg
) != STRING_CST
4679 && TREE_CODE (arg
) != VAR_DECL
)
4685 /* The type of the object being referred to. It can be an array,
4686 string literal, or a non-array type when the MEM_REF represents
4687 a reference/subscript via a pointer to an object that is not
4688 an element of an array. References to members of structs and
4689 unions are excluded because MEM_REF doesn't make it possible
4690 to identify the member where the reference originated.
4691 Incomplete types are excluded as well because their size is
4693 tree reftype
= TREE_TYPE (arg
);
4694 if (POINTER_TYPE_P (reftype
)
4695 || !COMPLETE_TYPE_P (reftype
)
4696 || TREE_CODE (TYPE_SIZE_UNIT (reftype
)) != INTEGER_CST
4697 || RECORD_OR_UNION_TYPE_P (reftype
))
4701 if (TREE_CODE (reftype
) == ARRAY_TYPE
)
4703 eltsize
= wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (reftype
)));
4705 if (tree dom
= TYPE_DOMAIN (reftype
))
4707 tree bnds
[] = { TYPE_MIN_VALUE (dom
), TYPE_MAX_VALUE (dom
) };
4708 if (array_at_struct_end_p (arg
)
4709 || !bnds
[0] || !bnds
[1])
4712 arrbounds
[1] = wi::lrshift (maxobjsize
, wi::floor_log2 (eltsize
));
4716 arrbounds
[0] = wi::to_offset (bnds
[0]) * eltsize
;
4717 arrbounds
[1] = (wi::to_offset (bnds
[1]) + 1) * eltsize
;
4723 arrbounds
[1] = wi::lrshift (maxobjsize
, wi::floor_log2 (eltsize
));
4726 if (TREE_CODE (ref
) == MEM_REF
)
4728 /* For MEM_REF determine a tighter bound of the non-array
4730 tree eltype
= TREE_TYPE (reftype
);
4731 while (TREE_CODE (eltype
) == ARRAY_TYPE
)
4732 eltype
= TREE_TYPE (eltype
);
4733 eltsize
= wi::to_offset (TYPE_SIZE_UNIT (eltype
));
4740 arrbounds
[1] = wi::to_offset (TYPE_SIZE_UNIT (reftype
));
4743 offrange
[0] += ioff
;
4744 offrange
[1] += ioff
;
4746 /* Compute the more permissive upper bound when IGNORE_OFF_BY_ONE
4747 is set (when taking the address of the one-past-last element
4748 of an array) but always use the stricter bound in diagnostics. */
4749 offset_int ubound
= arrbounds
[1];
4750 if (ignore_off_by_one
)
4753 if (offrange
[0] >= ubound
|| offrange
[1] < arrbounds
[0])
4755 /* Treat a reference to a non-array object as one to an array
4756 of a single element. */
4757 if (TREE_CODE (reftype
) != ARRAY_TYPE
)
4758 reftype
= build_array_type_nelts (reftype
, 1);
4760 if (TREE_CODE (ref
) == MEM_REF
)
4762 /* Extract the element type out of MEM_REF and use its size
4763 to compute the index to print in the diagnostic; arrays
4764 in MEM_REF don't mean anything. A type with no size like
4765 void is as good as having a size of 1. */
4766 tree type
= TREE_TYPE (ref
);
4767 while (TREE_CODE (type
) == ARRAY_TYPE
)
4768 type
= TREE_TYPE (type
);
4769 if (tree size
= TYPE_SIZE_UNIT (type
))
4771 offrange
[0] = offrange
[0] / wi::to_offset (size
);
4772 offrange
[1] = offrange
[1] / wi::to_offset (size
);
4777 /* For anything other than MEM_REF, compute the index to
4778 print in the diagnostic as the offset over element size. */
4779 offrange
[0] = offrange
[0] / eltsize
;
4780 offrange
[1] = offrange
[1] / eltsize
;
4784 if (offrange
[0] == offrange
[1])
4785 warned
= warning_at (location
, OPT_Warray_bounds
,
4786 "array subscript %wi is outside array bounds "
4788 offrange
[0].to_shwi (), reftype
);
4790 warned
= warning_at (location
, OPT_Warray_bounds
,
4791 "array subscript [%wi, %wi] is outside "
4792 "array bounds of %qT",
4793 offrange
[0].to_shwi (),
4794 offrange
[1].to_shwi (), reftype
);
4795 if (warned
&& DECL_P (arg
))
4796 inform (DECL_SOURCE_LOCATION (arg
), "while referencing %qD", arg
);
4799 TREE_NO_WARNING (ref
) = 1;
  if (warn_array_bounds < 2)
    return;

  /* At level 2 check also intermediate offsets.  */
  int i = 0;

  if (extrema[i] < -arrbounds[1] || extrema[i = 1] > ubound)
    {
      HOST_WIDE_INT tmpidx = extrema[i].to_shwi () / eltsize.to_shwi ();

      if (warning_at (location, OPT_Warray_bounds,
		      "intermediate array offset %wi is outside array bounds "
		      "of %qT", tmpidx, reftype))
	TREE_NO_WARNING (ref) = 1;
    }
}
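/* Illustrative example, again user-level code kept out of the build: an
   out-of-bounds offset from a string constant is the typical MEM_REF case
   the checking above is meant to catch (the exact diagnostic depends on how
   earlier passes fold the access).  */
#if 0
char
example_string_oob (void)
{
  const char *p = "abc";
  return p[7];		/* Offset 7 is outside the 4 bytes of "abc".  */
}
#endif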
4819 /* Searches if the expr T, located at LOCATION, computes the
4820 address of an ARRAY_REF, and calls check_array_ref on it. */
4823 vrp_prop::search_for_addr_array (tree t
, location_t location
)
4825 /* Check each ARRAY_REF and MEM_REF in the reference chain. */
4828 if (TREE_CODE (t
) == ARRAY_REF
)
4829 check_array_ref (location
, t
, true /*ignore_off_by_one*/);
4830 else if (TREE_CODE (t
) == MEM_REF
)
4831 check_mem_ref (location
, t
, true /*ignore_off_by_one*/);
4833 t
= TREE_OPERAND (t
, 0);
4835 while (handled_component_p (t
) || TREE_CODE (t
) == MEM_REF
);
4837 if (TREE_CODE (t
) != MEM_REF
4838 || TREE_CODE (TREE_OPERAND (t
, 0)) != ADDR_EXPR
4839 || TREE_NO_WARNING (t
))
4842 tree tem
= TREE_OPERAND (TREE_OPERAND (t
, 0), 0);
4843 tree low_bound
, up_bound
, el_sz
;
4844 if (TREE_CODE (TREE_TYPE (tem
)) != ARRAY_TYPE
4845 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem
))) == ARRAY_TYPE
4846 || !TYPE_DOMAIN (TREE_TYPE (tem
)))
4849 low_bound
= TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem
)));
4850 up_bound
= TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem
)));
4851 el_sz
= TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem
)));
4853 || TREE_CODE (low_bound
) != INTEGER_CST
4855 || TREE_CODE (up_bound
) != INTEGER_CST
4857 || TREE_CODE (el_sz
) != INTEGER_CST
)
4861 if (!mem_ref_offset (t
).is_constant (&idx
))
4864 bool warned
= false;
4865 idx
= wi::sdiv_trunc (idx
, wi::to_offset (el_sz
));
4868 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
4870 fprintf (dump_file
, "Array bound warning for ");
4871 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, t
);
4872 fprintf (dump_file
, "\n");
4874 warned
= warning_at (location
, OPT_Warray_bounds
,
4875 "array subscript %wi is below "
4876 "array bounds of %qT",
4877 idx
.to_shwi (), TREE_TYPE (tem
));
4879 else if (idx
> (wi::to_offset (up_bound
)
4880 - wi::to_offset (low_bound
) + 1))
4882 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
4884 fprintf (dump_file
, "Array bound warning for ");
4885 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, t
);
4886 fprintf (dump_file
, "\n");
4888 warned
= warning_at (location
, OPT_Warray_bounds
,
4889 "array subscript %wu is above "
4890 "array bounds of %qT",
4891 idx
.to_uhwi (), TREE_TYPE (tem
));
4897 inform (DECL_SOURCE_LOCATION (t
), "while referencing %qD", t
);
4899 TREE_NO_WARNING (t
) = 1;
4903 /* walk_tree() callback that checks if *TP is
4904 an ARRAY_REF inside an ADDR_EXPR (in which an array
4905 subscript one outside the valid range is allowed). Call
4906 check_array_ref for each ARRAY_REF found. The location is
4910 check_array_bounds (tree
*tp
, int *walk_subtree
, void *data
)
4913 struct walk_stmt_info
*wi
= (struct walk_stmt_info
*) data
;
4914 location_t location
;
4916 if (EXPR_HAS_LOCATION (t
))
4917 location
= EXPR_LOCATION (t
);
4919 location
= gimple_location (wi
->stmt
);
4921 *walk_subtree
= TRUE
;
4923 vrp_prop
*vrp_prop
= (class vrp_prop
*)wi
->info
;
4924 if (TREE_CODE (t
) == ARRAY_REF
)
4925 vrp_prop
->check_array_ref (location
, t
, false /*ignore_off_by_one*/);
4926 else if (TREE_CODE (t
) == MEM_REF
)
4927 vrp_prop
->check_mem_ref (location
, t
, false /*ignore_off_by_one*/);
4928 else if (TREE_CODE (t
) == ADDR_EXPR
)
4930 vrp_prop
->search_for_addr_array (t
, location
);
4931 *walk_subtree
= FALSE
;
4937 /* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
4938 to walk over all statements of all reachable BBs and call
4939 check_array_bounds on them. */
4941 class check_array_bounds_dom_walker
: public dom_walker
4944 check_array_bounds_dom_walker (vrp_prop
*prop
)
4945 : dom_walker (CDI_DOMINATORS
,
4946 /* Discover non-executable edges, preserving EDGE_EXECUTABLE
4947 flags, so that we can merge in information on
4948 non-executable edges from vrp_folder . */
4949 REACHABLE_BLOCKS_PRESERVING_FLAGS
),
4951 ~check_array_bounds_dom_walker () {}
4953 edge
before_dom_children (basic_block
) FINAL OVERRIDE
;
4959 /* Implementation of dom_walker::before_dom_children.
4961 Walk over all statements of BB and call check_array_bounds on them,
4962 and determine if there's a unique successor edge. */
4965 check_array_bounds_dom_walker::before_dom_children (basic_block bb
)
4967 gimple_stmt_iterator si
;
4968 for (si
= gsi_start_bb (bb
); !gsi_end_p (si
); gsi_next (&si
))
4970 gimple
*stmt
= gsi_stmt (si
);
4971 struct walk_stmt_info wi
;
4972 if (!gimple_has_location (stmt
)
4973 || is_gimple_debug (stmt
))
4976 memset (&wi
, 0, sizeof (wi
));
4980 walk_gimple_op (stmt
, check_array_bounds
, &wi
);
4983 /* Determine if there's a unique successor edge, and if so, return
4984 that back to dom_walker, ensuring that we don't visit blocks that
4985 became unreachable during the VRP propagation
4986 (PR tree-optimization/83312). */
4987 return find_taken_edge (bb
, NULL_TREE
);
4990 /* Walk over all statements of all reachable BBs and call check_array_bounds
4994 vrp_prop::check_all_array_refs ()
4996 check_array_bounds_dom_walker
w (this);
4997 w
.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun
));
/* Return true if all imm uses of VAR are either in STMT, or
   feed (optionally through a chain of single imm uses) GIMPLE_COND
   in basic block COND_BB.  */

static bool
all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
{
  use_operand_p use_p, use2_p;
  imm_use_iterator iter;

  FOR_EACH_IMM_USE_FAST (use_p, iter, var)
    if (USE_STMT (use_p) != stmt)
      {
	gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
	if (is_gimple_debug (use_stmt))
	  continue;
	while (is_gimple_assign (use_stmt)
	       && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
	       && single_imm_use (gimple_assign_lhs (use_stmt),
				  &use2_p, &use_stmt2))
	  use_stmt = use_stmt2;
	if (gimple_code (use_stmt) != GIMPLE_COND
	    || gimple_bb (use_stmt) != cond_bb)
	  return false;
      }
  return true;
}
/* Handle
   _4 = x_3 & 31;
   if (_4 != 0)
     goto <bb 6>;
   else
     goto <bb 7>;
   <bb 6>:
   __builtin_unreachable ();
   <bb 7>:
   x_5 = ASSERT_EXPR <x_3, ...>;
   If x_3 has no other immediate uses (checked by caller),
   var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
   from the non-zero bitmask.  */

static void
maybe_set_nonzero_bits (edge e, tree var)
{
  basic_block cond_bb = e->src;
  gimple *stmt = last_stmt (cond_bb);
  tree cst;

  if (stmt == NULL
      || gimple_code (stmt) != GIMPLE_COND
      || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
				     ? EQ_EXPR : NE_EXPR)
      || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
      || !integer_zerop (gimple_cond_rhs (stmt)))
    return;

  stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
  if (!is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
      || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
    return;
  if (gimple_assign_rhs1 (stmt) != var)
    {
      gimple *stmt2;

      if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
	return;
      stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
      if (!gimple_assign_cast_p (stmt2)
	  || gimple_assign_rhs1 (stmt2) != var
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
	      != TYPE_PRECISION (TREE_TYPE (var))))
	return;
    }
  cst = gimple_assign_rhs2 (stmt);
  set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
					  wi::to_wide (cst)));
}
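/* Illustrative example of source code that produces the GIMPLE pattern
   handled above; user-level code kept out of the build, an assumption for
   documentation purposes rather than part of this file.  */
#if 0
unsigned
example_aligned_divide (unsigned n)
{
  if (n & 31)
    __builtin_unreachable ();	/* Promise: N is a multiple of 32.  */
  return n % 32;		/* With the low five nonzero bits cleared,
				   later bit-level passes can fold this
				   to 0.  */
}
#endif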
5081 /* Convert range assertion expressions into the implied copies and
5082 copy propagate away the copies. Doing the trivial copy propagation
5083 here avoids the need to run the full copy propagation pass after
5086 FIXME, this will eventually lead to copy propagation removing the
5087 names that had useful range information attached to them. For
5088 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5089 then N_i will have the range [3, +INF].
5091 However, by converting the assertion into the implied copy
5092 operation N_i = N_j, we will then copy-propagate N_j into the uses
5093 of N_i and lose the range information. We may want to hold on to
5094 ASSERT_EXPRs a little while longer as the ranges could be used in
5095 things like jump threading.
5097 The problem with keeping ASSERT_EXPRs around is that passes after
5098 VRP need to handle them appropriately.
5100 Another approach would be to make the range information a first
5101 class property of the SSA_NAME so that it can be queried from
5102 any pass. This is made somewhat more complex by the need for
5103 multiple ranges to be associated with one SSA_NAME. */
5106 remove_range_assertions (void)
5109 gimple_stmt_iterator si
;
5110 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
5111 a basic block preceded by GIMPLE_COND branching to it and
5112 __builtin_trap, -1 if not yet checked, 0 otherwise. */
5115 /* Note that the BSI iterator bump happens at the bottom of the
5116 loop and no bump is necessary if we're removing the statement
5117 referenced by the current BSI. */
5118 FOR_EACH_BB_FN (bb
, cfun
)
5119 for (si
= gsi_after_labels (bb
), is_unreachable
= -1; !gsi_end_p (si
);)
5121 gimple
*stmt
= gsi_stmt (si
);
5123 if (is_gimple_assign (stmt
)
5124 && gimple_assign_rhs_code (stmt
) == ASSERT_EXPR
)
5126 tree lhs
= gimple_assign_lhs (stmt
);
5127 tree rhs
= gimple_assign_rhs1 (stmt
);
5130 var
= ASSERT_EXPR_VAR (rhs
);
5132 if (TREE_CODE (var
) == SSA_NAME
5133 && !POINTER_TYPE_P (TREE_TYPE (lhs
))
5134 && SSA_NAME_RANGE_INFO (lhs
))
5136 if (is_unreachable
== -1)
5139 if (single_pred_p (bb
)
5140 && assert_unreachable_fallthru_edge_p
5141 (single_pred_edge (bb
)))
5145 if (x_7 >= 10 && x_7 < 20)
5146 __builtin_unreachable ();
5147 x_8 = ASSERT_EXPR <x_7, ...>;
5148 if the only uses of x_7 are in the ASSERT_EXPR and
5149 in the condition. In that case, we can copy the
5150 range info from x_8 computed in this pass also
5153 && all_imm_uses_in_stmt_or_feed_cond (var
, stmt
,
5156 set_range_info (var
, SSA_NAME_RANGE_TYPE (lhs
),
5157 SSA_NAME_RANGE_INFO (lhs
)->get_min (),
5158 SSA_NAME_RANGE_INFO (lhs
)->get_max ());
5159 maybe_set_nonzero_bits (single_pred_edge (bb
), var
);
5163 /* Propagate the RHS into every use of the LHS. For SSA names
5164 also propagate abnormals as it merely restores the original
5165 IL in this case (a replace_uses_by would assert). */
5166 if (TREE_CODE (var
) == SSA_NAME
)
5168 imm_use_iterator iter
;
5169 use_operand_p use_p
;
5171 FOR_EACH_IMM_USE_STMT (use_stmt
, iter
, lhs
)
5172 FOR_EACH_IMM_USE_ON_STMT (use_p
, iter
)
5173 SET_USE (use_p
, var
);
5176 replace_uses_by (lhs
, var
);
5178 /* And finally, remove the copy, it is not needed. */
5179 gsi_remove (&si
, true);
5180 release_defs (stmt
);
5184 if (!is_gimple_debug (gsi_stmt (si
)))
/* Return true if STMT is interesting for VRP.  */

bool
stmt_interesting_for_vrp (gimple *stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      tree res = gimple_phi_result (stmt);
      return (!virtual_operand_p (res)
	      && (INTEGRAL_TYPE_P (TREE_TYPE (res))
		  || POINTER_TYPE_P (TREE_TYPE (res))));
    }
  else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
    {
      tree lhs = gimple_get_lhs (stmt);

      /* In general, assignments with virtual operands are not useful
	 for deriving ranges, with the obvious exception of calls to
	 builtin functions.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME
	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
	  && (is_gimple_call (stmt)
	      || !gimple_vuse (stmt)))
	return true;
      else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_ADD_OVERFLOW:
	  case IFN_SUB_OVERFLOW:
	  case IFN_MUL_OVERFLOW:
	  case IFN_ATOMIC_COMPARE_EXCHANGE:
	    /* These internal calls return _Complex integer type,
	       but are interesting to VRP nevertheless.  */
	    if (lhs && TREE_CODE (lhs) == SSA_NAME)
	      return true;
	    break;
	  default:
	    break;
	  }
    }
  else if (gimple_code (stmt) == GIMPLE_COND
	   || gimple_code (stmt) == GIMPLE_SWITCH)
    return true;

  return false;
}
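/* Illustrative example, user-level code kept out of the build: calls such
   as __builtin_add_overflow become IFN_ADD_OVERFLOW returning a _Complex
   integer pair, which the function above deliberately still reports as
   interesting so that the REALPART/IMAGPART uses can be given ranges.  */
#if 0
bool
example_add_overflow (int a, int b, int *res)
{
  return __builtin_add_overflow (a, b, res);
}
#endif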
/* Initialization required by the ssa_propagate engine.  */

void
vrp_prop::vrp_initialize ()
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  gphi *phi = si.phi ();
	  if (!stmt_interesting_for_vrp (phi))
	    {
	      tree lhs = PHI_RESULT (phi);
	      set_def_to_varying (lhs);
	      prop_set_simulate_again (phi, false);
	    }
	  else
	    prop_set_simulate_again (phi, true);
	}

      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);

	  /* If the statement is a control insn, we must simulate it at
	     least once; failing to do so means its outgoing edges would
	     never get added.  */
	  if (stmt_ends_bb_p (stmt))
	    prop_set_simulate_again (stmt, true);
	  else if (!stmt_interesting_for_vrp (stmt))
	    {
	      set_defs_to_varying (stmt);
	      prop_set_simulate_again (stmt, false);
	    }
	  else
	    prop_set_simulate_again (stmt, true);
	}
    }
}
5283 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
5284 that includes the value VAL. The search is restricted to the range
5285 [START_IDX, n - 1] where n is the size of VEC.
5287 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
5290 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
5291 it is placed in IDX and false is returned.
5293 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
5297 find_case_label_index (gswitch
*stmt
, size_t start_idx
, tree val
, size_t *idx
)
5299 size_t n
= gimple_switch_num_labels (stmt
);
5302 /* Find case label for minimum of the value range or the next one.
5303 At each iteration we are searching in [low, high - 1]. */
5305 for (low
= start_idx
, high
= n
; high
!= low
; )
5309 /* Note that i != high, so we never ask for n. */
5310 size_t i
= (high
+ low
) / 2;
5311 t
= gimple_switch_label (stmt
, i
);
5313 /* Cache the result of comparing CASE_LOW and val. */
5314 cmp
= tree_int_cst_compare (CASE_LOW (t
), val
);
5318 /* Ranges cannot be empty. */
5327 if (CASE_HIGH (t
) != NULL
5328 && tree_int_cst_compare (CASE_HIGH (t
), val
) >= 0)
5340 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
5341 for values between MIN and MAX. The first index is placed in MIN_IDX. The
5342 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
5343 then MAX_IDX < MIN_IDX.
5344 Returns true if the default label is not needed. */
5347 find_case_label_range (gswitch
*stmt
, tree min
, tree max
, size_t *min_idx
,
5351 bool min_take_default
= !find_case_label_index (stmt
, 1, min
, &i
);
5352 bool max_take_default
= !find_case_label_index (stmt
, i
, max
, &j
);
5356 && max_take_default
)
5358 /* Only the default case label reached.
5359 Return an empty range. */
5366 bool take_default
= min_take_default
|| max_take_default
;
5370 if (max_take_default
)
5373 /* If the case label range is continuous, we do not need
5374 the default case label. Verify that. */
5375 high
= CASE_LOW (gimple_switch_label (stmt
, i
));
5376 if (CASE_HIGH (gimple_switch_label (stmt
, i
)))
5377 high
= CASE_HIGH (gimple_switch_label (stmt
, i
));
5378 for (k
= i
+ 1; k
<= j
; ++k
)
5380 low
= CASE_LOW (gimple_switch_label (stmt
, k
));
5381 if (!integer_onep (int_const_binop (MINUS_EXPR
, low
, high
)))
5383 take_default
= true;
5387 if (CASE_HIGH (gimple_switch_label (stmt
, k
)))
5388 high
= CASE_HIGH (gimple_switch_label (stmt
, k
));
5393 return !take_default
;
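/* Illustrative example, user-level code kept out of the build: if X is
   already known to be in [1, 3], the case labels below form one contiguous
   range, so find_case_label_range can report that the default label is
   not needed.  */
#if 0
int
example_dense_switch (int x)
{
  switch (x)	/* Assume VRP proved 1 <= x && x <= 3 here.  */
    {
    case 1: return 10;
    case 2: return 20;
    case 3: return 30;
    default: return -1;		/* Unreachable given the range of X.  */
    }
}
#endif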
5397 /* Evaluate statement STMT. If the statement produces a useful range,
5398 return SSA_PROP_INTERESTING and record the SSA name with the
5399 interesting range into *OUTPUT_P.
5401 If STMT is a conditional branch and we can determine its truth
5402 value, the taken edge is recorded in *TAKEN_EDGE_P.
5404 If STMT produces a varying value, return SSA_PROP_VARYING. */
5406 enum ssa_prop_result
5407 vrp_prop::visit_stmt (gimple
*stmt
, edge
*taken_edge_p
, tree
*output_p
)
5409 tree lhs
= gimple_get_lhs (stmt
);
5411 extract_range_from_stmt (stmt
, taken_edge_p
, output_p
, &vr
);
5415 if (update_value_range (*output_p
, &vr
))
5417 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5419 fprintf (dump_file
, "Found new range for ");
5420 print_generic_expr (dump_file
, *output_p
);
5421 fprintf (dump_file
, ": ");
5422 dump_value_range (dump_file
, &vr
);
5423 fprintf (dump_file
, "\n");
5426 if (vr
.varying_p ())
5427 return SSA_PROP_VARYING
;
5429 return SSA_PROP_INTERESTING
;
5431 return SSA_PROP_NOT_INTERESTING
;
5434 if (is_gimple_call (stmt
) && gimple_call_internal_p (stmt
))
5435 switch (gimple_call_internal_fn (stmt
))
5437 case IFN_ADD_OVERFLOW
:
5438 case IFN_SUB_OVERFLOW
:
5439 case IFN_MUL_OVERFLOW
:
5440 case IFN_ATOMIC_COMPARE_EXCHANGE
:
5441 /* These internal calls return _Complex integer type,
5442 which VRP does not track, but the immediate uses
5443 thereof might be interesting. */
5444 if (lhs
&& TREE_CODE (lhs
) == SSA_NAME
)
5446 imm_use_iterator iter
;
5447 use_operand_p use_p
;
5448 enum ssa_prop_result res
= SSA_PROP_VARYING
;
5450 set_def_to_varying (lhs
);
5452 FOR_EACH_IMM_USE_FAST (use_p
, iter
, lhs
)
5454 gimple
*use_stmt
= USE_STMT (use_p
);
5455 if (!is_gimple_assign (use_stmt
))
5457 enum tree_code rhs_code
= gimple_assign_rhs_code (use_stmt
);
5458 if (rhs_code
!= REALPART_EXPR
&& rhs_code
!= IMAGPART_EXPR
)
5460 tree rhs1
= gimple_assign_rhs1 (use_stmt
);
5461 tree use_lhs
= gimple_assign_lhs (use_stmt
);
5462 if (TREE_CODE (rhs1
) != rhs_code
5463 || TREE_OPERAND (rhs1
, 0) != lhs
5464 || TREE_CODE (use_lhs
) != SSA_NAME
5465 || !stmt_interesting_for_vrp (use_stmt
)
5466 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs
))
5467 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs
))
5468 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs
))))
5471 /* If there is a change in the value range for any of the
5472 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
5473 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
5474 or IMAGPART_EXPR immediate uses, but none of them have
5475 a change in their value ranges, return
5476 SSA_PROP_NOT_INTERESTING. If there are no
5477 {REAL,IMAG}PART_EXPR uses at all,
5478 return SSA_PROP_VARYING. */
5480 extract_range_basic (&new_vr
, use_stmt
);
5481 const value_range
*old_vr
= get_value_range (use_lhs
);
5482 if (!old_vr
->equal_p (new_vr
, /*ignore_equivs=*/false))
5483 res
= SSA_PROP_INTERESTING
;
5485 res
= SSA_PROP_NOT_INTERESTING
;
5486 new_vr
.equiv_clear ();
5487 if (res
== SSA_PROP_INTERESTING
)
5501 /* All other statements produce nothing of interest for VRP, so mark
5502 their outputs varying and prevent further simulation. */
5503 set_defs_to_varying (stmt
);
5505 return (*taken_edge_p
) ? SSA_PROP_INTERESTING
: SSA_PROP_VARYING
;
5508 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5509 { VR1TYPE, VR1MIN, VR1MAX } and store the result
5510 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5511 possible such range. The resulting range is not canonicalized. */
5514 union_ranges (enum value_range_kind
*vr0type
,
5515 tree
*vr0min
, tree
*vr0max
,
5516 enum value_range_kind vr1type
,
5517 tree vr1min
, tree vr1max
)
5519 int cmpmin
= compare_values (*vr0min
, vr1min
);
5520 int cmpmax
= compare_values (*vr0max
, vr1max
);
5521 bool mineq
= cmpmin
== 0;
5522 bool maxeq
= cmpmax
== 0;
5524 /* [] is vr0, () is vr1 in the following classification comments. */
5528 if (*vr0type
== vr1type
)
5529 /* Nothing to do for equal ranges. */
5531 else if ((*vr0type
== VR_RANGE
5532 && vr1type
== VR_ANTI_RANGE
)
5533 || (*vr0type
== VR_ANTI_RANGE
5534 && vr1type
== VR_RANGE
))
5536 /* For anti-range with range union the result is varying. */
5542 else if (operand_less_p (*vr0max
, vr1min
) == 1
5543 || operand_less_p (vr1max
, *vr0min
) == 1)
5545 /* [ ] ( ) or ( ) [ ]
5546 If the ranges have an empty intersection, result of the union
5547 operation is the anti-range or if both are anti-ranges
5549 if (*vr0type
== VR_ANTI_RANGE
5550 && vr1type
== VR_ANTI_RANGE
)
5552 else if (*vr0type
== VR_ANTI_RANGE
5553 && vr1type
== VR_RANGE
)
5555 else if (*vr0type
== VR_RANGE
5556 && vr1type
== VR_ANTI_RANGE
)
5562 else if (*vr0type
== VR_RANGE
5563 && vr1type
== VR_RANGE
)
5565 /* The result is the convex hull of both ranges. */
5566 if (operand_less_p (*vr0max
, vr1min
) == 1)
5568 /* If the result can be an anti-range, create one. */
5569 if (TREE_CODE (*vr0max
) == INTEGER_CST
5570 && TREE_CODE (vr1min
) == INTEGER_CST
5571 && vrp_val_is_min (*vr0min
)
5572 && vrp_val_is_max (vr1max
))
5574 tree min
= int_const_binop (PLUS_EXPR
,
5576 build_int_cst (TREE_TYPE (*vr0max
), 1));
5577 tree max
= int_const_binop (MINUS_EXPR
,
5579 build_int_cst (TREE_TYPE (vr1min
), 1));
5580 if (!operand_less_p (max
, min
))
5582 *vr0type
= VR_ANTI_RANGE
;
5594 /* If the result can be an anti-range, create one. */
5595 if (TREE_CODE (vr1max
) == INTEGER_CST
5596 && TREE_CODE (*vr0min
) == INTEGER_CST
5597 && vrp_val_is_min (vr1min
)
5598 && vrp_val_is_max (*vr0max
))
5600 tree min
= int_const_binop (PLUS_EXPR
,
5602 build_int_cst (TREE_TYPE (vr1max
), 1));
5603 tree max
= int_const_binop (MINUS_EXPR
,
5605 build_int_cst (TREE_TYPE (*vr0min
), 1));
5606 if (!operand_less_p (max
, min
))
5608 *vr0type
= VR_ANTI_RANGE
;
5622 else if ((maxeq
|| cmpmax
== 1)
5623 && (mineq
|| cmpmin
== -1))
5625 /* [ ( ) ] or [( ) ] or [ ( )] */
5626 if (*vr0type
== VR_RANGE
5627 && vr1type
== VR_RANGE
)
5629 else if (*vr0type
== VR_ANTI_RANGE
5630 && vr1type
== VR_ANTI_RANGE
)
5636 else if (*vr0type
== VR_ANTI_RANGE
5637 && vr1type
== VR_RANGE
)
5639 /* Arbitrarily choose the right or left gap. */
5640 if (!mineq
&& TREE_CODE (vr1min
) == INTEGER_CST
)
5641 *vr0max
= int_const_binop (MINUS_EXPR
, vr1min
,
5642 build_int_cst (TREE_TYPE (vr1min
), 1));
5643 else if (!maxeq
&& TREE_CODE (vr1max
) == INTEGER_CST
)
5644 *vr0min
= int_const_binop (PLUS_EXPR
, vr1max
,
5645 build_int_cst (TREE_TYPE (vr1max
), 1));
5649 else if (*vr0type
== VR_RANGE
5650 && vr1type
== VR_ANTI_RANGE
)
5651 /* The result covers everything. */
5656 else if ((maxeq
|| cmpmax
== -1)
5657 && (mineq
|| cmpmin
== 1))
5659 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5660 if (*vr0type
== VR_RANGE
5661 && vr1type
== VR_RANGE
)
5667 else if (*vr0type
== VR_ANTI_RANGE
5668 && vr1type
== VR_ANTI_RANGE
)
5670 else if (*vr0type
== VR_RANGE
5671 && vr1type
== VR_ANTI_RANGE
)
5673 *vr0type
= VR_ANTI_RANGE
;
5674 if (!mineq
&& TREE_CODE (*vr0min
) == INTEGER_CST
)
5676 *vr0max
= int_const_binop (MINUS_EXPR
, *vr0min
,
5677 build_int_cst (TREE_TYPE (*vr0min
), 1));
5680 else if (!maxeq
&& TREE_CODE (*vr0max
) == INTEGER_CST
)
5682 *vr0min
= int_const_binop (PLUS_EXPR
, *vr0max
,
5683 build_int_cst (TREE_TYPE (*vr0max
), 1));
5689 else if (*vr0type
== VR_ANTI_RANGE
5690 && vr1type
== VR_RANGE
)
5691 /* The result covers everything. */
5696 else if (cmpmin
== -1
5698 && (operand_less_p (vr1min
, *vr0max
) == 1
5699 || operand_equal_p (vr1min
, *vr0max
, 0)))
5701 /* [ ( ] ) or [ ]( ) */
5702 if (*vr0type
== VR_RANGE
5703 && vr1type
== VR_RANGE
)
5705 else if (*vr0type
== VR_ANTI_RANGE
5706 && vr1type
== VR_ANTI_RANGE
)
5708 else if (*vr0type
== VR_ANTI_RANGE
5709 && vr1type
== VR_RANGE
)
5711 if (TREE_CODE (vr1min
) == INTEGER_CST
)
5712 *vr0max
= int_const_binop (MINUS_EXPR
, vr1min
,
5713 build_int_cst (TREE_TYPE (vr1min
), 1));
5717 else if (*vr0type
== VR_RANGE
5718 && vr1type
== VR_ANTI_RANGE
)
5720 if (TREE_CODE (*vr0max
) == INTEGER_CST
)
5723 *vr0min
= int_const_binop (PLUS_EXPR
, *vr0max
,
5724 build_int_cst (TREE_TYPE (*vr0max
), 1));
5733 else if (cmpmin
== 1
5735 && (operand_less_p (*vr0min
, vr1max
) == 1
5736 || operand_equal_p (*vr0min
, vr1max
, 0)))
5738 /* ( [ ) ] or ( )[ ] */
5739 if (*vr0type
== VR_RANGE
5740 && vr1type
== VR_RANGE
)
5742 else if (*vr0type
== VR_ANTI_RANGE
5743 && vr1type
== VR_ANTI_RANGE
)
5745 else if (*vr0type
== VR_ANTI_RANGE
5746 && vr1type
== VR_RANGE
)
5748 if (TREE_CODE (vr1max
) == INTEGER_CST
)
5749 *vr0min
= int_const_binop (PLUS_EXPR
, vr1max
,
5750 build_int_cst (TREE_TYPE (vr1max
), 1));
5754 else if (*vr0type
== VR_RANGE
5755 && vr1type
== VR_ANTI_RANGE
)
5757 if (TREE_CODE (*vr0min
) == INTEGER_CST
)
5760 *vr0max
= int_const_binop (MINUS_EXPR
, *vr0min
,
5761 build_int_cst (TREE_TYPE (*vr0min
), 1));
5776 *vr0type
= VR_VARYING
;
5777 *vr0min
= NULL_TREE
;
5778 *vr0max
= NULL_TREE
;
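/* Illustrative sketch, not part of the pass: the effect of union_ranges on
   two disjoint constant ranges.  The values are made up for the example.
   Since neither input touches the extremes of the type, the gap cannot be
   represented as an anti-range and the result is the convex hull.  */

static void ATTRIBUTE_UNUSED
union_ranges_usage_sketch (void)
{
  value_range_kind kind = VR_RANGE;
  tree min = build_int_cst (integer_type_node, 1);
  tree max = build_int_cst (integer_type_node, 5);

  union_ranges (&kind, &min, &max,
		VR_RANGE,
		build_int_cst (integer_type_node, 10),
		build_int_cst (integer_type_node, 20));
  /* Now kind == VR_RANGE and [min, max] == [1, 20].  */
}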
5781 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5782 { VR1TYPE, VR1MIN, VR1MAX } and store the result
5783 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5784 possible such range. The resulting range is not canonicalized. */
5787 intersect_ranges (enum value_range_kind
*vr0type
,
5788 tree
*vr0min
, tree
*vr0max
,
5789 enum value_range_kind vr1type
,
5790 tree vr1min
, tree vr1max
)
5792 bool mineq
= vrp_operand_equal_p (*vr0min
, vr1min
);
5793 bool maxeq
= vrp_operand_equal_p (*vr0max
, vr1max
);
5795 /* [] is vr0, () is vr1 in the following classification comments. */
5799 if (*vr0type
== vr1type
)
5800 /* Nothing to do for equal ranges. */
5802 else if ((*vr0type
== VR_RANGE
5803 && vr1type
== VR_ANTI_RANGE
)
5804 || (*vr0type
== VR_ANTI_RANGE
5805 && vr1type
== VR_RANGE
))
5807 /* For anti-range with range intersection the result is empty. */
5808 *vr0type
= VR_UNDEFINED
;
5809 *vr0min
= NULL_TREE
;
5810 *vr0max
= NULL_TREE
;
5815 else if (operand_less_p (*vr0max
, vr1min
) == 1
5816 || operand_less_p (vr1max
, *vr0min
) == 1)
5818 /* [ ] ( ) or ( ) [ ]
5819 If the ranges have an empty intersection, the result of the
5820 intersect operation is the range for intersecting an
5821 anti-range with a range or empty when intersecting two ranges. */
5822 if (*vr0type
== VR_RANGE
5823 && vr1type
== VR_ANTI_RANGE
)
5825 else if (*vr0type
== VR_ANTI_RANGE
5826 && vr1type
== VR_RANGE
)
5832 else if (*vr0type
== VR_RANGE
5833 && vr1type
== VR_RANGE
)
5835 *vr0type
= VR_UNDEFINED
;
5836 *vr0min
= NULL_TREE
;
5837 *vr0max
= NULL_TREE
;
5839 else if (*vr0type
== VR_ANTI_RANGE
5840 && vr1type
== VR_ANTI_RANGE
)
5842 /* If the anti-ranges are adjacent to each other merge them. */
5843 if (TREE_CODE (*vr0max
) == INTEGER_CST
5844 && TREE_CODE (vr1min
) == INTEGER_CST
5845 && operand_less_p (*vr0max
, vr1min
) == 1
5846 && integer_onep (int_const_binop (MINUS_EXPR
,
5849 else if (TREE_CODE (vr1max
) == INTEGER_CST
5850 && TREE_CODE (*vr0min
) == INTEGER_CST
5851 && operand_less_p (vr1max
, *vr0min
) == 1
5852 && integer_onep (int_const_binop (MINUS_EXPR
,
5855 /* Else arbitrarily take VR0. */
5858 else if ((maxeq
|| operand_less_p (vr1max
, *vr0max
) == 1)
5859 && (mineq
|| operand_less_p (*vr0min
, vr1min
) == 1))
5861 /* [ ( ) ] or [( ) ] or [ ( )] */
5862 if (*vr0type
== VR_RANGE
5863 && vr1type
== VR_RANGE
)
5865 /* If both are ranges the result is the inner one. */
5870 else if (*vr0type
== VR_RANGE
5871 && vr1type
== VR_ANTI_RANGE
)
5873 /* Choose the right gap if the left one is empty. */
5876 if (TREE_CODE (vr1max
) != INTEGER_CST
)
5878 else if (TYPE_PRECISION (TREE_TYPE (vr1max
)) == 1
5879 && !TYPE_UNSIGNED (TREE_TYPE (vr1max
)))
5881 = int_const_binop (MINUS_EXPR
, vr1max
,
5882 build_int_cst (TREE_TYPE (vr1max
), -1));
5885 = int_const_binop (PLUS_EXPR
, vr1max
,
5886 build_int_cst (TREE_TYPE (vr1max
), 1));
5888 /* Choose the left gap if the right one is empty. */
5891 if (TREE_CODE (vr1min
) != INTEGER_CST
)
5893 else if (TYPE_PRECISION (TREE_TYPE (vr1min
)) == 1
5894 && !TYPE_UNSIGNED (TREE_TYPE (vr1min
)))
5896 = int_const_binop (PLUS_EXPR
, vr1min
,
5897 build_int_cst (TREE_TYPE (vr1min
), -1));
5900 = int_const_binop (MINUS_EXPR
, vr1min
,
5901 build_int_cst (TREE_TYPE (vr1min
), 1));
5903 /* Choose the anti-range if the range is effectively varying. */
5904 else if (vrp_val_is_min (*vr0min
)
5905 && vrp_val_is_max (*vr0max
))
5911 /* Else choose the range. */
5913 else if (*vr0type
== VR_ANTI_RANGE
5914 && vr1type
== VR_ANTI_RANGE
)
5915 /* If both are anti-ranges the result is the outer one. */
5917 else if (*vr0type
== VR_ANTI_RANGE
5918 && vr1type
== VR_RANGE
)
5920 /* The intersection is empty. */
5921 *vr0type
= VR_UNDEFINED
;
5922 *vr0min
= NULL_TREE
;
5923 *vr0max
= NULL_TREE
;
5928 else if ((maxeq
|| operand_less_p (*vr0max
, vr1max
) == 1)
5929 && (mineq
|| operand_less_p (vr1min
, *vr0min
) == 1))
5931 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5932 if (*vr0type
== VR_RANGE
5933 && vr1type
== VR_RANGE
)
5934 /* Choose the inner range. */
5936 else if (*vr0type
== VR_ANTI_RANGE
5937 && vr1type
== VR_RANGE
)
5939 /* Choose the right gap if the left is empty. */
5942 *vr0type
= VR_RANGE
;
5943 if (TREE_CODE (*vr0max
) != INTEGER_CST
)
5945 else if (TYPE_PRECISION (TREE_TYPE (*vr0max
)) == 1
5946 && !TYPE_UNSIGNED (TREE_TYPE (*vr0max
)))
5948 = int_const_binop (MINUS_EXPR
, *vr0max
,
5949 build_int_cst (TREE_TYPE (*vr0max
), -1));
5952 = int_const_binop (PLUS_EXPR
, *vr0max
,
5953 build_int_cst (TREE_TYPE (*vr0max
), 1));
5956 /* Choose the left gap if the right is empty. */
5959 *vr0type
= VR_RANGE
;
5960 if (TREE_CODE (*vr0min
) != INTEGER_CST
)
5962 else if (TYPE_PRECISION (TREE_TYPE (*vr0min
)) == 1
5963 && !TYPE_UNSIGNED (TREE_TYPE (*vr0min
)))
5965 = int_const_binop (PLUS_EXPR
, *vr0min
,
5966 build_int_cst (TREE_TYPE (*vr0min
), -1));
5969 = int_const_binop (MINUS_EXPR
, *vr0min
,
5970 build_int_cst (TREE_TYPE (*vr0min
), 1));
5973 /* Choose the anti-range if the range is effectively varying. */
5974 else if (vrp_val_is_min (vr1min
)
5975 && vrp_val_is_max (vr1max
))
5977 /* Choose the anti-range if it is ~[0,0], that range is special
5978 enough to special case when vr1's range is relatively wide.
5979 At least for types bigger than int - this covers pointers
5980 and arguments to functions like ctz. */
5981 else if (*vr0min
== *vr0max
5982 && integer_zerop (*vr0min
)
5983 && ((TYPE_PRECISION (TREE_TYPE (*vr0min
))
5984 >= TYPE_PRECISION (integer_type_node
))
5985 || POINTER_TYPE_P (TREE_TYPE (*vr0min
)))
5986 && TREE_CODE (vr1max
) == INTEGER_CST
5987 && TREE_CODE (vr1min
) == INTEGER_CST
5988 && (wi::clz (wi::to_wide (vr1max
) - wi::to_wide (vr1min
))
5989 < TYPE_PRECISION (TREE_TYPE (*vr0min
)) / 2))
5991 /* Else choose the range. */
5999 else if (*vr0type
== VR_ANTI_RANGE
6000 && vr1type
== VR_ANTI_RANGE
)
6002 /* If both are anti-ranges the result is the outer one. */
6007 else if (vr1type
== VR_ANTI_RANGE
6008 && *vr0type
== VR_RANGE
)
6010 /* The intersection is empty. */
6011 *vr0type
= VR_UNDEFINED
;
6012 *vr0min
= NULL_TREE
;
6013 *vr0max
= NULL_TREE
;
6018 else if ((operand_less_p (vr1min
, *vr0max
) == 1
6019 || operand_equal_p (vr1min
, *vr0max
, 0))
6020 && operand_less_p (*vr0min
, vr1min
) == 1)
6022 /* [ ( ] ) or [ ]( ) */
6023 if (*vr0type
== VR_ANTI_RANGE
6024 && vr1type
== VR_ANTI_RANGE
)
6026 else if (*vr0type
== VR_RANGE
6027 && vr1type
== VR_RANGE
)
6029 else if (*vr0type
== VR_RANGE
6030 && vr1type
== VR_ANTI_RANGE
)
6032 if (TREE_CODE (vr1min
) == INTEGER_CST
)
6033 *vr0max
= int_const_binop (MINUS_EXPR
, vr1min
,
6034 build_int_cst (TREE_TYPE (vr1min
), 1));
6038 else if (*vr0type
== VR_ANTI_RANGE
6039 && vr1type
== VR_RANGE
)
6041 *vr0type
= VR_RANGE
;
6042 if (TREE_CODE (*vr0max
) == INTEGER_CST
)
6043 *vr0min
= int_const_binop (PLUS_EXPR
, *vr0max
,
6044 build_int_cst (TREE_TYPE (*vr0max
), 1));
6052 else if ((operand_less_p (*vr0min
, vr1max
) == 1
6053 || operand_equal_p (*vr0min
, vr1max
, 0))
6054 && operand_less_p (vr1min
, *vr0min
) == 1)
6056 /* ( [ ) ] or ( )[ ] */
6057 if (*vr0type
== VR_ANTI_RANGE
6058 && vr1type
== VR_ANTI_RANGE
)
6060 else if (*vr0type
== VR_RANGE
6061 && vr1type
== VR_RANGE
)
6063 else if (*vr0type
== VR_RANGE
6064 && vr1type
== VR_ANTI_RANGE
)
6066 if (TREE_CODE (vr1max
) == INTEGER_CST
)
6067 *vr0min
= int_const_binop (PLUS_EXPR
, vr1max
,
6068 build_int_cst (TREE_TYPE (vr1max
), 1));
6072 else if (*vr0type
== VR_ANTI_RANGE
6073 && vr1type
== VR_RANGE
)
6075 *vr0type
= VR_RANGE
;
6076 if (TREE_CODE (*vr0min
) == INTEGER_CST
)
6077 *vr0max
= int_const_binop (MINUS_EXPR
, *vr0min
,
6078 build_int_cst (TREE_TYPE (*vr0min
), 1));
6087 /* If we know the intersection is empty, there's no need to
6088 conservatively add anything else to the set. */
6089 if (*vr0type
== VR_UNDEFINED
)
6092 /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
6093 result for the intersection. That's always a conservative
6094 correct estimate unless VR1 is a constant singleton range
6095 in which case we choose that. */
6096 if (vr1type
== VR_RANGE
6097 && is_gimple_min_invariant (vr1min
)
6098 && vrp_operand_equal_p (vr1min
, vr1max
))
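/* Illustrative sketch, not part of the pass: intersecting two overlapping
   constant ranges.  The values are made up for the example.  */

static void ATTRIBUTE_UNUSED
intersect_ranges_usage_sketch (void)
{
  value_range_kind kind = VR_RANGE;
  tree min = build_int_cst (integer_type_node, 1);
  tree max = build_int_cst (integer_type_node, 10);

  intersect_ranges (&kind, &min, &max,
		    VR_RANGE,
		    build_int_cst (integer_type_node, 5),
		    build_int_cst (integer_type_node, 20));
  /* Now kind == VR_RANGE and [min, max] == [5, 10], the overlap of the
     two inputs.  */
}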
6107 /* Helper for the intersection operation for value ranges. Given two
6108 value ranges VR0 and VR1, return the intersection of the two
6109 ranges. This may not be the smallest possible such range. */
6112 value_range_base::intersect_helper (const value_range_base
*vr0
,
6113 const value_range_base
*vr1
)
6115 /* If either range is VR_VARYING the other one wins. */
6116 if (vr1
->varying_p ())
6118 if (vr0
->varying_p ())
6121 /* When either range is VR_UNDEFINED the resulting range is
6122 VR_UNDEFINED, too. */
6123 if (vr0
->undefined_p ())
6125 if (vr1
->undefined_p ())
6128 value_range_kind vr0type
= vr0
->kind ();
6129 tree vr0min
= vr0
->min ();
6130 tree vr0max
= vr0
->max ();
6131 intersect_ranges (&vr0type
, &vr0min
, &vr0max
,
6132 vr1
->kind (), vr1
->min (), vr1
->max ());
6133 /* Make sure to canonicalize the result though as the inversion of a
6134 VR_RANGE can still be a VR_RANGE. Work on a temporary so we can
6135 fall back to vr0 when this turns things to varying. */
6136 value_range_base tem
;
6137 if (vr0type
== VR_UNDEFINED
)
6138 tem
.set_undefined ();
6139 else if (vr0type
== VR_VARYING
)
6140 tem
.set_varying (vr0
->type ());
6142 tem
.set (vr0type
, vr0min
, vr0max
);
6143 /* If that failed, use the saved original VR0. */
6144 if (tem
.varying_p ())
6151 value_range_base::intersect (const value_range_base
*other
)
6153 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6155 fprintf (dump_file
, "Intersecting\n ");
6156 dump_value_range (dump_file
, this);
6157 fprintf (dump_file
, "\nand\n ");
6158 dump_value_range (dump_file
, other
);
6159 fprintf (dump_file
, "\n");
6162 *this = intersect_helper (this, other
);
6164 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6166 fprintf (dump_file
, "to\n ");
6167 dump_value_range (dump_file
, this);
6168 fprintf (dump_file
, "\n");
6173 value_range::intersect (const value_range
*other
)
6175 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6177 fprintf (dump_file
, "Intersecting\n ");
6178 dump_value_range (dump_file
, this);
6179 fprintf (dump_file
, "\nand\n ");
6180 dump_value_range (dump_file
, other
);
6181 fprintf (dump_file
, "\n");
6184 /* If THIS is varying we want to pick up equivalences from OTHER.
6185 Just special-case this here rather than trying to fixup after the
6187 if (this->varying_p ())
6188 this->deep_copy (other
);
6191 value_range_base tem
= intersect_helper (this, other
);
6192 this->update (tem
.kind (), tem
.min (), tem
.max ());
6194 /* If the result is VR_UNDEFINED there is no need to mess with
6196 if (!undefined_p ())
6198 /* The resulting set of equivalences for range intersection
6199 is the union of the two sets. */
6200 if (m_equiv
&& other
->m_equiv
&& m_equiv
!= other
->m_equiv
)
6201 bitmap_ior_into (m_equiv
, other
->m_equiv
);
6202 else if (other
->m_equiv
&& !m_equiv
)
6204 /* All equivalence bitmaps are allocated from the same
6205 obstack. So we can use the obstack associated with
6206 VR to allocate this->m_equiv. */
6207 m_equiv
= BITMAP_ALLOC (other
->m_equiv
->obstack
);
6208 bitmap_copy (m_equiv
, other
->m_equiv
);
6213 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6215 fprintf (dump_file
, "to\n ");
6216 dump_value_range (dump_file
, this);
6217 fprintf (dump_file
, "\n");
6221 /* Helper for meet operation for value ranges. Given two value ranges VR0 and
6222 VR1, return a range that contains both VR0 and VR1. This may not be the
6223 smallest possible such range. */
6226 value_range_base::union_helper (const value_range_base
*vr0
,
6227 const value_range_base
*vr1
)
6229 /* VR0 has the resulting range if VR1 is undefined or VR0 is varying. */
6230 if (vr1
->undefined_p ()
6231 || vr0
->varying_p ())
6234 /* VR1 has the resulting range if VR0 is undefined or VR1 is varying. */
6235 if (vr0
->undefined_p ()
6236 || vr1
->varying_p ())
6239 value_range_kind vr0type
= vr0
->kind ();
6240 tree vr0min
= vr0
->min ();
6241 tree vr0max
= vr0
->max ();
6242 union_ranges (&vr0type
, &vr0min
, &vr0max
,
6243 vr1
->kind (), vr1
->min (), vr1
->max ());
6245 /* Work on a temporary so we can still use vr0 when union returns varying. */
6246 value_range_base tem
;
6247 if (vr0type
== VR_UNDEFINED
)
6248 tem
.set_undefined ();
6249 else if (vr0type
== VR_VARYING
)
6250 tem
.set_varying (vr0
->type ());
6252 tem
.set (vr0type
, vr0min
, vr0max
);
6254 /* Failed to find an efficient meet. Before giving up and setting
6255 the result to VARYING, see if we can at least derive a useful
6257 if (tem
.varying_p ()
6258 && range_includes_zero_p (vr0
) == 0
6259 && range_includes_zero_p (vr1
) == 0)
6261 tem
.set_nonzero (vr0
->type ());
6269 /* Meet operation for value ranges. Given two value ranges VR0 and
6270 VR1, store in VR0 a range that contains both VR0 and VR1. This
6271 may not be the smallest possible such range. */
6274 value_range_base::union_ (const value_range_base
*other
)
6276 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6278 fprintf (dump_file
, "Meeting\n ");
6279 dump_value_range (dump_file
, this);
6280 fprintf (dump_file
, "\nand\n ");
6281 dump_value_range (dump_file
, other
);
6282 fprintf (dump_file
, "\n");
6285 *this = union_helper (this, other
);
6287 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6289 fprintf (dump_file
, "to\n ");
6290 dump_value_range (dump_file
, this);
6291 fprintf (dump_file
, "\n");
6296 value_range::union_ (const value_range
*other
)
6298 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6300 fprintf (dump_file
, "Meeting\n ");
6301 dump_value_range (dump_file
, this);
6302 fprintf (dump_file
, "\nand\n ");
6303 dump_value_range (dump_file
, other
);
6304 fprintf (dump_file
, "\n");
6307 /* If THIS is undefined we want to pick up equivalences from OTHER.
6308 Just special-case this here rather than trying to fixup after the fact. */
6309 if (this->undefined_p ())
6310 this->deep_copy (other
);
6313 value_range_base tem
= union_helper (this, other
);
6314 this->update (tem
.kind (), tem
.min (), tem
.max ());
6316 /* The resulting set of equivalences is always the intersection of
6318 if (this->m_equiv
&& other
->m_equiv
&& this->m_equiv
!= other
->m_equiv
)
6319 bitmap_and_into (this->m_equiv
, other
->m_equiv
);
6320 else if (this->m_equiv
&& !other
->m_equiv
)
6321 bitmap_clear (this->m_equiv
);
6324 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6326 fprintf (dump_file
, "to\n ");
6327 dump_value_range (dump_file
, this);
6328 fprintf (dump_file
, "\n");
/* Normalize symbolics into constants.  */

value_range_base
value_range_base::normalize_symbolics () const
{
  if (varying_p () || undefined_p ())
    return *this;
  tree ttype = type ();
  bool min_symbolic = !is_gimple_min_invariant (min ());
  bool max_symbolic = !is_gimple_min_invariant (max ());
  if (!min_symbolic && !max_symbolic)
    return *this;

  // [SYM, SYM] -> VARYING
  if (min_symbolic && max_symbolic)
    {
      value_range_base var;
      var.set_varying (ttype);
      return var;
    }
  if (kind () == VR_RANGE)
    {
      // [SYM, NUM] -> [-MIN, NUM]
      if (min_symbolic)
	return value_range_base (VR_RANGE, vrp_val_min (ttype), max ());
      // [NUM, SYM] -> [NUM, +MAX]
      return value_range_base (VR_RANGE, min (), vrp_val_max (ttype));
    }
  gcc_assert (kind () == VR_ANTI_RANGE);
  // ~[SYM, NUM] -> [NUM + 1, +MAX]
  if (min_symbolic)
    {
      if (!vrp_val_is_max (max ()))
	{
	  tree n = wide_int_to_tree (ttype, wi::to_wide (max ()) + 1);
	  return value_range_base (VR_RANGE, n, vrp_val_max (ttype));
	}
      value_range_base var;
      var.set_varying (ttype);
      return var;
    }
  // ~[NUM, SYM] -> [-MIN, NUM - 1]
  if (!vrp_val_is_min (min ()))
    {
      tree n = wide_int_to_tree (ttype, wi::to_wide (min ()) - 1);
      return value_range_base (VR_RANGE, vrp_val_min (ttype), n);
    }
  value_range_base var;
  var.set_varying (ttype);
  return var;
}
/* Visit all arguments for PHI node PHI that flow through executable
   edges.  If a valid value range can be derived from all the incoming
   value ranges, set a new range for the LHS of PHI.  */

enum ssa_prop_result
vrp_prop::visit_phi (gphi *phi)
{
  tree lhs = PHI_RESULT (phi);
  value_range vr_result;
  extract_range_from_phi_node (phi, &vr_result);
  if (update_value_range (lhs, &vr_result))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Found new range for ");
	  print_generic_expr (dump_file, lhs);
	  fprintf (dump_file, ": ");
	  dump_value_range (dump_file, &vr_result);
	  fprintf (dump_file, "\n");
	}

      if (vr_result.varying_p ())
	return SSA_PROP_VARYING;

      return SSA_PROP_INTERESTING;
    }

  /* Nothing changed, don't add outgoing edges.  */
  return SSA_PROP_NOT_INTERESTING;
}
class vrp_folder : public substitute_and_fold_engine
{
 public:
  tree get_value (tree) FINAL OVERRIDE;
  bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
  bool fold_predicate_in (gimple_stmt_iterator *);

  class vr_values *vr_values;

 private:
  /* Delegators.  */
  tree vrp_evaluate_conditional (tree_code code, tree op0,
				 tree op1, gimple *stmt)
    { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
  bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
    { return vr_values->simplify_stmt_using_ranges (gsi); }
  tree op_with_constant_singleton_value_range (tree op)
    { return vr_values->op_with_constant_singleton_value_range (op); }
};
/* If the statement pointed by SI has a predicate whose value can be
   computed using the value range information computed by VRP, compute
   its value and return true.  Otherwise, return false.  */

bool
vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
{
  bool assignment_p = false;
  tree val;
  gimple *stmt = gsi_stmt (*si);

  if (is_gimple_assign (stmt)
      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
    {
      assignment_p = true;
      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
				      gimple_assign_rhs1 (stmt),
				      gimple_assign_rhs2 (stmt),
				      stmt);
    }
  else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
				    gimple_cond_lhs (cond_stmt),
				    gimple_cond_rhs (cond_stmt),
				    stmt);
  else
    return false;

  if (val)
    {
      if (assignment_p)
	val = fold_convert (gimple_expr_type (stmt), val);

      if (dump_file)
	{
	  fprintf (dump_file, "Folding predicate ");
	  print_gimple_expr (dump_file, stmt, 0);
	  fprintf (dump_file, " to ");
	  print_generic_expr (dump_file, val);
	  fprintf (dump_file, "\n");
	}

      if (is_gimple_assign (stmt))
	gimple_assign_set_rhs_from_tree (si, val);
      else
	{
	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
	  gcond *cond_stmt = as_a <gcond *> (stmt);
	  if (integer_zerop (val))
	    gimple_cond_make_false (cond_stmt);
	  else if (integer_onep (val))
	    gimple_cond_make_true (cond_stmt);
	  else
	    gcc_unreachable ();
	}

      return true;
    }

  return false;
}
/* Callback for substitute_and_fold folding the stmt at *SI.  */

bool
vrp_folder::fold_stmt (gimple_stmt_iterator *si)
{
  if (fold_predicate_in (si))
    return true;

  return simplify_stmt_using_ranges (si);
}

/* If OP has a value range with a single constant value return that,
   otherwise return NULL_TREE.  This returns OP itself if OP is a
   constant.

   Implemented as a pure wrapper right now, but this will change.  */

tree
vrp_folder::get_value (tree op)
{
  return op_with_constant_singleton_value_range (op);
}
/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
{
  imm_use_iterator imm_iter;
  gimple *use_stmt;
  use_operand_p use_p;

  if (TREE_CODE (op) == SSA_NAME)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
	{
	  use_stmt = USE_STMT (use_p);
	  if (use_stmt != stmt
	      && gimple_assign_single_p (use_stmt)
	      && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
	      && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
	      && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
	    return gimple_assign_lhs (use_stmt);
	}
    }
  return op;
}
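/* For instance, if a block dominating BB contains
     x_4 = ASSERT_EXPR <x_3, x_3 != 0>
   then asking for x_3 at BB returns x_4 (illustrative names), so the caller
   picks up the range refined by the assertion rather than x_3's raw range.  */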
static class vr_values *x_vr_values;
/* A trivial wrapper so that we can present the generic jump threading
   code with a simple API for simplifying statements.  STMT is the
   statement we want to simplify, WITHIN_STMT provides the location
   for any overflow warnings.  */

static tree
simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
    class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED,
    basic_block bb)
{
  /* First see if the conditional is in the hash table.  */
  tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
  if (cached_lhs && is_gimple_min_invariant (cached_lhs))
    return cached_lhs;

  vr_values *vr_values = x_vr_values;
  if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    {
      tree op0 = gimple_cond_lhs (cond_stmt);
      op0 = lhs_of_dominating_assert (op0, bb, stmt);

      tree op1 = gimple_cond_rhs (cond_stmt);
      op1 = lhs_of_dominating_assert (op1, bb, stmt);

      return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
						  op0, op1, within_stmt);
    }
  /* We simplify a switch statement by trying to determine which case label
     will be taken.  If we are successful then we return the corresponding
     CASE_LABEL_EXPR.  */
  if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
    {
      tree op = gimple_switch_index (switch_stmt);
      if (TREE_CODE (op) != SSA_NAME)
	return NULL_TREE;

      op = lhs_of_dominating_assert (op, bb, stmt);

      const value_range *vr = vr_values->get_value_range (op);
      if (vr->undefined_p ()
	  || vr->varying_p ()
	  || vr->symbolic_p ())
	return NULL_TREE;

      if (vr->kind () == VR_RANGE)
	{
	  size_t i, j;

	  /* Get the range of labels that contain a part of the operand's
	     value range.  */
	  find_case_label_range (switch_stmt, vr->min (), vr->max (), &i, &j);

	  /* Is there only one such label?  */
	  if (i == j)
	    {
	      tree label = gimple_switch_label (switch_stmt, i);

	      /* The i'th label will be taken only if the value range of the
		 operand is entirely within the bounds of this label.  */
	      if (CASE_HIGH (label) != NULL_TREE
		  ? (tree_int_cst_compare (CASE_LOW (label), vr->min ()) <= 0
		     && tree_int_cst_compare (CASE_HIGH (label),
					      vr->max ()) >= 0)
		  : (tree_int_cst_equal (CASE_LOW (label), vr->min ())
		     && tree_int_cst_equal (vr->min (), vr->max ())))
		return label;
	    }

	  /* If there are no such labels then the default label will be
	     taken.  */
	  if (i > j)
	    return gimple_switch_label (switch_stmt, 0);
	}

      if (vr->kind () == VR_ANTI_RANGE)
	{
	  unsigned n = gimple_switch_num_labels (switch_stmt);
	  tree min_label = gimple_switch_label (switch_stmt, 1);
	  tree max_label = gimple_switch_label (switch_stmt, n - 1);

	  /* The default label will be taken only if the anti-range of the
	     operand is entirely outside the bounds of all the (non-default)
	     case labels.  */
	  if (tree_int_cst_compare (vr->min (), CASE_LOW (min_label)) <= 0
	      && (CASE_HIGH (max_label) != NULL_TREE
		  ? tree_int_cst_compare (vr->max (),
					  CASE_HIGH (max_label)) >= 0
		  : tree_int_cst_compare (vr->max (),
					  CASE_LOW (max_label)) >= 0))
	    return gimple_switch_label (switch_stmt, 0);
	}

      return NULL_TREE;
    }
  if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
    {
      tree lhs = gimple_assign_lhs (assign_stmt);
      if (TREE_CODE (lhs) == SSA_NAME
	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
	  && stmt_interesting_for_vrp (stmt))
	{
	  edge dummy_e;
	  tree dummy_tree;
	  value_range new_vr;
	  vr_values->extract_range_from_stmt (stmt, &dummy_e,
					      &dummy_tree, &new_vr);
	  tree singleton;
	  if (new_vr.singleton_p (&singleton))
	    return singleton;
	}
    }

  return NULL_TREE;
}
class vrp_dom_walker : public dom_walker
{
 public:
  vrp_dom_walker (cdi_direction direction,
		  class const_and_copies *const_and_copies,
		  class avail_exprs_stack *avail_exprs_stack)
    : dom_walker (direction, REACHABLE_BLOCKS),
      m_const_and_copies (const_and_copies),
      m_avail_exprs_stack (avail_exprs_stack),
      m_dummy_cond (NULL) {}

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

  class vr_values *vr_values;

 private:
  class const_and_copies *m_const_and_copies;
  class avail_exprs_stack *m_avail_exprs_stack;

  gcond *m_dummy_cond;
};
/* Called before processing dominator children of BB.  We want to look
   at ASSERT_EXPRs and record information from them in the appropriate
   tables.

   We could look at other statements here.  It's not seen as likely
   to significantly increase the jump threads we discover.  */

edge
vrp_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  m_avail_exprs_stack->push_marker ();
  m_const_and_copies->push_marker ();
  for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (gimple_assign_single_p (stmt)
	  && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
	{
	  tree rhs1 = gimple_assign_rhs1 (stmt);
	  tree cond = TREE_OPERAND (rhs1, 1);
	  tree inverted = invert_truthvalue (cond);
	  vec<cond_equivalence> p;
	  p.create (3);
	  record_conditions (&p, cond, inverted);
	  for (unsigned int i = 0; i < p.length (); i++)
	    m_avail_exprs_stack->record_cond (&p[i]);

	  tree lhs = gimple_assign_lhs (stmt);
	  m_const_and_copies->record_const_or_copy (lhs,
						    TREE_OPERAND (rhs1, 0));
	  p.release ();
	}
      else
	break;
    }
  return NULL;
}
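/* For example, when the walker sees
     y_5 = ASSERT_EXPR <y_4, y_4 > 10>
   (names illustrative), it records the condition y_4 > 10 and its inverse
   in the available-expression table and records y_5 as a copy of y_4, both
   of which the threader can consult while walking the dominator children.  */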
/* Called after processing dominator children of BB.  This is where we
   actually call into the threader.  */
void
vrp_dom_walker::after_dom_children (basic_block bb)
{
  if (!m_dummy_cond)
    m_dummy_cond = gimple_build_cond (NE_EXPR,
				      integer_zero_node, integer_zero_node,
				      NULL, NULL);

  x_vr_values = vr_values;
  thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
			 m_avail_exprs_stack, NULL,
			 simplify_stmt_for_jump_threading);
  x_vr_values = NULL;

  m_avail_exprs_stack->pop_to_marker ();
  m_const_and_copies->pop_to_marker ();
}
/* Blocks which have more than one predecessor and more than
   one successor present jump threading opportunities, i.e.,
   when the block is reached from a specific predecessor, we
   may be able to determine which of the outgoing edges will
   be traversed.  When this optimization applies, we are able
   to avoid conditionals at runtime and we may expose secondary
   optimization opportunities.

   This routine is effectively a driver for the generic jump
   threading code.  It basically just presents the generic code
   with edges that may be suitable for jump threading.

   Unlike DOM, we do not iterate VRP if jump threading was successful.
   While iterating may expose new opportunities for VRP, it is expected
   those opportunities would be very limited and the compile time cost
   to expose those opportunities would be significant.

   As jump threading opportunities are discovered, they are registered
   for later realization.  */

static void
identify_jump_threads (class vr_values *vr_values)
{
  /* Ugh.  When substituting values earlier in this pass we can
     wipe the dominance information.  So rebuild the dominator
     information as we need it within the jump threading code.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* We do not allow VRP information to be used for jump threading
     across a back edge in the CFG.  Otherwise it becomes too
     difficult to avoid eliminating loop exit tests.  Of course
     EDGE_DFS_BACK is not accurate at this time so we have to
     recompute it.  */
  mark_dfs_back_edges ();

  /* Allocate our unwinder stack to unwind any temporary equivalences
     that might be recorded.  */
  const_and_copies *equiv_stack = new const_and_copies ();

  hash_table<expr_elt_hasher> *avail_exprs
    = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack *avail_exprs_stack
    = new class avail_exprs_stack (avail_exprs);

  vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
  walker.vr_values = vr_values;
  walker.walk (cfun->cfg->x_entry_block_ptr);

  /* We do not actually update the CFG or SSA graphs at this point as
     ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
     handle ASSERT_EXPRs gracefully.  */
  delete equiv_stack;
  delete avail_exprs;
  delete avail_exprs_stack;
}
/* Traverse all the blocks folding conditionals with known ranges.  */

void
vrp_prop::vrp_finalize (bool warn_array_bounds_p)
{
  size_t i;

  /* We have completed propagating through the lattice.  */
  vr_values.set_lattice_propagation_complete ();

  if (dump_file)
    {
      fprintf (dump_file, "\nValue ranges after VRP:\n\n");
      vr_values.dump_all_value_ranges (dump_file);
      fprintf (dump_file, "\n");
    }

  /* Set value range to non pointer SSA_NAMEs.  */
  for (i = 0; i < num_ssa_names; i++)
    {
      tree name = ssa_name (i);
      if (!name)
	continue;

      const value_range *vr = get_value_range (name);
      if (!name || !vr->constant_p ())
	continue;

      if (POINTER_TYPE_P (TREE_TYPE (name))
	  && range_includes_zero_p (vr) == 0)
	set_ptr_nonnull (name);
      else if (!POINTER_TYPE_P (TREE_TYPE (name)))
	set_range_info (name, *vr);
    }

  /* If we're checking array refs, we want to merge information on
     the executability of each edge between vrp_folder and the
     check_array_bounds_dom_walker: each can clear the
     EDGE_EXECUTABLE flag on edges, in different ways.

     Hence, if we're going to call check_all_array_refs, set
     the flag on every edge now, rather than in
     check_array_bounds_dom_walker's ctor; vrp_folder may clear
     it from some edges.  */
  if (warn_array_bounds && warn_array_bounds_p)
    set_all_edges_as_executable (cfun);

  class vrp_folder vrp_folder;
  vrp_folder.vr_values = &vr_values;
  vrp_folder.substitute_and_fold ();

  if (warn_array_bounds && warn_array_bounds_p)
    check_all_array_refs ();
}
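/* For instance, an SSA name whose final lattice value is the range [1, 100]
   has that range attached via set_range_info, and a pointer whose range
   excludes zero is marked via set_ptr_nonnull; later passes can query this
   information after the VRP lattice itself has been released.  */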
/* Main entry point to VRP (Value Range Propagation).  This pass is
   loosely based on J. R. C. Patterson, ``Accurate Static Branch
   Prediction by Value Range Propagation,'' in SIGPLAN Conference on
   Programming Language Design and Implementation, pp. 67-78, 1995.
   Also available at http://citeseer.ist.psu.edu/patterson95accurate.html

   This is essentially an SSA-CCP pass modified to deal with ranges
   instead of constants.

   While propagating ranges, we may find that two or more SSA names
   have equivalent, though distinct ranges.  For instance,

	1  x_9 = p_3->a;
	2  p_4 = ASSERT_EXPR <p_3, p_3 != 0>
	3  if (p_4 == q_2)
	4    p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
	5  endif
	6  if (q_2)

   In the code above, pointer p_5 has range [q_2, q_2], but from the
   code we can also determine that p_5 cannot be NULL and, if q_2 had
   a non-varying range, p_5's range should also be compatible with it.

   These equivalences are created by two expressions: ASSERT_EXPR and
   copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
   result of another assertion, then we can use the fact that p_5 and
   p_4 are equivalent when evaluating p_5's range.

   Together with value ranges, we also propagate these equivalences
   between names so that we can take advantage of information from
   multiple ranges when doing final replacement.  Note that this
   equivalency relation is transitive but not symmetric.

   In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
   cannot assert that q_2 is equivalent to p_5 because q_2 may be used
   in contexts where that assertion does not hold (e.g., in line 6).

   TODO: the main difference between this pass and Patterson's is that
   we do not propagate edge probabilities.  We only compute whether
   edges can be taken or not.  That is, instead of having a spectrum
   of jump probabilities between 0 and 1, we only deal with 0, 1 and
   DON'T KNOW.  In the future, it may be worthwhile to propagate
   probabilities to aid branch prediction.  */
static unsigned int
execute_vrp (bool warn_array_bounds_p)
{
  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();

  /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
     Inserting assertions may split edges which will invalidate
     EDGE_DFS_BACK.  */
  insert_range_assertions ();

  threadedge_initialize_values ();

  /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
  mark_dfs_back_edges ();

  class vrp_prop vrp_prop;
  vrp_prop.vrp_initialize ();
  vrp_prop.ssa_propagate ();
  vrp_prop.vrp_finalize (warn_array_bounds_p);

  /* We must identify jump threading opportunities before we release
     the datastructures built by VRP.  */
  identify_jump_threads (&vrp_prop.vr_values);

  /* A comparison of an SSA_NAME against a constant where the SSA_NAME
     was set by a type conversion can often be rewritten to use the
     RHS of the type conversion.

     However, doing so inhibits jump threading through the comparison.
     So that transformation is not performed until after jump threading
     is complete.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple *last = last_stmt (bb);
      if (last && gimple_code (last) == GIMPLE_COND)
	vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
    }

  free_numbers_of_iterations_estimates (cfun);

  /* ASSERT_EXPRs must be removed before finalizing jump threads
     as finalizing jump threads calls the CFG cleanup code which
     does not properly handle ASSERT_EXPRs.  */
  remove_range_assertions ();

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  /* We identified all the jump threading opportunities earlier, but could
     not transform the CFG at that time.  This routine transforms the
     CFG and arranges for the dominator tree to be rebuilt if necessary.

     Note the SSA graph update will occur during the normal TODO
     processing by the pass manager.  */
  thread_through_all_blocks (false);

  vrp_prop.vr_values.cleanup_edges_and_switches ();
  threadedge_finalize_values ();

  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}

namespace {
const pass_data pass_data_vrp =
{
  GIMPLE_PASS, /* type */
  "vrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_vrp : public gimple_opt_pass
{
public:
  pass_vrp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_vrp (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      warn_array_bounds_p = param;
    }
  virtual bool gate (function *) { return flag_tree_vrp != 0; }
  virtual unsigned int execute (function *)
    { return execute_vrp (warn_array_bounds_p); }

 private:
  bool warn_array_bounds_p;
}; // class pass_vrp

} // anon namespace

gimple_opt_pass *
make_pass_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt);
}
/* Worker for determine_value_range.  */

static void
determine_value_range_1 (value_range_base *vr, tree expr)
{
  if (BINARY_CLASS_P (expr))
    {
      value_range_base vr0, vr1;
      determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
      determine_value_range_1 (&vr1, TREE_OPERAND (expr, 1));
      extract_range_from_binary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
				      &vr0, &vr1);
    }
  else if (UNARY_CLASS_P (expr))
    {
      value_range_base vr0;
      determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
      extract_range_from_unary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
				     &vr0, TREE_TYPE (TREE_OPERAND (expr, 0)));
    }
  else if (TREE_CODE (expr) == INTEGER_CST)
    vr->set (expr);
  else
    {
      value_range_kind kind;
      wide_int min, max;
      /* For SSA names try to extract range info computed by VRP.  Otherwise
	 fall back to varying.  */
      if (TREE_CODE (expr) == SSA_NAME
	  && INTEGRAL_TYPE_P (TREE_TYPE (expr))
	  && (kind = get_range_info (expr, &min, &max)) != VR_VARYING)
	vr->set (kind, wide_int_to_tree (TREE_TYPE (expr), min),
		 wide_int_to_tree (TREE_TYPE (expr), max));
      else
	vr->set_varying (TREE_TYPE (expr));
    }
}
/* Compute a value-range for EXPR and set it in *MIN and *MAX.  Return
   the determined range type.  */

value_range_kind
determine_value_range (tree expr, wide_int *min, wide_int *max)
{
  value_range_base vr;
  determine_value_range_1 (&vr, expr);
  if (vr.constant_p ())
    {
      *min = wi::to_wide (vr.min ());
      *max = wi::to_wide (vr.max ());