/* Support routines for Value Range Propagation (VRP).
   Copyright (C) 2005-2013 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
23 #include "coretypes.h"
28 #include "basic-block.h"
30 #include "tree-pass.h"
31 #include "tree-dump.h"
32 #include "gimple-pretty-print.h"
33 #include "diagnostic-core.h"
36 #include "tree-scalar-evolution.h"
37 #include "tree-ssa-propagate.h"
38 #include "tree-chrec.h"
39 #include "gimple-fold.h"
/* Type of value ranges.  See value_range_d for a description of these
   types.  */
enum value_range_type { VR_UNDEFINED, VR_RANGE, VR_ANTI_RANGE, VR_VARYING };
48 /* Range of values that can be associated with an SSA_NAME after VRP
52 /* Lattice value represented by this range. */
53 enum value_range_type type
;
55 /* Minimum and maximum values represented by this range. These
56 values should be interpreted as follows:
58 - If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
61 - If TYPE == VR_RANGE then MIN holds the minimum value and
62 MAX holds the maximum value of the range [MIN, MAX].
64 - If TYPE == ANTI_RANGE the variable is known to NOT
65 take any values in the range [MIN, MAX]. */
69 /* Set of SSA names whose value ranges are equivalent to this one.
70 This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE. */
74 typedef struct value_range_d value_range_t
;
76 #define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }
78 /* Set of SSA names found live during the RPO traversal of the function
79 for still active basic-blocks. */
82 /* Return true if the SSA name NAME is live on the edge E. */
85 live_on_edge (edge e
, tree name
)
87 return (live
[e
->dest
->index
]
88 && bitmap_bit_p (live
[e
->dest
->index
], SSA_NAME_VERSION (name
)));
91 /* Local functions. */
92 static int compare_values (tree val1
, tree val2
);
93 static int compare_values_warnv (tree val1
, tree val2
, bool *);
94 static void vrp_meet (value_range_t
*, value_range_t
*);
95 static void vrp_intersect_ranges (value_range_t
*, value_range_t
*);
96 static tree
vrp_evaluate_conditional_warnv_with_ops (enum tree_code
,
97 tree
, tree
, bool, bool *,
100 /* Location information for ASSERT_EXPRs. Each instance of this
101 structure describes an ASSERT_EXPR for an SSA name. Since a single
102 SSA name may have more than one assertion associated with it, these
103 locations are kept in a linked list attached to the corresponding
105 struct assert_locus_d
107 /* Basic block where the assertion would be inserted. */
110 /* Some assertions need to be inserted on an edge (e.g., assertions
111 generated by COND_EXPRs). In those cases, BB will be NULL. */
114 /* Pointer to the statement that generated this assertion. */
115 gimple_stmt_iterator si
;
117 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
118 enum tree_code comp_code
;
120 /* Value being compared against. */
123 /* Expression to compare. */
126 /* Next node in the linked list. */
127 struct assert_locus_d
*next
;
130 typedef struct assert_locus_d
*assert_locus_t
;
132 /* If bit I is present, it means that SSA name N_i has a list of
133 assertions that should be inserted in the IL. */
134 static bitmap need_assert_for
;
136 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I]
137 holds a list of ASSERT_LOCUS_T nodes that describe where
138 ASSERT_EXPRs for SSA name N_I should be inserted. */
139 static assert_locus_t
*asserts_for
;
141 /* Value range array. After propagation, VR_VALUE[I] holds the range
142 of values that SSA name N_I may take. */
143 static unsigned num_vr_values
;
144 static value_range_t
**vr_value
;
145 static bool values_propagated
;
147 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
148 number of executable edges we saw the last time we visited the
150 static int *vr_phi_edge_counts
;
157 static vec
<edge
> to_remove_edges
;
158 static vec
<switch_update
> to_update_switch_stmts
;
161 /* Return the maximum value for TYPE. */
164 vrp_val_max (const_tree type
)
166 if (!INTEGRAL_TYPE_P (type
))
169 return TYPE_MAX_VALUE (type
);
172 /* Return the minimum value for TYPE. */
175 vrp_val_min (const_tree type
)
177 if (!INTEGRAL_TYPE_P (type
))
180 return TYPE_MIN_VALUE (type
);
183 /* Return whether VAL is equal to the maximum value of its type. This
184 will be true for a positive overflow infinity. We can't do a
185 simple equality comparison with TYPE_MAX_VALUE because C typedefs
186 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
187 to the integer constant with the same value in the type. */
190 vrp_val_is_max (const_tree val
)
192 tree type_max
= vrp_val_max (TREE_TYPE (val
));
193 return (val
== type_max
194 || (type_max
!= NULL_TREE
195 && operand_equal_p (val
, type_max
, 0)));
198 /* Return whether VAL is equal to the minimum value of its type. This
199 will be true for a negative overflow infinity. */
202 vrp_val_is_min (const_tree val
)
204 tree type_min
= vrp_val_min (TREE_TYPE (val
));
205 return (val
== type_min
206 || (type_min
!= NULL_TREE
207 && operand_equal_p (val
, type_min
, 0)));
211 /* Return whether TYPE should use an overflow infinity distinct from
212 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
213 represent a signed overflow during VRP computations. An infinity
214 is distinct from a half-range, which will go from some number to
215 TYPE_{MIN,MAX}_VALUE. */
218 needs_overflow_infinity (const_tree type
)
220 return INTEGRAL_TYPE_P (type
) && !TYPE_OVERFLOW_WRAPS (type
);
223 /* Return whether TYPE can support our overflow infinity
224 representation: we use the TREE_OVERFLOW flag, which only exists
225 for constants. If TYPE doesn't support this, we don't optimize
226 cases which would require signed overflow--we drop them to
230 supports_overflow_infinity (const_tree type
)
232 tree min
= vrp_val_min (type
), max
= vrp_val_max (type
);
233 #ifdef ENABLE_CHECKING
234 gcc_assert (needs_overflow_infinity (type
));
236 return (min
!= NULL_TREE
237 && CONSTANT_CLASS_P (min
)
239 && CONSTANT_CLASS_P (max
));
242 /* VAL is the maximum or minimum value of a type. Return a
243 corresponding overflow infinity. */
246 make_overflow_infinity (tree val
)
248 gcc_checking_assert (val
!= NULL_TREE
&& CONSTANT_CLASS_P (val
));
249 val
= copy_node (val
);
250 TREE_OVERFLOW (val
) = 1;
254 /* Return a negative overflow infinity for TYPE. */
257 negative_overflow_infinity (tree type
)
259 gcc_checking_assert (supports_overflow_infinity (type
));
260 return make_overflow_infinity (vrp_val_min (type
));
263 /* Return a positive overflow infinity for TYPE. */
266 positive_overflow_infinity (tree type
)
268 gcc_checking_assert (supports_overflow_infinity (type
));
269 return make_overflow_infinity (vrp_val_max (type
));
272 /* Return whether VAL is a negative overflow infinity. */
275 is_negative_overflow_infinity (const_tree val
)
277 return (needs_overflow_infinity (TREE_TYPE (val
))
278 && CONSTANT_CLASS_P (val
)
279 && TREE_OVERFLOW (val
)
280 && vrp_val_is_min (val
));
283 /* Return whether VAL is a positive overflow infinity. */
286 is_positive_overflow_infinity (const_tree val
)
288 return (needs_overflow_infinity (TREE_TYPE (val
))
289 && CONSTANT_CLASS_P (val
)
290 && TREE_OVERFLOW (val
)
291 && vrp_val_is_max (val
));
294 /* Return whether VAL is a positive or negative overflow infinity. */
297 is_overflow_infinity (const_tree val
)
299 return (needs_overflow_infinity (TREE_TYPE (val
))
300 && CONSTANT_CLASS_P (val
)
301 && TREE_OVERFLOW (val
)
302 && (vrp_val_is_min (val
) || vrp_val_is_max (val
)));
305 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
308 stmt_overflow_infinity (gimple stmt
)
310 if (is_gimple_assign (stmt
)
311 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt
)) ==
313 return is_overflow_infinity (gimple_assign_rhs1 (stmt
));
317 /* If VAL is now an overflow infinity, return VAL. Otherwise, return
318 the same value with TREE_OVERFLOW clear. This can be used to avoid
319 confusing a regular value with an overflow value. */
322 avoid_overflow_infinity (tree val
)
324 if (!is_overflow_infinity (val
))
327 if (vrp_val_is_max (val
))
328 return vrp_val_max (TREE_TYPE (val
));
331 gcc_checking_assert (vrp_val_is_min (val
));
332 return vrp_val_min (TREE_TYPE (val
));
337 /* Return true if ARG is marked with the nonnull attribute in the
338 current function signature. */
341 nonnull_arg_p (const_tree arg
)
343 tree t
, attrs
, fntype
;
344 unsigned HOST_WIDE_INT arg_num
;
346 gcc_assert (TREE_CODE (arg
) == PARM_DECL
&& POINTER_TYPE_P (TREE_TYPE (arg
)));
348 /* The static chain decl is always non null. */
349 if (arg
== cfun
->static_chain_decl
)
352 fntype
= TREE_TYPE (current_function_decl
);
353 for (attrs
= TYPE_ATTRIBUTES (fntype
); attrs
; attrs
= TREE_CHAIN (attrs
))
355 attrs
= lookup_attribute ("nonnull", attrs
);
357 /* If "nonnull" wasn't specified, we know nothing about the argument. */
358 if (attrs
== NULL_TREE
)
361 /* If "nonnull" applies to all the arguments, then ARG is non-null. */
362 if (TREE_VALUE (attrs
) == NULL_TREE
)
365 /* Get the position number for ARG in the function signature. */
366 for (arg_num
= 1, t
= DECL_ARGUMENTS (current_function_decl
);
368 t
= DECL_CHAIN (t
), arg_num
++)
374 gcc_assert (t
== arg
);
376 /* Now see if ARG_NUM is mentioned in the nonnull list. */
377 for (t
= TREE_VALUE (attrs
); t
; t
= TREE_CHAIN (t
))
379 if (compare_tree_int (TREE_VALUE (t
), arg_num
) == 0)
388 /* Set value range VR to VR_UNDEFINED. */
391 set_value_range_to_undefined (value_range_t
*vr
)
393 vr
->type
= VR_UNDEFINED
;
394 vr
->min
= vr
->max
= NULL_TREE
;
396 bitmap_clear (vr
->equiv
);
400 /* Set value range VR to VR_VARYING. */
403 set_value_range_to_varying (value_range_t
*vr
)
405 vr
->type
= VR_VARYING
;
406 vr
->min
= vr
->max
= NULL_TREE
;
408 bitmap_clear (vr
->equiv
);
412 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
415 set_value_range (value_range_t
*vr
, enum value_range_type t
, tree min
,
416 tree max
, bitmap equiv
)
418 #if defined ENABLE_CHECKING
419 /* Check the validity of the range. */
420 if (t
== VR_RANGE
|| t
== VR_ANTI_RANGE
)
424 gcc_assert (min
&& max
);
426 if (INTEGRAL_TYPE_P (TREE_TYPE (min
)) && t
== VR_ANTI_RANGE
)
427 gcc_assert (!vrp_val_is_min (min
) || !vrp_val_is_max (max
));
429 cmp
= compare_values (min
, max
);
430 gcc_assert (cmp
== 0 || cmp
== -1 || cmp
== -2);
432 if (needs_overflow_infinity (TREE_TYPE (min
)))
433 gcc_assert (!is_overflow_infinity (min
)
434 || !is_overflow_infinity (max
));
437 if (t
== VR_UNDEFINED
|| t
== VR_VARYING
)
438 gcc_assert (min
== NULL_TREE
&& max
== NULL_TREE
);
440 if (t
== VR_UNDEFINED
|| t
== VR_VARYING
)
441 gcc_assert (equiv
== NULL
|| bitmap_empty_p (equiv
));
448 /* Since updating the equivalence set involves deep copying the
449 bitmaps, only do it if absolutely necessary. */
450 if (vr
->equiv
== NULL
452 vr
->equiv
= BITMAP_ALLOC (NULL
);
454 if (equiv
!= vr
->equiv
)
456 if (equiv
&& !bitmap_empty_p (equiv
))
457 bitmap_copy (vr
->equiv
, equiv
);
459 bitmap_clear (vr
->equiv
);
464 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
465 This means adjusting T, MIN and MAX representing the case of a
466 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
467 as anti-rage ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
468 In corner cases where MAX+1 or MIN-1 wraps this will fall back
470 This routine exists to ease canonicalization in the case where we
471 extract ranges from var + CST op limit. */
474 set_and_canonicalize_value_range (value_range_t
*vr
, enum value_range_type t
,
475 tree min
, tree max
, bitmap equiv
)
477 /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */
478 if (t
== VR_UNDEFINED
)
480 set_value_range_to_undefined (vr
);
483 else if (t
== VR_VARYING
)
485 set_value_range_to_varying (vr
);
489 /* Nothing to canonicalize for symbolic ranges. */
490 if (TREE_CODE (min
) != INTEGER_CST
491 || TREE_CODE (max
) != INTEGER_CST
)
493 set_value_range (vr
, t
, min
, max
, equiv
);
497 /* Wrong order for min and max, to swap them and the VR type we need
499 if (tree_int_cst_lt (max
, min
))
503 /* For one bit precision if max < min, then the swapped
504 range covers all values, so for VR_RANGE it is varying and
505 for VR_ANTI_RANGE empty range, so drop to varying as well. */
506 if (TYPE_PRECISION (TREE_TYPE (min
)) == 1)
508 set_value_range_to_varying (vr
);
512 one
= build_int_cst (TREE_TYPE (min
), 1);
513 tmp
= int_const_binop (PLUS_EXPR
, max
, one
);
514 max
= int_const_binop (MINUS_EXPR
, min
, one
);
517 /* There's one corner case, if we had [C+1, C] before we now have
518 that again. But this represents an empty value range, so drop
519 to varying in this case. */
520 if (tree_int_cst_lt (max
, min
))
522 set_value_range_to_varying (vr
);
526 t
= t
== VR_RANGE
? VR_ANTI_RANGE
: VR_RANGE
;
529 /* Anti-ranges that can be represented as ranges should be so. */
530 if (t
== VR_ANTI_RANGE
)
532 bool is_min
= vrp_val_is_min (min
);
533 bool is_max
= vrp_val_is_max (max
);
535 if (is_min
&& is_max
)
537 /* We cannot deal with empty ranges, drop to varying.
538 ??? This could be VR_UNDEFINED instead. */
539 set_value_range_to_varying (vr
);
542 else if (TYPE_PRECISION (TREE_TYPE (min
)) == 1
543 && (is_min
|| is_max
))
545 /* Non-empty boolean ranges can always be represented
546 as a singleton range. */
548 min
= max
= vrp_val_max (TREE_TYPE (min
));
550 min
= max
= vrp_val_min (TREE_TYPE (min
));
554 /* As a special exception preserve non-null ranges. */
555 && !(TYPE_UNSIGNED (TREE_TYPE (min
))
556 && integer_zerop (max
)))
558 tree one
= build_int_cst (TREE_TYPE (max
), 1);
559 min
= int_const_binop (PLUS_EXPR
, max
, one
);
560 max
= vrp_val_max (TREE_TYPE (max
));
565 tree one
= build_int_cst (TREE_TYPE (min
), 1);
566 max
= int_const_binop (MINUS_EXPR
, min
, one
);
567 min
= vrp_val_min (TREE_TYPE (min
));
572 /* Drop [-INF(OVF), +INF(OVF)] to varying. */
573 if (needs_overflow_infinity (TREE_TYPE (min
))
574 && is_overflow_infinity (min
)
575 && is_overflow_infinity (max
))
577 set_value_range_to_varying (vr
);
581 set_value_range (vr
, t
, min
, max
, equiv
);
584 /* Copy value range FROM into value range TO. */
587 copy_value_range (value_range_t
*to
, value_range_t
*from
)
589 set_value_range (to
, from
->type
, from
->min
, from
->max
, from
->equiv
);
592 /* Set value range VR to a single value. This function is only called
593 with values we get from statements, and exists to clear the
594 TREE_OVERFLOW flag so that we don't think we have an overflow
595 infinity when we shouldn't. */
598 set_value_range_to_value (value_range_t
*vr
, tree val
, bitmap equiv
)
600 gcc_assert (is_gimple_min_invariant (val
));
601 val
= avoid_overflow_infinity (val
);
602 set_value_range (vr
, VR_RANGE
, val
, val
, equiv
);
605 /* Set value range VR to a non-negative range of type TYPE.
606 OVERFLOW_INFINITY indicates whether to use an overflow infinity
607 rather than TYPE_MAX_VALUE; this should be true if we determine
608 that the range is nonnegative based on the assumption that signed
609 overflow does not occur. */
612 set_value_range_to_nonnegative (value_range_t
*vr
, tree type
,
613 bool overflow_infinity
)
617 if (overflow_infinity
&& !supports_overflow_infinity (type
))
619 set_value_range_to_varying (vr
);
623 zero
= build_int_cst (type
, 0);
624 set_value_range (vr
, VR_RANGE
, zero
,
626 ? positive_overflow_infinity (type
)
627 : TYPE_MAX_VALUE (type
)),
631 /* Set value range VR to a non-NULL range of type TYPE. */
634 set_value_range_to_nonnull (value_range_t
*vr
, tree type
)
636 tree zero
= build_int_cst (type
, 0);
637 set_value_range (vr
, VR_ANTI_RANGE
, zero
, zero
, vr
->equiv
);
641 /* Set value range VR to a NULL range of type TYPE. */
644 set_value_range_to_null (value_range_t
*vr
, tree type
)
646 set_value_range_to_value (vr
, build_int_cst (type
, 0), vr
->equiv
);
650 /* Set value range VR to a range of a truthvalue of type TYPE. */
653 set_value_range_to_truthvalue (value_range_t
*vr
, tree type
)
655 if (TYPE_PRECISION (type
) == 1)
656 set_value_range_to_varying (vr
);
658 set_value_range (vr
, VR_RANGE
,
659 build_int_cst (type
, 0), build_int_cst (type
, 1),
664 /* If abs (min) < abs (max), set VR to [-max, max], if
665 abs (min) >= abs (max), set VR to [-min, min]. */
668 abs_extent_range (value_range_t
*vr
, tree min
, tree max
)
672 gcc_assert (TREE_CODE (min
) == INTEGER_CST
);
673 gcc_assert (TREE_CODE (max
) == INTEGER_CST
);
674 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min
)));
675 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min
)));
676 min
= fold_unary (ABS_EXPR
, TREE_TYPE (min
), min
);
677 max
= fold_unary (ABS_EXPR
, TREE_TYPE (max
), max
);
678 if (TREE_OVERFLOW (min
) || TREE_OVERFLOW (max
))
680 set_value_range_to_varying (vr
);
683 cmp
= compare_values (min
, max
);
685 min
= fold_unary (NEGATE_EXPR
, TREE_TYPE (min
), max
);
686 else if (cmp
== 0 || cmp
== 1)
689 min
= fold_unary (NEGATE_EXPR
, TREE_TYPE (min
), min
);
693 set_value_range_to_varying (vr
);
696 set_and_canonicalize_value_range (vr
, VR_RANGE
, min
, max
, NULL
);
700 /* Return value range information for VAR.
702 If we have no values ranges recorded (ie, VRP is not running), then
703 return NULL. Otherwise create an empty range if none existed for VAR. */
705 static value_range_t
*
706 get_value_range (const_tree var
)
708 static const struct value_range_d vr_const_varying
709 = { VR_VARYING
, NULL_TREE
, NULL_TREE
, NULL
};
712 unsigned ver
= SSA_NAME_VERSION (var
);
714 /* If we have no recorded ranges, then return NULL. */
718 /* If we query the range for a new SSA name return an unmodifiable VARYING.
719 We should get here at most from the substitute-and-fold stage which
720 will never try to change values. */
721 if (ver
>= num_vr_values
)
722 return CONST_CAST (value_range_t
*, &vr_const_varying
);
728 /* After propagation finished do not allocate new value-ranges. */
729 if (values_propagated
)
730 return CONST_CAST (value_range_t
*, &vr_const_varying
);
732 /* Create a default value range. */
733 vr_value
[ver
] = vr
= XCNEW (value_range_t
);
735 /* Defer allocating the equivalence set. */
738 /* If VAR is a default definition of a parameter, the variable can
739 take any value in VAR's type. */
740 if (SSA_NAME_IS_DEFAULT_DEF (var
))
742 sym
= SSA_NAME_VAR (var
);
743 if (TREE_CODE (sym
) == PARM_DECL
)
745 /* Try to use the "nonnull" attribute to create ~[0, 0]
746 anti-ranges for pointers. Note that this is only valid with
747 default definitions of PARM_DECLs. */
748 if (POINTER_TYPE_P (TREE_TYPE (sym
))
749 && nonnull_arg_p (sym
))
750 set_value_range_to_nonnull (vr
, TREE_TYPE (sym
));
752 set_value_range_to_varying (vr
);
754 else if (TREE_CODE (sym
) == RESULT_DECL
755 && DECL_BY_REFERENCE (sym
))
756 set_value_range_to_nonnull (vr
, TREE_TYPE (sym
));
762 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
765 vrp_operand_equal_p (const_tree val1
, const_tree val2
)
769 if (!val1
|| !val2
|| !operand_equal_p (val1
, val2
, 0))
771 if (is_overflow_infinity (val1
))
772 return is_overflow_infinity (val2
);
776 /* Return true, if the bitmaps B1 and B2 are equal. */
779 vrp_bitmap_equal_p (const_bitmap b1
, const_bitmap b2
)
782 || ((!b1
|| bitmap_empty_p (b1
))
783 && (!b2
|| bitmap_empty_p (b2
)))
785 && bitmap_equal_p (b1
, b2
)));
788 /* Update the value range and equivalence set for variable VAR to
789 NEW_VR. Return true if NEW_VR is different from VAR's previous
792 NOTE: This function assumes that NEW_VR is a temporary value range
793 object created for the sole purpose of updating VAR's range. The
794 storage used by the equivalence set from NEW_VR will be freed by
795 this function. Do not call update_value_range when NEW_VR
796 is the range object associated with another SSA name. */
799 update_value_range (const_tree var
, value_range_t
*new_vr
)
801 value_range_t
*old_vr
;
804 /* Update the value range, if necessary. */
805 old_vr
= get_value_range (var
);
806 is_new
= old_vr
->type
!= new_vr
->type
807 || !vrp_operand_equal_p (old_vr
->min
, new_vr
->min
)
808 || !vrp_operand_equal_p (old_vr
->max
, new_vr
->max
)
809 || !vrp_bitmap_equal_p (old_vr
->equiv
, new_vr
->equiv
);
813 /* Do not allow transitions up the lattice. The following
814 is slightly more awkward than just new_vr->type < old_vr->type
815 because VR_RANGE and VR_ANTI_RANGE need to be considered
816 the same. We may not have is_new when transitioning to
817 UNDEFINED or from VARYING. */
818 if (new_vr
->type
== VR_UNDEFINED
819 || old_vr
->type
== VR_VARYING
)
820 set_value_range_to_varying (old_vr
);
822 set_value_range (old_vr
, new_vr
->type
, new_vr
->min
, new_vr
->max
,
826 BITMAP_FREE (new_vr
->equiv
);
832 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
833 point where equivalence processing can be turned on/off. */
836 add_equivalence (bitmap
*equiv
, const_tree var
)
838 unsigned ver
= SSA_NAME_VERSION (var
);
839 value_range_t
*vr
= vr_value
[ver
];
842 *equiv
= BITMAP_ALLOC (NULL
);
843 bitmap_set_bit (*equiv
, ver
);
845 bitmap_ior_into (*equiv
, vr
->equiv
);
849 /* Return true if VR is ~[0, 0]. */
852 range_is_nonnull (value_range_t
*vr
)
854 return vr
->type
== VR_ANTI_RANGE
855 && integer_zerop (vr
->min
)
856 && integer_zerop (vr
->max
);
860 /* Return true if VR is [0, 0]. */
863 range_is_null (value_range_t
*vr
)
865 return vr
->type
== VR_RANGE
866 && integer_zerop (vr
->min
)
867 && integer_zerop (vr
->max
);
870 /* Return true if max and min of VR are INTEGER_CST. It's not necessary
874 range_int_cst_p (value_range_t
*vr
)
876 return (vr
->type
== VR_RANGE
877 && TREE_CODE (vr
->max
) == INTEGER_CST
878 && TREE_CODE (vr
->min
) == INTEGER_CST
);
881 /* Return true if VR is a INTEGER_CST singleton. */
884 range_int_cst_singleton_p (value_range_t
*vr
)
886 return (range_int_cst_p (vr
)
887 && !TREE_OVERFLOW (vr
->min
)
888 && !TREE_OVERFLOW (vr
->max
)
889 && tree_int_cst_equal (vr
->min
, vr
->max
));
892 /* Return true if value range VR involves at least one symbol. */
895 symbolic_range_p (value_range_t
*vr
)
897 return (!is_gimple_min_invariant (vr
->min
)
898 || !is_gimple_min_invariant (vr
->max
));
901 /* Return true if value range VR uses an overflow infinity. */
904 overflow_infinity_range_p (value_range_t
*vr
)
906 return (vr
->type
== VR_RANGE
907 && (is_overflow_infinity (vr
->min
)
908 || is_overflow_infinity (vr
->max
)));
911 /* Return false if we can not make a valid comparison based on VR;
912 this will be the case if it uses an overflow infinity and overflow
913 is not undefined (i.e., -fno-strict-overflow is in effect).
914 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
915 uses an overflow infinity. */
918 usable_range_p (value_range_t
*vr
, bool *strict_overflow_p
)
920 gcc_assert (vr
->type
== VR_RANGE
);
921 if (is_overflow_infinity (vr
->min
))
923 *strict_overflow_p
= true;
924 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr
->min
)))
927 if (is_overflow_infinity (vr
->max
))
929 *strict_overflow_p
= true;
930 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr
->max
)))
937 /* Return true if the result of assignment STMT is know to be non-negative.
938 If the return value is based on the assumption that signed overflow is
939 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
940 *STRICT_OVERFLOW_P.*/
943 gimple_assign_nonnegative_warnv_p (gimple stmt
, bool *strict_overflow_p
)
945 enum tree_code code
= gimple_assign_rhs_code (stmt
);
946 switch (get_gimple_rhs_class (code
))
948 case GIMPLE_UNARY_RHS
:
949 return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt
),
950 gimple_expr_type (stmt
),
951 gimple_assign_rhs1 (stmt
),
953 case GIMPLE_BINARY_RHS
:
954 return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt
),
955 gimple_expr_type (stmt
),
956 gimple_assign_rhs1 (stmt
),
957 gimple_assign_rhs2 (stmt
),
959 case GIMPLE_TERNARY_RHS
:
961 case GIMPLE_SINGLE_RHS
:
962 return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt
),
964 case GIMPLE_INVALID_RHS
:
971 /* Return true if return value of call STMT is know to be non-negative.
972 If the return value is based on the assumption that signed overflow is
973 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
974 *STRICT_OVERFLOW_P.*/
977 gimple_call_nonnegative_warnv_p (gimple stmt
, bool *strict_overflow_p
)
979 tree arg0
= gimple_call_num_args (stmt
) > 0 ?
980 gimple_call_arg (stmt
, 0) : NULL_TREE
;
981 tree arg1
= gimple_call_num_args (stmt
) > 1 ?
982 gimple_call_arg (stmt
, 1) : NULL_TREE
;
984 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt
),
985 gimple_call_fndecl (stmt
),
991 /* Return true if STMT is know to to compute a non-negative value.
992 If the return value is based on the assumption that signed overflow is
993 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
994 *STRICT_OVERFLOW_P.*/
997 gimple_stmt_nonnegative_warnv_p (gimple stmt
, bool *strict_overflow_p
)
999 switch (gimple_code (stmt
))
1002 return gimple_assign_nonnegative_warnv_p (stmt
, strict_overflow_p
);
1004 return gimple_call_nonnegative_warnv_p (stmt
, strict_overflow_p
);
1010 /* Return true if the result of assignment STMT is know to be non-zero.
1011 If the return value is based on the assumption that signed overflow is
1012 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1013 *STRICT_OVERFLOW_P.*/
1016 gimple_assign_nonzero_warnv_p (gimple stmt
, bool *strict_overflow_p
)
1018 enum tree_code code
= gimple_assign_rhs_code (stmt
);
1019 switch (get_gimple_rhs_class (code
))
1021 case GIMPLE_UNARY_RHS
:
1022 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt
),
1023 gimple_expr_type (stmt
),
1024 gimple_assign_rhs1 (stmt
),
1026 case GIMPLE_BINARY_RHS
:
1027 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt
),
1028 gimple_expr_type (stmt
),
1029 gimple_assign_rhs1 (stmt
),
1030 gimple_assign_rhs2 (stmt
),
1032 case GIMPLE_TERNARY_RHS
:
1034 case GIMPLE_SINGLE_RHS
:
1035 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt
),
1037 case GIMPLE_INVALID_RHS
:
1044 /* Return true if STMT is know to to compute a non-zero value.
1045 If the return value is based on the assumption that signed overflow is
1046 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1047 *STRICT_OVERFLOW_P.*/
1050 gimple_stmt_nonzero_warnv_p (gimple stmt
, bool *strict_overflow_p
)
1052 switch (gimple_code (stmt
))
1055 return gimple_assign_nonzero_warnv_p (stmt
, strict_overflow_p
);
1057 return gimple_alloca_call_p (stmt
);
1063 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
1067 vrp_stmt_computes_nonzero (gimple stmt
, bool *strict_overflow_p
)
1069 if (gimple_stmt_nonzero_warnv_p (stmt
, strict_overflow_p
))
1072 /* If we have an expression of the form &X->a, then the expression
1073 is nonnull if X is nonnull. */
1074 if (is_gimple_assign (stmt
)
1075 && gimple_assign_rhs_code (stmt
) == ADDR_EXPR
)
1077 tree expr
= gimple_assign_rhs1 (stmt
);
1078 tree base
= get_base_address (TREE_OPERAND (expr
, 0));
1080 if (base
!= NULL_TREE
1081 && TREE_CODE (base
) == MEM_REF
1082 && TREE_CODE (TREE_OPERAND (base
, 0)) == SSA_NAME
)
1084 value_range_t
*vr
= get_value_range (TREE_OPERAND (base
, 0));
1085 if (range_is_nonnull (vr
))
1093 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1094 a gimple invariant, or SSA_NAME +- CST. */
1097 valid_value_p (tree expr
)
1099 if (TREE_CODE (expr
) == SSA_NAME
)
1102 if (TREE_CODE (expr
) == PLUS_EXPR
1103 || TREE_CODE (expr
) == MINUS_EXPR
)
1104 return (TREE_CODE (TREE_OPERAND (expr
, 0)) == SSA_NAME
1105 && TREE_CODE (TREE_OPERAND (expr
, 1)) == INTEGER_CST
);
1107 return is_gimple_min_invariant (expr
);
1113 -2 if those are incomparable. */
1115 operand_less_p (tree val
, tree val2
)
1117 /* LT is folded faster than GE and others. Inline the common case. */
1118 if (TREE_CODE (val
) == INTEGER_CST
&& TREE_CODE (val2
) == INTEGER_CST
)
1120 if (TYPE_UNSIGNED (TREE_TYPE (val
)))
1121 return INT_CST_LT_UNSIGNED (val
, val2
);
1124 if (INT_CST_LT (val
, val2
))
1132 fold_defer_overflow_warnings ();
1134 tcmp
= fold_binary_to_constant (LT_EXPR
, boolean_type_node
, val
, val2
);
1136 fold_undefer_and_ignore_overflow_warnings ();
1139 || TREE_CODE (tcmp
) != INTEGER_CST
)
1142 if (!integer_zerop (tcmp
))
1146 /* val >= val2, not considering overflow infinity. */
1147 if (is_negative_overflow_infinity (val
))
1148 return is_negative_overflow_infinity (val2
) ? 0 : 1;
1149 else if (is_positive_overflow_infinity (val2
))
1150 return is_positive_overflow_infinity (val
) ? 0 : 1;
1155 /* Compare two values VAL1 and VAL2. Return
1157 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1160 +1 if VAL1 > VAL2, and
1163 This is similar to tree_int_cst_compare but supports pointer values
1164 and values that cannot be compared at compile time.
1166 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1167 true if the return value is only valid if we assume that signed
1168 overflow is undefined. */
1171 compare_values_warnv (tree val1
, tree val2
, bool *strict_overflow_p
)
1176 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1178 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1
))
1179 == POINTER_TYPE_P (TREE_TYPE (val2
)));
1180 /* Convert the two values into the same type. This is needed because
1181 sizetype causes sign extension even for unsigned types. */
1182 val2
= fold_convert (TREE_TYPE (val1
), val2
);
1183 STRIP_USELESS_TYPE_CONVERSION (val2
);
1185 if ((TREE_CODE (val1
) == SSA_NAME
1186 || TREE_CODE (val1
) == PLUS_EXPR
1187 || TREE_CODE (val1
) == MINUS_EXPR
)
1188 && (TREE_CODE (val2
) == SSA_NAME
1189 || TREE_CODE (val2
) == PLUS_EXPR
1190 || TREE_CODE (val2
) == MINUS_EXPR
))
1192 tree n1
, c1
, n2
, c2
;
1193 enum tree_code code1
, code2
;
1195 /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
1196 return -1 or +1 accordingly. If VAL1 and VAL2 don't use the
1197 same name, return -2. */
1198 if (TREE_CODE (val1
) == SSA_NAME
)
1206 code1
= TREE_CODE (val1
);
1207 n1
= TREE_OPERAND (val1
, 0);
1208 c1
= TREE_OPERAND (val1
, 1);
1209 if (tree_int_cst_sgn (c1
) == -1)
1211 if (is_negative_overflow_infinity (c1
))
1213 c1
= fold_unary_to_constant (NEGATE_EXPR
, TREE_TYPE (c1
), c1
);
1216 code1
= code1
== MINUS_EXPR
? PLUS_EXPR
: MINUS_EXPR
;
1220 if (TREE_CODE (val2
) == SSA_NAME
)
1228 code2
= TREE_CODE (val2
);
1229 n2
= TREE_OPERAND (val2
, 0);
1230 c2
= TREE_OPERAND (val2
, 1);
1231 if (tree_int_cst_sgn (c2
) == -1)
1233 if (is_negative_overflow_infinity (c2
))
1235 c2
= fold_unary_to_constant (NEGATE_EXPR
, TREE_TYPE (c2
), c2
);
1238 code2
= code2
== MINUS_EXPR
? PLUS_EXPR
: MINUS_EXPR
;
1242 /* Both values must use the same name. */
1246 if (code1
== SSA_NAME
1247 && code2
== SSA_NAME
)
1251 /* If overflow is defined we cannot simplify more. */
1252 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1
)))
1255 if (strict_overflow_p
!= NULL
1256 && (code1
== SSA_NAME
|| !TREE_NO_WARNING (val1
))
1257 && (code2
== SSA_NAME
|| !TREE_NO_WARNING (val2
)))
1258 *strict_overflow_p
= true;
1260 if (code1
== SSA_NAME
)
1262 if (code2
== PLUS_EXPR
)
1263 /* NAME < NAME + CST */
1265 else if (code2
== MINUS_EXPR
)
1266 /* NAME > NAME - CST */
1269 else if (code1
== PLUS_EXPR
)
1271 if (code2
== SSA_NAME
)
1272 /* NAME + CST > NAME */
1274 else if (code2
== PLUS_EXPR
)
1275 /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */
1276 return compare_values_warnv (c1
, c2
, strict_overflow_p
);
1277 else if (code2
== MINUS_EXPR
)
1278 /* NAME + CST1 > NAME - CST2 */
1281 else if (code1
== MINUS_EXPR
)
1283 if (code2
== SSA_NAME
)
1284 /* NAME - CST < NAME */
1286 else if (code2
== PLUS_EXPR
)
1287 /* NAME - CST1 < NAME + CST2 */
1289 else if (code2
== MINUS_EXPR
)
1290 /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that
1291 C1 and C2 are swapped in the call to compare_values. */
1292 return compare_values_warnv (c2
, c1
, strict_overflow_p
);
1298 /* We cannot compare non-constants. */
1299 if (!is_gimple_min_invariant (val1
) || !is_gimple_min_invariant (val2
))
1302 if (!POINTER_TYPE_P (TREE_TYPE (val1
)))
1304 /* We cannot compare overflowed values, except for overflow
1306 if (TREE_OVERFLOW (val1
) || TREE_OVERFLOW (val2
))
1308 if (strict_overflow_p
!= NULL
)
1309 *strict_overflow_p
= true;
1310 if (is_negative_overflow_infinity (val1
))
1311 return is_negative_overflow_infinity (val2
) ? 0 : -1;
1312 else if (is_negative_overflow_infinity (val2
))
1314 else if (is_positive_overflow_infinity (val1
))
1315 return is_positive_overflow_infinity (val2
) ? 0 : 1;
1316 else if (is_positive_overflow_infinity (val2
))
1321 return tree_int_cst_compare (val1
, val2
);
1327 /* First see if VAL1 and VAL2 are not the same. */
1328 if (val1
== val2
|| operand_equal_p (val1
, val2
, 0))
1331 /* If VAL1 is a lower address than VAL2, return -1. */
1332 if (operand_less_p (val1
, val2
) == 1)
1335 /* If VAL1 is a higher address than VAL2, return +1. */
1336 if (operand_less_p (val2
, val1
) == 1)
1339 /* If VAL1 is different than VAL2, return +2.
1340 For integer constants we either have already returned -1 or 1
1341 or they are equivalent. We still might succeed in proving
1342 something about non-trivial operands. */
1343 if (TREE_CODE (val1
) != INTEGER_CST
1344 || TREE_CODE (val2
) != INTEGER_CST
)
1346 t
= fold_binary_to_constant (NE_EXPR
, boolean_type_node
, val1
, val2
);
1347 if (t
&& integer_onep (t
))
1355 /* Compare values like compare_values_warnv, but treat comparisons of
1356 nonconstants which rely on undefined overflow as incomparable. */
/* NOTE(review): the extraction dropped several original lines here
   (1357-1358, 1360-1364, 1366, 1368-1372): the return type, braces, the
   declarations of RET and SOP, and the final return statements are all
   missing.  The surviving fragment delegates to compare_values_warnv and
   then -- presumably on a dropped `if (sop ...)` line -- demotes results
   that depended on undefined overflow of nonconstant operands to the
   "cannot compare" value.  Confirm against the unmangled tree-vrp.c.  */
1359 compare_values (tree val1
, tree val2
)
/* SOP ("strict overflow p") is set by the callee when the comparison
   result relies on signed overflow being undefined.  */
1365 ret
= compare_values_warnv (val1
, val2
, &sop
);
/* The overflow-dependent result is only trusted when both operands are
   invariants; otherwise it is discarded (enclosing condition is on a
   dropped line).  */
1367 && (!is_gimple_min_invariant (val1
) || !is_gimple_min_invariant (val2
)))
1373 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
1374 0 if VAL is not inside [MIN, MAX],
1375 -2 if we cannot tell either way.
1377 Benchmark compile/20001226-1.c compilation time after changing this
/* NOTE(review): the return type, braces, the declarations of CMP1/CMP2,
   the early returns after each comparison and the final return (original
   lines 1378-1384, 1386-1390, 1392-1397) were dropped by the extraction;
   only the two operand_less_p probes survive.  */
1381 value_inside_range (tree val
, tree min
, tree max
)
/* CMP1: is VAL < MIN?  (operand_less_p returns 1/0/-2.)  A dropped line
   presumably returns early on -2 or 1 -- confirm.  */
1385 cmp1
= operand_less_p (val
, min
);
/* CMP2: is MAX < VAL?  The dropped final return combines both probes.  */
1391 cmp2
= operand_less_p (max
, val
);
1399 /* Return true if value ranges VR0 and VR1 have a non-empty
1402 Benchmark compile/20001226-1.c compilation time after changing this
/* NOTE(review): the return type, braces, and the return statements after
   each test (original lines 1403-1406, 1408, 1413, 1415-1418) are missing
   from this extraction; only the two overlap tests survive.  */
1407 value_ranges_intersect_p (value_range_t
*vr0
, value_range_t
*vr1
)
1409 /* The value ranges do not intersect if the maximum of the first range is
1410 less than the minimum of the second range or vice versa.
1411 When those relations are unknown, we can't do any better. */
/* operand_less_p yields 1/0/-2; "!= 0" treats both "yes" and "don't
   know" as grounds for the (dropped) early return.  */
1412 if (operand_less_p (vr0
->max
, vr1
->min
) != 0)
1414 if (operand_less_p (vr1
->max
, vr0
->min
) != 0)
1420 /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
1421 include the value zero, -2 if we cannot tell. */
/* NOTE(review): return type line and braces (original 1422-1423, 1425,
   1428) dropped by the extraction.  Builds a zero constant of MIN's type
   and delegates the containment test to value_inside_range.  */
1424 range_includes_zero_p (tree min
, tree max
)
1426 tree zero
= build_int_cst (TREE_TYPE (min
), 0);
1427 return value_inside_range (zero
, min
, max
);
1430 /* Return true if *VR is known to only contain nonnegative values. */
/* NOTE(review): return type, braces and the final `return false;`
   (original 1431-1434, 1437, 1439, 1442-1445) were dropped by the
   extraction.  Comment typo "know" fixed above.  */
1433 value_range_nonnegative_p (value_range_t
*vr
)
1435 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1436 which would return a useful value should be encoded as a
1438 if (vr
->type
== VR_RANGE
)
/* Nonnegative iff the range minimum compares >= 0.  */
1440 int result
= compare_values (vr
->min
, integer_zero_node
);
1441 return (result
== 0 || result
== 1);
1447 /* Return true if T, an SSA_NAME, is known to be nonnegative. Return
1448 false otherwise or if no value range information is available. */
/* NOTE(review): return type, braces and several statements (original
   1449-1452, 1454, 1457-1461, 1463) were dropped by this extraction.  */
1451 ssa_name_nonnegative_p (const_tree t
)
1453 value_range_t
*vr
= get_value_range (t
);
/* NOTE(review): INTEGRAL_TYPE_P and TYPE_UNSIGNED expect a *type* node,
   but T is an SSA_NAME here -- this likely should read
   INTEGRAL_TYPE_P (TREE_TYPE (t)) / TYPE_UNSIGNED (TREE_TYPE (t)).
   Confirm against the unmangled source before relying on it.  */
1455 if (INTEGRAL_TYPE_P (t
)
1456 && TYPE_UNSIGNED (t
))
/* Otherwise fall back to the recorded value range.  */
1462 return value_range_nonnegative_p (vr
);
1465 /* If *VR has a value range that is a single constant value return that,
1466 otherwise return NULL_TREE. */
/* NOTE(review): comment typo "rante" fixed above.  Return type, braces,
   the `return vr->min;` and `return NULL_TREE;` (original 1467-1470,
   1474-1477) were dropped by the extraction.  */
1469 value_range_constant_singleton (value_range_t
*vr
)
/* Singleton iff a VR_RANGE whose bounds are equal and constant.  */
1471 if (vr
->type
== VR_RANGE
1472 && operand_equal_p (vr
->min
, vr
->max
, 0)
1473 && is_gimple_min_invariant (vr
->min
))
1479 /* If OP has a value range with a single constant value return that,
1480 otherwise return NULL_TREE. This returns OP itself if OP is a
/* NOTE(review): return type, braces and the two early `return` bodies
   (original 1481-1483, 1485, 1487-1488, 1490-1491) were dropped.
   Invariants are returned as-is; non-SSA_NAME operands have no range
   info; otherwise the recorded range is probed for a singleton.  */
1484 op_with_constant_singleton_value_range (tree op
)
1486 if (is_gimple_min_invariant (op
))
1489 if (TREE_CODE (op
) != SSA_NAME
)
1492 return value_range_constant_singleton (get_value_range (op
));
1495 /* Return true if op is in a boolean [0, 1] value-range. */
/* NOTE(review): return type, braces, the declaration of VR and several
   return statements (original 1496-1501, 1503-1504, 1507-1508, 1510-1511)
   were dropped by this extraction.  */
1498 op_with_boolean_value_range_p (tree op
)
/* One-bit types are trivially boolean.  */
1502 if (TYPE_PRECISION (TREE_TYPE (op
)) == 1)
/* Literal 0 or 1 constants qualify directly.  */
1505 if (integer_zerop (op
)
1506 || integer_onep (op
))
1509 if (TREE_CODE (op
) != SSA_NAME
)
/* For SSA names, consult the recorded range: must be exactly [0, 1].  */
1512 vr
= get_value_range (op
);
1513 return (vr
->type
== VR_RANGE
1514 && integer_zerop (vr
->min
)
1515 && integer_onep (vr
->max
));
1518 /* Extract value range information from an ASSERT_EXPR EXPR and store
/* NOTE(review): this extraction is lossy -- the return type, braces,
   `else` keywords, many `return`/assignment lines and whole statements
   (e.g. original 1519-1521, 1523, 1527, 1543-1545, 1552-1553, 1564-1566,
   1637-1639, 1712-1724, 1752-1764, 1789-1793, 1796-1797) are missing.
   The comments below describe only what the surviving fragments show;
   confirm details against the unmangled tree-vrp.c.  */
1522 extract_range_from_assert (value_range_t
*vr_p
, tree expr
)
1524 tree var
, cond
, limit
, min
, max
, type
;
1525 value_range_t
*limit_vr
;
1526 enum tree_code cond_code
;
/* Decompose the ASSERT_EXPR into the asserted variable and the
   comparison that guards it.  */
1528 var
= ASSERT_EXPR_VAR (expr
)
;
1529 cond
= ASSERT_EXPR_COND (expr
);
1531 gcc_assert (COMPARISON_CLASS_P (cond
));
1533 /* Find VAR in the ASSERT_EXPR conditional. */
1534 if (var
== TREE_OPERAND (cond
, 0)
1535 || TREE_CODE (TREE_OPERAND (cond
, 0)) == PLUS_EXPR
1536 || TREE_CODE (TREE_OPERAND (cond
, 0)) == NOP_EXPR
)
1538 /* If the predicate is of the form VAR COMP LIMIT, then we just
1539 take LIMIT from the RHS and use the same comparison code. */
1540 cond_code
= TREE_CODE (cond
);
1541 limit
= TREE_OPERAND (cond
, 1);
1542 cond
= TREE_OPERAND (cond
, 0);
1546 /* If the predicate is of the form LIMIT COMP VAR, then we need
1547 to flip around the comparison code to create the proper range
1549 cond_code
= swap_tree_comparison (TREE_CODE (cond
));
1550 limit
= TREE_OPERAND (cond
, 0);
1551 cond
= TREE_OPERAND (cond
, 1);
1554 limit
= avoid_overflow_infinity (limit
);
1556 type
= TREE_TYPE (var
);
1557 gcc_assert (limit
!= var
);
1559 /* For pointer arithmetic, we only keep track of pointer equality
1561 if (POINTER_TYPE_P (type
) && cond_code
!= NE_EXPR
&& cond_code
!= EQ_EXPR
)
1563 set_value_range_to_varying (vr_p
);
1567 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1568 try to use LIMIT's range to avoid creating symbolic ranges
1570 limit_vr
= (TREE_CODE (limit
) == SSA_NAME
) ? get_value_range (limit
) : NULL
;
1572 /* LIMIT's range is only interesting if it has any useful information. */
/* Uninformative LIMIT ranges (undefined/varying/symbolic) are discarded
   -- the guarding `if (limit_vr ...)` head is on a dropped line.  */
1574 && (limit_vr
->type
== VR_UNDEFINED
1575 || limit_vr
->type
== VR_VARYING
1576 || symbolic_range_p (limit_vr
)))
1579 /* Initially, the new range has the same set of equivalences of
1580 VAR's range. This will be revised before returning the final
1581 value. Since assertions may be chained via mutually exclusive
1582 predicates, we will need to trim the set of equivalences before
1584 gcc_assert (vr_p
->equiv
== NULL
);
1585 add_equivalence (&vr_p
->equiv
, var
);
1587 /* Extract a new range based on the asserted comparison for VAR and
1588 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1589 will only use it for equality comparisons (EQ_EXPR). For any
1590 other kind of assertion, we cannot derive a range from LIMIT's
1591 anti-range that can be used to describe the new range. For
1592 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1593 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1594 no single range for x_2 that could describe LE_EXPR, so we might
1595 as well build the range [b_4, +INF] for it.
1596 One special case we handle is extracting a range from a
1597 range test encoded as (unsigned)var + CST <= limit. */
1598 if (TREE_CODE (cond
) == NOP_EXPR
1599 || TREE_CODE (cond
) == PLUS_EXPR
)
1601 if (TREE_CODE (cond
) == PLUS_EXPR
)
/* Range test (unsigned)var + CST <= limit: the valid window is
   [-CST, limit - CST].  */
1603 min
= fold_build1 (NEGATE_EXPR
, TREE_TYPE (TREE_OPERAND (cond
, 1)),
1604 TREE_OPERAND (cond
, 1));
1605 max
= int_const_binop (PLUS_EXPR
, limit
, min
);
1606 cond
= TREE_OPERAND (cond
, 0);
1610 min
= build_int_cst (TREE_TYPE (var
), 0);
1614 /* Make sure to not set TREE_OVERFLOW on the final type
1615 conversion. We are willingly interpreting large positive
1616 unsigned values as negative signed values here. */
1617 min
= force_fit_type (TREE_TYPE (var
),
1618 wide_int::from (min
,
1619 TYPE_PRECISION (TREE_TYPE (var
)),
1620 TYPE_SIGN (TREE_TYPE (min
))),
1622 max
= force_fit_type (TREE_TYPE (var
),
1623 wide_int::from (max
,
1624 TYPE_PRECISION (TREE_TYPE (var
)),
1625 TYPE_SIGN (TREE_TYPE (max
))),
1628 /* We can transform a max, min range to an anti-range or
1629 vice-versa. Use set_and_canonicalize_value_range which does
1631 if (cond_code
== LE_EXPR
)
1632 set_and_canonicalize_value_range (vr_p
, VR_RANGE
,
1633 min
, max
, vr_p
->equiv
);
1634 else if (cond_code
== GT_EXPR
)
1635 set_and_canonicalize_value_range (vr_p
, VR_ANTI_RANGE
,
1636 min
, max
, vr_p
->equiv
);
1640 else if (cond_code
== EQ_EXPR
)
1642 enum value_range_type range_type
;
/* VAR == LIMIT: adopt LIMIT's recorded range when useful, otherwise
   the singleton [LIMIT, LIMIT] (assignments on dropped lines).  */
1646 range_type
= limit_vr
->type
;
1647 min
= limit_vr
->min
;
1648 max
= limit_vr
->max
;
1652 range_type
= VR_RANGE
;
1657 set_value_range (vr_p
, range_type
, min
, max
, vr_p
->equiv
);
1659 /* When asserting the equality VAR == LIMIT and LIMIT is another
1660 SSA name, the new range will also inherit the equivalence set
1662 if (TREE_CODE (limit
) == SSA_NAME
)
1663 add_equivalence (&vr_p
->equiv
, limit
);
1665 else if (cond_code
== NE_EXPR
)
1667 /* As described above, when LIMIT's range is an anti-range and
1668 this assertion is an inequality (NE_EXPR), then we cannot
1669 derive anything from the anti-range. For instance, if
1670 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1671 not imply that VAR's range is [0, 0]. So, in the case of
1672 anti-ranges, we just assert the inequality using LIMIT and
1675 If LIMIT_VR is a range, we can only use it to build a new
1676 anti-range if LIMIT_VR is a single-valued range. For
1677 instance, if LIMIT_VR is [0, 1], the predicate
1678 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1679 Rather, it means that for value 0 VAR should be ~[0, 0]
1680 and for value 1, VAR should be ~[1, 1]. We cannot
1681 represent these ranges.
1683 The only situation in which we can build a valid
1684 anti-range is when LIMIT_VR is a single-valued range
1685 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1686 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1688 && limit_vr
->type
== VR_RANGE
1689 && compare_values (limit_vr
->min
, limit_vr
->max
) == 0)
1691 min
= limit_vr
->min
;
1692 max
= limit_vr
->max
;
1696 /* In any other case, we cannot use LIMIT's range to build a
1697 valid anti-range. */
1701 /* If MIN and MAX cover the whole range for their type, then
1702 just use the original LIMIT. */
1703 if (INTEGRAL_TYPE_P (type
)
1704 && vrp_val_is_min (min
)
1705 && vrp_val_is_max (max
))
1708 set_and_canonicalize_value_range (vr_p
, VR_ANTI_RANGE
,
1709 min
, max
, vr_p
->equiv
);
1711 else if (cond_code
== LE_EXPR
|| cond_code
== LT_EXPR
)
/* VAR <= / < LIMIT: lower bound is the type minimum.  */
1713 min
= TYPE_MIN_VALUE (type
);
1715 if (limit_vr
== NULL
|| limit_vr
->type
== VR_ANTI_RANGE
)
1719 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1720 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1722 max
= limit_vr
->max
;
1725 /* If the maximum value forces us to be out of bounds, simply punt.
1726 It would be pointless to try and do anything more since this
1727 all should be optimized away above us. */
1728 if ((cond_code
== LT_EXPR
1729 && compare_values (max
, min
) == 0)
1730 || (CONSTANT_CLASS_P (max
) && TREE_OVERFLOW (max
)))
1731 set_value_range_to_varying (vr_p
);
1734 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1735 if (cond_code
== LT_EXPR
)
/* One-bit signed types cannot represent +1; subtract by adding -1
   instead.  */
1737 if (TYPE_PRECISION (TREE_TYPE (max
)) == 1
1738 && !TYPE_UNSIGNED (TREE_TYPE (max
)))
1739 max
= fold_build2 (PLUS_EXPR
, TREE_TYPE (max
), max
,
1740 build_int_cst (TREE_TYPE (max
), -1));
1742 max
= fold_build2 (MINUS_EXPR
, TREE_TYPE (max
), max
,
1743 build_int_cst (TREE_TYPE (max
), 1));
1745 TREE_NO_WARNING (max
) = 1;
1748 set_value_range (vr_p
, VR_RANGE
, min
, max
, vr_p
->equiv
);
1751 else if (cond_code
== GE_EXPR
|| cond_code
== GT_EXPR
)
/* VAR >= / > LIMIT: mirror image of the LE/LT case above.  */
1753 max
= TYPE_MAX_VALUE (type
);
1755 if (limit_vr
== NULL
|| limit_vr
->type
== VR_ANTI_RANGE
)
1759 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1760 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1762 min
= limit_vr
->min
;
1765 /* If the minimum value forces us to be out of bounds, simply punt.
1766 It would be pointless to try and do anything more since this
1767 all should be optimized away above us. */
1768 if ((cond_code
== GT_EXPR
1769 && compare_values (min
, max
) == 0)
1770 || (CONSTANT_CLASS_P (min
) && TREE_OVERFLOW (min
)))
1771 set_value_range_to_varying (vr_p
);
1774 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1775 if (cond_code
== GT_EXPR
)
1777 if (TYPE_PRECISION (TREE_TYPE (min
)) == 1
1778 && !TYPE_UNSIGNED (TREE_TYPE (min
)))
1779 min
= fold_build2 (MINUS_EXPR
, TREE_TYPE (min
), min
,
1780 build_int_cst (TREE_TYPE (min
), -1));
1782 min
= fold_build2 (PLUS_EXPR
, TREE_TYPE (min
), min
,
1783 build_int_cst (TREE_TYPE (min
), 1));
1785 TREE_NO_WARNING (min
) = 1;
1788 set_value_range (vr_p
, VR_RANGE
, min
, max
, vr_p
->equiv
);
1794 /* Finally intersect the new range with what we already know about var. */
1795 vrp_intersect_ranges (vr_p
, get_value_range (var
));
1799 /* Extract range information from SSA name VAR and store it in VR. If
1800 VAR has an interesting range, use it. Otherwise, create the
1801 range [VAR, VAR] and return it. This is useful in situations where
1802 we may have conditionals testing values of VARYING names. For
1809 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
/* NOTE(review): return type, braces and the `else` between the two
   branches (original 1810-1812, 1814, 1816, 1819, 1821, 1823-1824) were
   dropped by this extraction.  */
1813 extract_range_from_ssa_name (value_range_t
*vr
, tree var
)
1815 value_range_t
*var_vr
= get_value_range (var
);
/* Copy an informative recorded range; otherwise fall back to the
   symbolic singleton [VAR, VAR].  */
1817 if (var_vr
->type
!= VR_UNDEFINED
&& var_vr
->type
!= VR_VARYING
)
1818 copy_value_range (vr
, var_vr
);
1820 set_value_range (vr
, VR_RANGE
, var
, var
, NULL
);
/* Either way VAR is equivalent to itself in the result.  */
1822 add_equivalence (&vr
->equiv
, var
);
1826 /* Wrapper around int_const_binop. If the operation overflows and we
1827 are not using wrapping arithmetic, then adjust the result to be
1828 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
1829 NULL_TREE if we need to use an overflow infinity representation but
1830 the type does not support it. */
/* NOTE(review): this extraction is lossy -- the return type, braces,
   `return` lines and several statements (e.g. original 1831-1832,
   1834-1836, 1852-1854, 1861, 1863-1872, 1875-1877, 1881, 1884-1885,
   1891, 1897, 1900-1901, 1909-1910, 1920-1921, 1929, 1938, 1949, 1957,
   1967, 1971-1975) are missing.  Comments below describe only the
   surviving fragments; confirm against the unmangled tree-vrp.c.  */
1833 vrp_int_const_binop (enum tree_code code
, tree val1
, tree val2
)
1837 res
= int_const_binop (code
, val1
, val2
);
1839 /* If we are using unsigned arithmetic, operate symbolically
1840 on -INF and +INF as int_const_binop only handles signed overflow. */
1841 if (TYPE_UNSIGNED (TREE_TYPE (val1
)))
/* CHECKZ: sign of comparing RES against VAL1 -- used to detect
   unsigned wrap-around below.  */
1843 int checkz
= compare_values (res
, val1
);
1844 bool overflow
= false;
1846 /* Ensure that res = val1 [+*] val2 >= val1
1847 or that res = val1 - val2 <= val1. */
1848 if ((code
== PLUS_EXPR
1849 && !(checkz
== 1 || checkz
== 0))
1850 || (code
== MINUS_EXPR
1851 && !(checkz
== 0 || checkz
== -1)))
1855 /* Checking for multiplication overflow is done by dividing the
1856 output of the multiplication by the first input of the
1857 multiplication. If the result of that division operation is
1858 not equal to the second input of the multiplication, then the
1859 multiplication overflowed. */
1860 else if (code
== MULT_EXPR
&& !integer_zerop (val1
))
1862 tree tmp
= int_const_binop (TRUNC_DIV_EXPR
,
1865 int check
= compare_values (tmp
, val2
);
/* On detected unsigned overflow, mark the result node (copied first
   so shared constants are not clobbered).  */
1873 res
= copy_node (res
);
1874 TREE_OVERFLOW (res
) = 1;
1878 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1
)))
1879 /* If the signed operation wraps then int_const_binop has done
1880 everything we want. */
1882 /* Signed division of -1/0 overflows and by the time it gets here
1883 returns NULL_TREE. */
1886 else if ((TREE_OVERFLOW (res
)
1887 && !TREE_OVERFLOW (val1
)
1888 && !TREE_OVERFLOW (val2
))
1889 || is_overflow_infinity (val1
)
1890 || is_overflow_infinity (val2
))
1892 /* If the operation overflowed but neither VAL1 nor VAL2 are
1893 overflown, return -INF or +INF depending on the operation
1894 and the combination of signs of the operands. */
1895 int sgn1
= tree_int_cst_sgn (val1
);
1896 int sgn2
= tree_int_cst_sgn (val2
);
1898 if (needs_overflow_infinity (TREE_TYPE (res
))
1899 && !supports_overflow_infinity (TREE_TYPE (res
)))
1902 /* We have to punt on adding infinities of different signs,
1903 since we can't tell what the sign of the result should be.
1904 Likewise for subtracting infinities of the same sign. */
1905 if (((code
== PLUS_EXPR
&& sgn1
!= sgn2
)
1906 || (code
== MINUS_EXPR
&& sgn1
== sgn2
))
1907 && is_overflow_infinity (val1
)
1908 && is_overflow_infinity (val2
))
1911 /* Don't try to handle division or shifting of infinities. */
1912 if ((code
== TRUNC_DIV_EXPR
1913 || code
== FLOOR_DIV_EXPR
1914 || code
== CEIL_DIV_EXPR
1915 || code
== EXACT_DIV_EXPR
1916 || code
== ROUND_DIV_EXPR
1917 || code
== RSHIFT_EXPR
)
1918 && (is_overflow_infinity (val1
)
1919 || is_overflow_infinity (val2
)))
1922 /* Notice that we only need to handle the restricted set of
1923 operations handled by extract_range_from_binary_expr.
1924 Among them, only multiplication, addition and subtraction
1925 can yield overflow without overflown operands because we
1926 are working with integral types only... except in the
1927 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
1928 for division too. */
1930 /* For multiplication, the sign of the overflow is given
1931 by the comparison of the signs of the operands. */
1932 if ((code
== MULT_EXPR
&& sgn1
== sgn2
)
1933 /* For addition, the operands must be of the same sign
1934 to yield an overflow. Its sign is therefore that
1935 of one of the operands, for example the first. For
1936 infinite operands X + -INF is negative, not positive. */
1937 || (code
== PLUS_EXPR
1939 ? !is_negative_overflow_infinity (val2
)
1940 : is_positive_overflow_infinity (val2
)))
1941 /* For subtraction, non-infinite operands must be of
1942 different signs to yield an overflow. Its sign is
1943 therefore that of the first operand or the opposite of
1944 that of the second operand. A first operand of 0 counts
1945 as positive here, for the corner case 0 - (-INF), which
1946 overflows, but must yield +INF. For infinite operands 0
1947 - INF is negative, not positive. */
1948 || (code
== MINUS_EXPR
1950 ? !is_positive_overflow_infinity (val2
)
1951 : is_negative_overflow_infinity (val2
)))
1952 /* We only get in here with positive shift count, so the
1953 overflow direction is the same as the sign of val1.
1954 Actually rshift does not overflow at all, but we only
1955 handle the case of shifting overflowed -INF and +INF. */
1956 || (code
== RSHIFT_EXPR
1958 /* For division, the only case is -INF / -1 = +INF. */
1959 || code
== TRUNC_DIV_EXPR
1960 || code
== FLOOR_DIV_EXPR
1961 || code
== CEIL_DIV_EXPR
1962 || code
== EXACT_DIV_EXPR
1963 || code
== ROUND_DIV_EXPR
)
/* Positive overflow: +INF if representable, else the type max.  */
1964 return (needs_overflow_infinity (TREE_TYPE (res
))
1965 ? positive_overflow_infinity (TREE_TYPE (res
))
1966 : TYPE_MAX_VALUE (TREE_TYPE (res
)));
/* Negative overflow: -INF if representable, else the type min.  */
1968 return (needs_overflow_infinity (TREE_TYPE (res
))
1969 ? negative_overflow_infinity (TREE_TYPE (res
))
1970 : TYPE_MIN_VALUE (TREE_TYPE (res
)));
1977 /* For range VR compute two wide_int bitmasks. In *MAY_BE_NONZERO
1978 bitmask if some bit is unset, it means for all numbers in the range
1979 the bit is 0, otherwise it might be 0 or 1. In *MUST_BE_NONZERO
1980 bitmask if some bit is set, it means for all numbers in the range
1981 the bit is 1, otherwise it might be 0 or 1. */
/* NOTE(review): return type, braces, the VR parameter line and a few
   statements (original 1982-1983, 1985, 1988, 1994-1995, 1997, 2000,
   2003, 2009-2010, 2015-2019) were dropped by this extraction.  */
1984 zero_nonzero_bits_from_vr (const tree expr_type
,
1986 wide_int
*may_be_nonzero
,
1987 wide_int
*must_be_nonzero
)
/* Start from the most conservative answer: every bit may be nonzero,
   no bit must be.  */
1989 *may_be_nonzero
= wi::minus_one (TYPE_PRECISION (expr_type
));
1990 *must_be_nonzero
= wi::zero (TYPE_PRECISION (expr_type
));
/* Only constant, non-overflowed ranges carry usable bit information.  */
1991 if (!range_int_cst_p (vr
)
1992 || TREE_OVERFLOW (vr
->min
)
1993 || TREE_OVERFLOW (vr
->max
))
/* A singleton range pins every bit exactly.  */
1996 if (range_int_cst_singleton_p (vr
))
1998 *may_be_nonzero
= vr
->min
;
1999 *must_be_nonzero
= *may_be_nonzero
;
/* Bounds of the same sign: bits above the highest bit that differs
   between MIN and MAX are fixed; the rest are unknown.  */
2001 else if (tree_int_cst_sgn (vr
->min
) >= 0
2002 || tree_int_cst_sgn (vr
->max
) < 0)
2004 wide_int wmin
= vr
->min
;
2005 wide_int wmax
= vr
->max
;
2006 wide_int xor_mask
= wmin
^ wmax
;
2007 *may_be_nonzero
= wmin
| wmax
;
2008 *must_be_nonzero
= wmin
& wmax
;
/* Widen the "may" mask and clear the "must" mask over the differing
   low bits.  */
2011 wide_int mask
= wi::mask (wi::floor_log2 (xor_mask
), false,
2012 (*may_be_nonzero
).get_precision ());
2013 *may_be_nonzero
= (*may_be_nonzero
) | mask
;
2014 *must_be_nonzero
= (*must_be_nonzero
).and_not (mask
);
2021 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
2022 so that *VR0 U *VR1 == *AR. Returns true if that is possible,
2023 false otherwise. If *AR can be represented with a single range
2024 *VR1 will be VR_UNDEFINED. */
/* NOTE(review): return type, braces, `return false;`/swap statements and
   the lhs of two assignments (original 2025-2026, 2029, 2031, 2034,
   2040-2041, 2043, 2046 "vr0->max", 2049, 2051, 2053 "vr1->min", 2057,
   2059-2060, 2062-2063, 2065) were dropped by the extraction.  */
2027 ranges_from_anti_range (value_range_t
*ar
,
2028 value_range_t
*vr0
, value_range_t
*vr1
)
2030 tree type
= TREE_TYPE (ar
->min
);
2032 vr0
->type
= VR_UNDEFINED
;
2033 vr1
->type
= VR_UNDEFINED
;
/* Only constant anti-ranges of types with known extremes can be
   split.  */
2035 if (ar
->type
!= VR_ANTI_RANGE
2036 || TREE_CODE (ar
->min
) != INTEGER_CST
2037 || TREE_CODE (ar
->max
) != INTEGER_CST
2038 || !vrp_val_min (type
)
2039 || !vrp_val_max (type
))
/* Lower piece: [TYPE_MIN, ar->min - 1], when non-empty.  */
2042 if (!vrp_val_is_min (ar
->min
))
2044 vr0
->type
= VR_RANGE
;
2045 vr0
->min
= vrp_val_min (type
);
2047 = wide_int_to_tree (type
,
2048 wide_int (ar
->min
) - 1);
/* Upper piece: [ar->max + 1, TYPE_MAX], when non-empty.  */
2050 if (!vrp_val_is_max (ar
->max
))
2052 vr1
->type
= VR_RANGE
;
2054 = wide_int_to_tree (type
,
2055 wide_int (ar
->max
) + 1);
2056 vr1
->max
= vrp_val_max (type
);
/* If only the upper piece exists, move it into *VR0 (swap is on a
   dropped line -- confirm).  */
2058 if (vr0
->type
== VR_UNDEFINED
)
2061 vr1
->type
= VR_UNDEFINED
;
2064 return vr0
->type
!= VR_UNDEFINED
;
2067 /* Helper to extract a value-range *VR for a multiplicative operation
/* NOTE(review): this extraction is lossy -- return type, braces,
   the declarations of VAL[4]/MIN/MAX/CMP/I, several `return`s and
   shortcut assignments (e.g. original 2068-2070, 2074, 2076-2081,
   2105-2107, 2109, 2112-2113, 2115-2117, 2120-2126, 2129-2135,
   2138-2142, 2144-2146, 2148-2150, 2152, 2157-2160, 2164, 2167-2171,
   2173-2179, 2186, 2189, 2191-2195, 2201, 2204, 2206-2208, 2211,
   2216-2217) are missing.  Comments describe the surviving fragments
   only; confirm against the unmangled tree-vrp.c.  */
2071 extract_range_from_multiplicative_op_1 (value_range_t
*vr
,
2072 enum tree_code code
,
2073 value_range_t
*vr0
, value_range_t
*vr1
)
2075 enum value_range_type type
;
2082 /* Multiplications, divisions and shifts are a bit tricky to handle,
2083 depending on the mix of signs we have in the two ranges, we
2084 need to operate on different values to get the minimum and
2085 maximum values for the new range. One approach is to figure
2086 out all the variations of range combinations and do the
2089 However, this involves several calls to compare_values and it
2090 is pretty convoluted. It's simpler to do the 4 operations
2091 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP
2092 MAX1) and then figure the smallest and largest values to form
2094 gcc_assert (code
== MULT_EXPR
2095 || code
== TRUNC_DIV_EXPR
2096 || code
== FLOOR_DIV_EXPR
2097 || code
== CEIL_DIV_EXPR
2098 || code
== EXACT_DIV_EXPR
2099 || code
== ROUND_DIV_EXPR
2100 || code
== RSHIFT_EXPR
2101 || code
== LSHIFT_EXPR
);
2102 gcc_assert ((vr0
->type
== VR_RANGE
2103 || (code
== MULT_EXPR
&& vr0
->type
== VR_ANTI_RANGE
))
2104 && vr0
->type
== vr1
->type
);
2108 /* Compute the 4 cross operations. */
2110 val
[0] = vrp_int_const_binop (code
, vr0
->min
, vr1
->min
);
2111 if (val
[0] == NULL_TREE
)
/* Degenerate VR1: MIN0 op MAX1 would duplicate VAL[0].  */
2114 if (vr1
->max
== vr1
->min
)
2118 val
[1] = vrp_int_const_binop (code
, vr0
->min
, vr1
->max
);
2119 if (val
[1] == NULL_TREE
)
/* Degenerate VR0: MAX0 op MIN1 would duplicate VAL[0].  */
2123 if (vr0
->max
== vr0
->min
)
2127 val
[2] = vrp_int_const_binop (code
, vr0
->max
, vr1
->min
);
2128 if (val
[2] == NULL_TREE
)
/* Either range degenerate: MAX0 op MAX1 adds nothing new.  */
2132 if (vr0
->min
== vr0
->max
|| vr1
->min
== vr1
->max
)
2136 val
[3] = vrp_int_const_binop (code
, vr0
->max
, vr1
->max
);
2137 if (val
[3] == NULL_TREE
)
/* A NULL cross product means overflow infinities were needed but
   unsupported; punt to VARYING.  */
2143 set_value_range_to_varying (vr
);
2147 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2151 for (i
= 1; i
< 4; i
++)
2153 if (!is_gimple_min_invariant (min
)
2154 || (TREE_OVERFLOW (min
) && !is_overflow_infinity (min
))
2155 || !is_gimple_min_invariant (max
)
2156 || (TREE_OVERFLOW (max
) && !is_overflow_infinity (max
)))
2161 if (!is_gimple_min_invariant (val
[i
])
2162 || (TREE_OVERFLOW (val
[i
])
2163 && !is_overflow_infinity (val
[i
])))
2165 /* If we found an overflowed value, set MIN and MAX
2166 to it so that we set the resulting range to
2172 if (compare_values (val
[i
], min
) == -1)
2175 if (compare_values (val
[i
], max
) == 1)
2180 /* If either MIN or MAX overflowed, then set the resulting range to
2181 VARYING. But we do accept an overflow infinity
2183 if (min
== NULL_TREE
2184 || !is_gimple_min_invariant (min
)
2185 || (TREE_OVERFLOW (min
) && !is_overflow_infinity (min
))
2187 || !is_gimple_min_invariant (max
)
2188 || (TREE_OVERFLOW (max
) && !is_overflow_infinity (max
)))
2190 set_value_range_to_varying (vr
);
2196 2) [-INF, +-INF(OVF)]
2197 3) [+-INF(OVF), +INF]
2198 4) [+-INF(OVF), +-INF(OVF)]
2199 We learn nothing when we have INF and INF(OVF) on both sides.
2200 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2202 if ((vrp_val_is_min (min
) || is_overflow_infinity (min
))
2203 && (vrp_val_is_max (max
) || is_overflow_infinity (max
)))
2205 set_value_range_to_varying (vr
);
2209 cmp
= compare_values (min
, max
);
2210 if (cmp
== -2 || cmp
== 1)
2212 /* If the new range has its limits swapped around (MIN > MAX),
2213 then the operation caused one of them to wrap around, mark
2214 the new range VARYING. */
2215 set_value_range_to_varying (vr
);
2218 set_value_range (vr
, type
, min
, max
, NULL
);
2221 /* Extract range information from a binary operation CODE based on
2222 the ranges of each of its operands, *VR0 and *VR1 with resulting
2223 type EXPR_TYPE. The resulting range is stored in *VR. */
2226 extract_range_from_binary_expr_1 (value_range_t
*vr
,
2227 enum tree_code code
, tree expr_type
,
2228 value_range_t
*vr0_
, value_range_t
*vr1_
)
2230 value_range_t vr0
= *vr0_
, vr1
= *vr1_
;
2231 value_range_t vrtem0
= VR_INITIALIZER
, vrtem1
= VR_INITIALIZER
;
2232 enum value_range_type type
;
2233 tree min
= NULL_TREE
, max
= NULL_TREE
;
2236 if (!INTEGRAL_TYPE_P (expr_type
)
2237 && !POINTER_TYPE_P (expr_type
))
2239 set_value_range_to_varying (vr
);
2243 /* Not all binary expressions can be applied to ranges in a
2244 meaningful way. Handle only arithmetic operations. */
2245 if (code
!= PLUS_EXPR
2246 && code
!= MINUS_EXPR
2247 && code
!= POINTER_PLUS_EXPR
2248 && code
!= MULT_EXPR
2249 && code
!= TRUNC_DIV_EXPR
2250 && code
!= FLOOR_DIV_EXPR
2251 && code
!= CEIL_DIV_EXPR
2252 && code
!= EXACT_DIV_EXPR
2253 && code
!= ROUND_DIV_EXPR
2254 && code
!= TRUNC_MOD_EXPR
2255 && code
!= RSHIFT_EXPR
2256 && code
!= LSHIFT_EXPR
2259 && code
!= BIT_AND_EXPR
2260 && code
!= BIT_IOR_EXPR
2261 && code
!= BIT_XOR_EXPR
)
2263 set_value_range_to_varying (vr
);
2267 /* If both ranges are UNDEFINED, so is the result. */
2268 if (vr0
.type
== VR_UNDEFINED
&& vr1
.type
== VR_UNDEFINED
)
2270 set_value_range_to_undefined (vr
);
2273 /* If one of the ranges is UNDEFINED drop it to VARYING for the following
2274 code. At some point we may want to special-case operations that
2275 have UNDEFINED result for all or some value-ranges of the not UNDEFINED
2277 else if (vr0
.type
== VR_UNDEFINED
)
2278 set_value_range_to_varying (&vr0
);
2279 else if (vr1
.type
== VR_UNDEFINED
)
2280 set_value_range_to_varying (&vr1
);
2282 /* Now canonicalize anti-ranges to ranges when they are not symbolic
2283 and express ~[] op X as ([]' op X) U ([]'' op X). */
2284 if (vr0
.type
== VR_ANTI_RANGE
2285 && ranges_from_anti_range (&vr0
, &vrtem0
, &vrtem1
))
2287 extract_range_from_binary_expr_1 (vr
, code
, expr_type
, &vrtem0
, vr1_
);
2288 if (vrtem1
.type
!= VR_UNDEFINED
)
2290 value_range_t vrres
= VR_INITIALIZER
;
2291 extract_range_from_binary_expr_1 (&vrres
, code
, expr_type
,
2293 vrp_meet (vr
, &vrres
);
2297 /* Likewise for X op ~[]. */
2298 if (vr1
.type
== VR_ANTI_RANGE
2299 && ranges_from_anti_range (&vr1
, &vrtem0
, &vrtem1
))
2301 extract_range_from_binary_expr_1 (vr
, code
, expr_type
, vr0_
, &vrtem0
);
2302 if (vrtem1
.type
!= VR_UNDEFINED
)
2304 value_range_t vrres
= VR_INITIALIZER
;
2305 extract_range_from_binary_expr_1 (&vrres
, code
, expr_type
,
2307 vrp_meet (vr
, &vrres
);
2312 /* The type of the resulting value range defaults to VR0.TYPE. */
2315 /* Refuse to operate on VARYING ranges, ranges of different kinds
2316 and symbolic ranges. As an exception, we allow BIT_AND_EXPR
2317 because we may be able to derive a useful range even if one of
2318 the operands is VR_VARYING or symbolic range. Similarly for
2319 divisions. TODO, we may be able to derive anti-ranges in
2321 if (code
!= BIT_AND_EXPR
2322 && code
!= BIT_IOR_EXPR
2323 && code
!= TRUNC_DIV_EXPR
2324 && code
!= FLOOR_DIV_EXPR
2325 && code
!= CEIL_DIV_EXPR
2326 && code
!= EXACT_DIV_EXPR
2327 && code
!= ROUND_DIV_EXPR
2328 && code
!= TRUNC_MOD_EXPR
2331 && (vr0
.type
== VR_VARYING
2332 || vr1
.type
== VR_VARYING
2333 || vr0
.type
!= vr1
.type
2334 || symbolic_range_p (&vr0
)
2335 || symbolic_range_p (&vr1
)))
2337 set_value_range_to_varying (vr
);
2341 /* Now evaluate the expression to determine the new range. */
2342 if (POINTER_TYPE_P (expr_type
))
2344 if (code
== MIN_EXPR
|| code
== MAX_EXPR
)
2346 /* For MIN/MAX expressions with pointers, we only care about
2347 nullness, if both are non null, then the result is nonnull.
2348 If both are null, then the result is null. Otherwise they
2350 if (range_is_nonnull (&vr0
) && range_is_nonnull (&vr1
))
2351 set_value_range_to_nonnull (vr
, expr_type
);
2352 else if (range_is_null (&vr0
) && range_is_null (&vr1
))
2353 set_value_range_to_null (vr
, expr_type
);
2355 set_value_range_to_varying (vr
);
2357 else if (code
== POINTER_PLUS_EXPR
)
2359 /* For pointer types, we are really only interested in asserting
2360 whether the expression evaluates to non-NULL. */
2361 if (range_is_nonnull (&vr0
) || range_is_nonnull (&vr1
))
2362 set_value_range_to_nonnull (vr
, expr_type
);
2363 else if (range_is_null (&vr0
) && range_is_null (&vr1
))
2364 set_value_range_to_null (vr
, expr_type
);
2366 set_value_range_to_varying (vr
);
2368 else if (code
== BIT_AND_EXPR
)
2370 /* For pointer types, we are really only interested in asserting
2371 whether the expression evaluates to non-NULL. */
2372 if (range_is_nonnull (&vr0
) && range_is_nonnull (&vr1
))
2373 set_value_range_to_nonnull (vr
, expr_type
);
2374 else if (range_is_null (&vr0
) || range_is_null (&vr1
))
2375 set_value_range_to_null (vr
, expr_type
);
2377 set_value_range_to_varying (vr
);
2380 set_value_range_to_varying (vr
);
2385 /* For integer ranges, apply the operation to each end of the
2386 range and see what we end up with. */
2387 if (code
== PLUS_EXPR
|| code
== MINUS_EXPR
)
2389 /* If we have a PLUS_EXPR with two VR_RANGE integer constant
2390 ranges compute the precise range for such case if possible. */
2391 if (range_int_cst_p (&vr0
)
2392 && range_int_cst_p (&vr1
))
2394 signop sgn
= TYPE_SIGN (expr_type
);
2395 unsigned int prec
= TYPE_PRECISION (expr_type
);
2396 wide_int min0
= wide_int (vr0
.min
);
2397 wide_int max0
= wide_int (vr0
.max
);
2398 wide_int min1
= wide_int (vr1
.min
);
2399 wide_int max1
= wide_int (vr1
.max
);
2400 wide_int type_min
= wi::min_value (TYPE_PRECISION (expr_type
), sgn
);
2401 wide_int type_max
= wi::max_value (TYPE_PRECISION (expr_type
), sgn
);
2402 wide_int wmin
, wmax
;
2406 if (code
== PLUS_EXPR
)
2411 /* Check for overflow. */
2412 if (wi::cmp (min1
, 0, sgn
) != wi::cmp (wmin
, min0
, sgn
))
2413 min_ovf
= wi::cmp (min0
, wmin
, sgn
);
2414 if (wi::cmp (max1
, 0, sgn
) != wi::cmp (wmax
, max0
, sgn
))
2415 max_ovf
= wi::cmp (max0
, wmax
, sgn
);
2417 else /* if (code == MINUS_EXPR) */
2422 if (wi::cmp (0, max1
, sgn
) != wi::cmp (wmin
, min0
, sgn
))
2423 min_ovf
= wi::cmp (min0
, max1
, sgn
);
2424 if (wi::cmp (0, min1
, sgn
) != wi::cmp (wmax
, max0
, sgn
))
2425 max_ovf
= wi::cmp (max0
, min1
, sgn
);
2428 /* For non-wrapping arithmetic look at possibly smaller
2429 value-ranges of the type. */
2430 if (!TYPE_OVERFLOW_WRAPS (expr_type
))
2432 if (vrp_val_min (expr_type
))
2433 type_min
= wide_int (vrp_val_min (expr_type
));
2434 if (vrp_val_max (expr_type
))
2435 type_max
= wide_int (vrp_val_max (expr_type
));
2438 /* Check for type overflow. */
2441 if (wi::cmp (wmin
, type_min
, sgn
) == -1)
2443 else if (wi::cmp (wmin
, type_max
, sgn
) == 1)
2448 if (wi::cmp (wmax
, type_min
, sgn
) == -1)
2450 else if (wi::cmp (wmax
, type_max
, sgn
) == 1)
2454 if (TYPE_OVERFLOW_WRAPS (expr_type
))
2456 /* If overflow wraps, truncate the values and adjust the
2457 range kind and bounds appropriately. */
2458 wide_int tmin
= wide_int::from (wmin
, prec
, sgn
);
2459 wide_int tmax
= wide_int::from (wmax
, prec
, sgn
);
2460 if (min_ovf
== max_ovf
)
2462 /* No overflow or both overflow or underflow. The
2463 range kind stays VR_RANGE. */
2464 min
= wide_int_to_tree (expr_type
, tmin
);
2465 max
= wide_int_to_tree (expr_type
, tmax
);
2467 else if (min_ovf
== -1
2470 /* Underflow and overflow, drop to VR_VARYING. */
2471 set_value_range_to_varying (vr
);
2476 /* Min underflow or max overflow. The range kind
2477 changes to VR_ANTI_RANGE. */
2478 bool covers
= false;
2479 wide_int tem
= tmin
;
2480 gcc_assert ((min_ovf
== -1 && max_ovf
== 0)
2481 || (max_ovf
== 1 && min_ovf
== 0));
2482 type
= VR_ANTI_RANGE
;
2484 if (wi::cmp (tmin
, tmax
, sgn
) < 0)
2487 if (wi::cmp (tmax
, tem
, sgn
) > 0)
2489 /* If the anti-range would cover nothing, drop to varying.
2490 Likewise if the anti-range bounds are outside of the
2492 if (covers
|| wi::cmp (tmin
, tmax
, sgn
) > 0)
2494 set_value_range_to_varying (vr
);
2497 min
= wide_int_to_tree (expr_type
, tmin
);
2498 max
= wide_int_to_tree (expr_type
, tmax
);
2503 /* If overflow does not wrap, saturate to the types min/max
2507 if (needs_overflow_infinity (expr_type
)
2508 && supports_overflow_infinity (expr_type
))
2509 min
= negative_overflow_infinity (expr_type
);
2511 min
= wide_int_to_tree (expr_type
, type_min
);
2513 else if (min_ovf
== 1)
2515 if (needs_overflow_infinity (expr_type
)
2516 && supports_overflow_infinity (expr_type
))
2517 min
= positive_overflow_infinity (expr_type
);
2519 min
= wide_int_to_tree (expr_type
, type_max
);
2522 min
= wide_int_to_tree (expr_type
, wmin
);
2526 if (needs_overflow_infinity (expr_type
)
2527 && supports_overflow_infinity (expr_type
))
2528 max
= negative_overflow_infinity (expr_type
);
2530 max
= wide_int_to_tree (expr_type
, type_min
);
2532 else if (max_ovf
== 1)
2534 if (needs_overflow_infinity (expr_type
)
2535 && supports_overflow_infinity (expr_type
))
2536 max
= positive_overflow_infinity (expr_type
);
2538 max
= wide_int_to_tree (expr_type
, type_max
);
2541 max
= wide_int_to_tree (expr_type
, wmax
);
2543 if (needs_overflow_infinity (expr_type
)
2544 && supports_overflow_infinity (expr_type
))
2546 if (is_negative_overflow_infinity (vr0
.min
)
2547 || (code
== PLUS_EXPR
2548 ? is_negative_overflow_infinity (vr1
.min
)
2549 : is_positive_overflow_infinity (vr1
.max
)))
2550 min
= negative_overflow_infinity (expr_type
);
2551 if (is_positive_overflow_infinity (vr0
.max
)
2552 || (code
== PLUS_EXPR
2553 ? is_positive_overflow_infinity (vr1
.max
)
2554 : is_negative_overflow_infinity (vr1
.min
)))
2555 max
= positive_overflow_infinity (expr_type
);
2560 /* For other cases, for example if we have a PLUS_EXPR with two
2561 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
2562 to compute a precise range for such a case.
2563 ??? General even mixed range kind operations can be expressed
2564 by for example transforming ~[3, 5] + [1, 2] to range-only
2565 operations and a union primitive:
2566 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
2567 [-INF+1, 4] U [6, +INF(OVF)]
2568 though usually the union is not exactly representable with
2569 a single range or anti-range as the above is
2570 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
2571 but one could use a scheme similar to equivalences for this. */
2572 set_value_range_to_varying (vr
);
2576 else if (code
== MIN_EXPR
2577 || code
== MAX_EXPR
)
2579 if (vr0
.type
== VR_RANGE
2580 && !symbolic_range_p (&vr0
))
2583 if (vr1
.type
== VR_RANGE
2584 && !symbolic_range_p (&vr1
))
2586 /* For operations that make the resulting range directly
2587 proportional to the original ranges, apply the operation to
2588 the same end of each range. */
2589 min
= vrp_int_const_binop (code
, vr0
.min
, vr1
.min
);
2590 max
= vrp_int_const_binop (code
, vr0
.max
, vr1
.max
);
2592 else if (code
== MIN_EXPR
)
2594 min
= vrp_val_min (expr_type
);
2597 else if (code
== MAX_EXPR
)
2600 max
= vrp_val_max (expr_type
);
2603 else if (vr1
.type
== VR_RANGE
2604 && !symbolic_range_p (&vr1
))
2607 if (code
== MIN_EXPR
)
2609 min
= vrp_val_min (expr_type
);
2612 else if (code
== MAX_EXPR
)
2615 max
= vrp_val_max (expr_type
);
2620 set_value_range_to_varying (vr
);
2624 else if (code
== MULT_EXPR
)
2626 /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
2627 drop to varying. This test requires 2*prec bits if both
2628 operands are signed and 2*prec + 2 bits if either is not. */
2630 signop sign
= TYPE_SIGN (expr_type
);
2631 unsigned int prec
= TYPE_PRECISION (expr_type
);
2632 unsigned int prec2
= (prec
* 2) + (sign
== UNSIGNED
? 2 : 0);
2634 if (range_int_cst_p (&vr0
)
2635 && range_int_cst_p (&vr1
)
2636 && TYPE_OVERFLOW_WRAPS (expr_type
))
2638 wide_int min0
, max0
, min1
, max1
;
2639 wide_int prod0
, prod1
, prod2
, prod3
;
2640 wide_int sizem1
= wi::mask (prec
, false, prec2
);
2641 wide_int size
= sizem1
+ 1;
2643 /* Extend the values using the sign of the result to PREC2.
2644 From here on out, everthing is just signed math no matter
2645 what the input types were. */
2646 min0
= wide_int::from (vr0
.min
, prec2
, sign
);
2647 max0
= wide_int::from (vr0
.max
, prec2
, sign
);
2648 min1
= wide_int::from (vr1
.min
, prec2
, sign
);
2649 max1
= wide_int::from (vr1
.max
, prec2
, sign
);
2651 /* Canonicalize the intervals. */
2652 if (sign
== UNSIGNED
)
2654 if (wi::ltu_p (size
, min0
+ max0
))
2660 if (wi::ltu_p (size
, min1
+ max1
))
2667 prod0
= min0
* min1
;
2668 prod1
= min0
* max1
;
2669 prod2
= max0
* min1
;
2670 prod3
= max0
* max1
;
2672 /* Sort the 4 products so that min is in prod0 and max is in
2674 /* min0min1 > max0max1 */
2675 if (wi::gts_p (prod0
, prod3
))
2677 wide_int tmp
= prod3
;
2682 /* min0max1 > max0min1 */
2683 if (wi::gts_p (prod1
, prod2
))
2685 wide_int tmp
= prod2
;
2690 if (wi::gts_p (prod0
, prod1
))
2692 wide_int tmp
= prod1
;
2697 if (wi::gts_p (prod2
, prod3
))
2699 wide_int tmp
= prod3
;
2704 /* diff = max - min. */
2705 prod2
= prod3
- prod0
;
2706 if (wi::geu_p (prod2
, sizem1
))
2708 /* the range covers all values. */
2709 set_value_range_to_varying (vr
);
2713 /* The following should handle the wrapping and selecting
2714 VR_ANTI_RANGE for us. */
2715 min
= wide_int_to_tree (expr_type
, prod0
);
2716 max
= wide_int_to_tree (expr_type
, prod3
);
2717 set_and_canonicalize_value_range (vr
, VR_RANGE
, min
, max
, NULL
);
2721 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2722 drop to VR_VARYING. It would take more effort to compute a
2723 precise range for such a case. For example, if we have
2724 op0 == 65536 and op1 == 65536 with their ranges both being
2725 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2726 we cannot claim that the product is in ~[0,0]. Note that we
2727 are guaranteed to have vr0.type == vr1.type at this
2729 if (vr0
.type
== VR_ANTI_RANGE
2730 && !TYPE_OVERFLOW_UNDEFINED (expr_type
))
2732 set_value_range_to_varying (vr
);
2736 extract_range_from_multiplicative_op_1 (vr
, code
, &vr0
, &vr1
);
2739 else if (code
== RSHIFT_EXPR
2740 || code
== LSHIFT_EXPR
)
2742 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2743 then drop to VR_VARYING. Outside of this range we get undefined
2744 behavior from the shift operation. We cannot even trust
2745 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2746 shifts, and the operation at the tree level may be widened. */
2747 if (range_int_cst_p (&vr1
)
2748 && compare_tree_int (vr1
.min
, 0) >= 0
2749 && compare_tree_int (vr1
.max
, TYPE_PRECISION (expr_type
)) == -1)
2751 if (code
== RSHIFT_EXPR
)
2753 extract_range_from_multiplicative_op_1 (vr
, code
, &vr0
, &vr1
);
2756 /* We can map lshifts by constants to MULT_EXPR handling. */
2757 else if (code
== LSHIFT_EXPR
2758 && range_int_cst_singleton_p (&vr1
))
2760 bool saved_flag_wrapv
;
2761 value_range_t vr1p
= VR_INITIALIZER
;
2762 vr1p
.type
= VR_RANGE
;
2763 vr1p
.min
= (wide_int_to_tree
2765 wi::set_bit_in_zero (tree_to_shwi (vr1
.min
),
2766 TYPE_PRECISION (expr_type
))));
2767 vr1p
.max
= vr1p
.min
;
2768 /* We have to use a wrapping multiply though as signed overflow
2769 on lshifts is implementation defined in C89. */
2770 saved_flag_wrapv
= flag_wrapv
;
2772 extract_range_from_binary_expr_1 (vr
, MULT_EXPR
, expr_type
,
2774 flag_wrapv
= saved_flag_wrapv
;
2777 else if (code
== LSHIFT_EXPR
2778 && range_int_cst_p (&vr0
))
2780 int prec
= TYPE_PRECISION (expr_type
);
2781 int overflow_pos
= prec
;
2783 wide_int bound
, complement
, low_bound
, high_bound
;
2784 bool uns
= TYPE_UNSIGNED (expr_type
);
2785 bool in_bounds
= false;
2790 bound_shift
= overflow_pos
- tree_to_shwi (vr1
.max
);
2791 /* If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
2792 overflow. However, for that to happen, vr1.max needs to be
2793 zero, which means vr1 is a singleton range of zero, which
2794 means it should be handled by the previous LSHIFT_EXPR
2796 bound
= wi::set_bit_in_zero (bound_shift
, prec
);
2797 complement
= ~(bound
- 1);
2802 high_bound
= complement
;
2803 if (wi::ltu_p (vr0
.max
, low_bound
))
2805 /* [5, 6] << [1, 2] == [10, 24]. */
2806 /* We're shifting out only zeroes, the value increases
2810 else if (wi::ltu_p (high_bound
, vr0
.min
))
2812 /* [0xffffff00, 0xffffffff] << [1, 2]
2813 == [0xfffffc00, 0xfffffffe]. */
2814 /* We're shifting out only ones, the value decreases
2821 /* [-1, 1] << [1, 2] == [-4, 4]. */
2822 low_bound
= complement
;
2824 if (wi::lts_p (vr0
.max
, high_bound
)
2825 && wi::lts_p (low_bound
, vr0
.min
))
2827 /* For non-negative numbers, we're shifting out only
2828 zeroes, the value increases monotonically.
2829 For negative numbers, we're shifting out only ones, the
2830 value decreases monotomically. */
2837 extract_range_from_multiplicative_op_1 (vr
, code
, &vr0
, &vr1
);
2842 set_value_range_to_varying (vr
);
2845 else if (code
== TRUNC_DIV_EXPR
2846 || code
== FLOOR_DIV_EXPR
2847 || code
== CEIL_DIV_EXPR
2848 || code
== EXACT_DIV_EXPR
2849 || code
== ROUND_DIV_EXPR
)
2851 if (vr0
.type
!= VR_RANGE
|| symbolic_range_p (&vr0
))
2853 /* For division, if op1 has VR_RANGE but op0 does not, something
2854 can be deduced just from that range. Say [min, max] / [4, max]
2855 gives [min / 4, max / 4] range. */
2856 if (vr1
.type
== VR_RANGE
2857 && !symbolic_range_p (&vr1
)
2858 && range_includes_zero_p (vr1
.min
, vr1
.max
) == 0)
2860 vr0
.type
= type
= VR_RANGE
;
2861 vr0
.min
= vrp_val_min (expr_type
);
2862 vr0
.max
= vrp_val_max (expr_type
);
2866 set_value_range_to_varying (vr
);
2871 /* For divisions, if flag_non_call_exceptions is true, we must
2872 not eliminate a division by zero. */
2873 if (cfun
->can_throw_non_call_exceptions
2874 && (vr1
.type
!= VR_RANGE
2875 || range_includes_zero_p (vr1
.min
, vr1
.max
) != 0))
2877 set_value_range_to_varying (vr
);
2881 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2882 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2884 if (vr0
.type
== VR_RANGE
2885 && (vr1
.type
!= VR_RANGE
2886 || range_includes_zero_p (vr1
.min
, vr1
.max
) != 0))
2888 tree zero
= build_int_cst (TREE_TYPE (vr0
.min
), 0);
2893 if (TYPE_UNSIGNED (expr_type
)
2894 || value_range_nonnegative_p (&vr1
))
2896 /* For unsigned division or when divisor is known
2897 to be non-negative, the range has to cover
2898 all numbers from 0 to max for positive max
2899 and all numbers from min to 0 for negative min. */
2900 cmp
= compare_values (vr0
.max
, zero
);
2903 else if (cmp
== 0 || cmp
== 1)
2907 cmp
= compare_values (vr0
.min
, zero
);
2910 else if (cmp
== 0 || cmp
== -1)
2917 /* Otherwise the range is -max .. max or min .. -min
2918 depending on which bound is bigger in absolute value,
2919 as the division can change the sign. */
2920 abs_extent_range (vr
, vr0
.min
, vr0
.max
);
2923 if (type
== VR_VARYING
)
2925 set_value_range_to_varying (vr
);
2931 extract_range_from_multiplicative_op_1 (vr
, code
, &vr0
, &vr1
);
2935 else if (code
== TRUNC_MOD_EXPR
)
2937 if (vr1
.type
!= VR_RANGE
2938 || range_includes_zero_p (vr1
.min
, vr1
.max
) != 0
2939 || vrp_val_is_min (vr1
.min
))
2941 set_value_range_to_varying (vr
);
2945 /* Compute MAX <|vr1.min|, |vr1.max|> - 1. */
2946 max
= fold_unary_to_constant (ABS_EXPR
, expr_type
, vr1
.min
);
2947 if (tree_int_cst_lt (max
, vr1
.max
))
2949 max
= int_const_binop (MINUS_EXPR
, max
, build_int_cst (TREE_TYPE (max
), 1));
2950 /* If the dividend is non-negative the modulus will be
2951 non-negative as well. */
2952 if (TYPE_UNSIGNED (expr_type
)
2953 || value_range_nonnegative_p (&vr0
))
2954 min
= build_int_cst (TREE_TYPE (max
), 0);
2956 min
= fold_unary_to_constant (NEGATE_EXPR
, expr_type
, max
);
2958 else if (code
== BIT_AND_EXPR
|| code
== BIT_IOR_EXPR
|| code
== BIT_XOR_EXPR
)
2960 bool int_cst_range0
, int_cst_range1
;
2961 wide_int may_be_nonzero0
, may_be_nonzero1
;
2962 wide_int must_be_nonzero0
, must_be_nonzero1
;
2964 int_cst_range0
= zero_nonzero_bits_from_vr (expr_type
, &vr0
, &may_be_nonzero0
,
2966 int_cst_range1
= zero_nonzero_bits_from_vr (expr_type
, &vr1
, &may_be_nonzero1
,
2970 if (code
== BIT_AND_EXPR
)
2973 min
= wide_int_to_tree (expr_type
,
2974 must_be_nonzero0
& must_be_nonzero1
);
2975 wmax
= may_be_nonzero0
& may_be_nonzero1
;
2976 /* If both input ranges contain only negative values we can
2977 truncate the result range maximum to the minimum of the
2978 input range maxima. */
2979 if (int_cst_range0
&& int_cst_range1
2980 && tree_int_cst_sgn (vr0
.max
) < 0
2981 && tree_int_cst_sgn (vr1
.max
) < 0)
2983 wmax
= wi::min (wmax
, vr0
.max
, TYPE_SIGN (expr_type
));
2984 wmax
= wi::min (wmax
, vr1
.max
, TYPE_SIGN (expr_type
));
2986 /* If either input range contains only non-negative values
2987 we can truncate the result range maximum to the respective
2988 maximum of the input range. */
2989 if (int_cst_range0
&& tree_int_cst_sgn (vr0
.min
) >= 0)
2990 wmax
= wi::min (wmax
, vr0
.max
, TYPE_SIGN (expr_type
));
2991 if (int_cst_range1
&& tree_int_cst_sgn (vr1
.min
) >= 0)
2992 wmax
= wi::min (wmax
, vr1
.max
, TYPE_SIGN (expr_type
));
2993 max
= wide_int_to_tree (expr_type
, wmax
);
2995 else if (code
== BIT_IOR_EXPR
)
2998 max
= wide_int_to_tree (expr_type
,
2999 may_be_nonzero0
| may_be_nonzero1
);
3000 wmin
= must_be_nonzero0
| must_be_nonzero1
;
3001 /* If the input ranges contain only positive values we can
3002 truncate the minimum of the result range to the maximum
3003 of the input range minima. */
3004 if (int_cst_range0
&& int_cst_range1
3005 && tree_int_cst_sgn (vr0
.min
) >= 0
3006 && tree_int_cst_sgn (vr1
.min
) >= 0)
3008 wmin
= wi::max (wmin
, vr0
.min
, TYPE_SIGN (expr_type
));
3009 wmin
= wi::max (wmin
, vr1
.min
, TYPE_SIGN (expr_type
));
3011 /* If either input range contains only negative values
3012 we can truncate the minimum of the result range to the
3013 respective minimum range. */
3014 if (int_cst_range0
&& tree_int_cst_sgn (vr0
.max
) < 0)
3015 wmin
= wi::max (wmin
, vr0
.min
, TYPE_SIGN (expr_type
));
3016 if (int_cst_range1
&& tree_int_cst_sgn (vr1
.max
) < 0)
3017 wmin
= wi::max (wmin
, vr1
.min
, TYPE_SIGN (expr_type
));
3018 min
= wide_int_to_tree (expr_type
, wmin
);
3020 else if (code
== BIT_XOR_EXPR
)
3022 wide_int result_zero_bits
, result_one_bits
;
3023 result_zero_bits
= (must_be_nonzero0
& must_be_nonzero1
)
3024 | ~(may_be_nonzero0
| may_be_nonzero1
);
3025 result_one_bits
= must_be_nonzero0
.and_not (may_be_nonzero1
)
3026 | must_be_nonzero1
.and_not (may_be_nonzero0
);
3027 max
= wide_int_to_tree (expr_type
, ~result_zero_bits
);
3028 min
= wide_int_to_tree (expr_type
, result_one_bits
);
3029 /* If the range has all positive or all negative values the
3030 result is better than VARYING. */
3031 if (tree_int_cst_sgn (min
) < 0
3032 || tree_int_cst_sgn (max
) >= 0)
3035 max
= min
= NULL_TREE
;
3041 /* If either MIN or MAX overflowed, then set the resulting range to
3042 VARYING. But we do accept an overflow infinity
3044 if (min
== NULL_TREE
3045 || !is_gimple_min_invariant (min
)
3046 || (TREE_OVERFLOW (min
) && !is_overflow_infinity (min
))
3048 || !is_gimple_min_invariant (max
)
3049 || (TREE_OVERFLOW (max
) && !is_overflow_infinity (max
)))
3051 set_value_range_to_varying (vr
);
3057 2) [-INF, +-INF(OVF)]
3058 3) [+-INF(OVF), +INF]
3059 4) [+-INF(OVF), +-INF(OVF)]
3060 We learn nothing when we have INF and INF(OVF) on both sides.
3061 Note that we do accept [-INF, -INF] and [+INF, +INF] without
3063 if ((vrp_val_is_min (min
) || is_overflow_infinity (min
))
3064 && (vrp_val_is_max (max
) || is_overflow_infinity (max
)))
3066 set_value_range_to_varying (vr
);
3070 cmp
= compare_values (min
, max
);
3071 if (cmp
== -2 || cmp
== 1)
3073 /* If the new range has its limits swapped around (MIN > MAX),
3074 then the operation caused one of them to wrap around, mark
3075 the new range VARYING. */
3076 set_value_range_to_varying (vr
);
3079 set_value_range (vr
, type
, min
, max
, NULL
);
3082 /* Extract range information from a binary expression OP0 CODE OP1 based on
3083 the ranges of each of its operands with resulting type EXPR_TYPE.
3084 The resulting range is stored in *VR. */
3087 extract_range_from_binary_expr (value_range_t
*vr
,
3088 enum tree_code code
,
3089 tree expr_type
, tree op0
, tree op1
)
3091 value_range_t vr0
= VR_INITIALIZER
;
3092 value_range_t vr1
= VR_INITIALIZER
;
3094 /* Get value ranges for each operand. For constant operands, create
3095 a new value range with the operand to simplify processing. */
3096 if (TREE_CODE (op0
) == SSA_NAME
)
3097 vr0
= *(get_value_range (op0
));
3098 else if (is_gimple_min_invariant (op0
))
3099 set_value_range_to_value (&vr0
, op0
, NULL
);
3101 set_value_range_to_varying (&vr0
);
3103 if (TREE_CODE (op1
) == SSA_NAME
)
3104 vr1
= *(get_value_range (op1
));
3105 else if (is_gimple_min_invariant (op1
))
3106 set_value_range_to_value (&vr1
, op1
, NULL
);
3108 set_value_range_to_varying (&vr1
);
3110 extract_range_from_binary_expr_1 (vr
, code
, expr_type
, &vr0
, &vr1
);
3113 /* Extract range information from a unary operation CODE based on
3114 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
3115 The The resulting range is stored in *VR. */
3118 extract_range_from_unary_expr_1 (value_range_t
*vr
,
3119 enum tree_code code
, tree type
,
3120 value_range_t
*vr0_
, tree op0_type
)
3122 value_range_t vr0
= *vr0_
, vrtem0
= VR_INITIALIZER
, vrtem1
= VR_INITIALIZER
;
3124 /* VRP only operates on integral and pointer types. */
3125 if (!(INTEGRAL_TYPE_P (op0_type
)
3126 || POINTER_TYPE_P (op0_type
))
3127 || !(INTEGRAL_TYPE_P (type
)
3128 || POINTER_TYPE_P (type
)))
3130 set_value_range_to_varying (vr
);
3134 /* If VR0 is UNDEFINED, so is the result. */
3135 if (vr0
.type
== VR_UNDEFINED
)
3137 set_value_range_to_undefined (vr
);
3141 /* Handle operations that we express in terms of others. */
3142 if (code
== PAREN_EXPR
)
3144 /* PAREN_EXPR is a simple copy. */
3145 copy_value_range (vr
, &vr0
);
3148 else if (code
== NEGATE_EXPR
)
3150 /* -X is simply 0 - X, so re-use existing code that also handles
3151 anti-ranges fine. */
3152 value_range_t zero
= VR_INITIALIZER
;
3153 set_value_range_to_value (&zero
, build_int_cst (type
, 0), NULL
);
3154 extract_range_from_binary_expr_1 (vr
, MINUS_EXPR
, type
, &zero
, &vr0
);
3157 else if (code
== BIT_NOT_EXPR
)
3159 /* ~X is simply -1 - X, so re-use existing code that also handles
3160 anti-ranges fine. */
3161 value_range_t minusone
= VR_INITIALIZER
;
3162 set_value_range_to_value (&minusone
, build_int_cst (type
, -1), NULL
);
3163 extract_range_from_binary_expr_1 (vr
, MINUS_EXPR
,
3164 type
, &minusone
, &vr0
);
3168 /* Now canonicalize anti-ranges to ranges when they are not symbolic
3169 and express op ~[] as (op []') U (op []''). */
3170 if (vr0
.type
== VR_ANTI_RANGE
3171 && ranges_from_anti_range (&vr0
, &vrtem0
, &vrtem1
))
3173 extract_range_from_unary_expr_1 (vr
, code
, type
, &vrtem0
, op0_type
);
3174 if (vrtem1
.type
!= VR_UNDEFINED
)
3176 value_range_t vrres
= VR_INITIALIZER
;
3177 extract_range_from_unary_expr_1 (&vrres
, code
, type
,
3179 vrp_meet (vr
, &vrres
);
3184 if (CONVERT_EXPR_CODE_P (code
))
3186 tree inner_type
= op0_type
;
3187 tree outer_type
= type
;
3189 /* If the expression evaluates to a pointer, we are only interested in
3190 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
3191 if (POINTER_TYPE_P (type
))
3193 if (range_is_nonnull (&vr0
))
3194 set_value_range_to_nonnull (vr
, type
);
3195 else if (range_is_null (&vr0
))
3196 set_value_range_to_null (vr
, type
);
3198 set_value_range_to_varying (vr
);
3202 /* If VR0 is varying and we increase the type precision, assume
3203 a full range for the following transformation. */
3204 if (vr0
.type
== VR_VARYING
3205 && INTEGRAL_TYPE_P (inner_type
)
3206 && TYPE_PRECISION (inner_type
) < TYPE_PRECISION (outer_type
))
3208 vr0
.type
= VR_RANGE
;
3209 vr0
.min
= TYPE_MIN_VALUE (inner_type
);
3210 vr0
.max
= TYPE_MAX_VALUE (inner_type
);
3213 /* If VR0 is a constant range or anti-range and the conversion is
3214 not truncating we can convert the min and max values and
3215 canonicalize the resulting range. Otherwise we can do the
3216 conversion if the size of the range is less than what the
3217 precision of the target type can represent and the range is
3218 not an anti-range. */
3219 if ((vr0
.type
== VR_RANGE
3220 || vr0
.type
== VR_ANTI_RANGE
)
3221 && TREE_CODE (vr0
.min
) == INTEGER_CST
3222 && TREE_CODE (vr0
.max
) == INTEGER_CST
3223 && (!is_overflow_infinity (vr0
.min
)
3224 || (vr0
.type
== VR_RANGE
3225 && TYPE_PRECISION (outer_type
) > TYPE_PRECISION (inner_type
)
3226 && needs_overflow_infinity (outer_type
)
3227 && supports_overflow_infinity (outer_type
)))
3228 && (!is_overflow_infinity (vr0
.max
)
3229 || (vr0
.type
== VR_RANGE
3230 && TYPE_PRECISION (outer_type
) > TYPE_PRECISION (inner_type
)
3231 && needs_overflow_infinity (outer_type
)
3232 && supports_overflow_infinity (outer_type
)))
3233 && (TYPE_PRECISION (outer_type
) >= TYPE_PRECISION (inner_type
)
3234 || (vr0
.type
== VR_RANGE
3235 && integer_zerop (int_const_binop (RSHIFT_EXPR
,
3236 int_const_binop (MINUS_EXPR
, vr0
.max
, vr0
.min
),
3237 size_int (TYPE_PRECISION (outer_type
)))))))
3239 tree new_min
, new_max
;
3240 if (is_overflow_infinity (vr0
.min
))
3241 new_min
= negative_overflow_infinity (outer_type
);
3243 new_min
= force_fit_type (outer_type
,
3246 TYPE_PRECISION (outer_type
),
3247 TYPE_SIGN (TREE_TYPE (vr0
.min
))),
3249 if (is_overflow_infinity (vr0
.max
))
3250 new_max
= positive_overflow_infinity (outer_type
);
3252 new_max
= force_fit_type (outer_type
,
3255 TYPE_PRECISION (outer_type
),
3256 TYPE_SIGN (TREE_TYPE (vr0
.max
))),
3258 set_and_canonicalize_value_range (vr
, vr0
.type
,
3259 new_min
, new_max
, NULL
);
3263 set_value_range_to_varying (vr
);
3266 else if (code
== ABS_EXPR
)
3271 /* Pass through vr0 in the easy cases. */
3272 if (TYPE_UNSIGNED (type
)
3273 || value_range_nonnegative_p (&vr0
))
3275 copy_value_range (vr
, &vr0
);
3279 /* For the remaining varying or symbolic ranges we can't do anything
3281 if (vr0
.type
== VR_VARYING
3282 || symbolic_range_p (&vr0
))
3284 set_value_range_to_varying (vr
);
3288 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3290 if (!TYPE_OVERFLOW_UNDEFINED (type
)
3291 && ((vr0
.type
== VR_RANGE
3292 && vrp_val_is_min (vr0
.min
))
3293 || (vr0
.type
== VR_ANTI_RANGE
3294 && !vrp_val_is_min (vr0
.min
))))
3296 set_value_range_to_varying (vr
);
3300 /* ABS_EXPR may flip the range around, if the original range
3301 included negative values. */
3302 if (is_overflow_infinity (vr0
.min
))
3303 min
= positive_overflow_infinity (type
);
3304 else if (!vrp_val_is_min (vr0
.min
))
3305 min
= fold_unary_to_constant (code
, type
, vr0
.min
);
3306 else if (!needs_overflow_infinity (type
))
3307 min
= TYPE_MAX_VALUE (type
);
3308 else if (supports_overflow_infinity (type
))
3309 min
= positive_overflow_infinity (type
);
3312 set_value_range_to_varying (vr
);
3316 if (is_overflow_infinity (vr0
.max
))
3317 max
= positive_overflow_infinity (type
);
3318 else if (!vrp_val_is_min (vr0
.max
))
3319 max
= fold_unary_to_constant (code
, type
, vr0
.max
);
3320 else if (!needs_overflow_infinity (type
))
3321 max
= TYPE_MAX_VALUE (type
);
3322 else if (supports_overflow_infinity (type
)
3323 /* We shouldn't generate [+INF, +INF] as set_value_range
3324 doesn't like this and ICEs. */
3325 && !is_positive_overflow_infinity (min
))
3326 max
= positive_overflow_infinity (type
);
3329 set_value_range_to_varying (vr
);
3333 cmp
= compare_values (min
, max
);
3335 /* If a VR_ANTI_RANGEs contains zero, then we have
3336 ~[-INF, min(MIN, MAX)]. */
3337 if (vr0
.type
== VR_ANTI_RANGE
)
3339 if (range_includes_zero_p (vr0
.min
, vr0
.max
) == 1)
3341 /* Take the lower of the two values. */
3345 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3346 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3347 flag_wrapv is set and the original anti-range doesn't include
3348 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
3349 if (TYPE_OVERFLOW_WRAPS (type
))
3351 tree type_min_value
= TYPE_MIN_VALUE (type
);
3353 min
= (vr0
.min
!= type_min_value
3354 ? int_const_binop (PLUS_EXPR
, type_min_value
,
3355 build_int_cst (TREE_TYPE (type_min_value
), 1))
3360 if (overflow_infinity_range_p (&vr0
))
3361 min
= negative_overflow_infinity (type
);
3363 min
= TYPE_MIN_VALUE (type
);
3368 /* All else has failed, so create the range [0, INF], even for
3369 flag_wrapv since TYPE_MIN_VALUE is in the original
3371 vr0
.type
= VR_RANGE
;
3372 min
= build_int_cst (type
, 0);
3373 if (needs_overflow_infinity (type
))
3375 if (supports_overflow_infinity (type
))
3376 max
= positive_overflow_infinity (type
);
3379 set_value_range_to_varying (vr
);
3384 max
= TYPE_MAX_VALUE (type
);
3388 /* If the range contains zero then we know that the minimum value in the
3389 range will be zero. */
3390 else if (range_includes_zero_p (vr0
.min
, vr0
.max
) == 1)
3394 min
= build_int_cst (type
, 0);
3398 /* If the range was reversed, swap MIN and MAX. */
3407 cmp
= compare_values (min
, max
);
3408 if (cmp
== -2 || cmp
== 1)
3410 /* If the new range has its limits swapped around (MIN > MAX),
3411 then the operation caused one of them to wrap around, mark
3412 the new range VARYING. */
3413 set_value_range_to_varying (vr
);
3416 set_value_range (vr
, vr0
.type
, min
, max
, NULL
);
3420 /* For unhandled operations fall back to varying. */
3421 set_value_range_to_varying (vr
);
3426 /* Extract range information from a unary expression CODE OP0 based on
3427 the range of its operand with resulting type TYPE.
3428 The resulting range is stored in *VR. */
3431 extract_range_from_unary_expr (value_range_t
*vr
, enum tree_code code
,
3432 tree type
, tree op0
)
3434 value_range_t vr0
= VR_INITIALIZER
;
3436 /* Get value ranges for the operand. For constant operands, create
3437 a new value range with the operand to simplify processing. */
3438 if (TREE_CODE (op0
) == SSA_NAME
)
3439 vr0
= *(get_value_range (op0
));
3440 else if (is_gimple_min_invariant (op0
))
3441 set_value_range_to_value (&vr0
, op0
, NULL
);
3443 set_value_range_to_varying (&vr0
);
3445 extract_range_from_unary_expr_1 (vr
, code
, type
, &vr0
, TREE_TYPE (op0
));
3449 /* Extract range information from a conditional expression STMT based on
3450 the ranges of each of its operands and the expression code. */
3453 extract_range_from_cond_expr (value_range_t
*vr
, gimple stmt
)
3456 value_range_t vr0
= VR_INITIALIZER
;
3457 value_range_t vr1
= VR_INITIALIZER
;
3459 /* Get value ranges for each operand. For constant operands, create
3460 a new value range with the operand to simplify processing. */
3461 op0
= gimple_assign_rhs2 (stmt
);
3462 if (TREE_CODE (op0
) == SSA_NAME
)
3463 vr0
= *(get_value_range (op0
));
3464 else if (is_gimple_min_invariant (op0
))
3465 set_value_range_to_value (&vr0
, op0
, NULL
);
3467 set_value_range_to_varying (&vr0
);
3469 op1
= gimple_assign_rhs3 (stmt
);
3470 if (TREE_CODE (op1
) == SSA_NAME
)
3471 vr1
= *(get_value_range (op1
));
3472 else if (is_gimple_min_invariant (op1
))
3473 set_value_range_to_value (&vr1
, op1
, NULL
);
3475 set_value_range_to_varying (&vr1
);
3477 /* The resulting value range is the union of the operand ranges */
3478 copy_value_range (vr
, &vr0
);
3479 vrp_meet (vr
, &vr1
);
3483 /* Extract range information from a comparison expression EXPR based
3484 on the range of its operand and the expression code. */
3487 extract_range_from_comparison (value_range_t
*vr
, enum tree_code code
,
3488 tree type
, tree op0
, tree op1
)
3493 val
= vrp_evaluate_conditional_warnv_with_ops (code
, op0
, op1
, false, &sop
,
3496 /* A disadvantage of using a special infinity as an overflow
3497 representation is that we lose the ability to record overflow
3498 when we don't have an infinity. So we have to ignore a result
3499 which relies on overflow. */
3501 if (val
&& !is_overflow_infinity (val
) && !sop
)
3503 /* Since this expression was found on the RHS of an assignment,
3504 its type may be different from _Bool. Convert VAL to EXPR's
3506 val
= fold_convert (type
, val
);
3507 if (is_gimple_min_invariant (val
))
3508 set_value_range_to_value (vr
, val
, vr
->equiv
);
3510 set_value_range (vr
, VR_RANGE
, val
, val
, vr
->equiv
);
3513 /* The result of a comparison is always true or false. */
3514 set_value_range_to_truthvalue (vr
, type
);
3517 /* Try to derive a nonnegative or nonzero range out of STMT relying
3518 primarily on generic routines in fold in conjunction with range data.
3519 Store the result in *VR */
/* NOTE(review): this chunk is a lossy extraction -- brace/keyword-only
   lines of the original appear to be missing and statements are split
   across lines.  Restore from the upstream tree-vrp.c before compiling.
   The function special-cases several normal builtins (constant_p, ffs,
   popcount, parity, clz, ctz, clrsb) whose result range is computable,
   then falls back to fold-based nonnegative/nonzero queries.  */
3522 extract_range_basic (value_range_t
*vr
, gimple stmt
)
3525 tree type
= gimple_expr_type (stmt
);
/* Recognize calls to normal builtins with known result ranges.  */
3527 if (gimple_call_builtin_p (stmt
, BUILT_IN_NORMAL
))
3529 tree fndecl
= gimple_call_fndecl (stmt
), arg
;
3530 int mini
, maxi
, zerov
= 0, prec
;
3532 switch (DECL_FUNCTION_CODE (fndecl
))
3534 case BUILT_IN_CONSTANT_P
:
3535 /* If the call is __builtin_constant_p and the argument is a
3536 function parameter resolve it to false. This avoids bogus
3537 array bound warnings.
3538 ??? We could do this as early as inlining is finished. */
3539 arg
= gimple_call_arg (stmt
, 0);
3540 if (TREE_CODE (arg
) == SSA_NAME
3541 && SSA_NAME_IS_DEFAULT_DEF (arg
)
3542 && TREE_CODE (SSA_NAME_VAR (arg
)) == PARM_DECL
)
3544 set_value_range_to_null (vr
, type
);
3548 /* Both __builtin_ffs* and __builtin_popcount return
3550 CASE_INT_FN (BUILT_IN_FFS
):
3551 CASE_INT_FN (BUILT_IN_POPCOUNT
):
3552 arg
= gimple_call_arg (stmt
, 0);
3553 prec
= TYPE_PRECISION (TREE_TYPE (arg
));
3556 if (TREE_CODE (arg
) == SSA_NAME
)
3558 value_range_t
*vr0
= get_value_range (arg
);
3559 /* If arg is non-zero, then ffs or popcount
3561 if (((vr0
->type
== VR_RANGE
3562 && integer_nonzerop (vr0
->min
))
3563 || (vr0
->type
== VR_ANTI_RANGE
3564 && integer_zerop (vr0
->min
)))
3565 && !TREE_OVERFLOW (vr0
->min
))
3567 /* If some high bits are known to be zero,
3568 we can decrease the maximum. */
3569 if (vr0
->type
== VR_RANGE
3570 && TREE_CODE (vr0
->max
) == INTEGER_CST
3571 && !TREE_OVERFLOW (vr0
->max
))
3572 maxi
= tree_floor_log2 (vr0
->max
) + 1;
3575 /* __builtin_parity* returns [0, 1]. */
3576 CASE_INT_FN (BUILT_IN_PARITY
):
3580 /* __builtin_c[lt]z* return [0, prec-1], except for
3581 when the argument is 0, but that is undefined behavior.
3582 On many targets where the CLZ RTL or optab value is defined
3583 for 0 the value is prec, so include that in the range
3585 CASE_INT_FN (BUILT_IN_CLZ
):
3586 arg
= gimple_call_arg (stmt
, 0);
3587 prec
= TYPE_PRECISION (TREE_TYPE (arg
));
/* NOTE(review): the optab check below presumably guards use of the
   target's CLZ-at-zero value -- the comparison against
   CODE_FOR_nothing seems to have been dropped by the extraction.  */
3590 if (optab_handler (clz_optab
, TYPE_MODE (TREE_TYPE (arg
)))
3592 && CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg
)),
3594 /* Handle only the single common value. */
3596 /* Magic value to give up, unless vr0 proves
3599 if (TREE_CODE (arg
) == SSA_NAME
)
3601 value_range_t
*vr0
= get_value_range (arg
);
3602 /* From clz of VR_RANGE minimum we can compute
3604 if (vr0
->type
== VR_RANGE
3605 && TREE_CODE (vr0
->min
) == INTEGER_CST
3606 && !TREE_OVERFLOW (vr0
->min
))
3608 maxi
= prec
- 1 - tree_floor_log2 (vr0
->min
);
3612 else if (vr0
->type
== VR_ANTI_RANGE
3613 && integer_zerop (vr0
->min
)
3614 && !TREE_OVERFLOW (vr0
->min
))
3621 /* From clz of VR_RANGE maximum we can compute
3623 if (vr0
->type
== VR_RANGE
3624 && TREE_CODE (vr0
->max
) == INTEGER_CST
3625 && !TREE_OVERFLOW (vr0
->max
))
3627 mini
= prec
- 1 - tree_floor_log2 (vr0
->max
);
3635 /* __builtin_ctz* return [0, prec-1], except for
3636 when the argument is 0, but that is undefined behavior.
3637 If there is a ctz optab for this mode and
3638 CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
3639 otherwise just assume 0 won't be seen. */
3640 CASE_INT_FN (BUILT_IN_CTZ
):
3641 arg
= gimple_call_arg (stmt
, 0);
3642 prec
= TYPE_PRECISION (TREE_TYPE (arg
));
3645 if (optab_handler (ctz_optab
, TYPE_MODE (TREE_TYPE (arg
)))
3647 && CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg
)),
3650 /* Handle only the two common values. */
3653 else if (zerov
== prec
)
3656 /* Magic value to give up, unless vr0 proves
3660 if (TREE_CODE (arg
) == SSA_NAME
)
3662 value_range_t
*vr0
= get_value_range (arg
);
3663 /* If arg is non-zero, then use [0, prec - 1]. */
3664 if (((vr0
->type
== VR_RANGE
3665 && integer_nonzerop (vr0
->min
))
3666 || (vr0
->type
== VR_ANTI_RANGE
3667 && integer_zerop (vr0
->min
)))
3668 && !TREE_OVERFLOW (vr0
->min
))
3673 /* If some high bits are known to be zero,
3674 we can decrease the result maximum. */
3675 if (vr0
->type
== VR_RANGE
3676 && TREE_CODE (vr0
->max
) == INTEGER_CST
3677 && !TREE_OVERFLOW (vr0
->max
))
3679 maxi
= tree_floor_log2 (vr0
->max
);
3680 /* For vr0 [0, 0] give up. */
3688 /* __builtin_clrsb* returns [0, prec-1]. */
3689 CASE_INT_FN (BUILT_IN_CLRSB
):
3690 arg
= gimple_call_arg (stmt
, 0);
3691 prec
= TYPE_PRECISION (TREE_TYPE (arg
));
/* Shared tail for the bit-counting builtins: store [mini, maxi].  */
3696 set_value_range (vr
, VR_RANGE
, build_int_cst (type
, mini
),
3697 build_int_cst (type
, maxi
), NULL
);
/* Fallback: ask fold whether STMT is nonnegative or nonzero.  */
3703 if (INTEGRAL_TYPE_P (type
)
3704 && gimple_stmt_nonnegative_warnv_p (stmt
, &sop
))
3705 set_value_range_to_nonnegative (vr
, type
,
3706 sop
|| stmt_overflow_infinity (stmt
));
3707 else if (vrp_stmt_computes_nonzero (stmt
, &sop
)
3709 set_value_range_to_nonnull (vr
, type
);
3711 set_value_range_to_varying (vr
);
3715 /* Try to compute a useful range out of assignment STMT and store it
/* NOTE(review): lossy extraction -- brace/keyword-only lines appear to
   be missing; restore from upstream tree-vrp.c before compiling.
   This is a dispatcher: it routes the RHS of STMT to the matching
   extract_range_from_* helper by tree-code class, then retries with
   extract_range_basic if the result is still VARYING (visible at the
   tail below).  */
3719 extract_range_from_assignment (value_range_t
*vr
, gimple stmt
)
3721 enum tree_code code
= gimple_assign_rhs_code (stmt
);
3723 if (code
== ASSERT_EXPR
)
3724 extract_range_from_assert (vr
, gimple_assign_rhs1 (stmt
));
3725 else if (code
== SSA_NAME
)
3726 extract_range_from_ssa_name (vr
, gimple_assign_rhs1 (stmt
));
3727 else if (TREE_CODE_CLASS (code
) == tcc_binary
)
3728 extract_range_from_binary_expr (vr
, gimple_assign_rhs_code (stmt
),
3729 gimple_expr_type (stmt
),
3730 gimple_assign_rhs1 (stmt
),
3731 gimple_assign_rhs2 (stmt
));
3732 else if (TREE_CODE_CLASS (code
) == tcc_unary
)
3733 extract_range_from_unary_expr (vr
, gimple_assign_rhs_code (stmt
),
3734 gimple_expr_type (stmt
),
3735 gimple_assign_rhs1 (stmt
));
3736 else if (code
== COND_EXPR
)
3737 extract_range_from_cond_expr (vr
, stmt
);
3738 else if (TREE_CODE_CLASS (code
) == tcc_comparison
)
3739 extract_range_from_comparison (vr
, gimple_assign_rhs_code (stmt
),
3740 gimple_expr_type (stmt
),
3741 gimple_assign_rhs1 (stmt
),
3742 gimple_assign_rhs2 (stmt
));
3743 else if (get_gimple_rhs_class (code
) == GIMPLE_SINGLE_RHS
3744 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt
)))
3745 set_value_range_to_value (vr
, gimple_assign_rhs1 (stmt
), NULL
);
3747 set_value_range_to_varying (vr
);
/* Last resort: derive nonnegative/nonzero facts via fold.  */
3749 if (vr
->type
== VR_VARYING
)
3750 extract_range_basic (vr
, stmt
);
3753 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3754 would be profitable to adjust VR using scalar evolution information
3755 for VAR. If so, update VR with the new limits. */
/* NOTE(review): lossy extraction -- early-return and brace lines are
   missing; restore from upstream tree-vrp.c before compiling.  */
3758 adjust_range_with_scev (value_range_t
*vr
, struct loop
*loop
,
3759 gimple stmt
, tree var
)
3761 tree init
, step
, chrec
, tmin
, tmax
, min
, max
, type
, tem
;
3762 enum ev_direction dir
;
3764 /* TODO. Don't adjust anti-ranges. An anti-range may provide
3765 better opportunities than a regular range, but I'm not sure. */
3766 if (vr
->type
== VR_ANTI_RANGE
)
3769 chrec
= instantiate_parameters (loop
, analyze_scalar_evolution (loop
, var
));
3771 /* Like in PR19590, scev can return a constant function. */
3772 if (is_gimple_min_invariant (chrec
))
3774 set_value_range_to_value (vr
, chrec
, vr
->equiv
);
3778 if (TREE_CODE (chrec
) != POLYNOMIAL_CHREC
)
/* Decompose the chrec into initial value and per-iteration step;
   op_with_constant_singleton_value_range presumably narrows each to a
   constant when its range is a singleton -- TODO confirm the missing
   "if (tem) ..." replacement lines against upstream.  */
3781 init
= initial_condition_in_loop_num (chrec
, loop
->num
);
3782 tem
= op_with_constant_singleton_value_range (init
);
3785 step
= evolution_part_in_loop_num (chrec
, loop
->num
);
3786 tem
= op_with_constant_singleton_value_range (step
);
3790 /* If STEP is symbolic, we can't know whether INIT will be the
3791 minimum or maximum value in the range. Also, unless INIT is
3792 a simple expression, compare_values and possibly other functions
3793 in tree-vrp won't be able to handle it. */
3794 if (step
== NULL_TREE
3795 || !is_gimple_min_invariant (step
)
3796 || !valid_value_p (init
))
3799 dir
= scev_direction (chrec
);
3800 if (/* Do not adjust ranges if we do not know whether the iv increases
3801 or decreases, ... */
3802 dir
== EV_DIR_UNKNOWN
3803 /* ... or if it may wrap. */
3804 || scev_probably_wraps_p (init
, step
, stmt
, get_chrec_loop (chrec
),
3808 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
3809 negative_overflow_infinity and positive_overflow_infinity,
3810 because we have concluded that the loop probably does not
3813 type
= TREE_TYPE (var
);
3814 if (POINTER_TYPE_P (type
) || !TYPE_MIN_VALUE (type
))
3815 tmin
= lower_bound_in_type (type
, type
);
3817 tmin
= TYPE_MIN_VALUE (type
);
3818 if (POINTER_TYPE_P (type
) || !TYPE_MAX_VALUE (type
))
3819 tmax
= upper_bound_in_type (type
, type
);
3821 tmax
= TYPE_MAX_VALUE (type
);
3823 /* Try to use estimated number of iterations for the loop to constrain the
3824 final value in the evolution. */
3825 if (TREE_CODE (step
) == INTEGER_CST
3826 && is_gimple_val (init
)
3827 && (TREE_CODE (init
) != SSA_NAME
3828 || get_value_range (init
)->type
== VR_RANGE
))
3832 /* We are only entering here for loop header PHI nodes, so using
3833 the number of latch executions is the correct thing to use. */
3834 if (max_loop_iterations (loop
, &nit
))
3836 value_range_t maxvr
= VR_INITIALIZER
;
3838 signop sgn
= TYPE_SIGN (TREE_TYPE (step
));
3841 wtmp
= wi::mul (step
, nit
, sgn
, &overflow
);
3842 /* If the multiplication overflowed we can't do a meaningful
3843 adjustment. Likewise if the result doesn't fit in the type
3844 of the induction variable. For a signed type we have to
3845 check whether the result has the expected signedness which
3846 is that of the step as number of iterations is unsigned. */
3848 && wi::fits_to_tree_p (wtmp
, TREE_TYPE (init
))
3850 || wi::gts_p (wtmp
, 0) == wi::gts_p (step
, 0)))
3852 tem
= wide_int_to_tree (TREE_TYPE (init
), wtmp
);
3853 extract_range_from_binary_expr (&maxvr
, PLUS_EXPR
,
3854 TREE_TYPE (init
), init
, tem
);
3855 /* Likewise if the addition did. */
3856 if (maxvr
.type
== VR_RANGE
)
3865 if (vr
->type
== VR_VARYING
|| vr
->type
== VR_UNDEFINED
)
3870 /* For VARYING or UNDEFINED ranges, just about anything we get
3871 from scalar evolutions should be better. */
3873 if (dir
== EV_DIR_DECREASES
)
3878 /* If we would create an invalid range, then just assume we
3879 know absolutely nothing. This may be over-conservative,
3880 but it's clearly safe, and should happen only in unreachable
3881 parts of code, or for invalid programs. */
3882 if (compare_values (min
, max
) == 1)
3885 set_value_range (vr
, VR_RANGE
, min
, max
, vr
->equiv
);
3887 else if (vr
->type
== VR_RANGE
)
3892 if (dir
== EV_DIR_DECREASES
)
3894 /* INIT is the maximum value. If INIT is lower than VR->MAX
3895 but no smaller than VR->MIN, set VR->MAX to INIT. */
3896 if (compare_values (init
, max
) == -1)
3899 /* According to the loop information, the variable does not
3900 overflow. If we think it does, probably because of an
3901 overflow due to arithmetic on a different INF value,
3903 if (is_negative_overflow_infinity (min
)
3904 || compare_values (min
, tmin
) == -1)
3910 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
3911 if (compare_values (init
, min
) == 1)
3914 if (is_positive_overflow_infinity (max
)
3915 || compare_values (tmax
, max
) == -1)
3919 /* If we just created an invalid range with the minimum
3920 greater than the maximum, we fail conservatively.
3921 This should happen only in unreachable
3922 parts of code, or for invalid programs. */
3923 if (compare_values (min
, max
) == 1)
3926 set_value_range (vr
, VR_RANGE
, min
, max
, vr
->equiv
);
3930 /* Return true if VAR may overflow at STMT. This checks any available
3931 loop information to see if we can determine that VAR does not
/* NOTE(review): lossy extraction -- the return statements between the
   guards below are missing; restore from upstream tree-vrp.c.
   Mirrors the front half of adjust_range_with_scev: analyze the scev
   of VAR in the enclosing loop and query scev_probably_wraps_p.  */
3935 vrp_var_may_overflow (tree var
, gimple stmt
)
3938 tree chrec
, init
, step
;
3940 if (current_loops
== NULL
)
3943 l
= loop_containing_stmt (stmt
);
3948 chrec
= instantiate_parameters (l
, analyze_scalar_evolution (l
, var
));
3949 if (TREE_CODE (chrec
) != POLYNOMIAL_CHREC
)
3952 init
= initial_condition_in_loop_num (chrec
, l
->num
);
3953 step
= evolution_part_in_loop_num (chrec
, l
->num
);
3955 if (step
== NULL_TREE
3956 || !is_gimple_min_invariant (step
)
3957 || !valid_value_p (init
))
3960 /* If we get here, we know something useful about VAR based on the
3961 loop information. If it wraps, it may overflow. */
3963 if (scev_probably_wraps_p (init
, step
, stmt
, get_chrec_loop (chrec
),
3967 if (dump_file
&& (dump_flags
& TDF_DETAILS
) != 0)
3969 print_generic_expr (dump_file
, var
, 0);
3970 fprintf (dump_file
, ": loop information indicates does not overflow\n");
3977 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
3979 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
3980 all the values in the ranges.
3982 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
3984 - Return NULL_TREE if it is not always possible to determine the
3985 value of the comparison.
3987 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
3988 overflow infinity was used in the test. */
/* NOTE(review): lossy extraction -- "return NULL_TREE;" and brace
   lines are missing throughout; restore from upstream tree-vrp.c.  */
3992 compare_ranges (enum tree_code comp
, value_range_t
*vr0
, value_range_t
*vr1
,
3993 bool *strict_overflow_p
)
3995 /* VARYING or UNDEFINED ranges cannot be compared. */
3996 if (vr0
->type
== VR_VARYING
3997 || vr0
->type
== VR_UNDEFINED
3998 || vr1
->type
== VR_VARYING
3999 || vr1
->type
== VR_UNDEFINED
)
4002 /* Anti-ranges need to be handled separately. */
4003 if (vr0
->type
== VR_ANTI_RANGE
|| vr1
->type
== VR_ANTI_RANGE
)
4005 /* If both are anti-ranges, then we cannot compute any
4007 if (vr0
->type
== VR_ANTI_RANGE
&& vr1
->type
== VR_ANTI_RANGE
)
4010 /* These comparisons are never statically computable. */
4017 /* Equality can be computed only between a range and an
4018 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
4019 if (vr0
->type
== VR_RANGE
)
4021 /* To simplify processing, make VR0 the anti-range. */
4022 value_range_t
*tmp
= vr0
;
4027 gcc_assert (comp
== NE_EXPR
|| comp
== EQ_EXPR
);
4029 if (compare_values_warnv (vr0
->min
, vr1
->min
, strict_overflow_p
) == 0
4030 && compare_values_warnv (vr0
->max
, vr1
->max
, strict_overflow_p
) == 0)
4031 return (comp
== NE_EXPR
) ? boolean_true_node
: boolean_false_node
;
4036 if (!usable_range_p (vr0
, strict_overflow_p
)
4037 || !usable_range_p (vr1
, strict_overflow_p
))
4040 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
4041 operands around and change the comparison code. */
4042 if (comp
== GT_EXPR
|| comp
== GE_EXPR
)
4045 comp
= (comp
== GT_EXPR
) ? LT_EXPR
: LE_EXPR
;
4051 if (comp
== EQ_EXPR
)
4053 /* Equality may only be computed if both ranges represent
4054 exactly one value. */
4055 if (compare_values_warnv (vr0
->min
, vr0
->max
, strict_overflow_p
) == 0
4056 && compare_values_warnv (vr1
->min
, vr1
->max
, strict_overflow_p
) == 0)
4058 int cmp_min
= compare_values_warnv (vr0
->min
, vr1
->min
,
4060 int cmp_max
= compare_values_warnv (vr0
->max
, vr1
->max
,
/* compare_values_warnv returns -2 for "not comparable" (see the
   != -2 test below); treat that as unknown.  */
4062 if (cmp_min
== 0 && cmp_max
== 0)
4063 return boolean_true_node
;
4064 else if (cmp_min
!= -2 && cmp_max
!= -2)
4065 return boolean_false_node
;
4067 /* If [V0_MIN, V1_MAX] < [V1_MIN, V1_MAX] then V0 != V1. */
4068 else if (compare_values_warnv (vr0
->min
, vr1
->max
,
4069 strict_overflow_p
) == 1
4070 || compare_values_warnv (vr1
->min
, vr0
->max
,
4071 strict_overflow_p
) == 1)
4072 return boolean_false_node
;
4076 else if (comp
== NE_EXPR
)
4080 /* If VR0 is completely to the left or completely to the right
4081 of VR1, they are always different. Notice that we need to
4082 make sure that both comparisons yield similar results to
4083 avoid comparing values that cannot be compared at
4085 cmp1
= compare_values_warnv (vr0
->max
, vr1
->min
, strict_overflow_p
);
4086 cmp2
= compare_values_warnv (vr0
->min
, vr1
->max
, strict_overflow_p
);
4087 if ((cmp1
== -1 && cmp2
== -1) || (cmp1
== 1 && cmp2
== 1))
4088 return boolean_true_node
;
4090 /* If VR0 and VR1 represent a single value and are identical,
4092 else if (compare_values_warnv (vr0
->min
, vr0
->max
,
4093 strict_overflow_p
) == 0
4094 && compare_values_warnv (vr1
->min
, vr1
->max
,
4095 strict_overflow_p
) == 0
4096 && compare_values_warnv (vr0
->min
, vr1
->min
,
4097 strict_overflow_p
) == 0
4098 && compare_values_warnv (vr0
->max
, vr1
->max
,
4099 strict_overflow_p
) == 0)
4100 return boolean_false_node
;
4102 /* Otherwise, they may or may not be different. */
4106 else if (comp
== LT_EXPR
|| comp
== LE_EXPR
)
4110 /* If VR0 is to the left of VR1, return true. */
4111 tst
= compare_values_warnv (vr0
->max
, vr1
->min
, strict_overflow_p
);
4112 if ((comp
== LT_EXPR
&& tst
== -1)
4113 || (comp
== LE_EXPR
&& (tst
== -1 || tst
== 0)))
4115 if (overflow_infinity_range_p (vr0
)
4116 || overflow_infinity_range_p (vr1
))
4117 *strict_overflow_p
= true;
4118 return boolean_true_node
;
4121 /* If VR0 is to the right of VR1, return false. */
4122 tst
= compare_values_warnv (vr0
->min
, vr1
->max
, strict_overflow_p
);
4123 if ((comp
== LT_EXPR
&& (tst
== 0 || tst
== 1))
4124 || (comp
== LE_EXPR
&& tst
== 1))
4126 if (overflow_infinity_range_p (vr0
)
4127 || overflow_infinity_range_p (vr1
))
4128 *strict_overflow_p
= true;
4129 return boolean_false_node
;
4132 /* Otherwise, we don't know. */
4140 /* Given a value range VR, a value VAL and a comparison code COMP, return
4141 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
4142 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
4143 always returns false. Return NULL_TREE if it is not always
4144 possible to determine the value of the comparison. Also set
4145 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
4146 infinity was used in the test. */
/* NOTE(review): lossy extraction -- "return NULL_TREE;" and brace
   lines are missing throughout; restore from upstream tree-vrp.c.
   Structure parallels compare_ranges above, with VAL as a scalar.  */
4149 compare_range_with_value (enum tree_code comp
, value_range_t
*vr
, tree val
,
4150 bool *strict_overflow_p
)
4152 if (vr
->type
== VR_VARYING
|| vr
->type
== VR_UNDEFINED
)
4155 /* Anti-ranges need to be handled separately. */
4156 if (vr
->type
== VR_ANTI_RANGE
)
4158 /* For anti-ranges, the only predicates that we can compute at
4159 compile time are equality and inequality. */
4166 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
4167 if (value_inside_range (val
, vr
->min
, vr
->max
) == 1)
4168 return (comp
== NE_EXPR
) ? boolean_true_node
: boolean_false_node
;
4173 if (!usable_range_p (vr
, strict_overflow_p
))
4176 if (comp
== EQ_EXPR
)
4178 /* EQ_EXPR may only be computed if VR represents exactly
4180 if (compare_values_warnv (vr
->min
, vr
->max
, strict_overflow_p
) == 0)
4182 int cmp
= compare_values_warnv (vr
->min
, val
, strict_overflow_p
);
4184 return boolean_true_node
;
4185 else if (cmp
== -1 || cmp
== 1 || cmp
== 2)
4186 return boolean_false_node
;
4188 else if (compare_values_warnv (val
, vr
->min
, strict_overflow_p
) == -1
4189 || compare_values_warnv (vr
->max
, val
, strict_overflow_p
) == -1)
4190 return boolean_false_node
;
4194 else if (comp
== NE_EXPR
)
4196 /* If VAL is not inside VR, then they are always different. */
4197 if (compare_values_warnv (vr
->max
, val
, strict_overflow_p
) == -1
4198 || compare_values_warnv (vr
->min
, val
, strict_overflow_p
) == 1)
4199 return boolean_true_node
;
4201 /* If VR represents exactly one value equal to VAL, then return
4203 if (compare_values_warnv (vr
->min
, vr
->max
, strict_overflow_p
) == 0
4204 && compare_values_warnv (vr
->min
, val
, strict_overflow_p
) == 0)
4205 return boolean_false_node
;
4207 /* Otherwise, they may or may not be different. */
4210 else if (comp
== LT_EXPR
|| comp
== LE_EXPR
)
4214 /* If VR is to the left of VAL, return true. */
4215 tst
= compare_values_warnv (vr
->max
, val
, strict_overflow_p
);
4216 if ((comp
== LT_EXPR
&& tst
== -1)
4217 || (comp
== LE_EXPR
&& (tst
== -1 || tst
== 0)))
4219 if (overflow_infinity_range_p (vr
))
4220 *strict_overflow_p
= true;
4221 return boolean_true_node
;
4224 /* If VR is to the right of VAL, return false. */
4225 tst
= compare_values_warnv (vr
->min
, val
, strict_overflow_p
);
4226 if ((comp
== LT_EXPR
&& (tst
== 0 || tst
== 1))
4227 || (comp
== LE_EXPR
&& tst
== 1))
4229 if (overflow_infinity_range_p (vr
))
4230 *strict_overflow_p
= true;
4231 return boolean_false_node
;
4234 /* Otherwise, we don't know. */
4237 else if (comp
== GT_EXPR
|| comp
== GE_EXPR
)
4241 /* If VR is to the right of VAL, return true. */
4242 tst
= compare_values_warnv (vr
->min
, val
, strict_overflow_p
);
4243 if ((comp
== GT_EXPR
&& tst
== 1)
4244 || (comp
== GE_EXPR
&& (tst
== 0 || tst
== 1)))
4246 if (overflow_infinity_range_p (vr
))
4247 *strict_overflow_p
= true;
4248 return boolean_true_node
;
4251 /* If VR is to the left of VAL, return false. */
4252 tst
= compare_values_warnv (vr
->max
, val
, strict_overflow_p
);
4253 if ((comp
== GT_EXPR
&& (tst
== -1 || tst
== 0))
4254 || (comp
== GE_EXPR
&& tst
== -1))
4256 if (overflow_infinity_range_p (vr
))
4257 *strict_overflow_p
= true;
4258 return boolean_false_node
;
4261 /* Otherwise, we don't know. */
4269 /* Debugging dumps. */
4271 void dump_value_range (FILE *, value_range_t
*);
4272 void debug_value_range (value_range_t
*);
4273 void dump_all_value_ranges (FILE *);
4274 void debug_all_value_ranges (void);
4275 void dump_vr_equiv (FILE *, bitmap
);
4276 void debug_vr_equiv (bitmap
);
4279 /* Dump value range VR to FILE. */
/* NOTE(review): lossy extraction -- the NULL-VR guard preceding the
   first fprintf and several brace lines are missing; restore from
   upstream tree-vrp.c.  Prints "[]", "UNDEFINED", "[min, max]" (with
   "~" prefix for anti-ranges and -INF/+INF/OVF markers), the equiv
   bitmap, "VARYING", or "INVALID RANGE".  */
4282 dump_value_range (FILE *file
, value_range_t
*vr
)
4285 fprintf (file
, "[]");
4286 else if (vr
->type
== VR_UNDEFINED
)
4287 fprintf (file
, "UNDEFINED");
4288 else if (vr
->type
== VR_RANGE
|| vr
->type
== VR_ANTI_RANGE
)
4290 tree type
= TREE_TYPE (vr
->min
);
4292 fprintf (file
, "%s[", (vr
->type
== VR_ANTI_RANGE
) ? "~" : "");
4294 if (is_negative_overflow_infinity (vr
->min
))
4295 fprintf (file
, "-INF(OVF)");
4296 else if (INTEGRAL_TYPE_P (type
)
4297 && !TYPE_UNSIGNED (type
)
4298 && vrp_val_is_min (vr
->min
))
4299 fprintf (file
, "-INF");
4301 print_generic_expr (file
, vr
->min
, 0);
4303 fprintf (file
, ", ");
4305 if (is_positive_overflow_infinity (vr
->max
))
4306 fprintf (file
, "+INF(OVF)");
4307 else if (INTEGRAL_TYPE_P (type
)
4308 && vrp_val_is_max (vr
->max
))
4309 fprintf (file
, "+INF");
4311 print_generic_expr (file
, vr
->max
, 0);
4313 fprintf (file
, "]");
4320 fprintf (file
, " EQUIVALENCES: { ");
4322 EXECUTE_IF_SET_IN_BITMAP (vr
->equiv
, 0, i
, bi
)
4324 print_generic_expr (file
, ssa_name (i
), 0);
4325 fprintf (file
, " ");
4329 fprintf (file
, "} (%u elements)", c
);
4332 else if (vr
->type
== VR_VARYING
)
4333 fprintf (file
, "VARYING");
4335 fprintf (file
, "INVALID RANGE");
4339 /* Dump value range VR to stderr. */
/* NOTE(review): convenience wrapper over dump_value_range for use
   from a debugger; return type/brace lines lost in extraction.  */
4342 debug_value_range (value_range_t
*vr
)
4344 dump_value_range (stderr
, vr
);
4345 fprintf (stderr
, "\n");
4349 /* Dump value ranges of all SSA_NAMEs to FILE. */
/* NOTE(review): iterates the pass-local vr_value lattice (0 ..
   num_vr_values); the per-entry NULL guard seems to have been lost
   in extraction -- confirm against upstream.  */
4352 dump_all_value_ranges (FILE *file
)
4356 for (i
= 0; i
< num_vr_values
; i
++)
4360 print_generic_expr (file
, ssa_name (i
), 0);
4361 fprintf (file
, ": ");
4362 dump_value_range (file
, vr_value
[i
]);
4363 fprintf (file
, "\n");
4367 fprintf (file
, "\n");
4371 /* Dump all value ranges to stderr. */
/* NOTE(review): debugger convenience wrapper.  */
4374 debug_all_value_ranges (void)
4376 dump_all_value_ranges (stderr
);
4380 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
4381 create a new SSA name N and return the assertion assignment
4382 'V = ASSERT_EXPR <V, V OP W>'. */
/* NOTE(review): lossy extraction -- local declarations and the final
   "return assertion;" appear to be missing; restore from upstream.  */
4385 build_assert_expr_for (tree cond
, tree v
)
4390 gcc_assert (TREE_CODE (v
) == SSA_NAME
4391 && COMPARISON_CLASS_P (cond
));
4393 a
= build2 (ASSERT_EXPR
, TREE_TYPE (v
), v
, cond
);
4394 assertion
= gimple_build_assign (NULL_TREE
, a
);
4396 /* The new ASSERT_EXPR, creates a new SSA name that replaces the
4397 operand of the ASSERT_EXPR. Create it so the new name and the old one
4398 are registered in the replacement table so that we can fix the SSA web
4399 after adding all the ASSERT_EXPRs. */
4400 create_new_def_for (v
, assertion
, NULL
);
4406 /* Return false if EXPR is a predicate expression involving floating
/* NOTE(review): the comment polarity above looks inverted relative to
   the code -- FLOAT_TYPE_P yields nonzero when the GIMPLE_COND's LHS
   type IS floating point, so this returns TRUE for fp predicates.
   Confirm against upstream and fix the comment, not the code.  */
4410 fp_predicate (gimple stmt
)
4412 GIMPLE_CHECK (stmt
, GIMPLE_COND
);
4414 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt
)));
4418 /* If the range of values taken by OP can be inferred after STMT executes,
4419 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4420 describes the inferred range. Return true if a range could be
/* NOTE(review): lossy extraction -- "return false;"/"return true;"
   lines after each guard are missing; restore from upstream.  The only
   inference visible here is pointer non-NULL-ness after a dereference
   (guarded by -fdelete-null-pointer-checks).  */
4424 infer_value_range (gimple stmt
, tree op
, enum tree_code
*comp_code_p
, tree
*val_p
)
4427 *comp_code_p
= ERROR_MARK
;
4429 /* Do not attempt to infer anything in names that flow through
4431 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op
))
4434 /* Similarly, don't infer anything from statements that may throw
4436 if (stmt_could_throw_p (stmt
))
4439 /* If STMT is the last statement of a basic block with no
4440 successors, there is no point inferring anything about any of its
4441 operands. We would not be able to find a proper insertion point
4442 for the assertion, anyway. */
4443 if (stmt_ends_bb_p (stmt
) && EDGE_COUNT (gimple_bb (stmt
)->succs
) == 0)
4446 /* We can only assume that a pointer dereference will yield
4447 non-NULL if -fdelete-null-pointer-checks is enabled. */
4448 if (flag_delete_null_pointer_checks
4449 && POINTER_TYPE_P (TREE_TYPE (op
))
4450 && gimple_code (stmt
) != GIMPLE_ASM
)
4452 unsigned num_uses
, num_loads
, num_stores
;
4454 count_uses_and_derefs (op
, stmt
, &num_uses
, &num_loads
, &num_stores
);
4455 if (num_loads
+ num_stores
> 0)
4457 *val_p
= build_int_cst (TREE_TYPE (op
), 0);
4458 *comp_code_p
= NE_EXPR
;
4467 void dump_asserts_for (FILE *, tree
);
4468 void debug_asserts_for (tree
);
4469 void dump_all_asserts (FILE *);
4470 void debug_all_asserts (void);
4472 /* Dump all the registered assertions for NAME to FILE. */
/* NOTE(review): walks the asserts_for linked list for NAME's SSA
   version; the loop construct and "loc = loc->next" advance appear to
   have been lost in extraction -- restore from upstream.  */
4475 dump_asserts_for (FILE *file
, tree name
)
4479 fprintf (file
, "Assertions to be inserted for ");
4480 print_generic_expr (file
, name
, 0);
4481 fprintf (file
, "\n");
4483 loc
= asserts_for
[SSA_NAME_VERSION (name
)];
4486 fprintf (file
, "\t");
4487 print_gimple_stmt (file
, gsi_stmt (loc
->si
), 0, 0);
4488 fprintf (file
, "\n\tBB #%d", loc
->bb
->index
);
4491 fprintf (file
, "\n\tEDGE %d->%d", loc
->e
->src
->index
,
4492 loc
->e
->dest
->index
);
4493 dump_edge_info (file
, loc
->e
, dump_flags
, 0);
4495 fprintf (file
, "\n\tPREDICATE: ");
4496 print_generic_expr (file
, name
, 0);
4497 fprintf (file
, " %s ", tree_code_name
[(int)loc
->comp_code
]);
4498 print_generic_expr (file
, loc
->val
, 0);
4499 fprintf (file
, "\n\n");
4503 fprintf (file
, "\n");
4507 /* Dump all the registered assertions for NAME to stderr. */
/* NOTE(review): debugger convenience wrapper.  */
4510 debug_asserts_for (tree name
)
4512 dump_asserts_for (stderr
, name
);
4516 /* Dump all the registered assertions for all the names to FILE. */
/* NOTE(review): iterates the need_assert_for bitmap of SSA versions
   that have pending assertions.  */
4519 dump_all_asserts (FILE *file
)
4524 fprintf (file
, "\nASSERT_EXPRs to be inserted\n\n");
4525 EXECUTE_IF_SET_IN_BITMAP (need_assert_for
, 0, i
, bi
)
4526 dump_asserts_for (file
, ssa_name (i
));
4527 fprintf (file
, "\n");
4531 /* Dump all the registered assertions for all the names to stderr. */
/* NOTE(review): debugger convenience wrapper.  */
4534 debug_all_asserts (void)
4536 dump_all_asserts (stderr
);
4540 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4541 'EXPR COMP_CODE VAL' at a location that dominates block BB or
4542 E->DEST, then register this location as a possible insertion point
4543 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
4545 BB, E and SI provide the exact insertion point for the new
4546 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
4547 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4548 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4549 must not be NULL. */
/* NOTE(review): lossy extraction -- the VAL parameter declaration, the
   list-walk loop construct, several field initializations of N and the
   early "return" inside the walk are missing; restore from upstream
   before compiling.  */
4552 register_new_assert_for (tree name
, tree expr
,
4553 enum tree_code comp_code
,
4557 gimple_stmt_iterator si
)
4559 assert_locus_t n
, loc
, last_loc
;
4560 basic_block dest_bb
;
4562 gcc_checking_assert (bb
== NULL
|| e
== NULL
);
4565 gcc_checking_assert (gimple_code (gsi_stmt (si
)) != GIMPLE_COND
4566 && gimple_code (gsi_stmt (si
)) != GIMPLE_SWITCH
);
4568 /* Never build an assert comparing against an integer constant with
4569 TREE_OVERFLOW set. This confuses our undefined overflow warning
4571 if (TREE_CODE (val
) == INTEGER_CST
4572 && TREE_OVERFLOW (val
))
4573 val
= wide_int_to_tree (TREE_TYPE (val
), val
);
4575 /* The new assertion A will be inserted at BB or E. We need to
4576 determine if the new location is dominated by a previously
4577 registered location for A. If we are doing an edge insertion,
4578 assume that A will be inserted at E->DEST. Note that this is not
4581 If E is a critical edge, it will be split. But even if E is
4582 split, the new block will dominate the same set of blocks that
4585 The reverse, however, is not true, blocks dominated by E->DEST
4586 will not be dominated by the new block created to split E. So,
4587 if the insertion location is on a critical edge, we will not use
4588 the new location to move another assertion previously registered
4589 at a block dominated by E->DEST. */
4590 dest_bb
= (bb
) ? bb
: e
->dest
;
4592 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
4593 VAL at a block dominating DEST_BB, then we don't need to insert a new
4594 one. Similarly, if the same assertion already exists at a block
4595 dominated by DEST_BB and the new location is not on a critical
4596 edge, then update the existing location for the assertion (i.e.,
4597 move the assertion up in the dominance tree).
4599 Note, this is implemented as a simple linked list because there
4600 should not be more than a handful of assertions registered per
4601 name. If this becomes a performance problem, a table hashed by
4602 COMP_CODE and VAL could be implemented. */
4603 loc
= asserts_for
[SSA_NAME_VERSION (name
)];
4607 if (loc
->comp_code
== comp_code
4609 || operand_equal_p (loc
->val
, val
, 0))
4610 && (loc
->expr
== expr
4611 || operand_equal_p (loc
->expr
, expr
, 0)))
4613 /* If E is not a critical edge and DEST_BB
4614 dominates the existing location for the assertion, move
4615 the assertion up in the dominance tree by updating its
4616 location information. */
4617 if ((e
== NULL
|| !EDGE_CRITICAL_P (e
))
4618 && dominated_by_p (CDI_DOMINATORS
, loc
->bb
, dest_bb
))
4627 /* Update the last node of the list and move to the next one. */
4632 /* If we didn't find an assertion already registered for
4633 NAME COMP_CODE VAL, add a new one at the end of the list of
4634 assertions associated with NAME. */
4635 n
= XNEW (struct assert_locus_d
);
4639 n
->comp_code
= comp_code
;
4647 asserts_for
[SSA_NAME_VERSION (name
)] = n
;
4649 bitmap_set_bit (need_assert_for
, SSA_NAME_VERSION (name
));
4652 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4653 Extract a suitable test code and value and store them into *CODE_P and
4654 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4656 If no extraction was possible, return FALSE, otherwise return TRUE.
4658 If INVERT is true, then we invert the result stored into *CODE_P. */
/* NOTE(review): lossy extraction -- the VAL_P parameter, the local
   "val" assignments, the "return false;" exits and the final
   "*val_p = val; return true;" appear to be missing; restore from
   upstream before compiling.  */
4661 extract_code_and_val_from_cond_with_ops (tree name
, enum tree_code cond_code
,
4662 tree cond_op0
, tree cond_op1
,
4663 bool invert
, enum tree_code
*code_p
,
4666 enum tree_code comp_code
;
4669 /* Otherwise, we have a comparison of the form NAME COMP VAL
4670 or VAL COMP NAME. */
4671 if (name
== cond_op1
)
4673 /* If the predicate is of the form VAL COMP NAME, flip
4674 COMP around because we need to register NAME as the
4675 first operand in the predicate. */
4676 comp_code
= swap_tree_comparison (cond_code
);
4681 /* The comparison is of the form NAME COMP VAL, so the
4682 comparison code remains unchanged. */
4683 comp_code
= cond_code
;
4687 /* Invert the comparison code as necessary. */
4689 comp_code
= invert_tree_comparison (comp_code
, 0);
4691 /* VRP does not handle float types. */
4692 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val
)))
4695 /* Do not register always-false predicates.
4696 FIXME: this works around a limitation in fold() when dealing with
4697 enumerations. Given 'enum { N1, N2 } x;', fold will not
4698 fold 'if (x > N2)' to 'if (0)'. */
4699 if ((comp_code
== GT_EXPR
|| comp_code
== LT_EXPR
)
4700 && INTEGRAL_TYPE_P (TREE_TYPE (val
)))
4702 tree min
= TYPE_MIN_VALUE (TREE_TYPE (val
));
4703 tree max
= TYPE_MAX_VALUE (TREE_TYPE (val
));
4705 if (comp_code
== GT_EXPR
4707 || compare_values (val
, max
) == 0))
4710 if (comp_code
== LT_EXPR
4712 || compare_values (val
, min
) == 0))
4715 *code_p
= comp_code
;
4720 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
4721 (otherwise return VAL). VAL and MASK must be zero-extended for
4722 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
4723 (to transform signed values into unsigned) and at the end xor
/* NOTE(review): lossy extraction -- the "unsigned int prec" parameter
   line, the initial "val ^= sgnbit;" and loop-body lines are missing;
   restore from upstream before compiling.  Scans bit positions from
   least to most significant, proposing the next candidate that clears
   all bits below the tested one while staying within MASK.  */
4727 masked_increment (wide_int val
, wide_int mask
, wide_int sgnbit
,
4730 wide_int bit
= wi::one (prec
), res
;
4734 for (i
= 0; i
< prec
; i
++, bit
+= bit
)
4737 if ((res
& bit
) == 0)
4740 res
= (val
+ bit
).and_not (res
);
4742 if (wi::gtu_p (res
, val
))
4743 return res
^ sgnbit
;
4745 return val
^ sgnbit
;
4748 /* Try to register an edge assertion for SSA name NAME on edge E for
4749 the condition COND contributing to the conditional jump pointed to by BSI.
4750 Invert the condition COND if INVERT is true.
4751 Return true if an assertion for NAME could be registered. */
4754 register_edge_assert_for_2 (tree name
, edge e
, gimple_stmt_iterator bsi
,
4755 enum tree_code cond_code
,
4756 tree cond_op0
, tree cond_op1
, bool invert
)
4759 enum tree_code comp_code
;
4760 bool retval
= false;
4762 if (!extract_code_and_val_from_cond_with_ops (name
, cond_code
,
4765 invert
, &comp_code
, &val
))
4768 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4769 reachable from E. */
4770 if (live_on_edge (e
, name
)
4771 && !has_single_use (name
))
4773 register_new_assert_for (name
, name
, comp_code
, val
, NULL
, e
, bsi
);
4777 /* In the case of NAME <= CST and NAME being defined as
4778 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
4779 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
4780 This catches range and anti-range tests. */
4781 if ((comp_code
== LE_EXPR
4782 || comp_code
== GT_EXPR
)
4783 && TREE_CODE (val
) == INTEGER_CST
4784 && TYPE_UNSIGNED (TREE_TYPE (val
)))
4786 gimple def_stmt
= SSA_NAME_DEF_STMT (name
);
4787 tree cst2
= NULL_TREE
, name2
= NULL_TREE
, name3
= NULL_TREE
;
4789 /* Extract CST2 from the (optional) addition. */
4790 if (is_gimple_assign (def_stmt
)
4791 && gimple_assign_rhs_code (def_stmt
) == PLUS_EXPR
)
4793 name2
= gimple_assign_rhs1 (def_stmt
);
4794 cst2
= gimple_assign_rhs2 (def_stmt
);
4795 if (TREE_CODE (name2
) == SSA_NAME
4796 && TREE_CODE (cst2
) == INTEGER_CST
)
4797 def_stmt
= SSA_NAME_DEF_STMT (name2
);
4800 /* Extract NAME2 from the (optional) sign-changing cast. */
4801 if (gimple_assign_cast_p (def_stmt
))
4803 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt
))
4804 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt
)))
4805 && (TYPE_PRECISION (gimple_expr_type (def_stmt
))
4806 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt
)))))
4807 name3
= gimple_assign_rhs1 (def_stmt
);
4810 /* If name3 is used later, create an ASSERT_EXPR for it. */
4811 if (name3
!= NULL_TREE
4812 && TREE_CODE (name3
) == SSA_NAME
4813 && (cst2
== NULL_TREE
4814 || TREE_CODE (cst2
) == INTEGER_CST
)
4815 && INTEGRAL_TYPE_P (TREE_TYPE (name3
))
4816 && live_on_edge (e
, name3
)
4817 && !has_single_use (name3
))
4821 /* Build an expression for the range test. */
4822 tmp
= build1 (NOP_EXPR
, TREE_TYPE (name
), name3
);
4823 if (cst2
!= NULL_TREE
)
4824 tmp
= build2 (PLUS_EXPR
, TREE_TYPE (name
), tmp
, cst2
);
4828 fprintf (dump_file
, "Adding assert for ");
4829 print_generic_expr (dump_file
, name3
, 0);
4830 fprintf (dump_file
, " from ");
4831 print_generic_expr (dump_file
, tmp
, 0);
4832 fprintf (dump_file
, "\n");
4835 register_new_assert_for (name3
, tmp
, comp_code
, val
, NULL
, e
, bsi
);
4840 /* If name2 is used later, create an ASSERT_EXPR for it. */
4841 if (name2
!= NULL_TREE
4842 && TREE_CODE (name2
) == SSA_NAME
4843 && TREE_CODE (cst2
) == INTEGER_CST
4844 && INTEGRAL_TYPE_P (TREE_TYPE (name2
))
4845 && live_on_edge (e
, name2
)
4846 && !has_single_use (name2
))
4850 /* Build an expression for the range test. */
4852 if (TREE_TYPE (name
) != TREE_TYPE (name2
))
4853 tmp
= build1 (NOP_EXPR
, TREE_TYPE (name
), tmp
);
4854 if (cst2
!= NULL_TREE
)
4855 tmp
= build2 (PLUS_EXPR
, TREE_TYPE (name
), tmp
, cst2
);
4859 fprintf (dump_file
, "Adding assert for ");
4860 print_generic_expr (dump_file
, name2
, 0);
4861 fprintf (dump_file
, " from ");
4862 print_generic_expr (dump_file
, tmp
, 0);
4863 fprintf (dump_file
, "\n");
4866 register_new_assert_for (name2
, tmp
, comp_code
, val
, NULL
, e
, bsi
);
4872 /* In the case of post-in/decrement tests like if (i++) ... and uses
4873 of the in/decremented value on the edge the extra name we want to
4874 assert for is not on the def chain of the name compared. Instead
4875 it is in the set of use stmts. */
4876 if ((comp_code
== NE_EXPR
4877 || comp_code
== EQ_EXPR
)
4878 && TREE_CODE (val
) == INTEGER_CST
)
4880 imm_use_iterator ui
;
4882 FOR_EACH_IMM_USE_STMT (use_stmt
, ui
, name
)
4884 /* Cut off to use-stmts that are in the predecessor. */
4885 if (gimple_bb (use_stmt
) != e
->src
)
4888 if (!is_gimple_assign (use_stmt
))
4891 enum tree_code code
= gimple_assign_rhs_code (use_stmt
);
4892 if (code
!= PLUS_EXPR
4893 && code
!= MINUS_EXPR
)
4896 tree cst
= gimple_assign_rhs2 (use_stmt
);
4897 if (TREE_CODE (cst
) != INTEGER_CST
)
4900 tree name2
= gimple_assign_lhs (use_stmt
);
4901 if (live_on_edge (e
, name2
))
4903 cst
= int_const_binop (code
, val
, cst
);
4904 register_new_assert_for (name2
, name2
, comp_code
, cst
,
4911 if (TREE_CODE_CLASS (comp_code
) == tcc_comparison
4912 && TREE_CODE (val
) == INTEGER_CST
)
4914 gimple def_stmt
= SSA_NAME_DEF_STMT (name
);
4915 tree name2
= NULL_TREE
, names
[2], cst2
= NULL_TREE
;
4916 tree val2
= NULL_TREE
;
4917 unsigned int prec
= TYPE_PRECISION (TREE_TYPE (val
));
4918 wide_int mask
= wi::zero (prec
);
4919 unsigned int nprec
= prec
;
4920 enum tree_code rhs_code
= ERROR_MARK
;
4922 if (is_gimple_assign (def_stmt
))
4923 rhs_code
= gimple_assign_rhs_code (def_stmt
);
4925 /* Add asserts for NAME cmp CST and NAME being defined
4926 as NAME = (int) NAME2. */
4927 if (!TYPE_UNSIGNED (TREE_TYPE (val
))
4928 && (comp_code
== LE_EXPR
|| comp_code
== LT_EXPR
4929 || comp_code
== GT_EXPR
|| comp_code
== GE_EXPR
)
4930 && gimple_assign_cast_p (def_stmt
))
4932 name2
= gimple_assign_rhs1 (def_stmt
);
4933 if (CONVERT_EXPR_CODE_P (rhs_code
)
4934 && INTEGRAL_TYPE_P (TREE_TYPE (name2
))
4935 && TYPE_UNSIGNED (TREE_TYPE (name2
))
4936 && prec
== TYPE_PRECISION (TREE_TYPE (name2
))
4937 && (comp_code
== LE_EXPR
|| comp_code
== GT_EXPR
4938 || !tree_int_cst_equal (val
,
4939 TYPE_MIN_VALUE (TREE_TYPE (val
))))
4940 && live_on_edge (e
, name2
)
4941 && !has_single_use (name2
))
4944 enum tree_code new_comp_code
= comp_code
;
4946 cst
= fold_convert (TREE_TYPE (name2
),
4947 TYPE_MIN_VALUE (TREE_TYPE (val
)));
4948 /* Build an expression for the range test. */
4949 tmp
= build2 (PLUS_EXPR
, TREE_TYPE (name2
), name2
, cst
);
4950 cst
= fold_build2 (PLUS_EXPR
, TREE_TYPE (name2
), cst
,
4951 fold_convert (TREE_TYPE (name2
), val
));
4952 if (comp_code
== LT_EXPR
|| comp_code
== GE_EXPR
)
4954 new_comp_code
= comp_code
== LT_EXPR
? LE_EXPR
: GT_EXPR
;
4955 cst
= fold_build2 (MINUS_EXPR
, TREE_TYPE (name2
), cst
,
4956 build_int_cst (TREE_TYPE (name2
), 1));
4961 fprintf (dump_file
, "Adding assert for ");
4962 print_generic_expr (dump_file
, name2
, 0);
4963 fprintf (dump_file
, " from ");
4964 print_generic_expr (dump_file
, tmp
, 0);
4965 fprintf (dump_file
, "\n");
4968 register_new_assert_for (name2
, tmp
, new_comp_code
, cst
, NULL
,
4975 /* Add asserts for NAME cmp CST and NAME being defined as
4976 NAME = NAME2 >> CST2.
4978 Extract CST2 from the right shift. */
4979 if (rhs_code
== RSHIFT_EXPR
)
4981 name2
= gimple_assign_rhs1 (def_stmt
);
4982 cst2
= gimple_assign_rhs2 (def_stmt
);
4983 if (TREE_CODE (name2
) == SSA_NAME
4984 && tree_fits_uhwi_p (cst2
)
4985 && INTEGRAL_TYPE_P (TREE_TYPE (name2
))
4986 && IN_RANGE (tree_to_uhwi (cst2
), 1, prec
- 1)
4987 && prec
== GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val
)))
4988 && live_on_edge (e
, name2
)
4989 && !has_single_use (name2
))
4991 mask
= wi::mask (tree_to_uhwi (cst2
), false, prec
);
4992 val2
= fold_binary (LSHIFT_EXPR
, TREE_TYPE (val
), val
, cst2
);
4995 if (val2
!= NULL_TREE
4996 && TREE_CODE (val2
) == INTEGER_CST
4997 && simple_cst_equal (fold_build2 (RSHIFT_EXPR
,
5001 enum tree_code new_comp_code
= comp_code
;
5005 if (comp_code
== EQ_EXPR
|| comp_code
== NE_EXPR
)
5007 if (!TYPE_UNSIGNED (TREE_TYPE (val
)))
5009 tree type
= build_nonstandard_integer_type (prec
, 1);
5010 tmp
= build1 (NOP_EXPR
, type
, name2
);
5011 val2
= fold_convert (type
, val2
);
5013 tmp
= fold_build2 (MINUS_EXPR
, TREE_TYPE (tmp
), tmp
, val2
);
5014 new_val
= wide_int_to_tree (TREE_TYPE (tmp
), mask
);
5015 new_comp_code
= comp_code
== EQ_EXPR
? LE_EXPR
: GT_EXPR
;
5017 else if (comp_code
== LT_EXPR
|| comp_code
== GE_EXPR
)
5020 = wi::min_value (prec
, TYPE_SIGN (TREE_TYPE (val
)));
5022 if (minval
== wide_int (new_val
))
5023 new_val
= NULL_TREE
;
5028 = wi::max_value (prec
, TYPE_SIGN (TREE_TYPE (val
)));
5029 mask
|= wide_int (val2
);
5031 new_val
= NULL_TREE
;
5033 new_val
= wide_int_to_tree (TREE_TYPE (val2
), mask
);
5040 fprintf (dump_file
, "Adding assert for ");
5041 print_generic_expr (dump_file
, name2
, 0);
5042 fprintf (dump_file
, " from ");
5043 print_generic_expr (dump_file
, tmp
, 0);
5044 fprintf (dump_file
, "\n");
5047 register_new_assert_for (name2
, tmp
, new_comp_code
, new_val
,
5053 /* Add asserts for NAME cmp CST and NAME being defined as
5054 NAME = NAME2 & CST2.
5056 Extract CST2 from the and.
5059 NAME = (unsigned) NAME2;
5060 casts where NAME's type is unsigned and has smaller precision
5061 than NAME2's type as if it was NAME = NAME2 & MASK. */
5062 names
[0] = NULL_TREE
;
5063 names
[1] = NULL_TREE
;
5065 if (rhs_code
== BIT_AND_EXPR
5066 || (CONVERT_EXPR_CODE_P (rhs_code
)
5067 && TREE_CODE (TREE_TYPE (val
)) == INTEGER_TYPE
5068 && TYPE_UNSIGNED (TREE_TYPE (val
))
5069 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt
)))
5073 name2
= gimple_assign_rhs1 (def_stmt
);
5074 if (rhs_code
== BIT_AND_EXPR
)
5075 cst2
= gimple_assign_rhs2 (def_stmt
);
5078 cst2
= TYPE_MAX_VALUE (TREE_TYPE (val
));
5079 nprec
= TYPE_PRECISION (TREE_TYPE (name2
));
5081 if (TREE_CODE (name2
) == SSA_NAME
5082 && INTEGRAL_TYPE_P (TREE_TYPE (name2
))
5083 && TREE_CODE (cst2
) == INTEGER_CST
5084 && !integer_zerop (cst2
)
5086 || TYPE_UNSIGNED (TREE_TYPE (val
))))
5088 gimple def_stmt2
= SSA_NAME_DEF_STMT (name2
);
5089 if (gimple_assign_cast_p (def_stmt2
))
5091 names
[1] = gimple_assign_rhs1 (def_stmt2
);
5092 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2
))
5093 || !INTEGRAL_TYPE_P (TREE_TYPE (names
[1]))
5094 || (TYPE_PRECISION (TREE_TYPE (name2
))
5095 != TYPE_PRECISION (TREE_TYPE (names
[1])))
5096 || !live_on_edge (e
, names
[1])
5097 || has_single_use (names
[1]))
5098 names
[1] = NULL_TREE
;
5100 if (live_on_edge (e
, name2
)
5101 && !has_single_use (name2
))
5105 if (names
[0] || names
[1])
5107 wide_int minv
, maxv
, valv
, cst2v
;
5108 wide_int tem
, sgnbit
;
5109 bool valid_p
= false, valn
= false, cst2n
= false;
5110 enum tree_code ccode
= comp_code
;
5112 valv
= wide_int::from (val
, nprec
, UNSIGNED
);
5113 cst2v
= wide_int::from (cst2
, nprec
, UNSIGNED
);
5114 if (TYPE_SIGN (TREE_TYPE (val
)) == SIGNED
)
5116 valn
= wi::neg_p (wi::sext (valv
, nprec
));
5117 cst2n
= wi::neg_p (wi::sext (cst2v
, nprec
));
5119 /* If CST2 doesn't have most significant bit set,
5120 but VAL is negative, we have comparison like
5121 if ((x & 0x123) > -4) (always true). Just give up. */
5125 sgnbit
= wi::set_bit_in_zero (nprec
- 1, nprec
);
5127 sgnbit
= wi::zero (nprec
);
5128 minv
= valv
& cst2v
;
5132 /* Minimum unsigned value for equality is VAL & CST2
5133 (should be equal to VAL, otherwise we probably should
5134 have folded the comparison into false) and
5135 maximum unsigned value is VAL | ~CST2. */
5136 maxv
= valv
| ~cst2v
;
5137 maxv
= wi::zext (maxv
, nprec
);
5142 tem
= valv
| ~cst2v
;
5143 tem
= wi::zext (tem
, nprec
);
5144 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
5148 sgnbit
= wi::zero (nprec
);
5151 /* If (VAL | ~CST2) is all ones, handle it as
5152 (X & CST2) < VAL. */
5157 sgnbit
= wi::zero (nprec
);
5160 if (!cst2n
&& wi::neg_p (wi::sext (cst2v
, nprec
)))
5161 sgnbit
= wi::set_bit_in_zero (nprec
- 1, nprec
);
5170 if (tem
== wi::mask (nprec
- 1, false, nprec
))
5176 sgnbit
= wi::zero (nprec
);
5181 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
5182 is VAL and maximum unsigned value is ~0. For signed
5183 comparison, if CST2 doesn't have most significant bit
5184 set, handle it similarly. If CST2 has MSB set,
5185 the minimum is the same, and maximum is ~0U/2. */
5188 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
5190 minv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
5194 maxv
= wi::mask (nprec
- (cst2n
? 1 : 0), false, nprec
);
5200 /* Find out smallest MINV where MINV > VAL
5201 && (MINV & CST2) == MINV, if any. If VAL is signed and
5202 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
5203 minv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
5206 maxv
= wi::mask (nprec
- (cst2n
? 1 : 0), false, nprec
);
5211 /* Minimum unsigned value for <= is 0 and maximum
5212 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
5213 Otherwise, find smallest VAL2 where VAL2 > VAL
5214 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5216 For signed comparison, if CST2 doesn't have most
5217 significant bit set, handle it similarly. If CST2 has
5218 MSB set, the maximum is the same and minimum is INT_MIN. */
5223 maxv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
5229 maxv
= wi::zext (maxv
, nprec
);
5236 /* Minimum unsigned value for < is 0 and maximum
5237 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
5238 Otherwise, find smallest VAL2 where VAL2 > VAL
5239 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5241 For signed comparison, if CST2 doesn't have most
5242 significant bit set, handle it similarly. If CST2 has
5243 MSB set, the maximum is the same and minimum is INT_MIN. */
5252 maxv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
5258 maxv
= wi::zext (maxv
, nprec
);
5267 && wi::zext (maxv
- minv
, nprec
) != wi::minus_one (nprec
))
5269 tree tmp
, new_val
, type
;
5272 for (i
= 0; i
< 2; i
++)
5275 wide_int maxv2
= maxv
;
5277 type
= TREE_TYPE (names
[i
]);
5278 if (!TYPE_UNSIGNED (type
))
5280 type
= build_nonstandard_integer_type (nprec
, 1);
5281 tmp
= build1 (NOP_EXPR
, type
, names
[i
]);
5285 tmp
= build2 (PLUS_EXPR
, type
, tmp
,
5286 wide_int_to_tree (type
, -minv
));
5287 maxv2
= maxv
- minv
;
5289 new_val
= wide_int_to_tree (type
, maxv2
);
5293 fprintf (dump_file
, "Adding assert for ");
5294 print_generic_expr (dump_file
, names
[i
], 0);
5295 fprintf (dump_file
, " from ");
5296 print_generic_expr (dump_file
, tmp
, 0);
5297 fprintf (dump_file
, "\n");
5300 register_new_assert_for (names
[i
], tmp
, LE_EXPR
,
5301 new_val
, NULL
, e
, bsi
);
5311 /* OP is an operand of a truth value expression which is known to have
5312 a particular value. Register any asserts for OP and for any
5313 operands in OP's defining statement.
5315 If CODE is EQ_EXPR, then we want to register OP is zero (false),
5316 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
5319 register_edge_assert_for_1 (tree op
, enum tree_code code
,
5320 edge e
, gimple_stmt_iterator bsi
)
5322 bool retval
= false;
5325 enum tree_code rhs_code
;
5327 /* We only care about SSA_NAMEs. */
5328 if (TREE_CODE (op
) != SSA_NAME
)
5331 /* We know that OP will have a zero or nonzero value. If OP is used
5332 more than once go ahead and register an assert for OP.
5334 The FOUND_IN_SUBGRAPH support is not helpful in this situation as
5335 it will always be set for OP (because OP is used in a COND_EXPR in
5337 if (!has_single_use (op
))
5339 val
= build_int_cst (TREE_TYPE (op
), 0);
5340 register_new_assert_for (op
, op
, code
, val
, NULL
, e
, bsi
);
5344 /* Now look at how OP is set. If it's set from a comparison,
5345 a truth operation or some bit operations, then we may be able
5346 to register information about the operands of that assignment. */
5347 op_def
= SSA_NAME_DEF_STMT (op
);
5348 if (gimple_code (op_def
) != GIMPLE_ASSIGN
)
5351 rhs_code
= gimple_assign_rhs_code (op_def
);
5353 if (TREE_CODE_CLASS (rhs_code
) == tcc_comparison
)
5355 bool invert
= (code
== EQ_EXPR
? true : false);
5356 tree op0
= gimple_assign_rhs1 (op_def
);
5357 tree op1
= gimple_assign_rhs2 (op_def
);
5359 if (TREE_CODE (op0
) == SSA_NAME
)
5360 retval
|= register_edge_assert_for_2 (op0
, e
, bsi
, rhs_code
, op0
, op1
,
5362 if (TREE_CODE (op1
) == SSA_NAME
)
5363 retval
|= register_edge_assert_for_2 (op1
, e
, bsi
, rhs_code
, op0
, op1
,
5366 else if ((code
== NE_EXPR
5367 && gimple_assign_rhs_code (op_def
) == BIT_AND_EXPR
)
5369 && gimple_assign_rhs_code (op_def
) == BIT_IOR_EXPR
))
5371 /* Recurse on each operand. */
5372 tree op0
= gimple_assign_rhs1 (op_def
);
5373 tree op1
= gimple_assign_rhs2 (op_def
);
5374 if (TREE_CODE (op0
) == SSA_NAME
5375 && has_single_use (op0
))
5376 retval
|= register_edge_assert_for_1 (op0
, code
, e
, bsi
);
5377 if (TREE_CODE (op1
) == SSA_NAME
5378 && has_single_use (op1
))
5379 retval
|= register_edge_assert_for_1 (op1
, code
, e
, bsi
);
5381 else if (gimple_assign_rhs_code (op_def
) == BIT_NOT_EXPR
5382 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def
))) == 1)
5384 /* Recurse, flipping CODE. */
5385 code
= invert_tree_comparison (code
, false);
5386 retval
|= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def
),
5389 else if (gimple_assign_rhs_code (op_def
) == SSA_NAME
)
5391 /* Recurse through the copy. */
5392 retval
|= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def
),
5395 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def
)))
5397 /* Recurse through the type conversion. */
5398 retval
|= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def
),
5405 /* Try to register an edge assertion for SSA name NAME on edge E for
5406 the condition COND contributing to the conditional jump pointed to by SI.
5407 Return true if an assertion for NAME could be registered. */
5410 register_edge_assert_for (tree name
, edge e
, gimple_stmt_iterator si
,
5411 enum tree_code cond_code
, tree cond_op0
,
5415 enum tree_code comp_code
;
5416 bool retval
= false;
5417 bool is_else_edge
= (e
->flags
& EDGE_FALSE_VALUE
) != 0;
5419 /* Do not attempt to infer anything in names that flow through
5421 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name
))
5424 if (!extract_code_and_val_from_cond_with_ops (name
, cond_code
,
5430 /* Register ASSERT_EXPRs for name. */
5431 retval
|= register_edge_assert_for_2 (name
, e
, si
, cond_code
, cond_op0
,
5432 cond_op1
, is_else_edge
);
5435 /* If COND is effectively an equality test of an SSA_NAME against
5436 the value zero or one, then we may be able to assert values
5437 for SSA_NAMEs which flow into COND. */
5439 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
5440 statement of NAME we can assert both operands of the BIT_AND_EXPR
5441 have nonzero value. */
5442 if (((comp_code
== EQ_EXPR
&& integer_onep (val
))
5443 || (comp_code
== NE_EXPR
&& integer_zerop (val
))))
5445 gimple def_stmt
= SSA_NAME_DEF_STMT (name
);
5447 if (is_gimple_assign (def_stmt
)
5448 && gimple_assign_rhs_code (def_stmt
) == BIT_AND_EXPR
)
5450 tree op0
= gimple_assign_rhs1 (def_stmt
);
5451 tree op1
= gimple_assign_rhs2 (def_stmt
);
5452 retval
|= register_edge_assert_for_1 (op0
, NE_EXPR
, e
, si
);
5453 retval
|= register_edge_assert_for_1 (op1
, NE_EXPR
, e
, si
);
5457 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
5458 statement of NAME we can assert both operands of the BIT_IOR_EXPR
5460 if (((comp_code
== EQ_EXPR
&& integer_zerop (val
))
5461 || (comp_code
== NE_EXPR
&& integer_onep (val
))))
5463 gimple def_stmt
= SSA_NAME_DEF_STMT (name
);
5465 /* For BIT_IOR_EXPR only if NAME == 0 both operands have
5466 necessarily zero value, or if type-precision is one. */
5467 if (is_gimple_assign (def_stmt
)
5468 && (gimple_assign_rhs_code (def_stmt
) == BIT_IOR_EXPR
5469 && (TYPE_PRECISION (TREE_TYPE (name
)) == 1
5470 || comp_code
== EQ_EXPR
)))
5472 tree op0
= gimple_assign_rhs1 (def_stmt
);
5473 tree op1
= gimple_assign_rhs2 (def_stmt
);
5474 retval
|= register_edge_assert_for_1 (op0
, EQ_EXPR
, e
, si
);
5475 retval
|= register_edge_assert_for_1 (op1
, EQ_EXPR
, e
, si
);
5483 /* Determine whether the outgoing edges of BB should receive an
5484 ASSERT_EXPR for each of the operands of BB's LAST statement.
5485 The last statement of BB must be a COND_EXPR.
5487 If any of the sub-graphs rooted at BB have an interesting use of
5488 the predicate operands, an assert location node is added to the
5489 list of assertions for the corresponding operands. */
5492 find_conditional_asserts (basic_block bb
, gimple last
)
5495 gimple_stmt_iterator bsi
;
5501 need_assert
= false;
5502 bsi
= gsi_for_stmt (last
);
5504 /* Look for uses of the operands in each of the sub-graphs
5505 rooted at BB. We need to check each of the outgoing edges
5506 separately, so that we know what kind of ASSERT_EXPR to
5508 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
5513 /* Register the necessary assertions for each operand in the
5514 conditional predicate. */
5515 FOR_EACH_SSA_TREE_OPERAND (op
, last
, iter
, SSA_OP_USE
)
5517 need_assert
|= register_edge_assert_for (op
, e
, bsi
,
5518 gimple_cond_code (last
),
5519 gimple_cond_lhs (last
),
5520 gimple_cond_rhs (last
));
5533 /* Compare two case labels sorting first by the destination bb index
5534 and then by the case value. */
5537 compare_case_labels (const void *p1
, const void *p2
)
5539 const struct case_info
*ci1
= (const struct case_info
*) p1
;
5540 const struct case_info
*ci2
= (const struct case_info
*) p2
;
5541 int idx1
= ci1
->bb
->index
;
5542 int idx2
= ci2
->bb
->index
;
5546 else if (idx1
== idx2
)
5548 /* Make sure the default label is first in a group. */
5549 if (!CASE_LOW (ci1
->expr
))
5551 else if (!CASE_LOW (ci2
->expr
))
5554 return tree_int_cst_compare (CASE_LOW (ci1
->expr
),
5555 CASE_LOW (ci2
->expr
));
5561 /* Determine whether the outgoing edges of BB should receive an
5562 ASSERT_EXPR for each of the operands of BB's LAST statement.
5563 The last statement of BB must be a SWITCH_EXPR.
5565 If any of the sub-graphs rooted at BB have an interesting use of
5566 the predicate operands, an assert location node is added to the
5567 list of assertions for the corresponding operands. */
5570 find_switch_asserts (basic_block bb
, gimple last
)
5573 gimple_stmt_iterator bsi
;
5576 struct case_info
*ci
;
5577 size_t n
= gimple_switch_num_labels (last
);
5578 #if GCC_VERSION >= 4000
5581 /* Work around GCC 3.4 bug (PR 37086). */
5582 volatile unsigned int idx
;
5585 need_assert
= false;
5586 bsi
= gsi_for_stmt (last
);
5587 op
= gimple_switch_index (last
);
5588 if (TREE_CODE (op
) != SSA_NAME
)
5591 /* Build a vector of case labels sorted by destination label. */
5592 ci
= XNEWVEC (struct case_info
, n
);
5593 for (idx
= 0; idx
< n
; ++idx
)
5595 ci
[idx
].expr
= gimple_switch_label (last
, idx
);
5596 ci
[idx
].bb
= label_to_block (CASE_LABEL (ci
[idx
].expr
));
5598 qsort (ci
, n
, sizeof (struct case_info
), compare_case_labels
);
5600 for (idx
= 0; idx
< n
; ++idx
)
5603 tree cl
= ci
[idx
].expr
;
5604 basic_block cbb
= ci
[idx
].bb
;
5606 min
= CASE_LOW (cl
);
5607 max
= CASE_HIGH (cl
);
5609 /* If there are multiple case labels with the same destination
5610 we need to combine them to a single value range for the edge. */
5611 if (idx
+ 1 < n
&& cbb
== ci
[idx
+ 1].bb
)
5613 /* Skip labels until the last of the group. */
5616 } while (idx
< n
&& cbb
== ci
[idx
].bb
);
5619 /* Pick up the maximum of the case label range. */
5620 if (CASE_HIGH (ci
[idx
].expr
))
5621 max
= CASE_HIGH (ci
[idx
].expr
);
5623 max
= CASE_LOW (ci
[idx
].expr
);
5626 /* Nothing to do if the range includes the default label until we
5627 can register anti-ranges. */
5628 if (min
== NULL_TREE
)
5631 /* Find the edge to register the assert expr on. */
5632 e
= find_edge (bb
, cbb
);
5634 /* Register the necessary assertions for the operand in the
5636 need_assert
|= register_edge_assert_for (op
, e
, bsi
,
5637 max
? GE_EXPR
: EQ_EXPR
,
5639 fold_convert (TREE_TYPE (op
),
5643 need_assert
|= register_edge_assert_for (op
, e
, bsi
, LE_EXPR
,
5645 fold_convert (TREE_TYPE (op
),
5655 /* Traverse all the statements in block BB looking for statements that
5656 may generate useful assertions for the SSA names in their operand.
5657 If a statement produces a useful assertion A for name N_i, then the
5658 list of assertions already generated for N_i is scanned to
5659 determine if A is actually needed.
5661 If N_i already had the assertion A at a location dominating the
5662 current location, then nothing needs to be done. Otherwise, the
5663 new location for A is recorded instead.
5665 1- For every statement S in BB, all the variables used by S are
5666 added to bitmap FOUND_IN_SUBGRAPH.
5668 2- If statement S uses an operand N in a way that exposes a known
5669 value range for N, then if N was not already generated by an
5670 ASSERT_EXPR, create a new assert location for N. For instance,
5671 if N is a pointer and the statement dereferences it, we can
5672 assume that N is not NULL.
5674 3- COND_EXPRs are a special case of #2. We can derive range
5675 information from the predicate but need to insert different
5676 ASSERT_EXPRs for each of the sub-graphs rooted at the
5677 conditional block. If the last statement of BB is a conditional
5678 expression of the form 'X op Y', then
5680 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
5682 b) If the conditional is the only entry point to the sub-graph
5683 corresponding to the THEN_CLAUSE, recurse into it. On
5684 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
5685 an ASSERT_EXPR is added for the corresponding variable.
5687 c) Repeat step (b) on the ELSE_CLAUSE.
5689 d) Mark X and Y in FOUND_IN_SUBGRAPH.
5698 In this case, an assertion on the THEN clause is useful to
5699 determine that 'a' is always 9 on that edge. However, an assertion
5700 on the ELSE clause would be unnecessary.
5702 4- If BB does not end in a conditional expression, then we recurse
5703 into BB's dominator children.
5705 At the end of the recursive traversal, every SSA name will have a
5706 list of locations where ASSERT_EXPRs should be added. When a new
5707 location for name N is found, it is registered by calling
5708 register_new_assert_for. That function keeps track of all the
5709 registered assertions to prevent adding unnecessary assertions.
5710 For instance, if a pointer P_4 is dereferenced more than once in a
5711 dominator tree, only the location dominating all the dereference of
5712 P_4 will receive an ASSERT_EXPR.
5714 If this function returns true, then it means that there are names
5715 for which we need to generate ASSERT_EXPRs. Those assertions are
5716 inserted by process_assert_insertions. */
5719 find_assert_locations_1 (basic_block bb
, sbitmap live
)
5721 gimple_stmt_iterator si
;
5725 need_assert
= false;
5726 last
= last_stmt (bb
);
5728 /* If BB's last statement is a conditional statement involving integer
5729 operands, determine if we need to add ASSERT_EXPRs. */
5731 && gimple_code (last
) == GIMPLE_COND
5732 && !fp_predicate (last
)
5733 && !ZERO_SSA_OPERANDS (last
, SSA_OP_USE
))
5734 need_assert
|= find_conditional_asserts (bb
, last
);
5736 /* If BB's last statement is a switch statement involving integer
5737 operands, determine if we need to add ASSERT_EXPRs. */
5739 && gimple_code (last
) == GIMPLE_SWITCH
5740 && !ZERO_SSA_OPERANDS (last
, SSA_OP_USE
))
5741 need_assert
|= find_switch_asserts (bb
, last
);
5743 /* Traverse all the statements in BB marking used names and looking
5744 for statements that may infer assertions for their used operands. */
5745 for (si
= gsi_last_bb (bb
); !gsi_end_p (si
); gsi_prev (&si
))
5751 stmt
= gsi_stmt (si
);
5753 if (is_gimple_debug (stmt
))
5756 /* See if we can derive an assertion for any of STMT's operands. */
5757 FOR_EACH_SSA_TREE_OPERAND (op
, stmt
, i
, SSA_OP_USE
)
5760 enum tree_code comp_code
;
5762 /* If op is not live beyond this stmt, do not bother to insert
5764 if (!bitmap_bit_p (live
, SSA_NAME_VERSION (op
)))
5767 /* If OP is used in such a way that we can infer a value
5768 range for it, and we don't find a previous assertion for
5769 it, create a new assertion location node for OP. */
5770 if (infer_value_range (stmt
, op
, &comp_code
, &value
))
5772 /* If we are able to infer a nonzero value range for OP,
5773 then walk backwards through the use-def chain to see if OP
5774 was set via a typecast.
5776 If so, then we can also infer a nonzero value range
5777 for the operand of the NOP_EXPR. */
5778 if (comp_code
== NE_EXPR
&& integer_zerop (value
))
5781 gimple def_stmt
= SSA_NAME_DEF_STMT (t
);
5783 while (is_gimple_assign (def_stmt
)
5784 && gimple_assign_rhs_code (def_stmt
) == NOP_EXPR
5786 (gimple_assign_rhs1 (def_stmt
)) == SSA_NAME
5788 (TREE_TYPE (gimple_assign_rhs1 (def_stmt
))))
5790 t
= gimple_assign_rhs1 (def_stmt
);
5791 def_stmt
= SSA_NAME_DEF_STMT (t
);
5793 /* Note we want to register the assert for the
5794 operand of the NOP_EXPR after SI, not after the
5796 if (! has_single_use (t
))
5798 register_new_assert_for (t
, t
, comp_code
, value
,
5805 register_new_assert_for (op
, op
, comp_code
, value
, bb
, NULL
, si
);
5811 FOR_EACH_SSA_TREE_OPERAND (op
, stmt
, i
, SSA_OP_USE
)
5812 bitmap_set_bit (live
, SSA_NAME_VERSION (op
));
5813 FOR_EACH_SSA_TREE_OPERAND (op
, stmt
, i
, SSA_OP_DEF
)
5814 bitmap_clear_bit (live
, SSA_NAME_VERSION (op
));
5817 /* Traverse all PHI nodes in BB, updating live. */
5818 for (si
= gsi_start_phis (bb
); !gsi_end_p(si
); gsi_next (&si
))
5820 use_operand_p arg_p
;
5822 gimple phi
= gsi_stmt (si
);
5823 tree res
= gimple_phi_result (phi
);
5825 if (virtual_operand_p (res
))
5828 FOR_EACH_PHI_ARG (arg_p
, phi
, i
, SSA_OP_USE
)
5830 tree arg
= USE_FROM_PTR (arg_p
);
5831 if (TREE_CODE (arg
) == SSA_NAME
)
5832 bitmap_set_bit (live
, SSA_NAME_VERSION (arg
));
5835 bitmap_clear_bit (live
, SSA_NAME_VERSION (res
));
5841 /* Do an RPO walk over the function computing SSA name liveness
5842 on-the-fly and deciding on assert expressions to insert.
5843 Returns true if there are assert expressions to be inserted. */
5846 find_assert_locations (void)
5848 int *rpo
= XNEWVEC (int, last_basic_block
);
5849 int *bb_rpo
= XNEWVEC (int, last_basic_block
);
5850 int *last_rpo
= XCNEWVEC (int, last_basic_block
);
5854 live
= XCNEWVEC (sbitmap
, last_basic_block
);
5855 rpo_cnt
= pre_and_rev_post_order_compute (NULL
, rpo
, false);
5856 for (i
= 0; i
< rpo_cnt
; ++i
)
5859 need_asserts
= false;
5860 for (i
= rpo_cnt
- 1; i
>= 0; --i
)
5862 basic_block bb
= BASIC_BLOCK (rpo
[i
]);
5868 live
[rpo
[i
]] = sbitmap_alloc (num_ssa_names
);
5869 bitmap_clear (live
[rpo
[i
]]);
5872 /* Process BB and update the live information with uses in
5874 need_asserts
|= find_assert_locations_1 (bb
, live
[rpo
[i
]]);
5876 /* Merge liveness into the predecessor blocks and free it. */
5877 if (!bitmap_empty_p (live
[rpo
[i
]]))
5880 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
5882 int pred
= e
->src
->index
;
5883 if ((e
->flags
& EDGE_DFS_BACK
) || pred
== ENTRY_BLOCK
)
5888 live
[pred
] = sbitmap_alloc (num_ssa_names
);
5889 bitmap_clear (live
[pred
]);
5891 bitmap_ior (live
[pred
], live
[pred
], live
[rpo
[i
]]);
5893 if (bb_rpo
[pred
] < pred_rpo
)
5894 pred_rpo
= bb_rpo
[pred
];
5897 /* Record the RPO number of the last visited block that needs
5898 live information from this block. */
5899 last_rpo
[rpo
[i
]] = pred_rpo
;
5903 sbitmap_free (live
[rpo
[i
]]);
5904 live
[rpo
[i
]] = NULL
;
5907 /* We can free all successors live bitmaps if all their
5908 predecessors have been visited already. */
5909 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
5910 if (last_rpo
[e
->dest
->index
] == i
5911 && live
[e
->dest
->index
])
5913 sbitmap_free (live
[e
->dest
->index
]);
5914 live
[e
->dest
->index
] = NULL
;
5919 XDELETEVEC (bb_rpo
);
5920 XDELETEVEC (last_rpo
);
5921 for (i
= 0; i
< last_basic_block
; ++i
)
5923 sbitmap_free (live
[i
]);
5926 return need_asserts
;
/* NOTE(review): extraction artifact — statements are split across physical
   lines and several original source lines (return type, declarations of
   cond/assert_stmt/stmt/e/ei, braces, returns) are missing.  Code text is
   kept verbatim; only comments were added.  */
5929 /* Create an ASSERT_EXPR for NAME and insert it in the location
5930 indicated by LOC. Return true if we made any edge insertions. */
5933 process_assert_insertions_for (tree name
, assert_locus_t loc
)
5935 /* Build the comparison expression NAME_i COMP_CODE VAL. */
5942 /* If we have X <=> X do not insert an assert expr for that. */
5943 if (loc
->expr
== loc
->val
)
/* Build COND = (EXPR COMP_CODE VAL) and wrap it in an ASSERT_EXPR for NAME.  */
5946 cond
= build2 (loc
->comp_code
, boolean_type_node
, loc
->expr
, loc
->val
);
5947 assert_stmt
= build_assert_expr_for (cond
, name
);
5950 /* We have been asked to insert the assertion on an edge. This
5951 is used only by COND_EXPR and SWITCH_EXPR assertions. */
5952 gcc_checking_assert (gimple_code (gsi_stmt (loc
->si
)) == GIMPLE_COND
5953 || (gimple_code (gsi_stmt (loc
->si
))
5956 gsi_insert_on_edge (loc
->e
, assert_stmt
);
5960 /* Otherwise, we can insert right after LOC->SI iff the
5961 statement must not be the last statement in the block. */
5962 stmt
= gsi_stmt (loc
->si
);
5963 if (!stmt_ends_bb_p (stmt
))
5965 gsi_insert_after (&loc
->si
, assert_stmt
, GSI_SAME_STMT
);
5969 /* If STMT must be the last statement in BB, we can only insert new
5970 assertions on the non-abnormal edge out of BB. Note that since
5971 STMT is not control flow, there may only be one non-abnormal edge
5973 FOR_EACH_EDGE (e
, ei
, loc
->bb
->succs
)
5974 if (!(e
->flags
& EDGE_ABNORMAL
))
5976 gsi_insert_on_edge (e
, assert_stmt
);
/* NOTE(review): extraction artifact — statements split across lines; braces,
   loop body lines and the trailing "num_asserts);" argument are missing from
   this view.  Code text kept verbatim; comments only added.  */
5984 /* Process all the insertions registered for every name N_i registered
5985 in NEED_ASSERT_FOR. The list of assertions to be inserted are
5986 found in ASSERTS_FOR[i]. */
5989 process_assert_insertions (void)
5993 bool update_edges_p
= false;
5994 int num_asserts
= 0;
5996 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5997 dump_all_asserts (dump_file
);
/* Walk every SSA version flagged in NEED_ASSERT_FOR and insert each of its
   pending assertion locations.  */
5999 EXECUTE_IF_SET_IN_BITMAP (need_assert_for
, 0, i
, bi
)
6001 assert_locus_t loc
= asserts_for
[i
];
6006 assert_locus_t next
= loc
->next
;
6007 update_edges_p
|= process_assert_insertions_for (ssa_name (i
), loc
);
/* Commit any assertions that were queued on edges above.  */
6015 gsi_commit_edge_inserts ();
6017 statistics_counter_event (cfun
, "Number of ASSERT_EXPR expressions inserted",
/* NOTE(review): extraction artifact — the example code in the header comment
   and several body lines (braces, TODO_update_ssa context) are missing; code
   text kept verbatim, comments only added.  */
6022 /* Traverse the flowgraph looking for conditional jumps to insert range
6023 expressions. These range expressions are meant to provide information
6024 to optimizations that need to reason in terms of value ranges. They
6025 will not be expanded into RTL. For instance, given:
6034 this pass will transform the code into:
6040 x = ASSERT_EXPR <x, x < y>
6045 y = ASSERT_EXPR <y, x <= y>
6049 The idea is that once copy and constant propagation have run, other
6050 optimizations will be able to determine what ranges of values can 'x'
6051 take in different paths of the code, simply by checking the reaching
6052 definition of 'x'. */
6055 insert_range_assertions (void)
/* Allocate the per-SSA-name assertion bookkeeping used by
   find_assert_locations / process_assert_insertions.  */
6057 need_assert_for
= BITMAP_ALLOC (NULL
);
6058 asserts_for
= XCNEWVEC (assert_locus_t
, num_ssa_names
);
6060 calculate_dominance_info (CDI_DOMINATORS
);
6062 if (find_assert_locations ())
6064 process_assert_insertions ();
6065 update_ssa (TODO_update_ssa_no_phi
);
6068 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6070 fprintf (dump_file
, "\nSSA form after inserting ASSERT_EXPRs\n");
6071 dump_function_to_file (current_function_decl
, dump_file
, dump_flags
);
6075 BITMAP_FREE (need_assert_for
);
/* NOTE(review): extraction artifact — statements split across lines; braces,
   early "return" statements and some condition lines (e.g. the first operand
   of the flexible-array check at 6099) are missing.  Code text kept verbatim;
   comments only added.  */
6078 /* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
6079 and "struct" hacks. If VRP can determine that the
6080 array subscript is a constant, check if it is outside valid
6081 range. If the array subscript is a RANGE, warn if it is
6082 non-overlapping with valid range.
6083 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */
6086 check_array_ref (location_t location
, tree ref
, bool ignore_off_by_one
)
6088 value_range_t
* vr
= NULL
;
6089 tree low_sub
, up_sub
;
6090 tree low_bound
, up_bound
, up_bound_p1
;
/* Bail if a warning for this ref was already emitted.  */
6093 if (TREE_NO_WARNING (ref
))
6096 low_sub
= up_sub
= TREE_OPERAND (ref
, 1);
6097 up_bound
= array_ref_up_bound (ref
);
6099 /* Can not check flexible arrays. */
6101 || TREE_CODE (up_bound
) != INTEGER_CST
)
6104 /* Accesses to trailing arrays via pointers may access storage
6105 beyond the types array bounds. */
6106 base
= get_base_address (ref
);
6107 if (base
&& TREE_CODE (base
) == MEM_REF
)
6109 tree cref
, next
= NULL_TREE
;
6111 if (TREE_CODE (TREE_OPERAND (ref
, 0)) != COMPONENT_REF
)
6114 cref
= TREE_OPERAND (ref
, 0);
/* Scan forward from the accessed field to see whether another FIELD_DECL
   follows it; if none does, the array is trailing and is skipped.  */
6115 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref
, 0))) == RECORD_TYPE
)
6116 for (next
= DECL_CHAIN (TREE_OPERAND (cref
, 1));
6117 next
&& TREE_CODE (next
) != FIELD_DECL
;
6118 next
= DECL_CHAIN (next
))
6121 /* If this is the last field in a struct type or a field in a
6122 union type do not warn. */
6127 low_bound
= array_ref_low_bound (ref
);
/* UP_BOUND_P1 = UP_BOUND + 1, used for the off-by-one tolerant check.  */
6128 up_bound_p1
= int_const_binop (PLUS_EXPR
, up_bound
,
6129 build_int_cst (TREE_TYPE (up_bound
), 1));
6131 if (TREE_CODE (low_sub
) == SSA_NAME
)
6133 vr
= get_value_range (low_sub
);
6134 if (vr
->type
== VR_RANGE
|| vr
->type
== VR_ANTI_RANGE
)
/* For an anti-range the interesting endpoints are swapped: the excluded
   interval is [min, max], so compare its extremes against the bounds.  */
6136 low_sub
= vr
->type
== VR_RANGE
? vr
->max
: vr
->min
;
6137 up_sub
= vr
->type
== VR_RANGE
? vr
->min
: vr
->max
;
6141 if (vr
&& vr
->type
== VR_ANTI_RANGE
)
6143 if (TREE_CODE (up_sub
) == INTEGER_CST
6144 && tree_int_cst_lt (up_bound
, up_sub
)
6145 && TREE_CODE (low_sub
) == INTEGER_CST
6146 && tree_int_cst_lt (low_sub
, low_bound
))
6148 warning_at (location
, OPT_Warray_bounds
,
6149 "array subscript is outside array bounds");
6150 TREE_NO_WARNING (ref
) = 1;
/* Above-bounds check; when IGNORE_OFF_BY_ONE, an index exactly one past the
   end (== UP_BOUND_P1) is tolerated, e.g. &a[n] address-taking.  */
6153 else if (TREE_CODE (up_sub
) == INTEGER_CST
6154 && (ignore_off_by_one
6155 ? (tree_int_cst_lt (up_bound
, up_sub
)
6156 && !tree_int_cst_equal (up_bound_p1
, up_sub
))
6157 : (tree_int_cst_lt (up_bound
, up_sub
)
6158 || tree_int_cst_equal (up_bound_p1
, up_sub
))))
6160 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6162 fprintf (dump_file
, "Array bound warning for ");
6163 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, ref
);
6164 fprintf (dump_file
, "\n");
6166 warning_at (location
, OPT_Warray_bounds
,
6167 "array subscript is above array bounds");
6168 TREE_NO_WARNING (ref
) = 1;
/* Below-bounds check.  */
6170 else if (TREE_CODE (low_sub
) == INTEGER_CST
6171 && tree_int_cst_lt (low_sub
, low_bound
))
6173 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6175 fprintf (dump_file
, "Array bound warning for ");
6176 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, ref
);
6177 fprintf (dump_file
, "\n");
6179 warning_at (location
, OPT_Warray_bounds
,
6180 "array subscript is below array bounds");
6181 TREE_NO_WARNING (ref
) = 1;
/* NOTE(review): extraction artifact — statements split across lines; braces,
   "return"/"break" lines and parts of the combined condition at 6236-6240
   (presumably the NULL checks on the bounds) are missing.  Code text kept
   verbatim; comments only added.  */
6185 /* Searches if the expr T, located at LOCATION computes
6186 address of an ARRAY_REF, and call check_array_ref on it. */
6189 search_for_addr_array (tree t
, location_t location
)
/* Follow single-rhs SSA copies back to the defining expression.  */
6191 while (TREE_CODE (t
) == SSA_NAME
)
6193 gimple g
= SSA_NAME_DEF_STMT (t
);
6195 if (gimple_code (g
) != GIMPLE_ASSIGN
)
6198 if (get_gimple_rhs_class (gimple_assign_rhs_code (g
))
6199 != GIMPLE_SINGLE_RHS
)
6202 t
= gimple_assign_rhs1 (g
);
6206 /* We are only interested in addresses of ARRAY_REF's. */
6207 if (TREE_CODE (t
) != ADDR_EXPR
)
6210 /* Check each ARRAY_REFs in the reference chain. */
6213 if (TREE_CODE (t
) == ARRAY_REF
)
6214 check_array_ref (location
, t
, true /*ignore_off_by_one*/);
6216 t
= TREE_OPERAND (t
, 0);
6218 while (handled_component_p (t
));
/* Also diagnose MEM_REF[&array + offset] accesses with constant offsets.  */
6220 if (TREE_CODE (t
) == MEM_REF
6221 && TREE_CODE (TREE_OPERAND (t
, 0)) == ADDR_EXPR
6222 && !TREE_NO_WARNING (t
))
6224 tree tem
= TREE_OPERAND (TREE_OPERAND (t
, 0), 0);
6225 tree low_bound
, up_bound
, el_sz
;
6227 if (TREE_CODE (TREE_TYPE (tem
)) != ARRAY_TYPE
6228 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem
))) == ARRAY_TYPE
6229 || !TYPE_DOMAIN (TREE_TYPE (tem
)))
6232 low_bound
= TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem
)));
6233 up_bound
= TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem
)));
6234 el_sz
= TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem
)));
6236 || TREE_CODE (low_bound
) != INTEGER_CST
6238 || TREE_CODE (up_bound
) != INTEGER_CST
6240 || TREE_CODE (el_sz
) != INTEGER_CST
)
/* Convert the byte offset into an element index.  */
6243 idx
= mem_ref_offset (t
);
6244 idx
= wi::sdiv_trunc (idx
, el_sz
);
6245 if (wi::lts_p (idx
, 0))
6247 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6249 fprintf (dump_file
, "Array bound warning for ");
6250 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, t
);
6251 fprintf (dump_file
, "\n");
6253 warning_at (location
, OPT_Warray_bounds
,
6254 "array subscript is below array bounds");
6255 TREE_NO_WARNING (t
) = 1;
6257 else if (wi::gts_p (idx
, addr_wide_int (up_bound
) - low_bound
+ 1))
6259 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6261 fprintf (dump_file
, "Array bound warning for ");
6262 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, t
);
6263 fprintf (dump_file
, "\n");
6265 warning_at (location
, OPT_Warray_bounds
,
6266 "array subscript is above array bounds");
6267 TREE_NO_WARNING (t
) = 1;
/* NOTE(review): extraction artifact — the declaration of T (presumably
   "tree t = *tp;"), braces, the else-branch taking the location from
   WI->INFO, and the trailing "return NULL_TREE;" are missing.  Code text
   kept verbatim; comments only added.  */
6272 /* walk_tree() callback that checks if *TP is
6273 an ARRAY_REF inside an ADDR_EXPR (in which an array
6274 subscript one outside the valid range is allowed). Call
6275 check_array_ref for each ARRAY_REF found. The location is
6279 check_array_bounds (tree
*tp
, int *walk_subtree
, void *data
)
6282 struct walk_stmt_info
*wi
= (struct walk_stmt_info
*) data
;
6283 location_t location
;
/* Prefer the expression's own location when it has one.  */
6285 if (EXPR_HAS_LOCATION (t
))
6286 location
= EXPR_LOCATION (t
);
6289 location_t
*locp
= (location_t
*) wi
->info
;
6293 *walk_subtree
= TRUE
;
6295 if (TREE_CODE (t
) == ARRAY_REF
)
6296 check_array_ref (location
, t
, false /*ignore_off_by_one*/);
6298 if (TREE_CODE (t
) == MEM_REF
6299 || (TREE_CODE (t
) == RETURN_EXPR
&& TREE_OPERAND (t
, 0)))
6300 search_for_addr_array (TREE_OPERAND (t
, 0), location
);
/* Do not descend into ADDR_EXPR operands; those are handled (with the
   off-by-one allowance) by search_for_addr_array above.  */
6302 if (TREE_CODE (t
) == ADDR_EXPR
)
6303 *walk_subtree
= FALSE
;
/* NOTE(review): extraction artifact — FOR_EACH_BB loop header, braces,
   "continue" lines, the check on EXECUTABLE, and the final arguments of
   walk_gimple_op are missing.  Code text kept verbatim; comments only
   added.  */
6308 /* Walk over all statements of all reachable BBs and call check_array_bounds
6312 check_all_array_refs (void)
6315 gimple_stmt_iterator si
;
6321 bool executable
= false;
6323 /* Skip blocks that were found to be unreachable. */
6324 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
6325 executable
|= !!(e
->flags
& EDGE_EXECUTABLE
);
6329 for (si
= gsi_start_bb (bb
); !gsi_end_p (si
); gsi_next (&si
))
6331 gimple stmt
= gsi_stmt (si
);
6332 struct walk_stmt_info wi
;
6333 if (!gimple_has_location (stmt
))
/* For calls, check each argument for array-address computations.  */
6336 if (is_gimple_call (stmt
))
6339 size_t n
= gimple_call_num_args (stmt
);
6340 for (i
= 0; i
< n
; i
++)
6342 tree arg
= gimple_call_arg (stmt
, i
);
6343 search_for_addr_array (arg
, gimple_location (stmt
));
/* For other statements, walk all operands with check_array_bounds,
   passing the statement's location pointer through WI.INFO.  */
6348 memset (&wi
, 0, sizeof (wi
));
6349 wi
.info
= CONST_CAST (void *, (const void *)
6350 gimple_location_ptr (stmt
));
6352 walk_gimple_op (gsi_stmt (si
),
/* NOTE(review): extraction artifact — FOR_EACH_BB loop, braces, the
   declarations of var/use_stmt, the else arm bumping the iterator, and the
   is_unused_var handling are missing.  Code text kept verbatim; comments
   only added.  */
6360 /* Convert range assertion expressions into the implied copies and
6361 copy propagate away the copies. Doing the trivial copy propagation
6362 here avoids the need to run the full copy propagation pass after
6365 FIXME, this will eventually lead to copy propagation removing the
6366 names that had useful range information attached to them. For
6367 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
6368 then N_i will have the range [3, +INF].
6370 However, by converting the assertion into the implied copy
6371 operation N_i = N_j, we will then copy-propagate N_j into the uses
6372 of N_i and lose the range information. We may want to hold on to
6373 ASSERT_EXPRs a little while longer as the ranges could be used in
6374 things like jump threading.
6376 The problem with keeping ASSERT_EXPRs around is that passes after
6377 VRP need to handle them appropriately.
6379 Another approach would be to make the range information a first
6380 class property of the SSA_NAME so that it can be queried from
6381 any pass. This is made somewhat more complex by the need for
6382 multiple ranges to be associated with one SSA_NAME. */
6385 remove_range_assertions (void)
6388 gimple_stmt_iterator si
;
6390 /* Note that the BSI iterator bump happens at the bottom of the
6391 loop and no bump is necessary if we're removing the statement
6392 referenced by the current BSI. */
6394 for (si
= gsi_start_bb (bb
); !gsi_end_p (si
);)
6396 gimple stmt
= gsi_stmt (si
);
6399 if (is_gimple_assign (stmt
)
6400 && gimple_assign_rhs_code (stmt
) == ASSERT_EXPR
)
6402 tree rhs
= gimple_assign_rhs1 (stmt
);
6404 tree cond
= fold (ASSERT_EXPR_COND (rhs
));
6405 use_operand_p use_p
;
6406 imm_use_iterator iter
;
/* An ASSERT_EXPR whose condition folds to false would mean provably
   dead code; that must not happen here.  */
6408 gcc_assert (cond
!= boolean_false_node
);
6410 /* Propagate the RHS into every use of the LHS. */
6411 var
= ASSERT_EXPR_VAR (rhs
);
6412 FOR_EACH_IMM_USE_STMT (use_stmt
, iter
,
6413 gimple_assign_lhs (stmt
))
6414 FOR_EACH_IMM_USE_ON_STMT (use_p
, iter
)
6416 SET_USE (use_p
, var
);
6417 gcc_assert (TREE_CODE (var
) == SSA_NAME
);
6420 /* And finally, remove the copy, it is not needed. */
6421 gsi_remove (&si
, true);
6422 release_defs (stmt
);
/* NOTE(review): extraction artifact — return type, braces, "return true;" /
   "return false;" lines are missing.  Code text kept verbatim; comments only
   added.  */
6430 /* Return true if STMT is interesting for VRP. */
6433 stmt_interesting_for_vrp (gimple stmt
)
/* PHIs are interesting when their result is a real (non-virtual) SSA name
   of integral or pointer type.  */
6435 if (gimple_code (stmt
) == GIMPLE_PHI
)
6437 tree res
= gimple_phi_result (stmt
);
6438 return (!virtual_operand_p (res
)
6439 && (INTEGRAL_TYPE_P (TREE_TYPE (res
))
6440 || POINTER_TYPE_P (TREE_TYPE (res
))));
6442 else if (is_gimple_assign (stmt
) || is_gimple_call (stmt
))
6444 tree lhs
= gimple_get_lhs (stmt
);
6446 /* In general, assignments with virtual operands are not useful
6447 for deriving ranges, with the obvious exception of calls to
6448 builtin functions. */
6449 if (lhs
&& TREE_CODE (lhs
) == SSA_NAME
6450 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs
))
6451 || POINTER_TYPE_P (TREE_TYPE (lhs
)))
6452 && ((is_gimple_call (stmt
)
6453 && gimple_call_fndecl (stmt
) != NULL_TREE
6454 && DECL_BUILT_IN (gimple_call_fndecl (stmt
)))
6455 || !gimple_vuse (stmt
)))
/* Conditionals and switches are always interesting: VRP derives edge
   assertions from them.  */
6458 else if (gimple_code (stmt
) == GIMPLE_COND
6459 || gimple_code (stmt
) == GIMPLE_SWITCH
)
/* NOTE(review): extraction artifact — FOR_EACH_BB loop header, braces,
   "else" lines and "continue"s are missing.  Code text kept verbatim;
   comments only added.  */
6466 /* Initialize local data structures for VRP. */
6469 vrp_initialize (void)
/* Allocate the lattice: one value_range_t pointer per SSA name plus the
   per-name PHI edge counters.  */
6473 values_propagated
= false;
6474 num_vr_values
= num_ssa_names
;
6475 vr_value
= XCNEWVEC (value_range_t
*, num_vr_values
);
6476 vr_phi_edge_counts
= XCNEWVEC (int, num_ssa_names
);
6480 gimple_stmt_iterator si
;
/* Mark uninteresting PHIs as VARYING so the propagator skips them.  */
6482 for (si
= gsi_start_phis (bb
); !gsi_end_p (si
); gsi_next (&si
))
6484 gimple phi
= gsi_stmt (si
);
6485 if (!stmt_interesting_for_vrp (phi
))
6487 tree lhs
= PHI_RESULT (phi
);
6488 set_value_range_to_varying (get_value_range (lhs
));
6489 prop_set_simulate_again (phi
, false);
6492 prop_set_simulate_again (phi
, true);
6495 for (si
= gsi_start_bb (bb
); !gsi_end_p (si
); gsi_next (&si
))
6497 gimple stmt
= gsi_stmt (si
);
6499 /* If the statement is a control insn, then we do not
6500 want to avoid simulating the statement once. Failure
6501 to do so means that those edges will never get added. */
6502 if (stmt_ends_bb_p (stmt
))
6503 prop_set_simulate_again (stmt
, true);
6504 else if (!stmt_interesting_for_vrp (stmt
))
/* Uninteresting statements: force every definition to VARYING and never
   revisit them.  */
6508 FOR_EACH_SSA_TREE_OPERAND (def
, stmt
, i
, SSA_OP_DEF
)
6509 set_value_range_to_varying (get_value_range (def
));
6510 prop_set_simulate_again (stmt
, false);
6513 prop_set_simulate_again (stmt
, true);
/* NOTE(review): extraction artifact — return type, braces and the two
   "return" lines (presumably returning vr->min and NAME) are missing.
   Code text kept verbatim; comments only added.  */
6518 /* Return the singleton value-range for NAME or NAME. */
6521 vrp_valueize (tree name
)
6523 if (TREE_CODE (name
) == SSA_NAME
)
6525 value_range_t
*vr
= get_value_range (name
);
/* A VR_RANGE with min == max is a singleton constant; pointer equality is
   checked first as a fast path before the structural comparison.  */
6526 if (vr
->type
== VR_RANGE
6527 && (vr
->min
== vr
->max
6528 || operand_equal_p (vr
->min
, vr
->max
, 0)))
/* NOTE(review): extraction artifact — braces, the declarations of lhs/def/
   iter, the "*output_p = lhs;" assignment and some else lines are missing.
   Code text kept verbatim; comments only added.  */
6534 /* Visit assignment STMT. If it produces an interesting range, record
6535 the SSA name in *OUTPUT_P. */
6537 static enum ssa_prop_result
6538 vrp_visit_assignment_or_call (gimple stmt
, tree
*output_p
)
6542 enum gimple_code code
= gimple_code (stmt
);
6543 lhs
= gimple_get_lhs (stmt
);
6545 /* We only keep track of ranges in integral and pointer types. */
6546 if (TREE_CODE (lhs
) == SSA_NAME
6547 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs
))
6548 /* It is valid to have NULL MIN/MAX values on a type. See
6549 build_range_type. */
6550 && TYPE_MIN_VALUE (TREE_TYPE (lhs
))
6551 && TYPE_MAX_VALUE (TREE_TYPE (lhs
)))
6552 || POINTER_TYPE_P (TREE_TYPE (lhs
))))
6554 value_range_t new_vr
= VR_INITIALIZER
;
6556 /* Try folding the statement to a constant first. */
6557 tree tem
= gimple_fold_stmt_to_constant (stmt
, vrp_valueize
);
6558 if (tem
&& !is_overflow_infinity (tem
))
6559 set_value_range (&new_vr
, VR_RANGE
, tem
, tem
, NULL
);
6560 /* Then dispatch to value-range extracting functions. */
6561 else if (code
== GIMPLE_CALL
)
6562 extract_range_basic (&new_vr
, stmt
);
6564 extract_range_from_assignment (&new_vr
, stmt
);
/* update_value_range returns true when the lattice value changed; only
   then is the definition worth re-simulating.  */
6566 if (update_value_range (lhs
, &new_vr
))
6570 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6572 fprintf (dump_file
, "Found new range for ");
6573 print_generic_expr (dump_file
, lhs
, 0);
6574 fprintf (dump_file
, ": ");
6575 dump_value_range (dump_file
, &new_vr
);
6576 fprintf (dump_file
, "\n\n");
6579 if (new_vr
.type
== VR_VARYING
)
6580 return SSA_PROP_VARYING
;
6582 return SSA_PROP_INTERESTING
;
6585 return SSA_PROP_NOT_INTERESTING
;
6588 /* Every other statement produces no useful ranges. */
6589 FOR_EACH_SSA_TREE_OPERAND (def
, stmt
, iter
, SSA_OP_DEF
)
6590 set_value_range_to_varying (get_value_range (def
));
6592 return SSA_PROP_VARYING
;
/* NOTE(review): extraction artifact — braces and the trailing "return vr;"
   are missing.  Code text kept verbatim; comments only added.  */
6595 /* Helper that gets the value range of the SSA_NAME with version I
6596 or a symbolic range containing the SSA_NAME only if the value range
6597 is varying or undefined. */
6599 static inline value_range_t
6600 get_vr_for_comparison (int i
)
/* Copy by value: callers mutate the returned range freely.  */
6602 value_range_t vr
= *get_value_range (ssa_name (i
));
6604 /* If name N_i does not have a valid range, use N_i as its own
6605 range. This allows us to compare against names that may
6606 have N_i in their ranges. */
6607 if (vr
.type
== VR_VARYING
|| vr
.type
== VR_UNDEFINED
)
6610 vr
.min
= ssa_name (i
);
6611 vr
.max
= ssa_name (i
);
/* NOTE(review): extraction artifact — return type, braces, declarations of
   retval/t/sop/i/bi, empty-set early return and the final "return retval;"
   are missing.  Code text kept verbatim; comments only added.  */
6617 /* Compare all the value ranges for names equivalent to VAR with VAL
6618 using comparison code COMP. Return the same value returned by
6619 compare_range_with_value, including the setting of
6620 *STRICT_OVERFLOW_P. */
6623 compare_name_with_value (enum tree_code comp
, tree var
, tree val
,
6624 bool *strict_overflow_p
)
6630 int used_strict_overflow
;
6632 value_range_t equiv_vr
;
6634 /* Get the set of equivalences for VAR. */
6635 e
= get_value_range (var
)->equiv
;
6637 /* Start at -1. Set it to 0 if we do a comparison without relying
6638 on overflow, or 1 if all comparisons rely on overflow. */
6639 used_strict_overflow
= -1;
6641 /* Compare vars' value range with val. */
6642 equiv_vr
= get_vr_for_comparison (SSA_NAME_VERSION (var
));
6644 retval
= compare_range_with_value (comp
, &equiv_vr
, val
, &sop
);
6646 used_strict_overflow
= sop
? 1 : 0;
6648 /* If the equiv set is empty we have done all work we need to do. */
6652 && used_strict_overflow
> 0)
6653 *strict_overflow_p
= true;
/* Otherwise fold in the verdict of every name in the equivalence set.  */
6657 EXECUTE_IF_SET_IN_BITMAP (e
, 0, i
, bi
)
6659 equiv_vr
= get_vr_for_comparison (i
);
6661 t
= compare_range_with_value (comp
, &equiv_vr
, val
, &sop
);
6664 /* If we get different answers from different members
6665 of the equivalence set this check must be in a dead
6666 code region. Folding it to a trap representation
6667 would be correct here. For now just return don't-know. */
6677 used_strict_overflow
= 0;
6678 else if (used_strict_overflow
< 0)
6679 used_strict_overflow
= 1;
6684 && used_strict_overflow
> 0)
6685 *strict_overflow_p
= true;
/* NOTE(review): extraction artifact — return type, braces, the lines
   assigning the fake bitmaps to e1/e2, the boolean_true_node result arm,
   several loop-control lines and the final return are missing.  Code text
   kept verbatim; comments only added.  */
6691 /* Given a comparison code COMP and names N1 and N2, compare all the
6692 ranges equivalent to N1 against all the ranges equivalent to N2
6693 to determine the value of N1 COMP N2. Return the same value
6694 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
6695 whether we relied on an overflow infinity in the comparison. */
6699 compare_names (enum tree_code comp
, tree n1
, tree n2
,
6700 bool *strict_overflow_p
)
6704 bitmap_iterator bi1
, bi2
;
6706 int used_strict_overflow
;
/* The fallback bitmaps are function-static and lazily allocated so they
   persist across calls.  */
6707 static bitmap_obstack
*s_obstack
= NULL
;
6708 static bitmap s_e1
= NULL
, s_e2
= NULL
;
6710 /* Compare the ranges of every name equivalent to N1 against the
6711 ranges of every name equivalent to N2. */
6712 e1
= get_value_range (n1
)->equiv
;
6713 e2
= get_value_range (n2
)->equiv
;
6715 /* Use the fake bitmaps if e1 or e2 are not available. */
6716 if (s_obstack
== NULL
)
6718 s_obstack
= XNEW (bitmap_obstack
);
6719 bitmap_obstack_initialize (s_obstack
);
6720 s_e1
= BITMAP_ALLOC (s_obstack
);
6721 s_e2
= BITMAP_ALLOC (s_obstack
);
6728 /* Add N1 and N2 to their own set of equivalences to avoid
6729 duplicating the body of the loop just to check N1 and N2
6731 bitmap_set_bit (e1
, SSA_NAME_VERSION (n1
));
6732 bitmap_set_bit (e2
, SSA_NAME_VERSION (n2
));
6734 /* If the equivalence sets have a common intersection, then the two
6735 names can be compared without checking their ranges. */
6736 if (bitmap_intersect_p (e1
, e2
))
/* Undo the temporary additions before returning.  */
6738 bitmap_clear_bit (e1
, SSA_NAME_VERSION (n1
));
6739 bitmap_clear_bit (e2
, SSA_NAME_VERSION (n2
));
/* Names are equivalent, so reflexive comparisons (==, >=, <=) hold.  */
6741 return (comp
== EQ_EXPR
|| comp
== GE_EXPR
|| comp
== LE_EXPR
)
6743 : boolean_false_node
;
6746 /* Start at -1. Set it to 0 if we do a comparison without relying
6747 on overflow, or 1 if all comparisons rely on overflow. */
6748 used_strict_overflow
= -1;
6750 /* Otherwise, compare all the equivalent ranges. First, add N1 and
6751 N2 to their own set of equivalences to avoid duplicating the body
6752 of the loop just to check N1 and N2 ranges. */
6753 EXECUTE_IF_SET_IN_BITMAP (e1
, 0, i1
, bi1
)
6755 value_range_t vr1
= get_vr_for_comparison (i1
);
6757 t
= retval
= NULL_TREE
;
6758 EXECUTE_IF_SET_IN_BITMAP (e2
, 0, i2
, bi2
)
6762 value_range_t vr2
= get_vr_for_comparison (i2
);
6764 t
= compare_ranges (comp
, &vr1
, &vr2
, &sop
);
6767 /* If we get different answers from different members
6768 of the equivalence set this check must be in a dead
6769 code region. Folding it to a trap representation
6770 would be correct here. For now just return don't-know. */
6774 bitmap_clear_bit (e1
, SSA_NAME_VERSION (n1
));
6775 bitmap_clear_bit (e2
, SSA_NAME_VERSION (n2
));
6781 used_strict_overflow
= 0;
6782 else if (used_strict_overflow
< 0)
6783 used_strict_overflow
= 1;
/* A consistent non-NULL answer was found for this VR1 — clean up and
   report whether any comparison relied on overflow being undefined.  */
6789 bitmap_clear_bit (e1
, SSA_NAME_VERSION (n1
));
6790 bitmap_clear_bit (e2
, SSA_NAME_VERSION (n2
));
6791 if (used_strict_overflow
> 0)
6792 *strict_overflow_p
= true;
6797 /* None of the equivalent ranges are useful in computing this
6799 bitmap_clear_bit (e1
, SSA_NAME_VERSION (n1
));
6800 bitmap_clear_bit (e2
, SSA_NAME_VERSION (n2
));
/* NOTE(review): extraction artifact — return type, parameters op0/op1,
   braces, the "if (vr0 && vr1)" guard before 6817 and the trailing
   "return NULL_TREE;" are missing.  Code text kept verbatim; comments
   only added.  */
6804 /* Helper function for vrp_evaluate_conditional_warnv. */
6807 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code
,
6809 bool * strict_overflow_p
)
6811 value_range_t
*vr0
, *vr1
;
/* Fetch a range for each operand that is an SSA name; non-SSA operands
   get NULL and are treated as plain values below.  */
6813 vr0
= (TREE_CODE (op0
) == SSA_NAME
) ? get_value_range (op0
) : NULL
;
6814 vr1
= (TREE_CODE (op1
) == SSA_NAME
) ? get_value_range (op1
) : NULL
;
6817 return compare_ranges (code
, vr0
, vr1
, strict_overflow_p
);
6818 else if (vr0
&& vr1
== NULL
)
6819 return compare_range_with_value (code
, vr0
, op1
, strict_overflow_p
);
6820 else if (vr0
== NULL
&& vr1
)
/* Only OP1 has a range: swap the comparison so the ranged operand comes
   first.  */
6821 return (compare_range_with_value
6822 (swap_tree_comparison (code
), vr1
, op0
, strict_overflow_p
));
/* NOTE(review): extraction artifact — return type, braces, the declaration
   of ret, the "return NULL_TREE;" for unsupported types, the use_equiv_p
   guard before 6845 and the final argument of the tail call are missing.
   Code text kept verbatim; comments only added.  */
6826 /* Helper function for vrp_evaluate_conditional_warnv. */
6829 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code
, tree op0
,
6830 tree op1
, bool use_equiv_p
,
6831 bool *strict_overflow_p
, bool *only_ranges
)
6835 *only_ranges
= true;
6837 /* We only deal with integral and pointer types. */
6838 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0
))
6839 && !POINTER_TYPE_P (TREE_TYPE (op0
)))
/* First try the plain-range comparison; when it answers, equivalences need
   not be consulted at all.  */
6845 && (ret
= vrp_evaluate_conditional_warnv_with_ops_using_ranges
6846 (code
, op0
, op1
, strict_overflow_p
)))
6848 *only_ranges
= false;
/* Fall back to equivalence-set comparisons, dispatching on which operands
   are SSA names.  */
6849 if (TREE_CODE (op0
) == SSA_NAME
&& TREE_CODE (op1
) == SSA_NAME
)
6850 return compare_names (code
, op0
, op1
, strict_overflow_p
);
6851 else if (TREE_CODE (op0
) == SSA_NAME
)
6852 return compare_name_with_value (code
, op0
, op1
, strict_overflow_p
);
6853 else if (TREE_CODE (op1
) == SSA_NAME
)
6854 return (compare_name_with_value
6855 (swap_tree_comparison (code
), op1
, op0
, strict_overflow_p
));
6858 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code
, op0
, op1
,
/* NOTE(review): extraction artifact — return type, braces, declarations of
   ret/sop/only_ranges, the "return NULL_TREE;" bail-out, the sop guard
   around the warning section, the integer_zerop selector for the
   -Wtype-limits message and the final return are missing.  Code text kept
   verbatim; comments only added.  */
6863 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
6864 information. Return NULL if the conditional can not be evaluated.
6865 The ranges of all the names equivalent with the operands in COND
6866 will be used when trying to compute the value. If the result is
6867 based on undefined signed overflow, issue a warning if
6871 vrp_evaluate_conditional (enum tree_code code
, tree op0
, tree op1
, gimple stmt
)
6877 /* Some passes and foldings leak constants with overflow flag set
6878 into the IL. Avoid doing wrong things with these and bail out. */
6879 if ((TREE_CODE (op0
) == INTEGER_CST
6880 && TREE_OVERFLOW (op0
))
6881 || (TREE_CODE (op1
) == INTEGER_CST
6882 && TREE_OVERFLOW (op1
)))
6886 ret
= vrp_evaluate_conditional_warnv_with_ops (code
, op0
, op1
, true, &sop
,
/* Warning section: the result relied on assuming signed overflow is
   undefined, so -Wstrict-overflow may fire.  */
6891 enum warn_strict_overflow_code wc
;
6892 const char* warnmsg
;
6894 if (is_gimple_min_invariant (ret
))
6896 wc
= WARN_STRICT_OVERFLOW_CONDITIONAL
;
6897 warnmsg
= G_("assuming signed overflow does not occur when "
6898 "simplifying conditional to constant");
6902 wc
= WARN_STRICT_OVERFLOW_COMPARISON
;
6903 warnmsg
= G_("assuming signed overflow does not occur when "
6904 "simplifying conditional");
6907 if (issue_strict_overflow_warning (wc
))
6909 location_t location
;
6911 if (!gimple_has_location (stmt
))
6912 location
= input_location
;
6914 location
= gimple_location (stmt
);
6915 warning_at (location
, OPT_Wstrict_overflow
, "%s", warnmsg
);
6919 if (warn_type_limits
6920 && ret
&& only_ranges
6921 && TREE_CODE_CLASS (code
) == tcc_comparison
6922 && TREE_CODE (op0
) == SSA_NAME
)
6924 /* If the comparison is being folded and the operand on the LHS
6925 is being compared against a constant value that is outside of
6926 the natural range of OP0's type, then the predicate will
6927 always fold regardless of the value of OP0. If -Wtype-limits
6928 was specified, emit a warning. */
6929 tree type
= TREE_TYPE (op0
);
6930 value_range_t
*vr0
= get_value_range (op0
);
6932 if (vr0
->type
!= VR_VARYING
6933 && INTEGRAL_TYPE_P (type
)
6934 && vrp_val_is_min (vr0
->min
)
6935 && vrp_val_is_max (vr0
->max
)
6936 && is_gimple_min_invariant (op1
))
6938 location_t location
;
6940 if (!gimple_has_location (stmt
))
6941 location
= input_location
;
6943 location
= gimple_location (stmt
);
6945 warning_at (location
, OPT_Wtype_limits
,
6947 ? G_("comparison always false "
6948 "due to limited range of data type")
6949 : G_("comparison always true "
6950 "due to limited range of data type"));
/* NOTE(review): extraction artifact — braces, declarations of val/sop/use/i,
   the sop guard deciding whether to honour the evaluation, the example code
   inside the long comment and parts of the dump logic are missing.  Code
   text kept verbatim; comments only added.  */
6958 /* Visit conditional statement STMT. If we can determine which edge
6959 will be taken out of STMT's basic block, record it in
6960 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6961 SSA_PROP_VARYING. */
6963 static enum ssa_prop_result
6964 vrp_visit_cond_stmt (gimple stmt
, edge
*taken_edge_p
)
6969 *taken_edge_p
= NULL
;
6971 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6976 fprintf (dump_file
, "\nVisiting conditional with predicate: ");
6977 print_gimple_stmt (dump_file
, stmt
, 0, 0);
6978 fprintf (dump_file
, "\nWith known ranges\n");
6980 FOR_EACH_SSA_TREE_OPERAND (use
, stmt
, i
, SSA_OP_USE
)
6982 fprintf (dump_file
, "\t");
6983 print_generic_expr (dump_file
, use
, 0);
6984 fprintf (dump_file
, ": ");
6985 dump_value_range (dump_file
, vr_value
[SSA_NAME_VERSION (use
)]);
6988 fprintf (dump_file
, "\n");
6991 /* Compute the value of the predicate COND by checking the known
6992 ranges of each of its operands.
6994 Note that we cannot evaluate all the equivalent ranges here
6995 because those ranges may not yet be final and with the current
6996 propagation strategy, we cannot determine when the value ranges
6997 of the names in the equivalence set have changed.
6999 For instance, given the following code fragment
7003 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
7007 Assume that on the first visit to i_14, i_5 has the temporary
7008 range [8, 8] because the second argument to the PHI function is
7009 not yet executable. We derive the range ~[0, 0] for i_14 and the
7010 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
7011 the first time, since i_14 is equivalent to the range [8, 8], we
7012 determine that the predicate is always false.
7014 On the next round of propagation, i_13 is determined to be
7015 VARYING, which causes i_5 to drop down to VARYING. So, another
7016 visit to i_14 is scheduled. In this second visit, we compute the
7017 exact same range and equivalence set for i_14, namely ~[0, 0] and
7018 { i_5 }. But we did not have the previous range for i_5
7019 registered, so vrp_visit_assignment thinks that the range for
7020 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
7021 is not visited again, which stops propagation from visiting
7022 statements in the THEN clause of that if().
7024 To properly fix this we would need to keep the previous range
7025 value for the names in the equivalence set. This way we would've
7026 discovered that from one visit to the other i_5 changed from
7027 range [8, 8] to VR_VARYING.
7029 However, fixing this apparent limitation may not be worth the
7030 additional checking. Testing on several code bases (GCC, DLV,
7031 MICO, TRAMP3D and SPEC2000) showed that doing this results in
7032 4 more predicates folded in SPEC. */
7035 val
= vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt
),
7036 gimple_cond_lhs (stmt
),
7037 gimple_cond_rhs (stmt
),
/* If VAL is a constant, the edge it selects can be determined.  */
7042 *taken_edge_p
= find_taken_edge (gimple_bb (stmt
), val
);
7045 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
7047 "\nIgnoring predicate evaluation because "
7048 "it assumes that signed overflow is undefined");
7053 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
7055 fprintf (dump_file
, "\nPredicate evaluates to: ");
7056 if (val
== NULL_TREE
)
7057 fprintf (dump_file
, "DON'T KNOW\n");
7059 print_generic_stmt (dump_file
, val
, 0);
7062 return (*taken_edge_p
) ? SSA_PROP_INTERESTING
: SSA_PROP_VARYING
;
/* NOTE(review): extraction artifact — return type, braces, declarations of
   low/high/t/cmp, the binary-search narrowing assignments and the final
   returns are missing.  Code text kept verbatim; comments only added.  */
7065 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
7066 that includes the value VAL. The search is restricted to the range
7067 [START_IDX, n - 1] where n is the size of VEC.
7069 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
7072 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
7073 it is placed in IDX and false is returned.
7075 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
7079 find_case_label_index (gimple stmt
, size_t start_idx
, tree val
, size_t *idx
)
7081 size_t n
= gimple_switch_num_labels (stmt
);
7084 /* Find case label for minimum of the value range or the next one.
7085 At each iteration we are searching in [low, high - 1]. */
/* Standard binary search over the sorted case-label vector.  */
7087 for (low
= start_idx
, high
= n
; high
!= low
; )
7091 /* Note that i != high, so we never ask for n. */
7092 size_t i
= (high
+ low
) / 2;
7093 t
= gimple_switch_label (stmt
, i
);
7095 /* Cache the result of comparing CASE_LOW and val. */
7096 cmp
= tree_int_cst_compare (CASE_LOW (t
), val
);
7100 /* Ranges cannot be empty. */
/* A case range CASE_LOW..CASE_HIGH covers VAL when CASE_HIGH >= VAL.  */
7109 if (CASE_HIGH (t
) != NULL
7110 && tree_int_cst_compare (CASE_HIGH (t
), val
) >= 0)
/* NOTE(review): extraction artifact — return type, the max_idx parameter,
   braces, declarations of i/j/k/low/high, the empty-range assignments, the
   j-adjustment when max_take_default, and the *min_idx/*max_idx stores are
   missing.  Code text kept verbatim; comments only added.  */
7122 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
7123 for values between MIN and MAX. The first index is placed in MIN_IDX. The
7124 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
7125 then MAX_IDX < MIN_IDX.
7126 Returns true if the default label is not needed. */
7129 find_case_label_range (gimple stmt
, tree min
, tree max
, size_t *min_idx
,
/* Search starts at index 1: label 0 of a GIMPLE_SWITCH is the default.  */
7133 bool min_take_default
= !find_case_label_index (stmt
, 1, min
, &i
);
7134 bool max_take_default
= !find_case_label_index (stmt
, i
, max
, &j
);
7138 && max_take_default
)
7140 /* Only the default case label reached.
7141 Return an empty range. */
7148 bool take_default
= min_take_default
|| max_take_default
;
7152 if (max_take_default
)
7155 /* If the case label range is continuous, we do not need
7156 the default case label. Verify that. */
7157 high
= CASE_LOW (gimple_switch_label (stmt
, i
));
7158 if (CASE_HIGH (gimple_switch_label (stmt
, i
)))
7159 high
= CASE_HIGH (gimple_switch_label (stmt
, i
));
/* Adjacent labels must satisfy low(k) == high(k-1) + 1, i.e. their
   difference is exactly one, for the covering to be gap-free.  */
7160 for (k
= i
+ 1; k
<= j
; ++k
)
7162 low
= CASE_LOW (gimple_switch_label (stmt
, k
));
7163 if (!integer_onep (int_const_binop (MINUS_EXPR
, low
, high
)))
7165 take_default
= true;
7169 if (CASE_HIGH (gimple_switch_label (stmt
, k
)))
7170 high
= CASE_HIGH (gimple_switch_label (stmt
, k
));
7175 return !take_default
;
/* NOTE(review): extraction artifact — return type, the max_idx2 parameter,
   braces, declarations of i/j/k/l/take_default, the index-store statements
   and the whole anti-range arm (original lines 7232-7255) are missing.
   Code text kept verbatim; comments only added.  */
7179 /* Searches the case label vector VEC for the ranges of CASE_LABELs that are
7180 used in range VR. The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and
7181 MAX_IDX2. If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
7182 Returns true if the default label is not needed. */
7185 find_case_label_ranges (gimple stmt
, value_range_t
*vr
, size_t *min_idx1
,
7186 size_t *max_idx1
, size_t *min_idx2
,
7190 unsigned int n
= gimple_switch_num_labels (stmt
);
7192 tree case_low
, case_high
;
7193 tree min
= vr
->min
, max
= vr
->max
;
/* Caller guarantees a usable range; VARYING/UNDEFINED are rejected.  */
7195 gcc_checking_assert (vr
->type
== VR_RANGE
|| vr
->type
== VR_ANTI_RANGE
);
7197 take_default
= !find_case_label_range (stmt
, min
, max
, &i
, &j
);
7199 /* Set second range to empty. */
7203 if (vr
->type
== VR_RANGE
)
7207 return !take_default
;
/* VR_ANTI_RANGE from here on: the labels NOT covered by [MIN, MAX] are
   the ones that can be reached.  */
7210 /* Set first range to all case labels. */
7217 /* Make sure all the values of case labels [i , j] are contained in
7218 range [MIN, MAX]. */
7219 case_low
= CASE_LOW (gimple_switch_label (stmt
, i
));
7220 case_high
= CASE_HIGH (gimple_switch_label (stmt
, j
));
7221 if (tree_int_cst_compare (case_low
, min
) < 0)
7223 if (case_high
!= NULL_TREE
7224 && tree_int_cst_compare (max
, case_high
) < 0)
7230 /* If the range spans case labels [i, j], the corresponding anti-range spans
7231 the labels [1, i - 1] and [j + 1, n - 1]. */
7257 /* Visit switch statement STMT. If we can determine which edge
7258 will be taken out of STMT's basic block, record it in
7259 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
7260 SSA_PROP_VARYING. */
/* NOTE(review): this extraction is missing lines (embedded numbering is
   non-contiguous); verify against upstream GCC tree-vrp.c.  */
7262 static enum ssa_prop_result
7263 vrp_visit_switch_stmt (gimple stmt
, edge
*taken_edge_p
)
7267 size_t i
= 0, j
= 0, k
, l
;
7270 *taken_edge_p
= NULL
;
/* Only an SSA_NAME index can have a recorded value range.  */
7271 op
= gimple_switch_index (stmt
);
7272 if (TREE_CODE (op
) != SSA_NAME
)
7273 return SSA_PROP_VARYING
;
7275 vr
= get_value_range (op
);
7276 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
7278 fprintf (dump_file
, "\nVisiting switch expression with operand ");
7279 print_generic_expr (dump_file
, op
, 0);
7280 fprintf (dump_file
, " with known range ");
7281 dump_value_range (dump_file
, vr
);
7282 fprintf (dump_file
, "\n");
/* Bail out unless the range is a constant (anti-)range.  */
7285 if ((vr
->type
!= VR_RANGE
7286 && vr
->type
!= VR_ANTI_RANGE
)
7287 || symbolic_range_p (vr
))
7288 return SSA_PROP_VARYING
;
7290 /* Find the single edge that is taken from the switch expression. */
7291 take_default
= !find_case_label_ranges (stmt
, vr
, &i
, &j
, &k
, &l
);
7293 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
7297 gcc_assert (take_default
);
7298 val
= gimple_switch_default_label (stmt
);
7302 /* Check if labels with index i to j and maybe the default label
7303 are all reaching the same label. */
7305 val
= gimple_switch_label (stmt
, i
);
7307 && CASE_LABEL (gimple_switch_default_label (stmt
))
7308 != CASE_LABEL (val
))
7310 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
7311 fprintf (dump_file
, " not a single destination for this "
7313 return SSA_PROP_VARYING
;
7315 for (++i
; i
<= j
; ++i
)
7317 if (CASE_LABEL (gimple_switch_label (stmt
, i
)) != CASE_LABEL (val
))
7319 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
7320 fprintf (dump_file
, " not a single destination for this "
7322 return SSA_PROP_VARYING
;
/* Also check the second label range [k, l] computed for anti-ranges.  */
7327 if (CASE_LABEL (gimple_switch_label (stmt
, k
)) != CASE_LABEL (val
))
7329 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
7330 fprintf (dump_file
, " not a single destination for this "
7332 return SSA_PROP_VARYING
;
/* All reachable labels agree: record the single outgoing edge.  */
7337 *taken_edge_p
= find_edge (gimple_bb (stmt
),
7338 label_to_block (CASE_LABEL (val
)));
7340 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
7342 fprintf (dump_file
, " will take edge to ");
7343 print_generic_stmt (dump_file
, CASE_LABEL (val
), 0);
7346 return SSA_PROP_INTERESTING
;
7350 /* Evaluate statement STMT. If the statement produces a useful range,
7351 return SSA_PROP_INTERESTING and record the SSA name with the
7352 interesting range into *OUTPUT_P.
7354 If STMT is a conditional branch and we can determine its truth
7355 value, the taken edge is recorded in *TAKEN_EDGE_P.
7357 If STMT produces a varying value, return SSA_PROP_VARYING. */
/* NOTE(review): extraction drops lines here (numbering jumps 7360 -> 7365);
   verify against upstream GCC tree-vrp.c.  */
7359 static enum ssa_prop_result
7360 vrp_visit_stmt (gimple stmt
, edge
*taken_edge_p
, tree
*output_p
)
7365 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
7367 fprintf (dump_file
, "\nVisiting statement:\n");
7368 print_gimple_stmt (dump_file
, stmt
, 0, dump_flags
);
7369 fprintf (dump_file
, "\n");
/* Dispatch on statement kind: assignments/calls, conditions, and
   switches each have a dedicated visitor.  */
7372 if (!stmt_interesting_for_vrp (stmt
))
7373 gcc_assert (stmt_ends_bb_p (stmt
));
7374 else if (is_gimple_assign (stmt
) || is_gimple_call (stmt
))
7376 /* In general, assignments with virtual operands are not useful
7377 for deriving ranges, with the obvious exception of calls to
7378 builtin functions. */
7379 if ((is_gimple_call (stmt
)
7380 && gimple_call_fndecl (stmt
) != NULL_TREE
7381 && DECL_BUILT_IN (gimple_call_fndecl (stmt
)))
7382 || !gimple_vuse (stmt
))
7383 return vrp_visit_assignment_or_call (stmt
, output_p
);
7385 else if (gimple_code (stmt
) == GIMPLE_COND
)
7386 return vrp_visit_cond_stmt (stmt
, taken_edge_p
);
7387 else if (gimple_code (stmt
) == GIMPLE_SWITCH
)
7388 return vrp_visit_switch_stmt (stmt
, taken_edge_p
);
7390 /* All other statements produce nothing of interest for VRP, so mark
7391 their outputs varying and prevent further simulation. */
7392 FOR_EACH_SSA_TREE_OPERAND (def
, stmt
, iter
, SSA_OP_DEF
)
7393 set_value_range_to_varying (get_value_range (def
));
7395 return SSA_PROP_VARYING
;
7398 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
7399 { VR1TYPE, VR1MIN, VR1MAX } and store the result
7400 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
7401 possible such range. The resulting range is not canonicalized. */
/* NOTE(review): many statements are absent from this extraction (the embedded
   numbering has large gaps, e.g. 7444 -> 7450, 7533 -> 7537); each case arm
   below is therefore incomplete — verify against upstream GCC tree-vrp.c.  */
7404 union_ranges (enum value_range_type
*vr0type
,
7405 tree
*vr0min
, tree
*vr0max
,
7406 enum value_range_type vr1type
,
7407 tree vr1min
, tree vr1max
)
7409 bool mineq
= operand_equal_p (*vr0min
, vr1min
, 0);
7410 bool maxeq
= operand_equal_p (*vr0max
, vr1max
, 0);
7412 /* [] is vr0, () is vr1 in the following classification comments. */
/* Case: identical bounds.  */
7416 if (*vr0type
== vr1type
)
7417 /* Nothing to do for equal ranges. */
7419 else if ((*vr0type
== VR_RANGE
7420 && vr1type
== VR_ANTI_RANGE
)
7421 || (*vr0type
== VR_ANTI_RANGE
7422 && vr1type
== VR_RANGE
))
7424 /* For anti-range with range union the result is varying. */
/* Case: disjoint ranges.  */
7430 else if (operand_less_p (*vr0max
, vr1min
) == 1
7431 || operand_less_p (vr1max
, *vr0min
) == 1)
7433 /* [ ] ( ) or ( ) [ ]
7434 If the ranges have an empty intersection, result of the union
7435 operation is the anti-range or if both are anti-ranges
7437 if (*vr0type
== VR_ANTI_RANGE
7438 && vr1type
== VR_ANTI_RANGE
)
7440 else if (*vr0type
== VR_ANTI_RANGE
7441 && vr1type
== VR_RANGE
)
7443 else if (*vr0type
== VR_RANGE
7444 && vr1type
== VR_ANTI_RANGE
)
7450 else if (*vr0type
== VR_RANGE
7451 && vr1type
== VR_RANGE
)
7453 /* The result is the convex hull of both ranges. */
7454 if (operand_less_p (*vr0max
, vr1min
) == 1)
7456 /* If the result can be an anti-range, create one. */
7457 if (TREE_CODE (*vr0max
) == INTEGER_CST
7458 && TREE_CODE (vr1min
) == INTEGER_CST
7459 && vrp_val_is_min (*vr0min
)
7460 && vrp_val_is_max (vr1max
))
/* The union covers everything except the gap (*vr0max, vr1min),
   i.e. the anti-range ~[*vr0max + 1, vr1min - 1].  */
7462 tree min
= int_const_binop (PLUS_EXPR
,
7464 build_int_cst (TREE_TYPE (*vr0max
), 1));
7465 tree max
= int_const_binop (MINUS_EXPR
,
7467 build_int_cst (TREE_TYPE (vr1min
), 1));
7468 if (!operand_less_p (max
, min
))
7470 *vr0type
= VR_ANTI_RANGE
;
7482 /* If the result can be an anti-range, create one. */
7483 if (TREE_CODE (vr1max
) == INTEGER_CST
7484 && TREE_CODE (*vr0min
) == INTEGER_CST
7485 && vrp_val_is_min (vr1min
)
7486 && vrp_val_is_max (*vr0max
))
/* Mirror case: vr1 lies below vr0; excluded gap is (vr1max, *vr0min).  */
7488 tree min
= int_const_binop (PLUS_EXPR
,
7490 build_int_cst (TREE_TYPE (vr1max
), 1));
7491 tree max
= int_const_binop (MINUS_EXPR
,
7493 build_int_cst (TREE_TYPE (*vr0min
), 1));
7494 if (!operand_less_p (max
, min
))
7496 *vr0type
= VR_ANTI_RANGE
;
/* Case: vr1 nested inside vr0.  */
7510 else if ((maxeq
|| operand_less_p (vr1max
, *vr0max
) == 1)
7511 && (mineq
|| operand_less_p (*vr0min
, vr1min
) == 1))
7513 /* [ ( ) ] or [( ) ] or [ ( )] */
7514 if (*vr0type
== VR_RANGE
7515 && vr1type
== VR_RANGE
)
7517 else if (*vr0type
== VR_ANTI_RANGE
7518 && vr1type
== VR_ANTI_RANGE
)
7524 else if (*vr0type
== VR_ANTI_RANGE
7525 && vr1type
== VR_RANGE
)
7527 /* Arbitrarily choose the right or left gap. */
7528 if (!mineq
&& TREE_CODE (vr1min
) == INTEGER_CST
)
7529 *vr0max
= int_const_binop (MINUS_EXPR
, vr1min
,
7530 build_int_cst (TREE_TYPE (vr1min
), 1));
7531 else if (!maxeq
&& TREE_CODE (vr1max
) == INTEGER_CST
)
7532 *vr0min
= int_const_binop (PLUS_EXPR
, vr1max
,
7533 build_int_cst (TREE_TYPE (vr1max
), 1));
7537 else if (*vr0type
== VR_RANGE
7538 && vr1type
== VR_ANTI_RANGE
)
7539 /* The result covers everything. */
/* Case: vr0 nested inside vr1.  */
7544 else if ((maxeq
|| operand_less_p (*vr0max
, vr1max
) == 1)
7545 && (mineq
|| operand_less_p (vr1min
, *vr0min
) == 1))
7547 /* ( [ ] ) or ([ ] ) or ( [ ]) */
7548 if (*vr0type
== VR_RANGE
7549 && vr1type
== VR_RANGE
)
7555 else if (*vr0type
== VR_ANTI_RANGE
7556 && vr1type
== VR_ANTI_RANGE
)
7558 else if (*vr0type
== VR_RANGE
7559 && vr1type
== VR_ANTI_RANGE
)
7561 *vr0type
= VR_ANTI_RANGE
;
7562 if (!mineq
&& TREE_CODE (*vr0min
) == INTEGER_CST
)
7564 *vr0max
= int_const_binop (MINUS_EXPR
, *vr0min
,
7565 build_int_cst (TREE_TYPE (*vr0min
), 1));
7568 else if (!maxeq
&& TREE_CODE (*vr0max
) == INTEGER_CST
)
7570 *vr0min
= int_const_binop (PLUS_EXPR
, *vr0max
,
7571 build_int_cst (TREE_TYPE (*vr0max
), 1));
7577 else if (*vr0type
== VR_ANTI_RANGE
7578 && vr1type
== VR_RANGE
)
7579 /* The result covers everything. */
/* Case: vr1 overlaps or abuts vr0 on the right.  */
7584 else if ((operand_less_p (vr1min
, *vr0max
) == 1
7585 || operand_equal_p (vr1min
, *vr0max
, 0))
7586 && operand_less_p (*vr0min
, vr1min
) == 1)
7588 /* [ ( ] ) or [ ]( ) */
7589 if (*vr0type
== VR_RANGE
7590 && vr1type
== VR_RANGE
)
7592 else if (*vr0type
== VR_ANTI_RANGE
7593 && vr1type
== VR_ANTI_RANGE
)
7595 else if (*vr0type
== VR_ANTI_RANGE
7596 && vr1type
== VR_RANGE
)
7598 if (TREE_CODE (vr1min
) == INTEGER_CST
)
7599 *vr0max
= int_const_binop (MINUS_EXPR
, vr1min
,
7600 build_int_cst (TREE_TYPE (vr1min
), 1));
7604 else if (*vr0type
== VR_RANGE
7605 && vr1type
== VR_ANTI_RANGE
)
7607 if (TREE_CODE (*vr0max
) == INTEGER_CST
)
7610 *vr0min
= int_const_binop (PLUS_EXPR
, *vr0max
,
7611 build_int_cst (TREE_TYPE (*vr0max
), 1));
/* Case: vr1 overlaps or abuts vr0 on the left.  */
7620 else if ((operand_less_p (*vr0min
, vr1max
) == 1
7621 || operand_equal_p (*vr0min
, vr1max
, 0))
7622 && operand_less_p (vr1min
, *vr0min
) == 1)
7624 /* ( [ ) ] or ( )[ ] */
7625 if (*vr0type
== VR_RANGE
7626 && vr1type
== VR_RANGE
)
7628 else if (*vr0type
== VR_ANTI_RANGE
7629 && vr1type
== VR_ANTI_RANGE
)
7631 else if (*vr0type
== VR_ANTI_RANGE
7632 && vr1type
== VR_RANGE
)
7634 if (TREE_CODE (vr1max
) == INTEGER_CST
)
7635 *vr0min
= int_const_binop (PLUS_EXPR
, vr1max
,
7636 build_int_cst (TREE_TYPE (vr1max
), 1));
7640 else if (*vr0type
== VR_RANGE
7641 && vr1type
== VR_ANTI_RANGE
)
7643 if (TREE_CODE (*vr0min
) == INTEGER_CST
)
7647 *vr0max
= int_const_binop (MINUS_EXPR
, *vr0min
,
7648 build_int_cst (TREE_TYPE (*vr0min
), 1));
/* Fallback: give up and make the result varying.  */
7662 *vr0type
= VR_VARYING
;
7663 *vr0min
= NULL_TREE
;
7664 *vr0max
= NULL_TREE
;
7667 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
7668 { VR1TYPE, VR1MIN, VR1MAX } and store the result
7669 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
7670 possible such range. The resulting range is not canonicalized. */
/* NOTE(review): statements are missing from this extraction (the embedded
   numbering has gaps throughout); each case arm below is incomplete — verify
   against upstream GCC tree-vrp.c.  */
7673 intersect_ranges (enum value_range_type
*vr0type
,
7674 tree
*vr0min
, tree
*vr0max
,
7675 enum value_range_type vr1type
,
7676 tree vr1min
, tree vr1max
)
7678 bool mineq
= operand_equal_p (*vr0min
, vr1min
, 0);
7679 bool maxeq
= operand_equal_p (*vr0max
, vr1max
, 0);
7681 /* [] is vr0, () is vr1 in the following classification comments. */
/* Case: identical bounds.  */
7685 if (*vr0type
== vr1type
)
7686 /* Nothing to do for equal ranges. */
7688 else if ((*vr0type
== VR_RANGE
7689 && vr1type
== VR_ANTI_RANGE
)
7690 || (*vr0type
== VR_ANTI_RANGE
7691 && vr1type
== VR_RANGE
))
7693 /* For anti-range with range intersection the result is empty. */
7694 *vr0type
= VR_UNDEFINED
;
7695 *vr0min
= NULL_TREE
;
7696 *vr0max
= NULL_TREE
;
/* Case: disjoint ranges.  */
7701 else if (operand_less_p (*vr0max
, vr1min
) == 1
7702 || operand_less_p (vr1max
, *vr0min
) == 1)
7704 /* [ ] ( ) or ( ) [ ]
7705 If the ranges have an empty intersection, the result of the
7706 intersect operation is the range for intersecting an
7707 anti-range with a range or empty when intersecting two ranges. */
7708 if (*vr0type
== VR_RANGE
7709 && vr1type
== VR_ANTI_RANGE
)
7711 else if (*vr0type
== VR_ANTI_RANGE
7712 && vr1type
== VR_RANGE
)
7718 else if (*vr0type
== VR_RANGE
7719 && vr1type
== VR_RANGE
)
7721 *vr0type
= VR_UNDEFINED
;
7722 *vr0min
= NULL_TREE
;
7723 *vr0max
= NULL_TREE
;
7725 else if (*vr0type
== VR_ANTI_RANGE
7726 && vr1type
== VR_ANTI_RANGE
)
7728 /* If the anti-ranges are adjacent to each other merge them. */
7729 if (TREE_CODE (*vr0max
) == INTEGER_CST
7730 && TREE_CODE (vr1min
) == INTEGER_CST
7731 && operand_less_p (*vr0max
, vr1min
) == 1
7732 && integer_onep (int_const_binop (MINUS_EXPR
,
7735 else if (TREE_CODE (vr1max
) == INTEGER_CST
7736 && TREE_CODE (*vr0min
) == INTEGER_CST
7737 && operand_less_p (vr1max
, *vr0min
) == 1
7738 && integer_onep (int_const_binop (MINUS_EXPR
,
7741 /* Else arbitrarily take VR0. */
/* Case: vr1 nested inside vr0.  */
7744 else if ((maxeq
|| operand_less_p (vr1max
, *vr0max
) == 1)
7745 && (mineq
|| operand_less_p (*vr0min
, vr1min
) == 1))
7747 /* [ ( ) ] or [( ) ] or [ ( )] */
7748 if (*vr0type
== VR_RANGE
7749 && vr1type
== VR_RANGE
)
7751 /* If both are ranges the result is the inner one. */
7756 else if (*vr0type
== VR_RANGE
7757 && vr1type
== VR_ANTI_RANGE
)
7759 /* Choose the right gap if the left one is empty. */
7762 if (TREE_CODE (vr1max
) == INTEGER_CST
)
7763 *vr0min
= int_const_binop (PLUS_EXPR
, vr1max
,
7764 build_int_cst (TREE_TYPE (vr1max
), 1));
7768 /* Choose the left gap if the right one is empty. */
7771 if (TREE_CODE (vr1min
) == INTEGER_CST
)
7772 *vr0max
= int_const_binop (MINUS_EXPR
, vr1min
,
7773 build_int_cst (TREE_TYPE (vr1min
), 1));
7777 /* Choose the anti-range if the range is effectively varying. */
7778 else if (vrp_val_is_min (*vr0min
)
7779 && vrp_val_is_max (*vr0max
))
7785 /* Else choose the range. */
7787 else if (*vr0type
== VR_ANTI_RANGE
7788 && vr1type
== VR_ANTI_RANGE
)
7789 /* If both are anti-ranges the result is the outer one. */
7791 else if (*vr0type
== VR_ANTI_RANGE
7792 && vr1type
== VR_RANGE
)
7794 /* The intersection is empty. */
7795 *vr0type
= VR_UNDEFINED
;
7796 *vr0min
= NULL_TREE
;
7797 *vr0max
= NULL_TREE
;
/* Case: vr0 nested inside vr1.  */
7802 else if ((maxeq
|| operand_less_p (*vr0max
, vr1max
) == 1)
7803 && (mineq
|| operand_less_p (vr1min
, *vr0min
) == 1))
7805 /* ( [ ] ) or ([ ] ) or ( [ ]) */
7806 if (*vr0type
== VR_RANGE
7807 && vr1type
== VR_RANGE
)
7808 /* Choose the inner range. */
7810 else if (*vr0type
== VR_ANTI_RANGE
7811 && vr1type
== VR_RANGE
)
7813 /* Choose the right gap if the left is empty. */
7816 *vr0type
= VR_RANGE
;
7817 if (TREE_CODE (*vr0max
) == INTEGER_CST
)
7818 *vr0min
= int_const_binop (PLUS_EXPR
, *vr0max
,
7819 build_int_cst (TREE_TYPE (*vr0max
), 1));
7824 /* Choose the left gap if the right is empty. */
7827 *vr0type
= VR_RANGE
;
7828 if (TREE_CODE (*vr0min
) == INTEGER_CST
)
7829 *vr0max
= int_const_binop (MINUS_EXPR
, *vr0min
,
7830 build_int_cst (TREE_TYPE (*vr0min
), 1));
7835 /* Choose the anti-range if the range is effectively varying. */
7836 else if (vrp_val_is_min (vr1min
)
7837 && vrp_val_is_max (vr1max
))
7839 /* Else choose the range. */
7847 else if (*vr0type
== VR_ANTI_RANGE
7848 && vr1type
== VR_ANTI_RANGE
)
7850 /* If both are anti-ranges the result is the outer one. */
7855 else if (vr1type
== VR_ANTI_RANGE
7856 && *vr0type
== VR_RANGE
)
7858 /* The intersection is empty. */
7859 *vr0type
= VR_UNDEFINED
;
7860 *vr0min
= NULL_TREE
;
7861 *vr0max
= NULL_TREE
;
/* Case: vr1 overlaps or abuts vr0 on the right.  */
7866 else if ((operand_less_p (vr1min
, *vr0max
) == 1
7867 || operand_equal_p (vr1min
, *vr0max
, 0))
7868 && operand_less_p (*vr0min
, vr1min
) == 1)
7870 /* [ ( ] ) or [ ]( ) */
7871 if (*vr0type
== VR_ANTI_RANGE
7872 && vr1type
== VR_ANTI_RANGE
)
7874 else if (*vr0type
== VR_RANGE
7875 && vr1type
== VR_RANGE
)
7877 else if (*vr0type
== VR_RANGE
7878 && vr1type
== VR_ANTI_RANGE
)
7880 if (TREE_CODE (vr1min
) == INTEGER_CST
)
7881 *vr0max
= int_const_binop (MINUS_EXPR
, vr1min
,
7882 build_int_cst (TREE_TYPE (vr1min
), 1));
7886 else if (*vr0type
== VR_ANTI_RANGE
7887 && vr1type
== VR_RANGE
)
7889 *vr0type
= VR_RANGE
;
7890 if (TREE_CODE (*vr0max
) == INTEGER_CST
)
7891 *vr0min
= int_const_binop (PLUS_EXPR
, *vr0max
,
7892 build_int_cst (TREE_TYPE (*vr0max
), 1));
/* Case: vr1 overlaps or abuts vr0 on the left.  */
7900 else if ((operand_less_p (*vr0min
, vr1max
) == 1
7901 || operand_equal_p (*vr0min
, vr1max
, 0))
7902 && operand_less_p (vr1min
, *vr0min
) == 1)
7904 /* ( [ ) ] or ( )[ ] */
7905 if (*vr0type
== VR_ANTI_RANGE
7906 && vr1type
== VR_ANTI_RANGE
)
7908 else if (*vr0type
== VR_RANGE
7909 && vr1type
== VR_RANGE
)
7911 else if (*vr0type
== VR_RANGE
7912 && vr1type
== VR_ANTI_RANGE
)
7914 if (TREE_CODE (vr1max
) == INTEGER_CST
)
7915 *vr0min
= int_const_binop (PLUS_EXPR
, vr1max
,
7916 build_int_cst (TREE_TYPE (vr1max
), 1));
7920 else if (*vr0type
== VR_ANTI_RANGE
7921 && vr1type
== VR_RANGE
)
7923 *vr0type
= VR_RANGE
;
7924 if (TREE_CODE (*vr0min
) == INTEGER_CST
)
7925 *vr0max
= int_const_binop (MINUS_EXPR
, *vr0min
,
7926 build_int_cst (TREE_TYPE (*vr0min
), 1));
7935 /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
7936 result for the intersection. That's always a conservative
7937 correct estimate. */
7943 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
7944 in *VR0. This may not be the smallest possible such range. */
/* NOTE(review): lines are missing from this extraction (embedded numbering
   jumps, e.g. 7980 -> 7985); verify against upstream GCC tree-vrp.c.  */
7947 vrp_intersect_ranges_1 (value_range_t
*vr0
, value_range_t
*vr1
)
7949 value_range_t saved
;
7951 /* If either range is VR_VARYING the other one wins. */
7952 if (vr1
->type
== VR_VARYING
)
7954 if (vr0
->type
== VR_VARYING
)
7956 copy_value_range (vr0
, vr1
);
7960 /* When either range is VR_UNDEFINED the resulting range is
7961 VR_UNDEFINED, too. */
7962 if (vr0
->type
== VR_UNDEFINED
)
7964 if (vr1
->type
== VR_UNDEFINED
)
7966 set_value_range_to_undefined (vr0
);
7970 /* Save the original vr0 so we can return it as conservative intersection
7971 result when our worker turns things to varying. */
7973 intersect_ranges (&vr0
->type
, &vr0
->min
, &vr0
->max
,
7974 vr1
->type
, vr1
->min
, vr1
->max
);
7975 /* Make sure to canonicalize the result though as the inversion of a
7976 VR_RANGE can still be a VR_RANGE. */
7977 set_and_canonicalize_value_range (vr0
, vr0
->type
,
7978 vr0
->min
, vr0
->max
, vr0
->equiv
);
7979 /* If that failed, use the saved original VR0. */
7980 if (vr0
->type
== VR_VARYING
)
7985 /* If the result is VR_UNDEFINED there is no need to mess with
7986 the equivalencies. */
7987 if (vr0
->type
== VR_UNDEFINED
)
7990 /* The resulting set of equivalences for range intersection is the union of
7992 if (vr0
->equiv
&& vr1
->equiv
&& vr0
->equiv
!= vr1
->equiv
)
7993 bitmap_ior_into (vr0
->equiv
, vr1
->equiv
);
/* NOTE(review): as written this passes vr0->equiv to bitmap_copy under a
   condition that requires !vr0->equiv — looks like a missing allocation line
   was dropped by the extraction; confirm against the original source.  */
7994 else if (vr1
->equiv
&& !vr0
->equiv
)
7995 bitmap_copy (vr0
->equiv
, vr1
->equiv
);
/* Dump-wrapping entry point: intersect *VR1 into *VR0 via
   vrp_intersect_ranges_1, printing both operands and the result when
   detailed dumping is enabled.  */
7999 vrp_intersect_ranges (value_range_t
*vr0
, value_range_t
*vr1
)
8001 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
8003 fprintf (dump_file
, "Intersecting\n ");
8004 dump_value_range (dump_file
, vr0
);
8005 fprintf (dump_file
, "\nand\n ");
8006 dump_value_range (dump_file
, vr1
);
8007 fprintf (dump_file
, "\n");
/* The actual work happens in the _1 helper.  */
8009 vrp_intersect_ranges_1 (vr0
, vr1
);
8010 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
8012 fprintf (dump_file
, "to\n ");
8013 dump_value_range (dump_file
, vr0
);
8014 fprintf (dump_file
, "\n");
8018 /* Meet operation for value ranges. Given two value ranges VR0 and
8019 VR1, store in VR0 a range that contains both VR0 and VR1. This
8020 may not be the smallest possible such range. */
/* NOTE(review): lines are missing from this extraction (embedded numbering
   jumps, e.g. 8047 -> 8052, 8082 -> 8084); verify against upstream
   GCC tree-vrp.c.  */
8023 vrp_meet_1 (value_range_t
*vr0
, value_range_t
*vr1
)
8025 value_range_t saved
;
/* Lattice shortcuts: UNDEFINED is the identity of meet, VARYING absorbs.  */
8027 if (vr0
->type
== VR_UNDEFINED
)
8029 set_value_range (vr0
, vr1
->type
, vr1
->min
, vr1
->max
, vr1
->equiv
);
8033 if (vr1
->type
== VR_UNDEFINED
)
8035 /* VR0 already has the resulting range. */
8039 if (vr0
->type
== VR_VARYING
)
8041 /* Nothing to do. VR0 already has the resulting range. */
8045 if (vr1
->type
== VR_VARYING
)
8047 set_value_range_to_varying (vr0
);
8052 union_ranges (&vr0
->type
, &vr0
->min
, &vr0
->max
,
8053 vr1
->type
, vr1
->min
, vr1
->max
);
8054 if (vr0
->type
== VR_VARYING
)
8056 /* Failed to find an efficient meet. Before giving up and setting
8057 the result to VARYING, see if we can at least derive a useful
8058 anti-range. FIXME, all this nonsense about distinguishing
8059 anti-ranges from ranges is necessary because of the odd
8060 semantics of range_includes_zero_p and friends. */
8061 if (((saved
.type
== VR_RANGE
8062 && range_includes_zero_p (saved
.min
, saved
.max
) == 0)
8063 || (saved
.type
== VR_ANTI_RANGE
8064 && range_includes_zero_p (saved
.min
, saved
.max
) == 1))
8065 && ((vr1
->type
== VR_RANGE
8066 && range_includes_zero_p (vr1
->min
, vr1
->max
) == 0)
8067 || (vr1
->type
== VR_ANTI_RANGE
8068 && range_includes_zero_p (vr1
->min
, vr1
->max
) == 1)))
/* Both inputs exclude zero, so the meet is at least "nonnull".  */
8070 set_value_range_to_nonnull (vr0
, TREE_TYPE (saved
.min
));
8072 /* Since this meet operation did not result from the meeting of
8073 two equivalent names, VR0 cannot have any equivalences. */
8075 bitmap_clear (vr0
->equiv
);
8079 set_value_range_to_varying (vr0
);
8082 set_and_canonicalize_value_range (vr0
, vr0
->type
, vr0
->min
, vr0
->max
,
8084 if (vr0
->type
== VR_VARYING
)
8087 /* The resulting set of equivalences is always the intersection of
8089 if (vr0
->equiv
&& vr1
->equiv
&& vr0
->equiv
!= vr1
->equiv
)
8090 bitmap_and_into (vr0
->equiv
, vr1
->equiv
);
8091 else if (vr0
->equiv
&& !vr1
->equiv
)
8092 bitmap_clear (vr0
->equiv
);
/* Dump-wrapping entry point: meet *VR1 into *VR0 via vrp_meet_1,
   printing both operands and the result when detailed dumping is
   enabled.  */
8096 vrp_meet (value_range_t
*vr0
, value_range_t
*vr1
)
8098 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
8100 fprintf (dump_file
, "Meeting\n ");
8101 dump_value_range (dump_file
, vr0
);
8102 fprintf (dump_file
, "\nand\n ");
8103 dump_value_range (dump_file
, vr1
);
8104 fprintf (dump_file
, "\n");
/* The actual work happens in the _1 helper.  */
8106 vrp_meet_1 (vr0
, vr1
);
8107 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
8109 fprintf (dump_file
, "to\n ");
8110 dump_value_range (dump_file
, vr0
);
8111 fprintf (dump_file
, "\n");
8116 /* Visit all arguments for PHI node PHI that flow through executable
8117 edges. If a valid value range can be derived from all the incoming
8118 value ranges, set a new range for the LHS of PHI. */
/* NOTE(review): lines are missing from this extraction (embedded numbering
   has gaps, e.g. 8205 -> 8210, 8280 -> 8285); verify against upstream
   GCC tree-vrp.c.  */
8120 static enum ssa_prop_result
8121 vrp_visit_phi_node (gimple phi
)
8124 tree lhs
= PHI_RESULT (phi
);
8125 value_range_t
*lhs_vr
= get_value_range (lhs
);
8126 value_range_t vr_result
= VR_INITIALIZER
;
8128 int edges
, old_edges
;
8131 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
8133 fprintf (dump_file
, "\nVisiting PHI node: ");
8134 print_gimple_stmt (dump_file
, phi
, 0, dump_flags
);
/* Meet the ranges of all arguments arriving over executable edges.  */
8138 for (i
= 0; i
< gimple_phi_num_args (phi
); i
++)
8140 edge e
= gimple_phi_arg_edge (phi
, i
);
8142 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
8145 "\n Argument #%d (%d -> %d %sexecutable)\n",
8146 (int) i
, e
->src
->index
, e
->dest
->index
,
8147 (e
->flags
& EDGE_EXECUTABLE
) ? "" : "not ");
8150 if (e
->flags
& EDGE_EXECUTABLE
)
8152 tree arg
= PHI_ARG_DEF (phi
, i
);
8153 value_range_t vr_arg
;
8157 if (TREE_CODE (arg
) == SSA_NAME
)
8159 vr_arg
= *(get_value_range (arg
));
8160 /* Do not allow equivalences or symbolic ranges to leak in from
8161 backedges. That creates invalid equivalencies.
8162 See PR53465 and PR54767. */
8163 if (e
->flags
& EDGE_DFS_BACK
8164 && (vr_arg
.type
== VR_RANGE
8165 || vr_arg
.type
== VR_ANTI_RANGE
))
8167 vr_arg
.equiv
= NULL
;
8168 if (symbolic_range_p (&vr_arg
))
8170 vr_arg
.type
= VR_VARYING
;
8171 vr_arg
.min
= NULL_TREE
;
8172 vr_arg
.max
= NULL_TREE
;
/* Constant argument: strip any overflow marker and use a singleton
   range.  */
8178 if (is_overflow_infinity (arg
))
8180 arg
= copy_node (arg
);
8181 TREE_OVERFLOW (arg
) = 0;
8184 vr_arg
.type
= VR_RANGE
;
8187 vr_arg
.equiv
= NULL
;
8190 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
8192 fprintf (dump_file
, "\t");
8193 print_generic_expr (dump_file
, arg
, dump_flags
);
8194 fprintf (dump_file
, "\n\tValue: ");
8195 dump_value_range (dump_file
, &vr_arg
);
8196 fprintf (dump_file
, "\n");
8200 copy_value_range (&vr_result
, &vr_arg
);
8202 vrp_meet (&vr_result
, &vr_arg
);
8205 if (vr_result
.type
== VR_VARYING
)
8210 if (vr_result
.type
== VR_VARYING
)
8212 else if (vr_result
.type
== VR_UNDEFINED
)
/* Track the number of executable incoming edges so we can tell when a
   new edge became executable this iteration.  */
8215 old_edges
= vr_phi_edge_counts
[SSA_NAME_VERSION (lhs
)];
8216 vr_phi_edge_counts
[SSA_NAME_VERSION (lhs
)] = edges
;
8218 /* To prevent infinite iterations in the algorithm, derive ranges
8219 when the new value is slightly bigger or smaller than the
8220 previous one. We don't do this if we have seen a new executable
8221 edge; this helps us avoid an overflow infinity for conditionals
8222 which are not in a loop. If the old value-range was VR_UNDEFINED
8223 use the updated range and iterate one more time. */
8225 && gimple_phi_num_args (phi
) > 1
8226 && edges
== old_edges
8227 && lhs_vr
->type
!= VR_UNDEFINED
)
8229 int cmp_min
= compare_values (lhs_vr
->min
, vr_result
.min
);
8230 int cmp_max
= compare_values (lhs_vr
->max
, vr_result
.max
);
8232 /* For non VR_RANGE or for pointers fall back to varying if
8233 the range changed. */
8234 if ((lhs_vr
->type
!= VR_RANGE
|| vr_result
.type
!= VR_RANGE
8235 || POINTER_TYPE_P (TREE_TYPE (lhs
)))
8236 && (cmp_min
!= 0 || cmp_max
!= 0))
8239 /* If the new minimum is smaller or larger than the previous
8240 one, go all the way to -INF. In the first case, to avoid
8241 iterating millions of times to reach -INF, and in the
8242 other case to avoid infinite bouncing between different
8244 if (cmp_min
> 0 || cmp_min
< 0)
8246 if (!needs_overflow_infinity (TREE_TYPE (vr_result
.min
))
8247 || !vrp_var_may_overflow (lhs
, phi
))
8248 vr_result
.min
= TYPE_MIN_VALUE (TREE_TYPE (vr_result
.min
));
8249 else if (supports_overflow_infinity (TREE_TYPE (vr_result
.min
)))
8251 negative_overflow_infinity (TREE_TYPE (vr_result
.min
));
8254 /* Similarly, if the new maximum is smaller or larger than
8255 the previous one, go all the way to +INF. */
8256 if (cmp_max
< 0 || cmp_max
> 0)
8258 if (!needs_overflow_infinity (TREE_TYPE (vr_result
.max
))
8259 || !vrp_var_may_overflow (lhs
, phi
))
8260 vr_result
.max
= TYPE_MAX_VALUE (TREE_TYPE (vr_result
.max
));
8261 else if (supports_overflow_infinity (TREE_TYPE (vr_result
.max
)))
8263 positive_overflow_infinity (TREE_TYPE (vr_result
.max
));
8266 /* If we dropped either bound to +-INF then if this is a loop
8267 PHI node SCEV may know more about its value-range. */
8268 if ((cmp_min
> 0 || cmp_min
< 0
8269 || cmp_max
< 0 || cmp_max
> 0)
8271 && (l
= loop_containing_stmt (phi
))
8272 && l
->header
== gimple_bb (phi
))
8273 adjust_range_with_scev (&vr_result
, l
, phi
, lhs
);
8275 /* If we will end up with a (-INF, +INF) range, set it to
8276 VARYING. Same if the previous max value was invalid for
8277 the type and we end up with vr_result.min > vr_result.max. */
8278 if ((vrp_val_is_max (vr_result
.max
)
8279 && vrp_val_is_min (vr_result
.min
))
8280 || compare_values (vr_result
.min
,
8285 /* If the new range is different than the previous value, keep
8288 if (update_value_range (lhs
, &vr_result
))
8290 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
8292 fprintf (dump_file
, "Found new range for ");
8293 print_generic_expr (dump_file
, lhs
, 0);
8294 fprintf (dump_file
, ": ");
8295 dump_value_range (dump_file
, &vr_result
);
8296 fprintf (dump_file
, "\n\n");
8299 return SSA_PROP_INTERESTING
;
8302 /* Nothing changed, don't add outgoing edges. */
8303 return SSA_PROP_NOT_INTERESTING
;
8305 /* No match found. Set the LHS to VARYING. */
8307 set_value_range_to_varying (lhs_vr
);
8308 return SSA_PROP_VARYING
;
8311 /* Simplify boolean operations if the source is known
8312 to be already a boolean. */
/* NOTE(review): lines are missing from this extraction (embedded numbering
   jumps, e.g. 8337 -> 8342, 8357 -> 8359); verify against upstream
   GCC tree-vrp.c.  */
8314 simplify_truth_ops_using_ranges (gimple_stmt_iterator
*gsi
, gimple stmt
)
8316 enum tree_code rhs_code
= gimple_assign_rhs_code (stmt
);
8318 bool need_conversion
;
8320 /* We handle only !=/== case here. */
8321 gcc_assert (rhs_code
== EQ_EXPR
|| rhs_code
== NE_EXPR
);
/* Both operands must be known to take only the values 0 and 1.  */
8323 op0
= gimple_assign_rhs1 (stmt
);
8324 if (!op_with_boolean_value_range_p (op0
))
8327 op1
= gimple_assign_rhs2 (stmt
);
8328 if (!op_with_boolean_value_range_p (op1
))
8331 /* Reduce number of cases to handle to NE_EXPR. As there is no
8332 BIT_XNOR_EXPR we cannot replace A == B with a single statement. */
8333 if (rhs_code
== EQ_EXPR
)
8335 if (TREE_CODE (op1
) == INTEGER_CST
)
8336 op1
= int_const_binop (BIT_XOR_EXPR
, op1
,
8337 build_int_cst (TREE_TYPE (op1
), 1));
8342 lhs
= gimple_assign_lhs (stmt
);
8344 = !useless_type_conversion_p (TREE_TYPE (lhs
), TREE_TYPE (op0
));
8346 /* Make sure to not sign-extend a 1-bit 1 when converting the result. */
8348 && !TYPE_UNSIGNED (TREE_TYPE (op0
))
8349 && TYPE_PRECISION (TREE_TYPE (op0
)) == 1
8350 && TYPE_PRECISION (TREE_TYPE (lhs
)) > 1)
8353 /* For A != 0 we can substitute A itself. */
8354 if (integer_zerop (op1
))
8355 gimple_assign_set_rhs_with_ops (gsi
,
8357 ? NOP_EXPR
: TREE_CODE (op0
),
8359 /* For A != B we substitute A ^ B. Either with conversion. */
8360 else if (need_conversion
)
8362 tree tem
= make_ssa_name (TREE_TYPE (op0
), NULL
);
8363 gimple newop
= gimple_build_assign_with_ops (BIT_XOR_EXPR
, tem
, op0
, op1
);
8364 gsi_insert_before (gsi
, newop
, GSI_SAME_STMT
);
8365 gimple_assign_set_rhs_with_ops (gsi
, NOP_EXPR
, tem
, NULL_TREE
);
/* Or without conversion when the types already agree.  */
8369 gimple_assign_set_rhs_with_ops (gsi
, BIT_XOR_EXPR
, op0
, op1
);
8370 update_stmt (gsi_stmt (*gsi
));
8375 /* Simplify a division or modulo operator to a right shift or
8376 bitwise and if the first operand is unsigned or is greater
8377 than zero and the second operand is an exact power of two. */
/* NOTE(review): lines are missing from this extraction (embedded numbering
   jumps, e.g. 8390 -> 8396, 8415 -> 8419); verify against upstream
   GCC tree-vrp.c.  */
8380 simplify_div_or_mod_using_ranges (gimple stmt
)
8382 enum tree_code rhs_code
= gimple_assign_rhs_code (stmt
)
;
8384 tree op0
= gimple_assign_rhs1 (stmt
);
8385 tree op1
= gimple_assign_rhs2 (stmt
);
8386 value_range_t
*vr
= get_value_range (gimple_assign_rhs1 (stmt
));
/* Unsigned operands are trivially non-negative; otherwise consult the
   recorded range to prove op0 >= 0.  */
8388 if (TYPE_UNSIGNED (TREE_TYPE (op0
)))
8390 val
= integer_one_node
;
8396 val
= compare_range_with_value (GE_EXPR
, vr
, integer_zero_node
, &sop
);
/* The proof may rely on signed overflow being undefined; warn if so.  */
8400 && integer_onep (val
)
8401 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC
))
8403 location_t location
;
8405 if (!gimple_has_location (stmt
))
8406 location
= input_location
;
8408 location
= gimple_location (stmt
);
8409 warning_at (location
, OPT_Wstrict_overflow
,
8410 "assuming signed overflow does not occur when "
8411 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
8415 if (val
&& integer_onep (val
))
/* Division by 2^k becomes a right shift by k; modulo becomes a mask
   with 2^k - 1.  */
8419 if (rhs_code
== TRUNC_DIV_EXPR
)
8421 t
= build_int_cst (integer_type_node
, tree_log2 (op1
));
8422 gimple_assign_set_rhs_code (stmt
, RSHIFT_EXPR
);
8423 gimple_assign_set_rhs1 (stmt
, op0
);
8424 gimple_assign_set_rhs2 (stmt
, t
);
8428 t
= build_int_cst (TREE_TYPE (op1
), 1);
8429 t
= int_const_binop (MINUS_EXPR
, op1
, t
);
8430 t
= fold_convert (TREE_TYPE (op0
), t
);
8432 gimple_assign_set_rhs_code (stmt
, BIT_AND_EXPR
);
8433 gimple_assign_set_rhs1 (stmt
, op0
);
8434 gimple_assign_set_rhs2 (stmt
, t
);
8444 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
8445 ABS_EXPR. If the operand is <= 0, then simplify the
8446 ABS_EXPR into a NEGATE_EXPR. */
/* NOTE(review): lines are missing from this extraction (embedded numbering
   jumps, e.g. 8458 -> 8464, 8476 -> 8481); verify against upstream
   GCC tree-vrp.c.  */
8449 simplify_abs_using_ranges (gimple stmt
)
8452 tree op
= gimple_assign_rhs1 (stmt
);
8453 tree type
= TREE_TYPE (op
);
8454 value_range_t
*vr
= get_value_range (op
);
/* Unsigned values are trivially non-negative (val == 0 means "drop the
   ABS"); otherwise try to prove op <= 0 or op >= 0 from its range.  */
8456 if (TYPE_UNSIGNED (type
))
8458 val
= integer_zero_node
;
8464 val
= compare_range_with_value (LE_EXPR
, vr
, integer_zero_node
, &sop
);
8468 val
= compare_range_with_value (GE_EXPR
, vr
, integer_zero_node
,
/* Invert the GE_EXPR answer so that val == 1 uniformly means
   "operand is non-positive, negate it".  */
8473 if (integer_zerop (val
))
8474 val
= integer_one_node
;
8475 else if (integer_onep (val
))
8476 val
= integer_zero_node
;
8481 && (integer_onep (val
) || integer_zerop (val
)))
8483 if (sop
&& issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC
))
8485 location_t location
;
8487 if (!gimple_has_location (stmt
))
8488 location
= input_location
;
8490 location
= gimple_location (stmt
);
8491 warning_at (location
, OPT_Wstrict_overflow
,
8492 "assuming signed overflow does not occur when "
8493 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
8496 gimple_assign_set_rhs1 (stmt
, op
);
8497 if (integer_onep (val
))
8498 gimple_assign_set_rhs_code (stmt
, NEGATE_EXPR
);
8500 gimple_assign_set_rhs_code (stmt
, SSA_NAME
);
8509 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
8510 If all the bits that are being cleared by & are already
8511 known to be zero from VR, or all the bits that are being
8512 set by | are already known to be one from VR, the bit
8513 operation is redundant. */
8516 simplify_bit_ops_using_ranges (gimple_stmt_iterator
*gsi
, gimple stmt
)
8518 tree op0
= gimple_assign_rhs1 (stmt
);
8519 tree op1
= gimple_assign_rhs2 (stmt
);
8520 tree op
= NULL_TREE
;
8521 value_range_t vr0
= VR_INITIALIZER
;
8522 value_range_t vr1
= VR_INITIALIZER
;
8523 wide_int may_be_nonzero0
, may_be_nonzero1
;
8524 wide_int must_be_nonzero0
, must_be_nonzero1
;
8527 if (TREE_CODE (op0
) == SSA_NAME
)
8528 vr0
= *(get_value_range (op0
));
8529 else if (is_gimple_min_invariant (op0
))
8530 set_value_range_to_value (&vr0
, op0
, NULL
);
8534 if (TREE_CODE (op1
) == SSA_NAME
)
8535 vr1
= *(get_value_range (op1
));
8536 else if (is_gimple_min_invariant (op1
))
8537 set_value_range_to_value (&vr1
, op1
, NULL
);
8541 if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0
), &vr0
, &may_be_nonzero0
, &must_be_nonzero0
))
8543 if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1
), &vr1
, &may_be_nonzero1
, &must_be_nonzero1
))
8546 switch (gimple_assign_rhs_code (stmt
))
8549 mask
= may_be_nonzero0
.and_not (must_be_nonzero1
);
8555 mask
= may_be_nonzero1
.and_not (must_be_nonzero0
);
8563 mask
= may_be_nonzero0
.and_not (must_be_nonzero1
);
8569 mask
= may_be_nonzero1
.and_not (must_be_nonzero0
);
8580 if (op
== NULL_TREE
)
8583 gimple_assign_set_rhs_with_ops (gsi
, TREE_CODE (op
), op
, NULL
);
8584 update_stmt (gsi_stmt (*gsi
));
8588 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
8589 a known value range VR.
8591 If there is one and only one value which will satisfy the
8592 conditional, then return that value. Else return NULL. */
8595 test_for_singularity (enum tree_code cond_code
, tree op0
,
8596 tree op1
, value_range_t
*vr
)
8601 /* Extract minimum/maximum values which satisfy the
8602 the conditional as it was written. */
8603 if (cond_code
== LE_EXPR
|| cond_code
== LT_EXPR
)
8605 /* This should not be negative infinity; there is no overflow
8607 min
= TYPE_MIN_VALUE (TREE_TYPE (op0
));
8610 if (cond_code
== LT_EXPR
&& !is_overflow_infinity (max
))
8612 tree one
= build_int_cst (TREE_TYPE (op0
), 1);
8613 max
= fold_build2 (MINUS_EXPR
, TREE_TYPE (op0
), max
, one
);
8615 TREE_NO_WARNING (max
) = 1;
8618 else if (cond_code
== GE_EXPR
|| cond_code
== GT_EXPR
)
8620 /* This should not be positive infinity; there is no overflow
8622 max
= TYPE_MAX_VALUE (TREE_TYPE (op0
));
8625 if (cond_code
== GT_EXPR
&& !is_overflow_infinity (min
))
8627 tree one
= build_int_cst (TREE_TYPE (op0
), 1);
8628 min
= fold_build2 (PLUS_EXPR
, TREE_TYPE (op0
), min
, one
);
8630 TREE_NO_WARNING (min
) = 1;
8634 /* Now refine the minimum and maximum values using any
8635 value range information we have for op0. */
8638 if (compare_values (vr
->min
, min
) == 1)
8640 if (compare_values (vr
->max
, max
) == -1)
8643 /* If the new min/max values have converged to a single value,
8644 then there is only one value which can satisfy the condition,
8645 return that value. */
8646 if (operand_equal_p (min
, max
, 0) && is_gimple_min_invariant (min
))
8652 /* Return whether the value range *VR fits in an integer type specified
8653 by PRECISION and UNSIGNED_P. */
8656 range_fits_type_p (value_range_t
*vr
, unsigned dest_precision
, signop dest_sgn
)
8659 unsigned src_precision
;
8663 /* We can only handle integral and pointer types. */
8664 src_type
= TREE_TYPE (vr
->min
);
8665 if (!INTEGRAL_TYPE_P (src_type
)
8666 && !POINTER_TYPE_P (src_type
))
8669 /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
8670 and so is an identity transform. */
8671 src_precision
= TYPE_PRECISION (TREE_TYPE (vr
->min
));
8672 src_sgn
= TYPE_SIGN (src_type
);
8673 if ((src_precision
< dest_precision
8674 && !(dest_sgn
== UNSIGNED
&& src_sgn
== SIGNED
))
8675 || (src_precision
== dest_precision
&& src_sgn
== dest_sgn
))
8678 /* Now we can only handle ranges with constant bounds. */
8679 if (vr
->type
!= VR_RANGE
8680 || TREE_CODE (vr
->min
) != INTEGER_CST
8681 || TREE_CODE (vr
->max
) != INTEGER_CST
)
8684 /* For sign changes, the MSB of the wide_int has to be clear.
8685 An unsigned value with its MSB set cannot be represented by
8686 a signed wide_int, while a negative value cannot be represented
8687 by an unsigned wide_int. */
8688 if (src_sgn
!= dest_sgn
8689 && (wi::lts_p (vr
->min
, 0) || wi::lts_p (vr
->max
, 0)))
8692 /* Then we can perform the conversion on both ends and compare
8693 the result for equality. */
8694 tem
= wi::ext (vr
->min
, dest_precision
, dest_sgn
);
8697 tem
= wi::ext (vr
->max
, dest_precision
, dest_sgn
);
8704 /* Simplify a conditional using a relational operator to an equality
8705 test if the range information indicates only one value can satisfy
8706 the original conditional. */
8709 simplify_cond_using_ranges (gimple stmt
)
8711 tree op0
= gimple_cond_lhs (stmt
);
8712 tree op1
= gimple_cond_rhs (stmt
);
8713 enum tree_code cond_code
= gimple_cond_code (stmt
);
8715 if (cond_code
!= NE_EXPR
8716 && cond_code
!= EQ_EXPR
8717 && TREE_CODE (op0
) == SSA_NAME
8718 && INTEGRAL_TYPE_P (TREE_TYPE (op0
))
8719 && is_gimple_min_invariant (op1
))
8721 value_range_t
*vr
= get_value_range (op0
);
8723 /* If we have range information for OP0, then we might be
8724 able to simplify this conditional. */
8725 if (vr
->type
== VR_RANGE
)
8727 tree new_tree
= test_for_singularity (cond_code
, op0
, op1
, vr
);
8733 fprintf (dump_file
, "Simplified relational ");
8734 print_gimple_stmt (dump_file
, stmt
, 0, 0);
8735 fprintf (dump_file
, " into ");
8738 gimple_cond_set_code (stmt
, EQ_EXPR
);
8739 gimple_cond_set_lhs (stmt
, op0
);
8740 gimple_cond_set_rhs (stmt
, new_tree
);
8746 print_gimple_stmt (dump_file
, stmt
, 0, 0);
8747 fprintf (dump_file
, "\n");
8753 /* Try again after inverting the condition. We only deal
8754 with integral types here, so no need to worry about
8755 issues with inverting FP comparisons. */
8756 cond_code
= invert_tree_comparison (cond_code
, false);
8757 new_tree
= test_for_singularity (cond_code
, op0
, op1
, vr
);
8763 fprintf (dump_file
, "Simplified relational ");
8764 print_gimple_stmt (dump_file
, stmt
, 0, 0);
8765 fprintf (dump_file
, " into ");
8768 gimple_cond_set_code (stmt
, NE_EXPR
);
8769 gimple_cond_set_lhs (stmt
, op0
);
8770 gimple_cond_set_rhs (stmt
, new_tree
);
8776 print_gimple_stmt (dump_file
, stmt
, 0, 0);
8777 fprintf (dump_file
, "\n");
8785 /* If we have a comparison of an SSA_NAME (OP0) against a constant,
8786 see if OP0 was set by a type conversion where the source of
8787 the conversion is another SSA_NAME with a range that fits
8788 into the range of OP0's type.
8790 If so, the conversion is redundant as the earlier SSA_NAME can be
8791 used for the comparison directly if we just massage the constant in the
8793 if (TREE_CODE (op0
) == SSA_NAME
8794 && TREE_CODE (op1
) == INTEGER_CST
)
8796 gimple def_stmt
= SSA_NAME_DEF_STMT (op0
);
8799 if (!is_gimple_assign (def_stmt
)
8800 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt
)))
8803 innerop
= gimple_assign_rhs1 (def_stmt
);
8805 if (TREE_CODE (innerop
) == SSA_NAME
8806 && !POINTER_TYPE_P (TREE_TYPE (innerop
)))
8808 value_range_t
*vr
= get_value_range (innerop
);
8810 if (range_int_cst_p (vr
)
8811 && range_fits_type_p (vr
,
8812 TYPE_PRECISION (TREE_TYPE (op0
)),
8813 TYPE_SIGN (TREE_TYPE (op0
)))
8814 && int_fits_type_p (op1
, TREE_TYPE (innerop
))
8815 /* The range must not have overflowed, or if it did overflow
8816 we must not be wrapping/trapping overflow and optimizing
8817 with strict overflow semantics. */
8818 && ((!is_negative_overflow_infinity (vr
->min
)
8819 && !is_positive_overflow_infinity (vr
->max
))
8820 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (innerop
))))
8822 /* If the range overflowed and the user has asked for warnings
8823 when strict overflow semantics were used to optimize code,
8824 issue an appropriate warning. */
8825 if ((is_negative_overflow_infinity (vr
->min
)
8826 || is_positive_overflow_infinity (vr
->max
))
8827 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_CONDITIONAL
))
8829 location_t location
;
8831 if (!gimple_has_location (stmt
))
8832 location
= input_location
;
8834 location
= gimple_location (stmt
);
8835 warning_at (location
, OPT_Wstrict_overflow
,
8836 "assuming signed overflow does not occur when "
8837 "simplifying conditional");
8840 tree newconst
= fold_convert (TREE_TYPE (innerop
), op1
);
8841 gimple_cond_set_lhs (stmt
, innerop
);
8842 gimple_cond_set_rhs (stmt
, newconst
);
8851 /* Simplify a switch statement using the value range of the switch
8855 simplify_switch_using_ranges (gimple stmt
)
8857 tree op
= gimple_switch_index (stmt
);
8862 size_t i
= 0, j
= 0, n
, n2
;
8865 size_t k
= 1, l
= 0;
8867 if (TREE_CODE (op
) == SSA_NAME
)
8869 vr
= get_value_range (op
);
8871 /* We can only handle integer ranges. */
8872 if ((vr
->type
!= VR_RANGE
8873 && vr
->type
!= VR_ANTI_RANGE
)
8874 || symbolic_range_p (vr
))
8877 /* Find case label for min/max of the value range. */
8878 take_default
= !find_case_label_ranges (stmt
, vr
, &i
, &j
, &k
, &l
);
8880 else if (TREE_CODE (op
) == INTEGER_CST
)
8882 take_default
= !find_case_label_index (stmt
, 1, op
, &i
);
8896 n
= gimple_switch_num_labels (stmt
);
8898 /* Bail out if this is just all edges taken. */
8904 /* Build a new vector of taken case labels. */
8905 vec2
= make_tree_vec (j
- i
+ 1 + l
- k
+ 1 + (int)take_default
);
8908 /* Add the default edge, if necessary. */
8910 TREE_VEC_ELT (vec2
, n2
++) = gimple_switch_default_label (stmt
);
8912 for (; i
<= j
; ++i
, ++n2
)
8913 TREE_VEC_ELT (vec2
, n2
) = gimple_switch_label (stmt
, i
);
8915 for (; k
<= l
; ++k
, ++n2
)
8916 TREE_VEC_ELT (vec2
, n2
) = gimple_switch_label (stmt
, k
);
8918 /* Mark needed edges. */
8919 for (i
= 0; i
< n2
; ++i
)
8921 e
= find_edge (gimple_bb (stmt
),
8922 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2
, i
))));
8923 e
->aux
= (void *)-1;
8926 /* Queue not needed edges for later removal. */
8927 FOR_EACH_EDGE (e
, ei
, gimple_bb (stmt
)->succs
)
8929 if (e
->aux
== (void *)-1)
8935 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
8937 fprintf (dump_file
, "removing unreachable case label\n");
8939 to_remove_edges
.safe_push (e
);
8940 e
->flags
&= ~EDGE_EXECUTABLE
;
8943 /* And queue an update for the stmt. */
8946 to_update_switch_stmts
.safe_push (su
);
8950 /* Simplify an integral conversion from an SSA name in STMT. */
8953 simplify_conversion_using_ranges (gimple stmt
)
8955 tree innerop
, middleop
, finaltype
;
8957 value_range_t
*innervr
;
8958 signop inner_sgn
, middle_sgn
, final_sgn
;
8959 unsigned inner_prec
, middle_prec
, final_prec
;
8960 max_wide_int innermin
, innermed
, innermax
, middlemin
, middlemed
, middlemax
;
8962 finaltype
= TREE_TYPE (gimple_assign_lhs (stmt
));
8963 if (!INTEGRAL_TYPE_P (finaltype
))
8965 middleop
= gimple_assign_rhs1 (stmt
);
8966 def_stmt
= SSA_NAME_DEF_STMT (middleop
);
8967 if (!is_gimple_assign (def_stmt
)
8968 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt
)))
8970 innerop
= gimple_assign_rhs1 (def_stmt
);
8971 if (TREE_CODE (innerop
) != SSA_NAME
8972 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop
))
8975 /* Get the value-range of the inner operand. */
8976 innervr
= get_value_range (innerop
);
8977 if (innervr
->type
!= VR_RANGE
8978 || TREE_CODE (innervr
->min
) != INTEGER_CST
8979 || TREE_CODE (innervr
->max
) != INTEGER_CST
)
8982 /* Simulate the conversion chain to check if the result is equal if
8983 the middle conversion is removed. */
8984 innermin
= innervr
->min
;
8985 innermax
= innervr
->max
;
8987 inner_prec
= TYPE_PRECISION (TREE_TYPE (innerop
));
8988 middle_prec
= TYPE_PRECISION (TREE_TYPE (middleop
));
8989 final_prec
= TYPE_PRECISION (finaltype
);
8991 /* If the first conversion is not injective, the second must not
8993 if (wi::gtu_p (innermax
- innermin
,
8994 wi::mask
<max_wide_int
> (middle_prec
, false))
8995 && middle_prec
< final_prec
)
8997 /* We also want a medium value so that we can track the effect that
8998 narrowing conversions with sign change have. */
8999 inner_sgn
= TYPE_SIGN (TREE_TYPE (innerop
));
9000 if (inner_sgn
== UNSIGNED
)
9001 innermed
= wi::shifted_mask
<max_wide_int
> (1, inner_prec
- 1, false);
9004 if (wi::cmp (innermin
, innermed
, inner_sgn
) >= 0
9005 || wi::cmp (innermed
, innermax
, inner_sgn
) >= 0)
9006 innermed
= innermin
;
9008 middle_sgn
= TYPE_SIGN (TREE_TYPE (middleop
));
9009 middlemin
= wi::ext (innermin
, middle_prec
, middle_sgn
);
9010 middlemed
= wi::ext (innermed
, middle_prec
, middle_sgn
);
9011 middlemax
= wi::ext (innermax
, middle_prec
, middle_sgn
);
9013 /* Require that the final conversion applied to both the original
9014 and the intermediate range produces the same result. */
9015 final_sgn
= TYPE_SIGN (finaltype
);
9016 if (wi::ext (middlemin
, final_prec
, final_sgn
)
9017 != wi::ext (innermin
, final_prec
, final_sgn
)
9018 || wi::ext (middlemed
, final_prec
, final_sgn
)
9019 != wi::ext (innermed
, final_prec
, final_sgn
)
9020 || wi::ext (middlemax
, final_prec
, final_sgn
)
9021 != wi::ext (innermax
, final_prec
, final_sgn
))
9024 gimple_assign_set_rhs1 (stmt
, innerop
);
9029 /* Simplify a conversion from integral SSA name to float in STMT. */
9032 simplify_float_conversion_using_ranges (gimple_stmt_iterator
*gsi
, gimple stmt
)
9034 tree rhs1
= gimple_assign_rhs1 (stmt
);
9035 value_range_t
*vr
= get_value_range (rhs1
);
9036 enum machine_mode fltmode
= TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt
)));
9037 enum machine_mode mode
;
9041 /* We can only handle constant ranges. */
9042 if (vr
->type
!= VR_RANGE
9043 || TREE_CODE (vr
->min
) != INTEGER_CST
9044 || TREE_CODE (vr
->max
) != INTEGER_CST
)
9047 /* First check if we can use a signed type in place of an unsigned. */
9048 if (TYPE_UNSIGNED (TREE_TYPE (rhs1
))
9049 && (can_float_p (fltmode
, TYPE_MODE (TREE_TYPE (rhs1
)), 0)
9050 != CODE_FOR_nothing
)
9051 && range_fits_type_p (vr
, TYPE_PRECISION (TREE_TYPE (rhs1
)), SIGNED
))
9052 mode
= TYPE_MODE (TREE_TYPE (rhs1
));
9053 /* If we can do the conversion in the current input mode do nothing. */
9054 else if (can_float_p (fltmode
, TYPE_MODE (TREE_TYPE (rhs1
)),
9055 TYPE_UNSIGNED (TREE_TYPE (rhs1
))) != CODE_FOR_nothing
)
9057 /* Otherwise search for a mode we can use, starting from the narrowest
9058 integer mode available. */
9061 mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
9064 /* If we cannot do a signed conversion to float from mode
9065 or if the value-range does not fit in the signed type
9066 try with a wider mode. */
9067 if (can_float_p (fltmode
, mode
, 0) != CODE_FOR_nothing
9068 && range_fits_type_p (vr
, GET_MODE_PRECISION (mode
), SIGNED
))
9071 mode
= GET_MODE_WIDER_MODE (mode
);
9072 /* But do not widen the input. Instead leave that to the
9073 optabs expansion code. */
9074 if (GET_MODE_PRECISION (mode
) > TYPE_PRECISION (TREE_TYPE (rhs1
)))
9077 while (mode
!= VOIDmode
);
9078 if (mode
== VOIDmode
)
9082 /* It works, insert a truncation or sign-change before the
9083 float conversion. */
9084 tem
= make_ssa_name (build_nonstandard_integer_type
9085 (GET_MODE_PRECISION (mode
), 0), NULL
);
9086 conv
= gimple_build_assign_with_ops (NOP_EXPR
, tem
, rhs1
, NULL_TREE
);
9087 gsi_insert_before (gsi
, conv
, GSI_SAME_STMT
);
9088 gimple_assign_set_rhs1 (stmt
, tem
);
9094 /* Simplify STMT using ranges if possible. */
9097 simplify_stmt_using_ranges (gimple_stmt_iterator
*gsi
)
9099 gimple stmt
= gsi_stmt (*gsi
);
9101 if (is_gimple_assign (stmt
))
9103 enum tree_code rhs_code
= gimple_assign_rhs_code (stmt
);
9104 tree rhs1
= gimple_assign_rhs1 (stmt
);
9110 /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
9111 if the RHS is zero or one, and the LHS are known to be boolean
9113 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1
)))
9114 return simplify_truth_ops_using_ranges (gsi
, stmt
);
9117 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
9118 and BIT_AND_EXPR respectively if the first operand is greater
9119 than zero and the second operand is an exact power of two. */
9120 case TRUNC_DIV_EXPR
:
9121 case TRUNC_MOD_EXPR
:
9122 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1
))
9123 && integer_pow2p (gimple_assign_rhs2 (stmt
)))
9124 return simplify_div_or_mod_using_ranges (stmt
);
9127 /* Transform ABS (X) into X or -X as appropriate. */
9129 if (TREE_CODE (rhs1
) == SSA_NAME
9130 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1
)))
9131 return simplify_abs_using_ranges (stmt
);
9136 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
9137 if all the bits being cleared are already cleared or
9138 all the bits being set are already set. */
9139 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1
)))
9140 return simplify_bit_ops_using_ranges (gsi
, stmt
);
9144 if (TREE_CODE (rhs1
) == SSA_NAME
9145 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1
)))
9146 return simplify_conversion_using_ranges (stmt
);
9150 if (TREE_CODE (rhs1
) == SSA_NAME
9151 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1
)))
9152 return simplify_float_conversion_using_ranges (gsi
, stmt
);
9159 else if (gimple_code (stmt
) == GIMPLE_COND
)
9160 return simplify_cond_using_ranges (stmt
);
9161 else if (gimple_code (stmt
) == GIMPLE_SWITCH
)
9162 return simplify_switch_using_ranges (stmt
);
9167 /* If the statement pointed by SI has a predicate whose value can be
9168 computed using the value range information computed by VRP, compute
9169 its value and return true. Otherwise, return false. */
9172 fold_predicate_in (gimple_stmt_iterator
*si
)
9174 bool assignment_p
= false;
9176 gimple stmt
= gsi_stmt (*si
);
9178 if (is_gimple_assign (stmt
)
9179 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt
)) == tcc_comparison
)
9181 assignment_p
= true;
9182 val
= vrp_evaluate_conditional (gimple_assign_rhs_code (stmt
),
9183 gimple_assign_rhs1 (stmt
),
9184 gimple_assign_rhs2 (stmt
),
9187 else if (gimple_code (stmt
) == GIMPLE_COND
)
9188 val
= vrp_evaluate_conditional (gimple_cond_code (stmt
),
9189 gimple_cond_lhs (stmt
),
9190 gimple_cond_rhs (stmt
),
9198 val
= fold_convert (gimple_expr_type (stmt
), val
);
9202 fprintf (dump_file
, "Folding predicate ");
9203 print_gimple_expr (dump_file
, stmt
, 0, 0);
9204 fprintf (dump_file
, " to ");
9205 print_generic_expr (dump_file
, val
, 0);
9206 fprintf (dump_file
, "\n");
9209 if (is_gimple_assign (stmt
))
9210 gimple_assign_set_rhs_from_tree (si
, val
);
9213 gcc_assert (gimple_code (stmt
) == GIMPLE_COND
);
9214 if (integer_zerop (val
))
9215 gimple_cond_make_false (stmt
);
9216 else if (integer_onep (val
))
9217 gimple_cond_make_true (stmt
);
9228 /* Callback for substitute_and_fold folding the stmt at *SI. */
9231 vrp_fold_stmt (gimple_stmt_iterator
*si
)
9233 if (fold_predicate_in (si
))
9236 return simplify_stmt_using_ranges (si
);
9239 /* Stack of dest,src equivalency pairs that need to be restored after
9240 each attempt to thread a block's incoming edge to an outgoing edge.
9242 A NULL entry is used to mark the end of pairs which need to be
9244 static vec
<tree
> equiv_stack
;
9246 /* A trivial wrapper so that we can present the generic jump threading
9247 code with a simple API for simplifying statements. STMT is the
9248 statement we want to simplify, WITHIN_STMT provides the location
9249 for any overflow warnings. */
9252 simplify_stmt_for_jump_threading (gimple stmt
, gimple within_stmt
)
9254 if (gimple_code (stmt
) == GIMPLE_COND
)
9255 return vrp_evaluate_conditional (gimple_cond_code (stmt
),
9256 gimple_cond_lhs (stmt
),
9257 gimple_cond_rhs (stmt
), within_stmt
);
9259 if (gimple_code (stmt
) == GIMPLE_ASSIGN
)
9261 value_range_t new_vr
= VR_INITIALIZER
;
9262 tree lhs
= gimple_assign_lhs (stmt
);
9264 if (TREE_CODE (lhs
) == SSA_NAME
9265 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs
))
9266 || POINTER_TYPE_P (TREE_TYPE (lhs
))))
9268 extract_range_from_assignment (&new_vr
, stmt
);
9269 if (range_int_cst_singleton_p (&new_vr
))
9277 /* Blocks which have more than one predecessor and more than
9278 one successor present jump threading opportunities, i.e.,
9279 when the block is reached from a specific predecessor, we
9280 may be able to determine which of the outgoing edges will
9281 be traversed. When this optimization applies, we are able
9282 to avoid conditionals at runtime and we may expose secondary
9283 optimization opportunities.
9285 This routine is effectively a driver for the generic jump
9286 threading code. It basically just presents the generic code
9287 with edges that may be suitable for jump threading.
9289 Unlike DOM, we do not iterate VRP if jump threading was successful.
9290 While iterating may expose new opportunities for VRP, it is expected
9291 those opportunities would be very limited and the compile time cost
9292 to expose those opportunities would be significant.
9294 As jump threading opportunities are discovered, they are registered
9295 for later realization. */
9298 identify_jump_threads (void)
9305 /* Ugh. When substituting values earlier in this pass we can
9306 wipe the dominance information. So rebuild the dominator
9307 information as we need it within the jump threading code. */
9308 calculate_dominance_info (CDI_DOMINATORS
);
9310 /* We do not allow VRP information to be used for jump threading
9311 across a back edge in the CFG. Otherwise it becomes too
9312 difficult to avoid eliminating loop exit tests. Of course
9313 EDGE_DFS_BACK is not accurate at this time so we have to
9315 mark_dfs_back_edges ();
9317 /* Do not thread across edges we are about to remove. Just marking
9318 them as EDGE_DFS_BACK will do. */
9319 FOR_EACH_VEC_ELT (to_remove_edges
, i
, e
)
9320 e
->flags
|= EDGE_DFS_BACK
;
9322 /* Allocate our unwinder stack to unwind any temporary equivalences
9323 that might be recorded. */
9324 equiv_stack
.create (20);
9326 /* To avoid lots of silly node creation, we create a single
9327 conditional and just modify it in-place when attempting to
9329 dummy
= gimple_build_cond (EQ_EXPR
,
9330 integer_zero_node
, integer_zero_node
,
9333 /* Walk through all the blocks finding those which present a
9334 potential jump threading opportunity. We could set this up
9335 as a dominator walker and record data during the walk, but
9336 I doubt it's worth the effort for the classes of jump
9337 threading opportunities we are trying to identify at this
9338 point in compilation. */
9343 /* If the generic jump threading code does not find this block
9344 interesting, then there is nothing to do. */
9345 if (! potentially_threadable_block (bb
))
9348 /* We only care about blocks ending in a COND_EXPR. While there
9349 may be some value in handling SWITCH_EXPR here, I doubt it's
9350 terribly important. */
9351 last
= gsi_stmt (gsi_last_bb (bb
));
9353 /* We're basically looking for a switch or any kind of conditional with
9354 integral or pointer type arguments. Note the type of the second
9355 argument will be the same as the first argument, so no need to
9356 check it explicitly. */
9357 if (gimple_code (last
) == GIMPLE_SWITCH
9358 || (gimple_code (last
) == GIMPLE_COND
9359 && TREE_CODE (gimple_cond_lhs (last
)) == SSA_NAME
9360 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last
)))
9361 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last
))))
9362 && (TREE_CODE (gimple_cond_rhs (last
)) == SSA_NAME
9363 || is_gimple_min_invariant (gimple_cond_rhs (last
)))))
9367 /* We've got a block with multiple predecessors and multiple
9368 successors which also ends in a suitable conditional or
9369 switch statement. For each predecessor, see if we can thread
9370 it to a specific successor. */
9371 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
9373 /* Do not thread across back edges or abnormal edges
9375 if (e
->flags
& (EDGE_DFS_BACK
| EDGE_COMPLEX
))
9378 thread_across_edge (dummy
, e
, true, &equiv_stack
,
9379 simplify_stmt_for_jump_threading
);
9384 /* We do not actually update the CFG or SSA graphs at this point as
9385 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
9386 handle ASSERT_EXPRs gracefully. */
9389 /* We identified all the jump threading opportunities earlier, but could
9390 not transform the CFG at that time. This routine transforms the
9391 CFG and arranges for the dominator tree to be rebuilt if necessary.
9393 Note the SSA graph update will occur during the normal TODO
9394 processing by the pass manager. */
9396 finalize_jump_threads (void)
9398 thread_through_all_blocks (false);
9399 equiv_stack
.release ();
9403 /* Traverse all the blocks folding conditionals with known ranges. */
9410 values_propagated
= true;
9414 fprintf (dump_file
, "\nValue ranges after VRP:\n\n");
9415 dump_all_value_ranges (dump_file
);
9416 fprintf (dump_file
, "\n");
9419 substitute_and_fold (op_with_constant_singleton_value_range
,
9420 vrp_fold_stmt
, false);
9422 if (warn_array_bounds
)
9423 check_all_array_refs ();
9425 /* We must identify jump threading opportunities before we release
9426 the datastructures built by VRP. */
9427 identify_jump_threads ();
9429 /* Free allocated memory. */
9430 for (i
= 0; i
< num_vr_values
; i
++)
9433 BITMAP_FREE (vr_value
[i
]->equiv
);
9438 free (vr_phi_edge_counts
);
9440 /* So that we can distinguish between VRP data being available
9441 and not available. */
9443 vr_phi_edge_counts
= NULL
;
9447 /* Main entry point to VRP (Value Range Propagation). This pass is
9448 loosely based on J. R. C. Patterson, ``Accurate Static Branch
9449 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
9450 Programming Language Design and Implementation, pp. 67-78, 1995.
9451 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
9453 This is essentially an SSA-CCP pass modified to deal with ranges
9454 instead of constants.
9456 While propagating ranges, we may find that two or more SSA name
9457 have equivalent, though distinct ranges. For instance,
9460 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
9462 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
9466 In the code above, pointer p_5 has range [q_2, q_2], but from the
9467 code we can also determine that p_5 cannot be NULL and, if q_2 had
9468 a non-varying range, p_5's range should also be compatible with it.
9470 These equivalences are created by two expressions: ASSERT_EXPR and
9471 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
9472 result of another assertion, then we can use the fact that p_5 and
9473 p_4 are equivalent when evaluating p_5's range.
9475 Together with value ranges, we also propagate these equivalences
9476 between names so that we can take advantage of information from
9477 multiple ranges when doing final replacement. Note that this
9478 equivalency relation is transitive but not symmetric.
9480 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
9481 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
9482 in contexts where that assertion does not hold (e.g., in line 6).
9484 TODO, the main difference between this pass and Patterson's is that
9485 we do not propagate edge probabilities. We only compute whether
9486 edges can be taken or not. That is, instead of having a spectrum
9487 of jump probabilities between 0 and 1, we only deal with 0, 1 and
9488 DON'T KNOW. In the future, it may be worthwhile to propagate
9489 probabilities to aid branch prediction. */
9498 loop_optimizer_init (LOOPS_NORMAL
| LOOPS_HAVE_RECORDED_EXITS
);
9499 rewrite_into_loop_closed_ssa (NULL
, TODO_update_ssa
);
9502 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
9503 Inserting assertions may split edges which will invalidate
9505 insert_range_assertions ();
9507 to_remove_edges
.create (10);
9508 to_update_switch_stmts
.create (5);
9509 threadedge_initialize_values ();
9511 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
9512 mark_dfs_back_edges ();
9515 ssa_propagate (vrp_visit_stmt
, vrp_visit_phi_node
);
9518 free_numbers_of_iterations_estimates ();
9520 /* ASSERT_EXPRs must be removed before finalizing jump threads
9521 as finalizing jump threads calls the CFG cleanup code which
9522 does not properly handle ASSERT_EXPRs. */
9523 remove_range_assertions ();
9525 /* If we exposed any new variables, go ahead and put them into
9526 SSA form now, before we handle jump threading. This simplifies
9527 interactions between rewriting of _DECL nodes into SSA form
9528 and rewriting SSA_NAME nodes into SSA form after block
9529 duplication and CFG manipulation. */
9530 update_ssa (TODO_update_ssa
);
9532 finalize_jump_threads ();
9534 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
9535 CFG in a broken state and requires a cfg_cleanup run. */
9536 FOR_EACH_VEC_ELT (to_remove_edges
, i
, e
)
9538 /* Update SWITCH_EXPR case label vector. */
9539 FOR_EACH_VEC_ELT (to_update_switch_stmts
, i
, su
)
9542 size_t n
= TREE_VEC_LENGTH (su
->vec
);
9544 gimple_switch_set_num_labels (su
->stmt
, n
);
9545 for (j
= 0; j
< n
; j
++)
9546 gimple_switch_set_label (su
->stmt
, j
, TREE_VEC_ELT (su
->vec
, j
));
9547 /* As we may have replaced the default label with a regular one
9548 make sure to make it a real default label again. This ensures
9549 optimal expansion. */
9550 label
= gimple_switch_label (su
->stmt
, 0);
9551 CASE_LOW (label
) = NULL_TREE
;
9552 CASE_HIGH (label
) = NULL_TREE
;
9555 if (to_remove_edges
.length () > 0)
9557 free_dominance_info (CDI_DOMINATORS
);
9559 loops_state_set (LOOPS_NEED_FIXUP
);
9562 to_remove_edges
.release ();
9563 to_update_switch_stmts
.release ();
9564 threadedge_finalize_values ();
9567 loop_optimizer_finalize ();
9574 return flag_tree_vrp
!= 0;
9579 const pass_data pass_data_vrp
=
9581 GIMPLE_PASS
, /* type */
9583 OPTGROUP_NONE
, /* optinfo_flags */
9584 true, /* has_gate */
9585 true, /* has_execute */
9586 TV_TREE_VRP
, /* tv_id */
9587 PROP_ssa
, /* properties_required */
9588 0, /* properties_provided */
9589 0, /* properties_destroyed */
9590 0, /* todo_flags_start */
9591 ( TODO_cleanup_cfg
| TODO_update_ssa
9593 | TODO_verify_flow
), /* todo_flags_finish */
9596 class pass_vrp
: public gimple_opt_pass
9599 pass_vrp(gcc::context
*ctxt
)
9600 : gimple_opt_pass(pass_data_vrp
, ctxt
)
9603 /* opt_pass methods: */
9604 opt_pass
* clone () { return new pass_vrp (ctxt_
); }
9605 bool gate () { return gate_vrp (); }
9606 unsigned int execute () { return execute_vrp (); }
9608 }; // class pass_vrp
9613 make_pass_vrp (gcc::context
*ctxt
)
9615 return new pass_vrp (ctxt
);