]>
Commit | Line | Data |
---|---|---|
88dbf20f | 1 | /* Support routines for Value Range Propagation (VRP). |
98f00c5b | 2 | Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 |
7cf0dbf3 | 3 | Free Software Foundation, Inc. |
88dbf20f | 4 | Contributed by Diego Novillo <dnovillo@redhat.com>. |
5 | ||
6 | This file is part of GCC. | |
7 | ||
8 | GCC is free software; you can redistribute it and/or modify | |
9 | it under the terms of the GNU General Public License as published by | |
8c4c00c1 | 10 | the Free Software Foundation; either version 3, or (at your option) |
88dbf20f | 11 | any later version. |
12 | ||
13 | GCC is distributed in the hope that it will be useful, | |
14 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | GNU General Public License for more details. | |
17 | ||
18 | You should have received a copy of the GNU General Public License | |
8c4c00c1 | 19 | along with GCC; see the file COPYING3. If not see |
20 | <http://www.gnu.org/licenses/>. */ | |
88dbf20f | 21 | |
22 | #include "config.h" | |
23 | #include "system.h" | |
24 | #include "coretypes.h" | |
25 | #include "tm.h" | |
26 | #include "ggc.h" | |
27 | #include "flags.h" | |
28 | #include "tree.h" | |
29 | #include "basic-block.h" | |
30 | #include "tree-flow.h" | |
31 | #include "tree-pass.h" | |
32 | #include "tree-dump.h" | |
ce084dfc | 33 | #include "gimple-pretty-print.h" |
0b205f4c | 34 | #include "diagnostic-core.h" |
a2a1fde2 | 35 | #include "intl.h" |
88dbf20f | 36 | #include "cfgloop.h" |
37 | #include "tree-scalar-evolution.h" | |
38 | #include "tree-ssa-propagate.h" | |
39 | #include "tree-chrec.h" | |
1d0b727d | 40 | #include "gimple-fold.h" |
f0938d2c | 41 | #include "expr.h" |
42 | #include "optabs.h" | |
88dbf20f | 43 | |
75a70cf9 | 44 | |
/* Type of value ranges.  See value_range_d for a description of these
   types.  VR_UNDEFINED means no information yet; VR_VARYING means the
   name may take any value of its type; VR_RANGE/VR_ANTI_RANGE carry
   explicit [MIN, MAX] bounds (included or excluded, respectively).  */
enum value_range_type { VR_UNDEFINED, VR_RANGE, VR_ANTI_RANGE, VR_VARYING };
48 | ||
/* Range of values that can be associated with an SSA_NAME after VRP
   has executed.  */
struct value_range_d
{
  /* Lattice value represented by this range.  */
  enum value_range_type type;

  /* Minimum and maximum values represented by this range.  These
     values should be interpreted as follows:

	- If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
	  be NULL.

	- If TYPE == VR_RANGE then MIN holds the minimum value and
	  MAX holds the maximum value of the range [MIN, MAX].

	- If TYPE == ANTI_RANGE the variable is known to NOT
	  take any values in the range [MIN, MAX].  */
  tree min;
  tree max;

  /* Set of SSA names whose value ranges are equivalent to this one.
     This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE.
     May be NULL; allocation is deferred until an equivalence is
     actually recorded (see add_equivalence).  */
  bitmap equiv;
};

typedef struct value_range_d value_range_t;
76 | ||
748eb1f9 | 77 | #define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL } |
78 | ||
17ed8337 | 79 | /* Set of SSA names found live during the RPO traversal of the function |
80 | for still active basic-blocks. */ | |
81 | static sbitmap *live; | |
82 | ||
83 | /* Return true if the SSA name NAME is live on the edge E. */ | |
84 | ||
85 | static bool | |
86 | live_on_edge (edge e, tree name) | |
87 | { | |
88 | return (live[e->dest->index] | |
89 | && TEST_BIT (live[e->dest->index], SSA_NAME_VERSION (name))); | |
90 | } | |
88dbf20f | 91 | |
/* Local functions.  Forward declarations for routines defined later
   in this file that are needed before their definitions.  */
static int compare_values (tree val1, tree val2);
static int compare_values_warnv (tree val1, tree val2, bool *);
static void vrp_meet (value_range_t *, value_range_t *);
static void vrp_intersect_ranges (value_range_t *, value_range_t *);
static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
						     tree, tree, bool, bool *,
						     bool *);
88dbf20f | 100 | |
/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */
struct assert_locus_d
{
  /* Basic block where the assertion would be inserted.  */
  basic_block bb;

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
  edge e;

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */
  tree val;

  /* Expression to compare.  */
  tree expr;

  /* Next node in the linked list.  */
  struct assert_locus_d *next;
};

typedef struct assert_locus_d *assert_locus_t;
132 | ||
/* If bit I is present, it means that SSA name N_i has a list of
   assertions that should be inserted in the IL.  */
static bitmap need_assert_for;

/* Array of locations lists where to insert assertions.  ASSERTS_FOR[I]
   holds a list of ASSERT_LOCUS_T nodes that describe where
   ASSERT_EXPRs for SSA name N_I should be inserted.  */
static assert_locus_t *asserts_for;

/* Value range array.  After propagation, VR_VALUE[I] holds the range
   of values that SSA name N_I may take.  */
static unsigned num_vr_values;
static value_range_t **vr_value;
/* Set once propagation has finished; after that, get_value_range must
   not allocate new ranges.  */
static bool values_propagated;

/* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
   number of executable edges we saw the last time we visited the
   node.  */
static int *vr_phi_edge_counts;

/* A pending rewrite of a GIMPLE_SWITCH: STMT is the switch and VEC the
   new case-label vector to install for it.  */
typedef struct {
  gimple stmt;
  tree vec;
} switch_update;

/* Edges found dead and switch statements to be rewritten; both are
   processed after propagation completes.  */
static VEC (edge, heap) *to_remove_edges;
DEF_VEC_O(switch_update);
DEF_VEC_ALLOC_O(switch_update, heap);
static VEC (switch_update, heap) *to_update_switch_stmts;
162 | ||
88dbf20f | 163 | |
a9538d68 | 164 | /* Return the maximum value for TYPE. */ |
22cdb855 | 165 | |
166 | static inline tree | |
167 | vrp_val_max (const_tree type) | |
168 | { | |
169 | if (!INTEGRAL_TYPE_P (type)) | |
170 | return NULL_TREE; | |
171 | ||
22cdb855 | 172 | return TYPE_MAX_VALUE (type); |
173 | } | |
174 | ||
a9538d68 | 175 | /* Return the minimum value for TYPE. */ |
22cdb855 | 176 | |
177 | static inline tree | |
178 | vrp_val_min (const_tree type) | |
179 | { | |
180 | if (!INTEGRAL_TYPE_P (type)) | |
181 | return NULL_TREE; | |
182 | ||
22cdb855 | 183 | return TYPE_MIN_VALUE (type); |
184 | } | |
185 | ||
186 | /* Return whether VAL is equal to the maximum value of its type. This | |
187 | will be true for a positive overflow infinity. We can't do a | |
188 | simple equality comparison with TYPE_MAX_VALUE because C typedefs | |
189 | and Ada subtypes can produce types whose TYPE_MAX_VALUE is not == | |
190 | to the integer constant with the same value in the type. */ | |
191 | ||
192 | static inline bool | |
193 | vrp_val_is_max (const_tree val) | |
194 | { | |
195 | tree type_max = vrp_val_max (TREE_TYPE (val)); | |
196 | return (val == type_max | |
197 | || (type_max != NULL_TREE | |
198 | && operand_equal_p (val, type_max, 0))); | |
199 | } | |
200 | ||
201 | /* Return whether VAL is equal to the minimum value of its type. This | |
202 | will be true for a negative overflow infinity. */ | |
203 | ||
204 | static inline bool | |
205 | vrp_val_is_min (const_tree val) | |
206 | { | |
207 | tree type_min = vrp_val_min (TREE_TYPE (val)); | |
208 | return (val == type_min | |
209 | || (type_min != NULL_TREE | |
210 | && operand_equal_p (val, type_min, 0))); | |
211 | } | |
212 | ||
213 | ||
c3783c3b | 214 | /* Return whether TYPE should use an overflow infinity distinct from |
215 | TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to | |
216 | represent a signed overflow during VRP computations. An infinity | |
217 | is distinct from a half-range, which will go from some number to | |
218 | TYPE_{MIN,MAX}_VALUE. */ | |
219 | ||
220 | static inline bool | |
9f627b1a | 221 | needs_overflow_infinity (const_tree type) |
c3783c3b | 222 | { |
a9538d68 | 223 | return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type); |
c3783c3b | 224 | } |
225 | ||
226 | /* Return whether TYPE can support our overflow infinity | |
227 | representation: we use the TREE_OVERFLOW flag, which only exists | |
228 | for constants. If TYPE doesn't support this, we don't optimize | |
229 | cases which would require signed overflow--we drop them to | |
230 | VARYING. */ | |
231 | ||
232 | static inline bool | |
9f627b1a | 233 | supports_overflow_infinity (const_tree type) |
c3783c3b | 234 | { |
22cdb855 | 235 | tree min = vrp_val_min (type), max = vrp_val_max (type); |
c3783c3b | 236 | #ifdef ENABLE_CHECKING |
237 | gcc_assert (needs_overflow_infinity (type)); | |
238 | #endif | |
22cdb855 | 239 | return (min != NULL_TREE |
240 | && CONSTANT_CLASS_P (min) | |
241 | && max != NULL_TREE | |
242 | && CONSTANT_CLASS_P (max)); | |
c3783c3b | 243 | } |
244 | ||
245 | /* VAL is the maximum or minimum value of a type. Return a | |
246 | corresponding overflow infinity. */ | |
247 | ||
248 | static inline tree | |
249 | make_overflow_infinity (tree val) | |
250 | { | |
1b4345f7 | 251 | gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val)); |
c3783c3b | 252 | val = copy_node (val); |
253 | TREE_OVERFLOW (val) = 1; | |
254 | return val; | |
255 | } | |
256 | ||
257 | /* Return a negative overflow infinity for TYPE. */ | |
258 | ||
259 | static inline tree | |
260 | negative_overflow_infinity (tree type) | |
261 | { | |
1b4345f7 | 262 | gcc_checking_assert (supports_overflow_infinity (type)); |
22cdb855 | 263 | return make_overflow_infinity (vrp_val_min (type)); |
c3783c3b | 264 | } |
265 | ||
266 | /* Return a positive overflow infinity for TYPE. */ | |
267 | ||
268 | static inline tree | |
269 | positive_overflow_infinity (tree type) | |
270 | { | |
1b4345f7 | 271 | gcc_checking_assert (supports_overflow_infinity (type)); |
22cdb855 | 272 | return make_overflow_infinity (vrp_val_max (type)); |
c3783c3b | 273 | } |
274 | ||
275 | /* Return whether VAL is a negative overflow infinity. */ | |
276 | ||
277 | static inline bool | |
9f627b1a | 278 | is_negative_overflow_infinity (const_tree val) |
c3783c3b | 279 | { |
280 | return (needs_overflow_infinity (TREE_TYPE (val)) | |
281 | && CONSTANT_CLASS_P (val) | |
282 | && TREE_OVERFLOW (val) | |
22cdb855 | 283 | && vrp_val_is_min (val)); |
c3783c3b | 284 | } |
285 | ||
286 | /* Return whether VAL is a positive overflow infinity. */ | |
287 | ||
288 | static inline bool | |
9f627b1a | 289 | is_positive_overflow_infinity (const_tree val) |
c3783c3b | 290 | { |
291 | return (needs_overflow_infinity (TREE_TYPE (val)) | |
292 | && CONSTANT_CLASS_P (val) | |
293 | && TREE_OVERFLOW (val) | |
22cdb855 | 294 | && vrp_val_is_max (val)); |
c3783c3b | 295 | } |
296 | ||
297 | /* Return whether VAL is a positive or negative overflow infinity. */ | |
298 | ||
299 | static inline bool | |
9f627b1a | 300 | is_overflow_infinity (const_tree val) |
c3783c3b | 301 | { |
302 | return (needs_overflow_infinity (TREE_TYPE (val)) | |
303 | && CONSTANT_CLASS_P (val) | |
304 | && TREE_OVERFLOW (val) | |
22cdb855 | 305 | && (vrp_val_is_min (val) || vrp_val_is_max (val))); |
c3783c3b | 306 | } |
307 | ||
75a70cf9 | 308 | /* Return whether STMT has a constant rhs that is_overflow_infinity. */ |
309 | ||
310 | static inline bool | |
311 | stmt_overflow_infinity (gimple stmt) | |
312 | { | |
313 | if (is_gimple_assign (stmt) | |
314 | && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) == | |
315 | GIMPLE_SINGLE_RHS) | |
316 | return is_overflow_infinity (gimple_assign_rhs1 (stmt)); | |
317 | return false; | |
318 | } | |
319 | ||
afc1ad6d | 320 | /* If VAL is now an overflow infinity, return VAL. Otherwise, return |
321 | the same value with TREE_OVERFLOW clear. This can be used to avoid | |
322 | confusing a regular value with an overflow value. */ | |
323 | ||
324 | static inline tree | |
325 | avoid_overflow_infinity (tree val) | |
326 | { | |
327 | if (!is_overflow_infinity (val)) | |
328 | return val; | |
329 | ||
22cdb855 | 330 | if (vrp_val_is_max (val)) |
331 | return vrp_val_max (TREE_TYPE (val)); | |
afc1ad6d | 332 | else |
333 | { | |
1b4345f7 | 334 | gcc_checking_assert (vrp_val_is_min (val)); |
22cdb855 | 335 | return vrp_val_min (TREE_TYPE (val)); |
afc1ad6d | 336 | } |
337 | } | |
338 | ||
c3783c3b | 339 | |
/* Return true if ARG is marked with the nonnull attribute in the
   current function signature.  ARG must be a pointer-typed PARM_DECL
   of the current function.  */

static bool
nonnull_arg_p (const_tree arg)
{
  tree t, attrs, fntype;
  unsigned HOST_WIDE_INT arg_num;

  gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));

  /* The static chain decl is always non null.  */
  if (arg == cfun->static_chain_decl)
    return true;

  fntype = TREE_TYPE (current_function_decl);
  /* Note: ATTRS is reassigned inside the loop body, so each iteration
     resumes scanning from just past the previous "nonnull" match.
     This handles multiple "nonnull" attributes on one declaration.  */
  for (attrs = TYPE_ATTRIBUTES (fntype); attrs; attrs = TREE_CHAIN (attrs))
    {
      attrs = lookup_attribute ("nonnull", attrs);

      /* If "nonnull" wasn't specified, we know nothing about the argument.  */
      if (attrs == NULL_TREE)
	return false;

      /* If "nonnull" applies to all the arguments, then ARG is non-null.  */
      if (TREE_VALUE (attrs) == NULL_TREE)
	return true;

      /* Get the position number for ARG in the function signature.
	 Attribute argument positions are 1-based.  */
      for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
	   t;
	   t = DECL_CHAIN (t), arg_num++)
	{
	  if (t == arg)
	    break;
	}

      gcc_assert (t == arg);

      /* Now see if ARG_NUM is mentioned in the nonnull list.  */
      for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
	{
	  if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
	    return true;
	}
    }

  return false;
}
389 | ||
390 | ||
bc8c1f83 | 391 | /* Set value range VR to VR_UNDEFINED. */ |
392 | ||
393 | static inline void | |
394 | set_value_range_to_undefined (value_range_t *vr) | |
395 | { | |
396 | vr->type = VR_UNDEFINED; | |
397 | vr->min = vr->max = NULL_TREE; | |
398 | if (vr->equiv) | |
399 | bitmap_clear (vr->equiv); | |
400 | } | |
401 | ||
402 | ||
bed8bec4 | 403 | /* Set value range VR to VR_VARYING. */ |
404 | ||
405 | static inline void | |
406 | set_value_range_to_varying (value_range_t *vr) | |
407 | { | |
408 | vr->type = VR_VARYING; | |
409 | vr->min = vr->max = NULL_TREE; | |
410 | if (vr->equiv) | |
411 | bitmap_clear (vr->equiv); | |
412 | } | |
413 | ||
414 | ||
/* Set value range VR to {T, MIN, MAX, EQUIV}.  EQUIV may alias
   VR->equiv, in which case it is left untouched; otherwise its
   contents are deep-copied into VR->equiv.  */

static void
set_value_range (value_range_t *vr, enum value_range_type t, tree min,
		 tree max, bitmap equiv)
{
#if defined ENABLE_CHECKING
  /* Check the validity of the range.  */
  if (t == VR_RANGE || t == VR_ANTI_RANGE)
    {
      int cmp;

      gcc_assert (min && max);

      /* An integral anti-range must not cover the whole type; that
	 would be the empty set, which we cannot represent.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
	gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));

      /* MIN must not compare greater than MAX (-2 means the values
	 are not comparable, e.g. symbolic bounds).  */
      cmp = compare_values (min, max);
      gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);

      /* At most one bound may be an overflow infinity.  */
      if (needs_overflow_infinity (TREE_TYPE (min)))
	gcc_assert (!is_overflow_infinity (min)
		    || !is_overflow_infinity (max));
    }

  if (t == VR_UNDEFINED || t == VR_VARYING)
    gcc_assert (min == NULL_TREE && max == NULL_TREE);

  if (t == VR_UNDEFINED || t == VR_VARYING)
    gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
#endif

  vr->type = t;
  vr->min = min;
  vr->max = max;

  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.  */
  if (vr->equiv == NULL
      && equiv != NULL)
    vr->equiv = BITMAP_ALLOC (NULL);

  if (equiv != vr->equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
	bitmap_copy (vr->equiv, equiv);
      else
	bitmap_clear (vr->equiv);
    }
}
465 | ||
466 | ||
/* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
   This means adjusting T, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */

static void
set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
				  tree min, tree max, bitmap equiv)
{
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (t == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  else if (t == VR_VARYING)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      set_value_range (vr, t, min, max, equiv);
      return;
    }

  /* Wrong order for min and max, to swap them and the VR type we need
     to adjust them.  [MIN, MAX] with MAX < MIN becomes the complement
     ~[MAX+1, MIN-1], and vice versa.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one = build_int_cst (TREE_TYPE (min), 1);
      tree tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
	 that again.  But this represents an empty value range, so drop
	 to varying in this case.  */
      if (tree_int_cst_lt (max, min))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (t == VR_ANTI_RANGE)
    {
      bool is_min = vrp_val_is_min (min);
      bool is_max = vrp_val_is_max (max);

      if (is_min && is_max)
	{
	  /* We cannot deal with empty ranges, drop to varying.
	     ??? This could be VR_UNDEFINED instead.  */
	  set_value_range_to_varying (vr);
	  return;
	}
      else if (is_min
	       /* As a special exception preserve non-null ranges.  */
	       && !(TYPE_UNSIGNED (TREE_TYPE (min))
		    && integer_zerop (max)))
	{
	  /* ~[TYPE_MIN, MAX] is the same as [MAX+1, TYPE_MAX].  */
	  tree one = build_int_cst (TREE_TYPE (max), 1);
	  min = int_const_binop (PLUS_EXPR, max, one);
	  max = vrp_val_max (TREE_TYPE (max));
	  t = VR_RANGE;
	}
      else if (is_max)
	{
	  /* ~[MIN, TYPE_MAX] is the same as [TYPE_MIN, MIN-1].  */
	  tree one = build_int_cst (TREE_TYPE (min), 1);
	  max = int_const_binop (MINUS_EXPR, min, one);
	  min = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
	}
    }

  /* Drop [-INF(OVF), +INF(OVF)] to varying.  */
  if (needs_overflow_infinity (TREE_TYPE (min))
      && is_overflow_infinity (min)
      && is_overflow_infinity (max))
    {
      set_value_range_to_varying (vr);
      return;
    }

  set_value_range (vr, t, min, max, equiv);
}
564 | ||
565 | /* Copy value range FROM into value range TO. */ | |
8dbf774a | 566 | |
567 | static inline void | |
bed8bec4 | 568 | copy_value_range (value_range_t *to, value_range_t *from) |
8dbf774a | 569 | { |
bed8bec4 | 570 | set_value_range (to, from->type, from->min, from->max, from->equiv); |
c3783c3b | 571 | } |
572 | ||
b700987e | 573 | /* Set value range VR to a single value. This function is only called |
574 | with values we get from statements, and exists to clear the | |
575 | TREE_OVERFLOW flag so that we don't think we have an overflow | |
576 | infinity when we shouldn't. */ | |
577 | ||
578 | static inline void | |
4baf1a77 | 579 | set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv) |
b700987e | 580 | { |
581 | gcc_assert (is_gimple_min_invariant (val)); | |
afc1ad6d | 582 | val = avoid_overflow_infinity (val); |
4baf1a77 | 583 | set_value_range (vr, VR_RANGE, val, val, equiv); |
b700987e | 584 | } |
585 | ||
c3783c3b | 586 | /* Set value range VR to a non-negative range of type TYPE. |
80777cd8 | 587 | OVERFLOW_INFINITY indicates whether to use an overflow infinity |
c3783c3b | 588 | rather than TYPE_MAX_VALUE; this should be true if we determine |
589 | that the range is nonnegative based on the assumption that signed | |
590 | overflow does not occur. */ | |
591 | ||
592 | static inline void | |
593 | set_value_range_to_nonnegative (value_range_t *vr, tree type, | |
594 | bool overflow_infinity) | |
595 | { | |
596 | tree zero; | |
597 | ||
598 | if (overflow_infinity && !supports_overflow_infinity (type)) | |
599 | { | |
600 | set_value_range_to_varying (vr); | |
601 | return; | |
602 | } | |
603 | ||
604 | zero = build_int_cst (type, 0); | |
605 | set_value_range (vr, VR_RANGE, zero, | |
606 | (overflow_infinity | |
607 | ? positive_overflow_infinity (type) | |
608 | : TYPE_MAX_VALUE (type)), | |
609 | vr->equiv); | |
8dbf774a | 610 | } |
eea12c72 | 611 | |
612 | /* Set value range VR to a non-NULL range of type TYPE. */ | |
613 | ||
614 | static inline void | |
615 | set_value_range_to_nonnull (value_range_t *vr, tree type) | |
616 | { | |
617 | tree zero = build_int_cst (type, 0); | |
618 | set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv); | |
619 | } | |
620 | ||
621 | ||
622 | /* Set value range VR to a NULL range of type TYPE. */ | |
623 | ||
624 | static inline void | |
625 | set_value_range_to_null (value_range_t *vr, tree type) | |
626 | { | |
4baf1a77 | 627 | set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv); |
eea12c72 | 628 | } |
629 | ||
630 | ||
b9b64cb7 | 631 | /* Set value range VR to a range of a truthvalue of type TYPE. */ |
632 | ||
633 | static inline void | |
634 | set_value_range_to_truthvalue (value_range_t *vr, tree type) | |
635 | { | |
636 | if (TYPE_PRECISION (type) == 1) | |
637 | set_value_range_to_varying (vr); | |
638 | else | |
639 | set_value_range (vr, VR_RANGE, | |
640 | build_int_cst (type, 0), build_int_cst (type, 1), | |
641 | vr->equiv); | |
642 | } | |
643 | ||
644 | ||
/* If abs (min) < abs (max), set VR to [-max, max], if
   abs (min) >= abs (max), set VR to [-min, min].  MIN and MAX must be
   INTEGER_CSTs of a signed integral type.  */

static void
abs_extent_range (value_range_t *vr, tree min, tree max)
{
  int cmp;

  gcc_assert (TREE_CODE (min) == INTEGER_CST);
  gcc_assert (TREE_CODE (max) == INTEGER_CST);
  gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
  gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
  min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
  max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
  /* ABS of the most negative value overflows; we cannot produce a
     meaningful symmetric range then.  */
  if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
    {
      set_value_range_to_varying (vr);
      return;
    }
  cmp = compare_values (min, max);
  if (cmp == -1)
    /* abs(min) < abs(max): extent is [-max, max].  */
    min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
  else if (cmp == 0 || cmp == 1)
    {
      /* abs(min) >= abs(max): extent is [-min, min].  */
      max = min;
      min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
    }
  else
    {
      /* Values were not comparable.  */
      set_value_range_to_varying (vr);
      return;
    }
  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
}
679 | ||
680 | ||
/* Return value range information for VAR.

   If we have no values ranges recorded (ie, VRP is not running), then
   return NULL.  Otherwise create an empty range if none existed for VAR.  */

static value_range_t *
get_value_range (const_tree var)
{
  /* Shared read-only VARYING range, returned when no per-name range
     may be created (new names, or after propagation finished).  */
  static const struct value_range_d vr_const_varying
    = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
  value_range_t *vr;
  tree sym;
  unsigned ver = SSA_NAME_VERSION (var);

  /* If we have no recorded ranges, then return NULL.  */
  if (! vr_value)
    return NULL;

  /* If we query the range for a new SSA name return an unmodifiable VARYING.
     We should get here at most from the substitute-and-fold stage which
     will never try to change values.  */
  if (ver >= num_vr_values)
    return CONST_CAST (value_range_t *, &vr_const_varying);

  vr = vr_value[ver];
  if (vr)
    return vr;

  /* After propagation finished do not allocate new value-ranges.  */
  if (values_propagated)
    return CONST_CAST (value_range_t *, &vr_const_varying);

  /* Create a default value range.  XCNEW zeroes the structure, so the
     lattice value starts at VR_UNDEFINED.  */
  vr_value[ver] = vr = XCNEW (value_range_t);

  /* Defer allocating the equivalence set.  */
  vr->equiv = NULL;

  /* If VAR is a default definition of a parameter, the variable can
     take any value in VAR's type.  */
  if (SSA_NAME_IS_DEFAULT_DEF (var))
    {
      sym = SSA_NAME_VAR (var);
      if (TREE_CODE (sym) == PARM_DECL)
	{
	  /* Try to use the "nonnull" attribute to create ~[0, 0]
	     anti-ranges for pointers.  Note that this is only valid with
	     default definitions of PARM_DECLs.  */
	  if (POINTER_TYPE_P (TREE_TYPE (sym))
	      && nonnull_arg_p (sym))
	    set_value_range_to_nonnull (vr, TREE_TYPE (sym));
	  else
	    set_value_range_to_varying (vr);
	}
      else if (TREE_CODE (sym) == RESULT_DECL
	       && DECL_BY_REFERENCE (sym))
	/* A by-reference result always points to the return slot.  */
	set_value_range_to_nonnull (vr, TREE_TYPE (sym));
    }

  return vr;
}
742 | ||
238ad80e | 743 | /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */ |
744 | ||
745 | static inline bool | |
9f627b1a | 746 | vrp_operand_equal_p (const_tree val1, const_tree val2) |
238ad80e | 747 | { |
c3783c3b | 748 | if (val1 == val2) |
749 | return true; | |
750 | if (!val1 || !val2 || !operand_equal_p (val1, val2, 0)) | |
751 | return false; | |
752 | if (is_overflow_infinity (val1)) | |
753 | return is_overflow_infinity (val2); | |
754 | return true; | |
238ad80e | 755 | } |
756 | ||
757 | /* Return true, if the bitmaps B1 and B2 are equal. */ | |
758 | ||
759 | static inline bool | |
1f1872fd | 760 | vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2) |
238ad80e | 761 | { |
762 | return (b1 == b2 | |
b1296be0 | 763 | || ((!b1 || bitmap_empty_p (b1)) |
764 | && (!b2 || bitmap_empty_p (b2))) | |
238ad80e | 765 | || (b1 && b2 |
766 | && bitmap_equal_p (b1, b2))); | |
767 | } | |
88dbf20f | 768 | |
/* Update the value range and equivalence set for variable VAR to
   NEW_VR.  Return true if NEW_VR is different from VAR's previous
   value.

   NOTE: This function assumes that NEW_VR is a temporary value range
   object created for the sole purpose of updating VAR's range.  The
   storage used by the equivalence set from NEW_VR will be freed by
   this function.  Do not call update_value_range when NEW_VR
   is the range object associated with another SSA name.  */

static inline bool
update_value_range (const_tree var, value_range_t *new_vr)
{
  value_range_t *old_vr;
  bool is_new;

  /* Update the value range, if necessary.  */
  old_vr = get_value_range (var);
  is_new = old_vr->type != new_vr->type
	   || !vrp_operand_equal_p (old_vr->min, new_vr->min)
	   || !vrp_operand_equal_p (old_vr->max, new_vr->max)
	   || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);

  if (is_new)
    set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
		     new_vr->equiv);

  /* NEW_VR is a temporary; release its equivalence set (see the NOTE
     in the function comment).  */
  BITMAP_FREE (new_vr->equiv);

  return is_new;
}
88dbf20f | 800 | |
88dbf20f | 801 | |
fbcece5e | 802 | /* Add VAR and VAR's equivalence set to EQUIV. This is the central |
803 | point where equivalence processing can be turned on/off. */ | |
88dbf20f | 804 | |
eea12c72 | 805 | static void |
9f627b1a | 806 | add_equivalence (bitmap *equiv, const_tree var) |
eea12c72 | 807 | { |
808 | unsigned ver = SSA_NAME_VERSION (var); | |
809 | value_range_t *vr = vr_value[ver]; | |
88dbf20f | 810 | |
fbcece5e | 811 | if (*equiv == NULL) |
812 | *equiv = BITMAP_ALLOC (NULL); | |
813 | bitmap_set_bit (*equiv, ver); | |
eea12c72 | 814 | if (vr && vr->equiv) |
fbcece5e | 815 | bitmap_ior_into (*equiv, vr->equiv); |
88dbf20f | 816 | } |
817 | ||
818 | ||
819 | /* Return true if VR is ~[0, 0]. */ | |
820 | ||
821 | static inline bool | |
eea12c72 | 822 | range_is_nonnull (value_range_t *vr) |
88dbf20f | 823 | { |
824 | return vr->type == VR_ANTI_RANGE | |
825 | && integer_zerop (vr->min) | |
826 | && integer_zerop (vr->max); | |
827 | } | |
828 | ||
829 | ||
830 | /* Return true if VR is [0, 0]. */ | |
831 | ||
832 | static inline bool | |
eea12c72 | 833 | range_is_null (value_range_t *vr) |
88dbf20f | 834 | { |
835 | return vr->type == VR_RANGE | |
836 | && integer_zerop (vr->min) | |
837 | && integer_zerop (vr->max); | |
838 | } | |
839 | ||
bca0860e | 840 | /* Return true if max and min of VR are INTEGER_CST. It's not necessary |
841 | a singleton. */ | |
842 | ||
843 | static inline bool | |
844 | range_int_cst_p (value_range_t *vr) | |
845 | { | |
846 | return (vr->type == VR_RANGE | |
847 | && TREE_CODE (vr->max) == INTEGER_CST | |
ac4a8000 | 848 | && TREE_CODE (vr->min) == INTEGER_CST); |
bca0860e | 849 | } |
850 | ||
851 | /* Return true if VR is a INTEGER_CST singleton. */ | |
852 | ||
853 | static inline bool | |
854 | range_int_cst_singleton_p (value_range_t *vr) | |
855 | { | |
856 | return (range_int_cst_p (vr) | |
ac4a8000 | 857 | && !TREE_OVERFLOW (vr->min) |
858 | && !TREE_OVERFLOW (vr->max) | |
bca0860e | 859 | && tree_int_cst_equal (vr->min, vr->max)); |
860 | } | |
88dbf20f | 861 | |
eea12c72 | 862 | /* Return true if value range VR involves at least one symbol. */ |
88dbf20f | 863 | |
eea12c72 | 864 | static inline bool |
865 | symbolic_range_p (value_range_t *vr) | |
88dbf20f | 866 | { |
eea12c72 | 867 | return (!is_gimple_min_invariant (vr->min) |
868 | || !is_gimple_min_invariant (vr->max)); | |
88dbf20f | 869 | } |
870 | ||
80777cd8 | 871 | /* Return true if value range VR uses an overflow infinity. */ |
8dbf774a | 872 | |
c3783c3b | 873 | static inline bool |
874 | overflow_infinity_range_p (value_range_t *vr) | |
8dbf774a | 875 | { |
c3783c3b | 876 | return (vr->type == VR_RANGE |
877 | && (is_overflow_infinity (vr->min) | |
878 | || is_overflow_infinity (vr->max))); | |
879 | } | |
add6ee5e | 880 | |
a2a1fde2 | 881 | /* Return false if we can not make a valid comparison based on VR; |
882 | this will be the case if it uses an overflow infinity and overflow | |
883 | is not undefined (i.e., -fno-strict-overflow is in effect). | |
884 | Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR | |
885 | uses an overflow infinity. */ | |
886 | ||
887 | static bool | |
888 | usable_range_p (value_range_t *vr, bool *strict_overflow_p) | |
889 | { | |
890 | gcc_assert (vr->type == VR_RANGE); | |
891 | if (is_overflow_infinity (vr->min)) | |
892 | { | |
893 | *strict_overflow_p = true; | |
894 | if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min))) | |
895 | return false; | |
896 | } | |
897 | if (is_overflow_infinity (vr->max)) | |
898 | { | |
899 | *strict_overflow_p = true; | |
900 | if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max))) | |
901 | return false; | |
902 | } | |
903 | return true; | |
904 | } | |
905 | ||
906 | ||
75a70cf9 | 907 | /* Return true if the result of assignment STMT is know to be non-negative. |
908 | If the return value is based on the assumption that signed overflow is | |
909 | undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change | |
910 | *STRICT_OVERFLOW_P.*/ | |
911 | ||
912 | static bool | |
913 | gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p) | |
914 | { | |
915 | enum tree_code code = gimple_assign_rhs_code (stmt); | |
916 | switch (get_gimple_rhs_class (code)) | |
917 | { | |
918 | case GIMPLE_UNARY_RHS: | |
919 | return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt), | |
920 | gimple_expr_type (stmt), | |
921 | gimple_assign_rhs1 (stmt), | |
922 | strict_overflow_p); | |
923 | case GIMPLE_BINARY_RHS: | |
924 | return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt), | |
925 | gimple_expr_type (stmt), | |
926 | gimple_assign_rhs1 (stmt), | |
927 | gimple_assign_rhs2 (stmt), | |
928 | strict_overflow_p); | |
00f4f705 | 929 | case GIMPLE_TERNARY_RHS: |
930 | return false; | |
75a70cf9 | 931 | case GIMPLE_SINGLE_RHS: |
932 | return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt), | |
933 | strict_overflow_p); | |
934 | case GIMPLE_INVALID_RHS: | |
935 | gcc_unreachable (); | |
936 | default: | |
937 | gcc_unreachable (); | |
938 | } | |
939 | } | |
940 | ||
941 | /* Return true if return value of call STMT is know to be non-negative. | |
942 | If the return value is based on the assumption that signed overflow is | |
943 | undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change | |
944 | *STRICT_OVERFLOW_P.*/ | |
945 | ||
946 | static bool | |
947 | gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p) | |
948 | { | |
949 | tree arg0 = gimple_call_num_args (stmt) > 0 ? | |
950 | gimple_call_arg (stmt, 0) : NULL_TREE; | |
951 | tree arg1 = gimple_call_num_args (stmt) > 1 ? | |
952 | gimple_call_arg (stmt, 1) : NULL_TREE; | |
953 | ||
954 | return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt), | |
955 | gimple_call_fndecl (stmt), | |
956 | arg0, | |
957 | arg1, | |
958 | strict_overflow_p); | |
959 | } | |
960 | ||
961 | /* Return true if STMT is know to to compute a non-negative value. | |
962 | If the return value is based on the assumption that signed overflow is | |
963 | undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change | |
964 | *STRICT_OVERFLOW_P.*/ | |
965 | ||
966 | static bool | |
967 | gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p) | |
968 | { | |
969 | switch (gimple_code (stmt)) | |
970 | { | |
971 | case GIMPLE_ASSIGN: | |
972 | return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p); | |
973 | case GIMPLE_CALL: | |
974 | return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p); | |
975 | default: | |
976 | gcc_unreachable (); | |
977 | } | |
978 | } | |
979 | ||
980 | /* Return true if the result of assignment STMT is know to be non-zero. | |
981 | If the return value is based on the assumption that signed overflow is | |
982 | undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change | |
983 | *STRICT_OVERFLOW_P.*/ | |
984 | ||
985 | static bool | |
986 | gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p) | |
987 | { | |
988 | enum tree_code code = gimple_assign_rhs_code (stmt); | |
989 | switch (get_gimple_rhs_class (code)) | |
990 | { | |
991 | case GIMPLE_UNARY_RHS: | |
992 | return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt), | |
993 | gimple_expr_type (stmt), | |
994 | gimple_assign_rhs1 (stmt), | |
995 | strict_overflow_p); | |
996 | case GIMPLE_BINARY_RHS: | |
997 | return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt), | |
998 | gimple_expr_type (stmt), | |
999 | gimple_assign_rhs1 (stmt), | |
1000 | gimple_assign_rhs2 (stmt), | |
1001 | strict_overflow_p); | |
00f4f705 | 1002 | case GIMPLE_TERNARY_RHS: |
1003 | return false; | |
75a70cf9 | 1004 | case GIMPLE_SINGLE_RHS: |
1005 | return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt), | |
1006 | strict_overflow_p); | |
1007 | case GIMPLE_INVALID_RHS: | |
1008 | gcc_unreachable (); | |
1009 | default: | |
1010 | gcc_unreachable (); | |
1011 | } | |
1012 | } | |
1013 | ||
1014 | /* Return true if STMT is know to to compute a non-zero value. | |
1015 | If the return value is based on the assumption that signed overflow is | |
1016 | undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change | |
1017 | *STRICT_OVERFLOW_P.*/ | |
1018 | ||
1019 | static bool | |
1020 | gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p) | |
1021 | { | |
1022 | switch (gimple_code (stmt)) | |
1023 | { | |
1024 | case GIMPLE_ASSIGN: | |
1025 | return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p); | |
1026 | case GIMPLE_CALL: | |
1027 | return gimple_alloca_call_p (stmt); | |
1028 | default: | |
1029 | gcc_unreachable (); | |
1030 | } | |
1031 | } | |
1032 | ||
c3783c3b | 1033 | /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges |
eea12c72 | 1034 | obtained so far. */ |
88dbf20f | 1035 | |
eea12c72 | 1036 | static bool |
75a70cf9 | 1037 | vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p) |
88dbf20f | 1038 | { |
75a70cf9 | 1039 | if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p)) |
eea12c72 | 1040 | return true; |
88dbf20f | 1041 | |
eea12c72 | 1042 | /* If we have an expression of the form &X->a, then the expression |
1043 | is nonnull if X is nonnull. */ | |
75a70cf9 | 1044 | if (is_gimple_assign (stmt) |
1045 | && gimple_assign_rhs_code (stmt) == ADDR_EXPR) | |
eea12c72 | 1046 | { |
75a70cf9 | 1047 | tree expr = gimple_assign_rhs1 (stmt); |
eea12c72 | 1048 | tree base = get_base_address (TREE_OPERAND (expr, 0)); |
88dbf20f | 1049 | |
eea12c72 | 1050 | if (base != NULL_TREE |
182cf5a9 | 1051 | && TREE_CODE (base) == MEM_REF |
eea12c72 | 1052 | && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME) |
1053 | { | |
1054 | value_range_t *vr = get_value_range (TREE_OPERAND (base, 0)); | |
1055 | if (range_is_nonnull (vr)) | |
1056 | return true; | |
1057 | } | |
1058 | } | |
e7d43f99 | 1059 | |
eea12c72 | 1060 | return false; |
e7d43f99 | 1061 | } |
1062 | ||
7587869b | 1063 | /* Returns true if EXPR is a valid value (as expected by compare_values) -- |
1064 | a gimple invariant, or SSA_NAME +- CST. */ | |
1065 | ||
1066 | static bool | |
1067 | valid_value_p (tree expr) | |
1068 | { | |
1069 | if (TREE_CODE (expr) == SSA_NAME) | |
1070 | return true; | |
1071 | ||
1072 | if (TREE_CODE (expr) == PLUS_EXPR | |
1073 | || TREE_CODE (expr) == MINUS_EXPR) | |
1074 | return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME | |
1075 | && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST); | |
48e1416a | 1076 | |
7587869b | 1077 | return is_gimple_min_invariant (expr); |
1078 | } | |
e7d43f99 | 1079 | |
/* Return
   1 if VAL < VAL2
   0 if !(VAL < VAL2)
   -2 if those are incomparable.  */
static inline int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    {
      if (TYPE_UNSIGNED (TREE_TYPE (val)))
	return INT_CST_LT_UNSIGNED (val, val2);
      else
	{
	  if (INT_CST_LT (val, val2))
	    return 1;
	}
    }
  else
    {
      tree tcmp;

      /* Folding the comparison may want to emit overflow warnings;
	 we are merely probing, so defer and then discard them.  */
      fold_defer_overflow_warnings ();

      tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

      fold_undefer_and_ignore_overflow_warnings ();

      /* If the comparison did not fold to a constant, the operands
	 are incomparable.  */
      if (!tcmp
	  || TREE_CODE (tcmp) != INTEGER_CST)
	return -2;

      if (!integer_zerop (tcmp))
	return 1;
    }

  /* val >= val2, not considering overflow infinity.  A negative
     overflow infinity on the VAL side is still less than anything
     except another negative overflow infinity, and symmetrically for
     a positive overflow infinity on the VAL2 side.  */
  if (is_negative_overflow_infinity (val))
    return is_negative_overflow_infinity (val2) ? 0 : 1;
  else if (is_positive_overflow_infinity (val2))
    return is_positive_overflow_infinity (val) ? 0 : 1;

  return 0;
}
1124 | ||
/* Compare two values VAL1 and VAL2.  Return

   -2 if VAL1 and VAL2 cannot be compared at compile-time,
   -1 if VAL1 < VAL2,
    0 if VAL1 == VAL2,
   +1 if VAL1 > VAL2, and
   +2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

static int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
	      == POINTER_TYPE_P (TREE_TYPE (val2)));
  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  if ((TREE_CODE (val1) == SSA_NAME
       || TREE_CODE (val1) == PLUS_EXPR
       || TREE_CODE (val1) == MINUS_EXPR)
      && (TREE_CODE (val2) == SSA_NAME
	  || TREE_CODE (val2) == PLUS_EXPR
	  || TREE_CODE (val2) == MINUS_EXPR))
    {
      tree n1, c1, n2, c2;
      enum tree_code code1, code2;

      /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
	 return -1 or +1 accordingly.  If VAL1 and VAL2 don't use the
	 same name, return -2.  */
      if (TREE_CODE (val1) == SSA_NAME)
	{
	  code1 = SSA_NAME;
	  n1 = val1;
	  c1 = NULL_TREE;
	}
      else
	{
	  code1 = TREE_CODE (val1);
	  n1 = TREE_OPERAND (val1, 0);
	  c1 = TREE_OPERAND (val1, 1);
	  /* Canonicalize a negative constant: NAME + -CST becomes
	     NAME - CST and vice versa, so only non-negative constants
	     need be handled below.  */
	  if (tree_int_cst_sgn (c1) == -1)
	    {
	      if (is_negative_overflow_infinity (c1))
		return -2;
	      c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
	      if (!c1)
		return -2;
	      code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
	    }
	}

      if (TREE_CODE (val2) == SSA_NAME)
	{
	  code2 = SSA_NAME;
	  n2 = val2;
	  c2 = NULL_TREE;
	}
      else
	{
	  code2 = TREE_CODE (val2);
	  n2 = TREE_OPERAND (val2, 0);
	  c2 = TREE_OPERAND (val2, 1);
	  /* Canonicalize a negative constant as above.  */
	  if (tree_int_cst_sgn (c2) == -1)
	    {
	      if (is_negative_overflow_infinity (c2))
		return -2;
	      c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
	      if (!c2)
		return -2;
	      code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
	    }
	}

      /* Both values must use the same name.  */
      if (n1 != n2)
	return -2;

      if (code1 == SSA_NAME
	  && code2 == SSA_NAME)
	/* NAME == NAME  */
	return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
	return -2;

      /* The conclusions below assume NAME [+-] CST does not wrap;
	 record that, unless the operands are already marked as not
	 warranting a warning.  */
      if (strict_overflow_p != NULL
	  && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
	  && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      if (code1 == SSA_NAME)
	{
	  if (code2 == PLUS_EXPR)
	    /* NAME < NAME + CST  */
	    return -1;
	  else if (code2 == MINUS_EXPR)
	    /* NAME > NAME - CST  */
	    return 1;
	}
      else if (code1 == PLUS_EXPR)
	{
	  if (code2 == SSA_NAME)
	    /* NAME + CST > NAME  */
	    return 1;
	  else if (code2 == PLUS_EXPR)
	    /* NAME + CST1 > NAME + CST2, if CST1 > CST2  */
	    return compare_values_warnv (c1, c2, strict_overflow_p);
	  else if (code2 == MINUS_EXPR)
	    /* NAME + CST1 > NAME - CST2  */
	    return 1;
	}
      else if (code1 == MINUS_EXPR)
	{
	  if (code2 == SSA_NAME)
	    /* NAME - CST < NAME  */
	    return -1;
	  else if (code2 == PLUS_EXPR)
	    /* NAME - CST1 < NAME + CST2  */
	    return -1;
	  else if (code2 == MINUS_EXPR)
	    /* NAME - CST1 > NAME - CST2, if CST1 < CST2.  Notice that
	       C1 and C2 are swapped in the call to compare_values.  */
	    return compare_values_warnv (c2, c1, strict_overflow_p);
	}

      gcc_unreachable ();
    }

  /* We cannot compare non-constants.  */
  if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values, except for overflow
	 infinities.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	{
	  if (strict_overflow_p != NULL)
	    *strict_overflow_p = true;
	  if (is_negative_overflow_infinity (val1))
	    return is_negative_overflow_infinity (val2) ? 0 : -1;
	  else if (is_negative_overflow_infinity (val2))
	    return 1;
	  else if (is_positive_overflow_infinity (val1))
	    return is_positive_overflow_infinity (val2) ? 0 : 1;
	  else if (is_positive_overflow_infinity (val2))
	    return -1;
	  return -2;
	}

      return tree_int_cst_compare (val1, val2);
    }
  else
    {
      tree t;

      /* First see if VAL1 and VAL2 are not the same.  */
      if (val1 == val2 || operand_equal_p (val1, val2, 0))
	return 0;

      /* If VAL1 is a lower address than VAL2, return -1.  */
      if (operand_less_p (val1, val2) == 1)
	return -1;

      /* If VAL1 is a higher address than VAL2, return +1.  */
      if (operand_less_p (val2, val1) == 1)
	return 1;

      /* If VAL1 is different than VAL2, return +2.
	 For integer constants we either have already returned -1 or 1
	 or they are equivalent.  We still might succeed in proving
	 something about non-trivial operands.  */
      if (TREE_CODE (val1) != INTEGER_CST
	  || TREE_CODE (val2) != INTEGER_CST)
	{
	  t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
	  if (t && integer_onep (t))
	    return 2;
	}

      return -2;
    }
}
1324 | ||
a2a1fde2 | 1325 | /* Compare values like compare_values_warnv, but treat comparisons of |
1326 | nonconstants which rely on undefined overflow as incomparable. */ | |
c3783c3b | 1327 | |
1328 | static int | |
1329 | compare_values (tree val1, tree val2) | |
1330 | { | |
1331 | bool sop; | |
1332 | int ret; | |
1333 | ||
1334 | sop = false; | |
1335 | ret = compare_values_warnv (val1, val2, &sop); | |
a2a1fde2 | 1336 | if (sop |
1337 | && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))) | |
c3783c3b | 1338 | ret = -2; |
1339 | return ret; | |
1340 | } | |
1341 | ||
88dbf20f | 1342 | |
7d48cd66 | 1343 | /* Return 1 if VAL is inside value range MIN <= VAL <= MAX, |
1344 | 0 if VAL is not inside [MIN, MAX], | |
30a9e679 | 1345 | -2 if we cannot tell either way. |
1346 | ||
7e8bc5b6 | 1347 | Benchmark compile/20001226-1.c compilation time after changing this |
1348 | function. */ | |
88dbf20f | 1349 | |
1350 | static inline int | |
7d48cd66 | 1351 | value_inside_range (tree val, tree min, tree max) |
88dbf20f | 1352 | { |
7e8bc5b6 | 1353 | int cmp1, cmp2; |
88dbf20f | 1354 | |
7d48cd66 | 1355 | cmp1 = operand_less_p (val, min); |
7e8bc5b6 | 1356 | if (cmp1 == -2) |
88dbf20f | 1357 | return -2; |
7e8bc5b6 | 1358 | if (cmp1 == 1) |
1359 | return 0; | |
88dbf20f | 1360 | |
7d48cd66 | 1361 | cmp2 = operand_less_p (max, val); |
7e8bc5b6 | 1362 | if (cmp2 == -2) |
88dbf20f | 1363 | return -2; |
1364 | ||
7e8bc5b6 | 1365 | return !cmp2; |
88dbf20f | 1366 | } |
1367 | ||
1368 | ||
1369 | /* Return true if value ranges VR0 and VR1 have a non-empty | |
48e1416a | 1370 | intersection. |
1371 | ||
7e8bc5b6 | 1372 | Benchmark compile/20001226-1.c compilation time after changing this |
1373 | function. | |
1374 | */ | |
88dbf20f | 1375 | |
1376 | static inline bool | |
eea12c72 | 1377 | value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1) |
88dbf20f | 1378 | { |
14dc13e5 | 1379 | /* The value ranges do not intersect if the maximum of the first range is |
1380 | less than the minimum of the second range or vice versa. | |
1381 | When those relations are unknown, we can't do any better. */ | |
1382 | if (operand_less_p (vr0->max, vr1->min) != 0) | |
1383 | return false; | |
1384 | if (operand_less_p (vr1->max, vr0->min) != 0) | |
1385 | return false; | |
1386 | return true; | |
88dbf20f | 1387 | } |
1388 | ||
1389 | ||
7d48cd66 | 1390 | /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not |
1391 | include the value zero, -2 if we cannot tell. */ | |
eea12c72 | 1392 | |
7d48cd66 | 1393 | static inline int |
1394 | range_includes_zero_p (tree min, tree max) | |
eea12c72 | 1395 | { |
7d48cd66 | 1396 | tree zero = build_int_cst (TREE_TYPE (min), 0); |
1397 | return value_inside_range (zero, min, max); | |
eea12c72 | 1398 | } |
1399 | ||
c37659ce | 1400 | /* Return true if *VR is know to only contain nonnegative values. */ |
1401 | ||
1402 | static inline bool | |
1403 | value_range_nonnegative_p (value_range_t *vr) | |
1404 | { | |
713b2724 | 1405 | /* Testing for VR_ANTI_RANGE is not useful here as any anti-range |
1406 | which would return a useful value should be encoded as a | |
1407 | VR_RANGE. */ | |
c37659ce | 1408 | if (vr->type == VR_RANGE) |
1409 | { | |
1410 | int result = compare_values (vr->min, integer_zero_node); | |
1411 | return (result == 0 || result == 1); | |
1412 | } | |
c37659ce | 1413 | |
1414 | return false; | |
1415 | } | |
1416 | ||
8dbf774a | 1417 | /* Return true if T, an SSA_NAME, is known to be nonnegative. Return |
1418 | false otherwise or if no value range information is available. */ | |
1419 | ||
1420 | bool | |
9f627b1a | 1421 | ssa_name_nonnegative_p (const_tree t) |
8dbf774a | 1422 | { |
1423 | value_range_t *vr = get_value_range (t); | |
1424 | ||
ccab2921 | 1425 | if (INTEGRAL_TYPE_P (t) |
1426 | && TYPE_UNSIGNED (t)) | |
1427 | return true; | |
1428 | ||
8dbf774a | 1429 | if (!vr) |
1430 | return false; | |
1431 | ||
c37659ce | 1432 | return value_range_nonnegative_p (vr); |
1433 | } | |
8dbf774a | 1434 | |
c37659ce | 1435 | /* If *VR has a value rante that is a single constant value return that, |
1436 | otherwise return NULL_TREE. */ | |
1437 | ||
1438 | static tree | |
1439 | value_range_constant_singleton (value_range_t *vr) | |
1440 | { | |
1441 | if (vr->type == VR_RANGE | |
1442 | && operand_equal_p (vr->min, vr->max, 0) | |
1443 | && is_gimple_min_invariant (vr->min)) | |
1444 | return vr->min; | |
1445 | ||
1446 | return NULL_TREE; | |
8dbf774a | 1447 | } |
1448 | ||
43ffec67 | 1449 | /* If OP has a value range with a single constant value return that, |
1450 | otherwise return NULL_TREE. This returns OP itself if OP is a | |
1451 | constant. */ | |
1452 | ||
1453 | static tree | |
1454 | op_with_constant_singleton_value_range (tree op) | |
1455 | { | |
43ffec67 | 1456 | if (is_gimple_min_invariant (op)) |
1457 | return op; | |
1458 | ||
1459 | if (TREE_CODE (op) != SSA_NAME) | |
1460 | return NULL_TREE; | |
1461 | ||
c37659ce | 1462 | return value_range_constant_singleton (get_value_range (op)); |
43ffec67 | 1463 | } |
1464 | ||
d6f3306a | 1465 | /* Return true if op is in a boolean [0, 1] value-range. */ |
1466 | ||
1467 | static bool | |
1468 | op_with_boolean_value_range_p (tree op) | |
1469 | { | |
1470 | value_range_t *vr; | |
1471 | ||
1472 | if (TYPE_PRECISION (TREE_TYPE (op)) == 1) | |
1473 | return true; | |
1474 | ||
1475 | if (integer_zerop (op) | |
1476 | || integer_onep (op)) | |
1477 | return true; | |
1478 | ||
1479 | if (TREE_CODE (op) != SSA_NAME) | |
1480 | return false; | |
1481 | ||
1482 | vr = get_value_range (op); | |
1483 | return (vr->type == VR_RANGE | |
1484 | && integer_zerop (vr->min) | |
1485 | && integer_onep (vr->max)); | |
1486 | } | |
eea12c72 | 1487 | |
88dbf20f | 1488 | /* Extract value range information from an ASSERT_EXPR EXPR and store |
1489 | it in *VR_P. */ | |
1490 | ||
1491 | static void | |
eea12c72 | 1492 | extract_range_from_assert (value_range_t *vr_p, tree expr) |
88dbf20f | 1493 | { |
eea12c72 | 1494 | tree var, cond, limit, min, max, type; |
04dbf3c4 | 1495 | value_range_t *limit_vr; |
e7d43f99 | 1496 | enum tree_code cond_code; |
88dbf20f | 1497 | |
1498 | var = ASSERT_EXPR_VAR (expr); | |
1499 | cond = ASSERT_EXPR_COND (expr); | |
1500 | ||
a640bb21 | 1501 | gcc_assert (COMPARISON_CLASS_P (cond)); |
88dbf20f | 1502 | |
1503 | /* Find VAR in the ASSERT_EXPR conditional. */ | |
bed8bec4 | 1504 | if (var == TREE_OPERAND (cond, 0) |
1505 | || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR | |
1506 | || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR) | |
eea12c72 | 1507 | { |
1508 | /* If the predicate is of the form VAR COMP LIMIT, then we just | |
1509 | take LIMIT from the RHS and use the same comparison code. */ | |
eea12c72 | 1510 | cond_code = TREE_CODE (cond); |
bed8bec4 | 1511 | limit = TREE_OPERAND (cond, 1); |
1512 | cond = TREE_OPERAND (cond, 0); | |
eea12c72 | 1513 | } |
1514 | else | |
1515 | { | |
1516 | /* If the predicate is of the form LIMIT COMP VAR, then we need | |
1517 | to flip around the comparison code to create the proper range | |
1518 | for VAR. */ | |
6a0aeeaa | 1519 | cond_code = swap_tree_comparison (TREE_CODE (cond)); |
bed8bec4 | 1520 | limit = TREE_OPERAND (cond, 0); |
1521 | cond = TREE_OPERAND (cond, 1); | |
eea12c72 | 1522 | } |
88dbf20f | 1523 | |
afc1ad6d | 1524 | limit = avoid_overflow_infinity (limit); |
1525 | ||
325d00b0 | 1526 | type = TREE_TYPE (var); |
88dbf20f | 1527 | gcc_assert (limit != var); |
1528 | ||
eea12c72 | 1529 | /* For pointer arithmetic, we only keep track of pointer equality |
1530 | and inequality. */ | |
1531 | if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR) | |
88dbf20f | 1532 | { |
e7d43f99 | 1533 | set_value_range_to_varying (vr_p); |
88dbf20f | 1534 | return; |
1535 | } | |
1536 | ||
eea12c72 | 1537 | /* If LIMIT is another SSA name and LIMIT has a range of its own, |
1538 | try to use LIMIT's range to avoid creating symbolic ranges | |
1539 | unnecessarily. */ | |
1540 | limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL; | |
1541 | ||
1542 | /* LIMIT's range is only interesting if it has any useful information. */ | |
1543 | if (limit_vr | |
1544 | && (limit_vr->type == VR_UNDEFINED | |
1545 | || limit_vr->type == VR_VARYING | |
1546 | || symbolic_range_p (limit_vr))) | |
1547 | limit_vr = NULL; | |
1548 | ||
166b8fc0 | 1549 | /* Initially, the new range has the same set of equivalences of |
1550 | VAR's range. This will be revised before returning the final | |
1551 | value. Since assertions may be chained via mutually exclusive | |
1552 | predicates, we will need to trim the set of equivalences before | |
1553 | we are done. */ | |
eea12c72 | 1554 | gcc_assert (vr_p->equiv == NULL); |
fbcece5e | 1555 | add_equivalence (&vr_p->equiv, var); |
eea12c72 | 1556 | |
1557 | /* Extract a new range based on the asserted comparison for VAR and | |
1558 | LIMIT's value range. Notice that if LIMIT has an anti-range, we | |
1559 | will only use it for equality comparisons (EQ_EXPR). For any | |
1560 | other kind of assertion, we cannot derive a range from LIMIT's | |
1561 | anti-range that can be used to describe the new range. For | |
1562 | instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10], | |
1563 | then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is | |
1564 | no single range for x_2 that could describe LE_EXPR, so we might | |
bed8bec4 | 1565 | as well build the range [b_4, +INF] for it. |
1566 | One special case we handle is extracting a range from a | |
1567 | range test encoded as (unsigned)var + CST <= limit. */ | |
1568 | if (TREE_CODE (cond) == NOP_EXPR | |
1569 | || TREE_CODE (cond) == PLUS_EXPR) | |
1570 | { | |
bed8bec4 | 1571 | if (TREE_CODE (cond) == PLUS_EXPR) |
1572 | { | |
22cdb855 | 1573 | min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)), |
1574 | TREE_OPERAND (cond, 1)); | |
317e2a67 | 1575 | max = int_const_binop (PLUS_EXPR, limit, min); |
bed8bec4 | 1576 | cond = TREE_OPERAND (cond, 0); |
1577 | } | |
1578 | else | |
22cdb855 | 1579 | { |
1580 | min = build_int_cst (TREE_TYPE (var), 0); | |
1581 | max = limit; | |
1582 | } | |
bed8bec4 | 1583 | |
22cdb855 | 1584 | /* Make sure to not set TREE_OVERFLOW on the final type |
1585 | conversion. We are willingly interpreting large positive | |
1586 | unsigned values as negative singed values here. */ | |
d3237426 | 1587 | min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min), |
1588 | 0, false); | |
1589 | max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max), | |
1590 | 0, false); | |
bed8bec4 | 1591 | |
1592 | /* We can transform a max, min range to an anti-range or | |
1593 | vice-versa. Use set_and_canonicalize_value_range which does | |
1594 | this for us. */ | |
1595 | if (cond_code == LE_EXPR) | |
1596 | set_and_canonicalize_value_range (vr_p, VR_RANGE, | |
1597 | min, max, vr_p->equiv); | |
1598 | else if (cond_code == GT_EXPR) | |
1599 | set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE, | |
1600 | min, max, vr_p->equiv); | |
1601 | else | |
1602 | gcc_unreachable (); | |
1603 | } | |
1604 | else if (cond_code == EQ_EXPR) | |
eea12c72 | 1605 | { |
1606 | enum value_range_type range_type; | |
1607 | ||
1608 | if (limit_vr) | |
1609 | { | |
1610 | range_type = limit_vr->type; | |
1611 | min = limit_vr->min; | |
1612 | max = limit_vr->max; | |
1613 | } | |
1614 | else | |
1615 | { | |
1616 | range_type = VR_RANGE; | |
1617 | min = limit; | |
1618 | max = limit; | |
1619 | } | |
1620 | ||
1621 | set_value_range (vr_p, range_type, min, max, vr_p->equiv); | |
1622 | ||
1623 | /* When asserting the equality VAR == LIMIT and LIMIT is another | |
1624 | SSA name, the new range will also inherit the equivalence set | |
1625 | from LIMIT. */ | |
1626 | if (TREE_CODE (limit) == SSA_NAME) | |
fbcece5e | 1627 | add_equivalence (&vr_p->equiv, limit); |
eea12c72 | 1628 | } |
1629 | else if (cond_code == NE_EXPR) | |
1630 | { | |
1631 | /* As described above, when LIMIT's range is an anti-range and | |
1632 | this assertion is an inequality (NE_EXPR), then we cannot | |
1633 | derive anything from the anti-range. For instance, if | |
1634 | LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does | |
1635 | not imply that VAR's range is [0, 0]. So, in the case of | |
1636 | anti-ranges, we just assert the inequality using LIMIT and | |
d461f9a9 | 1637 | not its anti-range. |
1638 | ||
1639 | If LIMIT_VR is a range, we can only use it to build a new | |
1640 | anti-range if LIMIT_VR is a single-valued range. For | |
1641 | instance, if LIMIT_VR is [0, 1], the predicate | |
1642 | VAR != [0, 1] does not mean that VAR's range is ~[0, 1]. | |
1643 | Rather, it means that for value 0 VAR should be ~[0, 0] | |
1644 | and for value 1, VAR should be ~[1, 1]. We cannot | |
1645 | represent these ranges. | |
1646 | ||
1647 | The only situation in which we can build a valid | |
1648 | anti-range is when LIMIT_VR is a single-valued range | |
48e1416a | 1649 | (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case, |
d461f9a9 | 1650 | build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */ |
1651 | if (limit_vr | |
1652 | && limit_vr->type == VR_RANGE | |
1653 | && compare_values (limit_vr->min, limit_vr->max) == 0) | |
eea12c72 | 1654 | { |
d461f9a9 | 1655 | min = limit_vr->min; |
1656 | max = limit_vr->max; | |
eea12c72 | 1657 | } |
1658 | else | |
1659 | { | |
d461f9a9 | 1660 | /* In any other case, we cannot use LIMIT's range to build a |
1661 | valid anti-range. */ | |
1662 | min = max = limit; | |
eea12c72 | 1663 | } |
1664 | ||
1665 | /* If MIN and MAX cover the whole range for their type, then | |
1666 | just use the original LIMIT. */ | |
1667 | if (INTEGRAL_TYPE_P (type) | |
b876a744 | 1668 | && vrp_val_is_min (min) |
1669 | && vrp_val_is_max (max)) | |
eea12c72 | 1670 | min = max = limit; |
1671 | ||
1672 | set_value_range (vr_p, VR_ANTI_RANGE, min, max, vr_p->equiv); | |
1673 | } | |
1674 | else if (cond_code == LE_EXPR || cond_code == LT_EXPR) | |
88dbf20f | 1675 | { |
eea12c72 | 1676 | min = TYPE_MIN_VALUE (type); |
1677 | ||
1678 | if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE) | |
1679 | max = limit; | |
1680 | else | |
1681 | { | |
1682 | /* If LIMIT_VR is of the form [N1, N2], we need to build the | |
1683 | range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for | |
1684 | LT_EXPR. */ | |
1685 | max = limit_vr->max; | |
1686 | } | |
1687 | ||
ad2a47a5 | 1688 | /* If the maximum value forces us to be out of bounds, simply punt. |
1689 | It would be pointless to try and do anything more since this | |
1690 | all should be optimized away above us. */ | |
c08d658d | 1691 | if ((cond_code == LT_EXPR |
1692 | && compare_values (max, min) == 0) | |
61349811 | 1693 | || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max))) |
ad2a47a5 | 1694 | set_value_range_to_varying (vr_p); |
1695 | else | |
eea12c72 | 1696 | { |
ad2a47a5 | 1697 | /* For LT_EXPR, we create the range [MIN, MAX - 1]. */ |
c08d658d | 1698 | if (cond_code == LT_EXPR) |
ad2a47a5 | 1699 | { |
0418ac74 | 1700 | if (TYPE_PRECISION (TREE_TYPE (max)) == 1 |
1701 | && !TYPE_UNSIGNED (TREE_TYPE (max))) | |
1702 | max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max, | |
1703 | build_int_cst (TREE_TYPE (max), -1)); | |
1704 | else | |
1705 | max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max, | |
1706 | build_int_cst (TREE_TYPE (max), 1)); | |
d8f696cf | 1707 | if (EXPR_P (max)) |
1708 | TREE_NO_WARNING (max) = 1; | |
ad2a47a5 | 1709 | } |
eea12c72 | 1710 | |
ad2a47a5 | 1711 | set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv); |
1712 | } | |
88dbf20f | 1713 | } |
eea12c72 | 1714 | else if (cond_code == GE_EXPR || cond_code == GT_EXPR) |
88dbf20f | 1715 | { |
eea12c72 | 1716 | max = TYPE_MAX_VALUE (type); |
1717 | ||
1718 | if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE) | |
1719 | min = limit; | |
1720 | else | |
1721 | { | |
1722 | /* If LIMIT_VR is of the form [N1, N2], we need to build the | |
1723 | range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for | |
1724 | GT_EXPR. */ | |
1725 | min = limit_vr->min; | |
1726 | } | |
1727 | ||
ad2a47a5 | 1728 | /* If the minimum value forces us to be out of bounds, simply punt. |
1729 | It would be pointless to try and do anything more since this | |
1730 | all should be optimized away above us. */ | |
c08d658d | 1731 | if ((cond_code == GT_EXPR |
1732 | && compare_values (min, max) == 0) | |
61349811 | 1733 | || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min))) |
ad2a47a5 | 1734 | set_value_range_to_varying (vr_p); |
1735 | else | |
eea12c72 | 1736 | { |
ad2a47a5 | 1737 | /* For GT_EXPR, we create the range [MIN + 1, MAX]. */ |
c08d658d | 1738 | if (cond_code == GT_EXPR) |
ad2a47a5 | 1739 | { |
0418ac74 | 1740 | if (TYPE_PRECISION (TREE_TYPE (min)) == 1 |
1741 | && !TYPE_UNSIGNED (TREE_TYPE (min))) | |
1742 | min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min, | |
1743 | build_int_cst (TREE_TYPE (min), -1)); | |
1744 | else | |
1745 | min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min, | |
1746 | build_int_cst (TREE_TYPE (min), 1)); | |
d8f696cf | 1747 | if (EXPR_P (min)) |
1748 | TREE_NO_WARNING (min) = 1; | |
ad2a47a5 | 1749 | } |
eea12c72 | 1750 | |
ad2a47a5 | 1751 | set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv); |
1752 | } | |
88dbf20f | 1753 | } |
1754 | else | |
1755 | gcc_unreachable (); | |
1756 | ||
04dbf3c4 | 1757 | /* Finally intersect the new range with what we already know about var. */ |
1758 | vrp_intersect_ranges (vr_p, get_value_range (var)); | |
88dbf20f | 1759 | } |
1760 | ||
1761 | ||
1762 | /* Extract range information from SSA name VAR and store it in VR. If | |
1763 | VAR has an interesting range, use it. Otherwise, create the | |
1764 | range [VAR, VAR] and return it. This is useful in situations where | |
1765 | we may have conditionals testing values of VARYING names. For | |
1766 | instance, | |
1767 | ||
1768 | x_3 = y_5; | |
1769 | if (x_3 > y_5) | |
1770 | ... | |
1771 | ||
1772 | Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is | |
1773 | always false. */ | |
1774 | ||
1775 | static void | |
eea12c72 | 1776 | extract_range_from_ssa_name (value_range_t *vr, tree var) |
88dbf20f | 1777 | { |
eea12c72 | 1778 | value_range_t *var_vr = get_value_range (var); |
88dbf20f | 1779 | |
1780 | if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING) | |
eea12c72 | 1781 | copy_value_range (vr, var_vr); |
88dbf20f | 1782 | else |
eea12c72 | 1783 | set_value_range (vr, VR_RANGE, var, var, NULL); |
1784 | ||
fbcece5e | 1785 | add_equivalence (&vr->equiv, var); |
88dbf20f | 1786 | } |
1787 | ||
1788 | ||
/* Wrapper around int_const_binop.  If the operation overflows and we
   are not using wrapping arithmetic, then adjust the result to be
   -INF or +INF depending on CODE, VAL1 and VAL2.  This can return
   NULL_TREE if we need to use an overflow infinity representation but
   the type does not support it.  */

static tree
vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
{
  tree res;

  res = int_const_binop (code, val1, val2);

  /* If we are using unsigned arithmetic, operate symbolically
     on -INF and +INF as int_const_binop only handles signed overflow.  */
  if (TYPE_UNSIGNED (TREE_TYPE (val1)))
    {
      /* Relative position of the result vs. the first operand tells
	 us whether the unsigned operation wrapped.  */
      int checkz = compare_values (res, val1);
      bool overflow = false;

      /* Ensure that res = val1 [+*] val2 >= val1
         or that res = val1 - val2 <= val1.  */
      if ((code == PLUS_EXPR
	   && !(checkz == 1 || checkz == 0))
	  || (code == MINUS_EXPR
	      && !(checkz == 0 || checkz == -1)))
	{
	  overflow = true;
	}
      /* Checking for multiplication overflow is done by dividing the
	 output of the multiplication by the first input of the
	 multiplication.  If the result of that division operation is
	 not equal to the second input of the multiplication, then the
	 multiplication overflowed.  */
      else if (code == MULT_EXPR && !integer_zerop (val1))
	{
	  tree tmp = int_const_binop (TRUNC_DIV_EXPR,
				      res,
				      val1);
	  int check = compare_values (tmp, val2);

	  if (check != 0)
	    overflow = true;
	}

      if (overflow)
	{
	  /* Mark the wrapped result as overflowed on a private copy
	     so we do not clobber a shared INTEGER_CST node.  */
	  res = copy_node (res);
	  TREE_OVERFLOW (res) = 1;
	}

    }
  else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
    /* If the signed operation wraps then int_const_binop has done
       everything we want.  */
    ;
  else if ((TREE_OVERFLOW (res)
	    && !TREE_OVERFLOW (val1)
	    && !TREE_OVERFLOW (val2))
	   || is_overflow_infinity (val1)
	   || is_overflow_infinity (val2))
    {
      /* If the operation overflowed but neither VAL1 nor VAL2 are
	 overflown, return -INF or +INF depending on the operation
	 and the combination of signs of the operands.  */
      int sgn1 = tree_int_cst_sgn (val1);
      int sgn2 = tree_int_cst_sgn (val2);

      /* Punt when an overflow infinity would be required but the
	 type cannot represent one.  */
      if (needs_overflow_infinity (TREE_TYPE (res))
	  && !supports_overflow_infinity (TREE_TYPE (res)))
	return NULL_TREE;

      /* We have to punt on adding infinities of different signs,
	 since we can't tell what the sign of the result should be.
	 Likewise for subtracting infinities of the same sign.  */
      if (((code == PLUS_EXPR && sgn1 != sgn2)
	   || (code == MINUS_EXPR && sgn1 == sgn2))
	  && is_overflow_infinity (val1)
	  && is_overflow_infinity (val2))
	return NULL_TREE;

      /* Don't try to handle division or shifting of infinities.  */
      if ((code == TRUNC_DIV_EXPR
	   || code == FLOOR_DIV_EXPR
	   || code == CEIL_DIV_EXPR
	   || code == EXACT_DIV_EXPR
	   || code == ROUND_DIV_EXPR
	   || code == RSHIFT_EXPR)
	  && (is_overflow_infinity (val1)
	      || is_overflow_infinity (val2)))
	return NULL_TREE;

      /* Notice that we only need to handle the restricted set of
	 operations handled by extract_range_from_binary_expr.
	 Among them, only multiplication, addition and subtraction
	 can yield overflow without overflown operands because we
	 are working with integral types only... except in the
	 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
	 for division too.  */

      /* For multiplication, the sign of the overflow is given
	 by the comparison of the signs of the operands.  */
      if ((code == MULT_EXPR && sgn1 == sgn2)
          /* For addition, the operands must be of the same sign
	     to yield an overflow.  Its sign is therefore that
	     of one of the operands, for example the first.  For
	     infinite operands X + -INF is negative, not positive.  */
	  || (code == PLUS_EXPR
	      && (sgn1 >= 0
		  ? !is_negative_overflow_infinity (val2)
		  : is_positive_overflow_infinity (val2)))
	  /* For subtraction, non-infinite operands must be of
	     different signs to yield an overflow.  Its sign is
	     therefore that of the first operand or the opposite of
	     that of the second operand.  A first operand of 0 counts
	     as positive here, for the corner case 0 - (-INF), which
	     overflows, but must yield +INF.  For infinite operands 0
	     - INF is negative, not positive.  */
	  || (code == MINUS_EXPR
	      && (sgn1 >= 0
		  ? !is_positive_overflow_infinity (val2)
		  : is_negative_overflow_infinity (val2)))
	  /* We only get in here with positive shift count, so the
	     overflow direction is the same as the sign of val1.
	     Actually rshift does not overflow at all, but we only
	     handle the case of shifting overflowed -INF and +INF.  */
	  || (code == RSHIFT_EXPR
	      && sgn1 >= 0)
	  /* For division, the only case is -INF / -1 = +INF.  */
	  || code == TRUNC_DIV_EXPR
	  || code == FLOOR_DIV_EXPR
	  || code == CEIL_DIV_EXPR
	  || code == EXACT_DIV_EXPR
	  || code == ROUND_DIV_EXPR)
	return (needs_overflow_infinity (TREE_TYPE (res))
		? positive_overflow_infinity (TREE_TYPE (res))
		: TYPE_MAX_VALUE (TREE_TYPE (res)));
      else
	return (needs_overflow_infinity (TREE_TYPE (res))
		? negative_overflow_infinity (TREE_TYPE (res))
		: TYPE_MIN_VALUE (TREE_TYPE (res)));
    }

  return res;
}
1934 | ||
1935 | ||
/* For range VR compute two double_int bitmasks.  In *MAY_BE_NONZERO
   bitmask if some bit is unset, it means for all numbers in the range
   the bit is 0, otherwise it might be 0 or 1.  In *MUST_BE_NONZERO
   bitmask if some bit is set, it means for all numbers in the range
   the bit is 1, otherwise it might be 0 or 1.
   Returns true if the masks carry any information; false when VR is
   not an integer-constant range (the masks are then left fully
   conservative: all bits may be nonzero, none must be).  */

static bool
zero_nonzero_bits_from_vr (value_range_t *vr,
			   double_int *may_be_nonzero,
			   double_int *must_be_nonzero)
{
  /* Start from the most conservative answer.  */
  *may_be_nonzero = double_int_minus_one;
  *must_be_nonzero = double_int_zero;
  if (!range_int_cst_p (vr)
      || TREE_OVERFLOW (vr->min)
      || TREE_OVERFLOW (vr->max))
    return false;

  if (range_int_cst_singleton_p (vr))
    {
      /* A single-valued range determines every bit exactly.  */
      *may_be_nonzero = tree_to_double_int (vr->min);
      *must_be_nonzero = *may_be_nonzero;
    }
  else if (tree_int_cst_sgn (vr->min) >= 0
	   || tree_int_cst_sgn (vr->max) < 0)
    {
      /* MIN and MAX have the same sign, so their common leading bits
	 are fixed for the whole range; every bit at or below the most
	 significant bit where they differ is unknown.  */
      double_int dmin = tree_to_double_int (vr->min);
      double_int dmax = tree_to_double_int (vr->max);
      double_int xor_mask = dmin ^ dmax;
      *may_be_nonzero = dmin | dmax;
      *must_be_nonzero = dmin & dmax;
      if (xor_mask.high != 0)
	{
	  /* Highest differing bit is in the high word: all low-word
	     bits and the high-word bits below it become unknown.  */
	  unsigned HOST_WIDE_INT mask
	    = ((unsigned HOST_WIDE_INT) 1
	       << floor_log2 (xor_mask.high)) - 1;
	  may_be_nonzero->low = ALL_ONES;
	  may_be_nonzero->high |= mask;
	  must_be_nonzero->low = 0;
	  must_be_nonzero->high &= ~mask;
	}
      else if (xor_mask.low != 0)
	{
	  /* Highest differing bit is in the low word: only the bits
	     below it become unknown.  */
	  unsigned HOST_WIDE_INT mask
	    = ((unsigned HOST_WIDE_INT) 1
	       << floor_log2 (xor_mask.low)) - 1;
	  may_be_nonzero->low |= mask;
	  must_be_nonzero->low &= ~mask;
	}
    }

  return true;
}
1989 | ||
748eb1f9 | 1990 | /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR |
1991 | so that *VR0 U *VR1 == *AR. Returns true if that is possible, | |
1992 | false otherwise. If *AR can be represented with a single range | |
1993 | *VR1 will be VR_UNDEFINED. */ | |
1994 | ||
1995 | static bool | |
1996 | ranges_from_anti_range (value_range_t *ar, | |
1997 | value_range_t *vr0, value_range_t *vr1) | |
1998 | { | |
1999 | tree type = TREE_TYPE (ar->min); | |
2000 | ||
2001 | vr0->type = VR_UNDEFINED; | |
2002 | vr1->type = VR_UNDEFINED; | |
2003 | ||
2004 | if (ar->type != VR_ANTI_RANGE | |
2005 | || TREE_CODE (ar->min) != INTEGER_CST | |
2006 | || TREE_CODE (ar->max) != INTEGER_CST | |
2007 | || !vrp_val_min (type) | |
2008 | || !vrp_val_max (type)) | |
2009 | return false; | |
2010 | ||
2011 | if (!vrp_val_is_min (ar->min)) | |
2012 | { | |
2013 | vr0->type = VR_RANGE; | |
2014 | vr0->min = vrp_val_min (type); | |
2015 | vr0->max | |
2016 | = double_int_to_tree (type, | |
cf8f0e63 | 2017 | tree_to_double_int (ar->min) - double_int_one); |
748eb1f9 | 2018 | } |
2019 | if (!vrp_val_is_max (ar->max)) | |
2020 | { | |
2021 | vr1->type = VR_RANGE; | |
2022 | vr1->min | |
2023 | = double_int_to_tree (type, | |
cf8f0e63 | 2024 | tree_to_double_int (ar->max) + double_int_one); |
748eb1f9 | 2025 | vr1->max = vrp_val_max (type); |
2026 | } | |
2027 | if (vr0->type == VR_UNDEFINED) | |
2028 | { | |
2029 | *vr0 = *vr1; | |
2030 | vr1->type = VR_UNDEFINED; | |
2031 | } | |
2032 | ||
2033 | return vr0->type != VR_UNDEFINED; | |
2034 | } | |
2035 | ||
/* Helper to extract a value-range *VR for a multiplicative operation
   *VR0 CODE *VR1.  */

static void
extract_range_from_multiplicative_op_1 (value_range_t *vr,
					enum tree_code code,
					value_range_t *vr0, value_range_t *vr1)
{
  enum value_range_type type;
  tree val[4];
  size_t i;
  tree min, max;
  bool sop;
  int cmp;

  /* Multiplications, divisions and shifts are a bit tricky to handle,
     depending on the mix of signs we have in the two ranges, we
     need to operate on different values to get the minimum and
     maximum values for the new range.  One approach is to figure
     out all the variations of range combinations and do the
     operations.

     However, this involves several calls to compare_values and it
     is pretty convoluted.  It's simpler to do the 4 operations
     (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1)
     and then figure the smallest and largest values to form
     the new range.  */
  gcc_assert (code == MULT_EXPR
	      || code == TRUNC_DIV_EXPR
	      || code == FLOOR_DIV_EXPR
	      || code == CEIL_DIV_EXPR
	      || code == EXACT_DIV_EXPR
	      || code == ROUND_DIV_EXPR
	      || code == RSHIFT_EXPR);
  /* Both operands must have the same range kind; only MULT_EXPR also
     accepts anti-ranges here.  */
  gcc_assert ((vr0->type == VR_RANGE
	       || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
	      && vr0->type == vr1->type);

  /* The result keeps the operands' range kind.  */
  type = vr0->type;

  /* Compute the 4 cross operations.  SOP is set when
     vrp_int_const_binop could not represent an overflowed result
     (it returned NULL_TREE); in that case we give up below.  */
  sop = false;
  val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
  if (val[0] == NULL_TREE)
    sop = true;

  /* Skip cross products that would just repeat VAL[0]'s operands;
     a NULL_TREE entry here means "not computed", not "failed".  */
  if (vr1->max == vr1->min)
    val[1] = NULL_TREE;
  else
    {
      val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
      if (val[1] == NULL_TREE)
	sop = true;
    }

  if (vr0->max == vr0->min)
    val[2] = NULL_TREE;
  else
    {
      val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
      if (val[2] == NULL_TREE)
	sop = true;
    }

  if (vr0->min == vr0->max || vr1->min == vr1->max)
    val[3] = NULL_TREE;
  else
    {
      val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
      if (val[3] == NULL_TREE)
	sop = true;
    }

  if (sop)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Set MIN to the minimum of VAL[i] and MAX to the maximum
     of VAL[i].  */
  min = val[0];
  max = val[0];
  for (i = 1; i < 4; i++)
    {
      /* Stop scanning once the running MIN/MAX is no longer a usable
	 constant (non-invariant, or overflowed without an overflow
	 infinity representation) -- the checks after the loop will
	 then drop the result to VARYING.  */
      if (!is_gimple_min_invariant (min)
	  || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
	  || !is_gimple_min_invariant (max)
	  || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
	break;

      if (val[i])
	{
	  if (!is_gimple_min_invariant (val[i])
	      || (TREE_OVERFLOW (val[i])
		  && !is_overflow_infinity (val[i])))
	    {
	      /* If we found an overflowed value, set MIN and MAX
		 to it so that we set the resulting range to
		 VARYING.  */
	      min = max = val[i];
	      break;
	    }

	  if (compare_values (val[i], min) == -1)
	    min = val[i];

	  if (compare_values (val[i], max) == 1)
	    max = val[i];
	}
    }

  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  But we do accept an overflow infinity
     representation.  */
  if (min == NULL_TREE
      || !is_gimple_min_invariant (min)
      || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
      || max == NULL_TREE
      || !is_gimple_min_invariant (max)
      || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* We punt if:
     1) [-INF, +INF]
     2) [-INF, +-INF(OVF)]
     3) [+-INF(OVF), +INF]
     4) [+-INF(OVF), +-INF(OVF)]
     We learn nothing when we have INF and INF(OVF) on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF] without
     overflow.  */
  if ((vrp_val_is_min (min) || is_overflow_infinity (min))
      && (vrp_val_is_max (max) || is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
    {
      /* If the new range has its limits swapped around (MIN > MAX),
	 then the operation caused one of them to wrap around, mark
	 the new range VARYING.  */
      set_value_range_to_varying (vr);
    }
  else
    set_value_range (vr, type, min, max, NULL);
}
522b9a02 | 2188 | |
4f5712bd | 2189 | /* Some quadruple precision helpers. */ |
2190 | static int | |
2191 | quad_int_cmp (double_int l0, double_int h0, | |
2192 | double_int l1, double_int h1, bool uns) | |
2193 | { | |
cf8f0e63 | 2194 | int c = h0.cmp (h1, uns); |
4f5712bd | 2195 | if (c != 0) return c; |
cf8f0e63 | 2196 | return l0.ucmp (l1); |
4f5712bd | 2197 | } |
2198 | ||
2199 | static void | |
2200 | quad_int_pair_sort (double_int *l0, double_int *h0, | |
2201 | double_int *l1, double_int *h1, bool uns) | |
2202 | { | |
2203 | if (quad_int_cmp (*l0, *h0, *l1, *h1, uns) > 0) | |
2204 | { | |
2205 | double_int tmp; | |
2206 | tmp = *l0; *l0 = *l1; *l1 = tmp; | |
2207 | tmp = *h0; *h0 = *h1; *h1 = tmp; | |
2208 | } | |
2209 | } | |
2210 | ||
c37659ce | 2211 | /* Extract range information from a binary operation CODE based on |
2212 | the ranges of each of its operands, *VR0 and *VR1 with resulting | |
2213 | type EXPR_TYPE. The resulting range is stored in *VR. */ | |
88dbf20f | 2214 | |
2215 | static void | |
c37659ce | 2216 | extract_range_from_binary_expr_1 (value_range_t *vr, |
2217 | enum tree_code code, tree expr_type, | |
2218 | value_range_t *vr0_, value_range_t *vr1_) | |
88dbf20f | 2219 | { |
c37659ce | 2220 | value_range_t vr0 = *vr0_, vr1 = *vr1_; |
748eb1f9 | 2221 | value_range_t vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER; |
0ed3ba34 | 2222 | enum value_range_type type; |
5360e345 | 2223 | tree min = NULL_TREE, max = NULL_TREE; |
88dbf20f | 2224 | int cmp; |
2225 | ||
5360e345 | 2226 | if (!INTEGRAL_TYPE_P (expr_type) |
2227 | && !POINTER_TYPE_P (expr_type)) | |
2228 | { | |
2229 | set_value_range_to_varying (vr); | |
2230 | return; | |
2231 | } | |
2232 | ||
88dbf20f | 2233 | /* Not all binary expressions can be applied to ranges in a |
2234 | meaningful way. Handle only arithmetic operations. */ | |
2235 | if (code != PLUS_EXPR | |
2236 | && code != MINUS_EXPR | |
0de36bdb | 2237 | && code != POINTER_PLUS_EXPR |
88dbf20f | 2238 | && code != MULT_EXPR |
2239 | && code != TRUNC_DIV_EXPR | |
2240 | && code != FLOOR_DIV_EXPR | |
2241 | && code != CEIL_DIV_EXPR | |
2242 | && code != EXACT_DIV_EXPR | |
2243 | && code != ROUND_DIV_EXPR | |
ccab2921 | 2244 | && code != TRUNC_MOD_EXPR |
975070ea | 2245 | && code != RSHIFT_EXPR |
e7ea1c21 | 2246 | && code != LSHIFT_EXPR |
88dbf20f | 2247 | && code != MIN_EXPR |
eea12c72 | 2248 | && code != MAX_EXPR |
b3ded9f8 | 2249 | && code != BIT_AND_EXPR |
6c696748 | 2250 | && code != BIT_IOR_EXPR |
2251 | && code != BIT_XOR_EXPR) | |
88dbf20f | 2252 | { |
e7d43f99 | 2253 | set_value_range_to_varying (vr); |
88dbf20f | 2254 | return; |
2255 | } | |
2256 | ||
fb41023e | 2257 | /* If both ranges are UNDEFINED, so is the result. */ |
2258 | if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED) | |
88dbf20f | 2259 | { |
eea12c72 | 2260 | set_value_range_to_undefined (vr); |
88dbf20f | 2261 | return; |
2262 | } | |
fb41023e | 2263 | /* If one of the ranges is UNDEFINED drop it to VARYING for the following |
2264 | code. At some point we may want to special-case operations that | |
2265 | have UNDEFINED result for all or some value-ranges of the not UNDEFINED | |
2266 | operand. */ | |
2267 | else if (vr0.type == VR_UNDEFINED) | |
2268 | set_value_range_to_varying (&vr0); | |
2269 | else if (vr1.type == VR_UNDEFINED) | |
2270 | set_value_range_to_varying (&vr1); | |
88dbf20f | 2271 | |
748eb1f9 | 2272 | /* Now canonicalize anti-ranges to ranges when they are not symbolic |
2273 | and express ~[] op X as ([]' op X) U ([]'' op X). */ | |
2274 | if (vr0.type == VR_ANTI_RANGE | |
2275 | && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1)) | |
2276 | { | |
2277 | extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_); | |
2278 | if (vrtem1.type != VR_UNDEFINED) | |
2279 | { | |
2280 | value_range_t vrres = VR_INITIALIZER; | |
2281 | extract_range_from_binary_expr_1 (&vrres, code, expr_type, | |
2282 | &vrtem1, vr1_); | |
2283 | vrp_meet (vr, &vrres); | |
2284 | } | |
2285 | return; | |
2286 | } | |
2287 | /* Likewise for X op ~[]. */ | |
2288 | if (vr1.type == VR_ANTI_RANGE | |
2289 | && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1)) | |
2290 | { | |
2291 | extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0); | |
2292 | if (vrtem1.type != VR_UNDEFINED) | |
2293 | { | |
2294 | value_range_t vrres = VR_INITIALIZER; | |
2295 | extract_range_from_binary_expr_1 (&vrres, code, expr_type, | |
2296 | vr0_, &vrtem1); | |
2297 | vrp_meet (vr, &vrres); | |
2298 | } | |
2299 | return; | |
2300 | } | |
2301 | ||
0ed3ba34 | 2302 | /* The type of the resulting value range defaults to VR0.TYPE. */ |
2303 | type = vr0.type; | |
2304 | ||
eea12c72 | 2305 | /* Refuse to operate on VARYING ranges, ranges of different kinds |
b3ded9f8 | 2306 | and symbolic ranges. As an exception, we allow BIT_AND_EXPR |
2307 | because we may be able to derive a useful range even if one of | |
e52dd258 | 2308 | the operands is VR_VARYING or symbolic range. Similarly for |
2309 | divisions. TODO, we may be able to derive anti-ranges in | |
2310 | some cases. */ | |
b3ded9f8 | 2311 | if (code != BIT_AND_EXPR |
cfd7906e | 2312 | && code != BIT_IOR_EXPR |
e52dd258 | 2313 | && code != TRUNC_DIV_EXPR |
2314 | && code != FLOOR_DIV_EXPR | |
2315 | && code != CEIL_DIV_EXPR | |
2316 | && code != EXACT_DIV_EXPR | |
2317 | && code != ROUND_DIV_EXPR | |
ccab2921 | 2318 | && code != TRUNC_MOD_EXPR |
b3ded9f8 | 2319 | && (vr0.type == VR_VARYING |
2320 | || vr1.type == VR_VARYING | |
2321 | || vr0.type != vr1.type | |
2322 | || symbolic_range_p (&vr0) | |
2323 | || symbolic_range_p (&vr1))) | |
88dbf20f | 2324 | { |
e7d43f99 | 2325 | set_value_range_to_varying (vr); |
88dbf20f | 2326 | return; |
2327 | } | |
2328 | ||
2329 | /* Now evaluate the expression to determine the new range. */ | |
c37659ce | 2330 | if (POINTER_TYPE_P (expr_type)) |
88dbf20f | 2331 | { |
c37659ce | 2332 | if (code == MIN_EXPR || code == MAX_EXPR) |
5e3e3575 | 2333 | { |
0de36bdb | 2334 | /* For MIN/MAX expressions with pointers, we only care about |
2335 | nullness, if both are non null, then the result is nonnull. | |
2336 | If both are null, then the result is null. Otherwise they | |
2337 | are varying. */ | |
2338 | if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1)) | |
93116081 | 2339 | set_value_range_to_nonnull (vr, expr_type); |
5e3e3575 | 2340 | else if (range_is_null (&vr0) && range_is_null (&vr1)) |
93116081 | 2341 | set_value_range_to_null (vr, expr_type); |
5e3e3575 | 2342 | else |
2343 | set_value_range_to_varying (vr); | |
2344 | } | |
c37659ce | 2345 | else if (code == POINTER_PLUS_EXPR) |
b03fbfbd | 2346 | { |
2347 | /* For pointer types, we are really only interested in asserting | |
2348 | whether the expression evaluates to non-NULL. */ | |
2349 | if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1)) | |
2350 | set_value_range_to_nonnull (vr, expr_type); | |
2351 | else if (range_is_null (&vr0) && range_is_null (&vr1)) | |
2352 | set_value_range_to_null (vr, expr_type); | |
2353 | else | |
2354 | set_value_range_to_varying (vr); | |
2355 | } | |
2356 | else if (code == BIT_AND_EXPR) | |
2357 | { | |
2358 | /* For pointer types, we are really only interested in asserting | |
2359 | whether the expression evaluates to non-NULL. */ | |
2360 | if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1)) | |
2361 | set_value_range_to_nonnull (vr, expr_type); | |
2362 | else if (range_is_null (&vr0) || range_is_null (&vr1)) | |
2363 | set_value_range_to_null (vr, expr_type); | |
2364 | else | |
2365 | set_value_range_to_varying (vr); | |
2366 | } | |
88dbf20f | 2367 | else |
c37659ce | 2368 | set_value_range_to_varying (vr); |
88dbf20f | 2369 | |
2370 | return; | |
2371 | } | |
2372 | ||
2373 | /* For integer ranges, apply the operation to each end of the | |
2374 | range and see what we end up with. */ | |
c5faecd5 | 2375 | if (code == PLUS_EXPR || code == MINUS_EXPR) |
88dbf20f | 2376 | { |
ac4a8000 | 2377 | /* If we have a PLUS_EXPR with two VR_RANGE integer constant |
2378 | ranges compute the precise range for such case if possible. */ | |
2379 | if (range_int_cst_p (&vr0) | |
2380 | && range_int_cst_p (&vr1) | |
c5faecd5 | 2381 | /* We need as many bits as the possibly unsigned inputs. */ |
2382 | && TYPE_PRECISION (expr_type) <= HOST_BITS_PER_DOUBLE_INT) | |
ac4a8000 | 2383 | { |
2384 | double_int min0 = tree_to_double_int (vr0.min); | |
2385 | double_int max0 = tree_to_double_int (vr0.max); | |
2386 | double_int min1 = tree_to_double_int (vr1.min); | |
2387 | double_int max1 = tree_to_double_int (vr1.max); | |
2388 | bool uns = TYPE_UNSIGNED (expr_type); | |
2389 | double_int type_min | |
cf8f0e63 | 2390 | = double_int::min_value (TYPE_PRECISION (expr_type), uns); |
ac4a8000 | 2391 | double_int type_max |
cf8f0e63 | 2392 | = double_int::max_value (TYPE_PRECISION (expr_type), uns); |
ac4a8000 | 2393 | double_int dmin, dmax; |
c5faecd5 | 2394 | int min_ovf = 0; |
2395 | int max_ovf = 0; | |
ac4a8000 | 2396 | |
c5faecd5 | 2397 | if (code == PLUS_EXPR) |
2398 | { | |
cf8f0e63 | 2399 | dmin = min0 + min1; |
2400 | dmax = max0 + max1; | |
c5faecd5 | 2401 | |
2402 | /* Check for overflow in double_int. */ | |
cf8f0e63 | 2403 | if (min1.cmp (double_int_zero, uns) != dmin.cmp (min0, uns)) |
2404 | min_ovf = min0.cmp (dmin, uns); | |
2405 | if (max1.cmp (double_int_zero, uns) != dmax.cmp (max0, uns)) | |
2406 | max_ovf = max0.cmp (dmax, uns); | |
c5faecd5 | 2407 | } |
2408 | else /* if (code == MINUS_EXPR) */ | |
2409 | { | |
cf8f0e63 | 2410 | dmin = min0 - max1; |
2411 | dmax = max0 - min1; | |
2412 | ||
2413 | if (double_int_zero.cmp (max1, uns) != dmin.cmp (min0, uns)) | |
2414 | min_ovf = min0.cmp (max1, uns); | |
2415 | if (double_int_zero.cmp (min1, uns) != dmax.cmp (max0, uns)) | |
2416 | max_ovf = max0.cmp (min1, uns); | |
c5faecd5 | 2417 | } |
2418 | ||
2419 | /* For non-wrapping arithmetic look at possibly smaller | |
2420 | value-ranges of the type. */ | |
2421 | if (!TYPE_OVERFLOW_WRAPS (expr_type)) | |
2422 | { | |
2423 | if (vrp_val_min (expr_type)) | |
2424 | type_min = tree_to_double_int (vrp_val_min (expr_type)); | |
2425 | if (vrp_val_max (expr_type)) | |
2426 | type_max = tree_to_double_int (vrp_val_max (expr_type)); | |
2427 | } | |
2428 | ||
2429 | /* Check for type overflow. */ | |
2430 | if (min_ovf == 0) | |
2431 | { | |
cf8f0e63 | 2432 | if (dmin.cmp (type_min, uns) == -1) |
c5faecd5 | 2433 | min_ovf = -1; |
cf8f0e63 | 2434 | else if (dmin.cmp (type_max, uns) == 1) |
c5faecd5 | 2435 | min_ovf = 1; |
2436 | } | |
2437 | if (max_ovf == 0) | |
2438 | { | |
cf8f0e63 | 2439 | if (dmax.cmp (type_min, uns) == -1) |
c5faecd5 | 2440 | max_ovf = -1; |
cf8f0e63 | 2441 | else if (dmax.cmp (type_max, uns) == 1) |
c5faecd5 | 2442 | max_ovf = 1; |
2443 | } | |
ac4a8000 | 2444 | |
2445 | if (TYPE_OVERFLOW_WRAPS (expr_type)) | |
2446 | { | |
2447 | /* If overflow wraps, truncate the values and adjust the | |
2448 | range kind and bounds appropriately. */ | |
2449 | double_int tmin | |
cf8f0e63 | 2450 | = dmin.ext (TYPE_PRECISION (expr_type), uns); |
ac4a8000 | 2451 | double_int tmax |
cf8f0e63 | 2452 | = dmax.ext (TYPE_PRECISION (expr_type), uns); |
c5faecd5 | 2453 | if (min_ovf == max_ovf) |
ac4a8000 | 2454 | { |
2455 | /* No overflow or both overflow or underflow. The | |
2456 | range kind stays VR_RANGE. */ | |
2457 | min = double_int_to_tree (expr_type, tmin); | |
2458 | max = double_int_to_tree (expr_type, tmax); | |
2459 | } | |
c5faecd5 | 2460 | else if (min_ovf == -1 |
2461 | && max_ovf == 1) | |
ac4a8000 | 2462 | { |
2463 | /* Underflow and overflow, drop to VR_VARYING. */ | |
2464 | set_value_range_to_varying (vr); | |
2465 | return; | |
2466 | } | |
2467 | else | |
2468 | { | |
2469 | /* Min underflow or max overflow. The range kind | |
2470 | changes to VR_ANTI_RANGE. */ | |
b7c599a6 | 2471 | bool covers = false; |
ac4a8000 | 2472 | double_int tem = tmin; |
c5faecd5 | 2473 | gcc_assert ((min_ovf == -1 && max_ovf == 0) |
2474 | || (max_ovf == 1 && min_ovf == 0)); | |
ac4a8000 | 2475 | type = VR_ANTI_RANGE; |
cf8f0e63 | 2476 | tmin = tmax + double_int_one; |
2477 | if (tmin.cmp (tmax, uns) < 0) | |
b7c599a6 | 2478 | covers = true; |
cf8f0e63 | 2479 | tmax = tem + double_int_minus_one; |
b7c599a6 | 2480 | if (double_int_cmp (tmax, tem, uns) > 0) |
2481 | covers = true; | |
ac4a8000 | 2482 | /* If the anti-range would cover nothing, drop to varying. |
2483 | Likewise if the anti-range bounds are outside of the | |
2484 | types values. */ | |
cf8f0e63 | 2485 | if (covers || tmin.cmp (tmax, uns) > 0) |
ac4a8000 | 2486 | { |
2487 | set_value_range_to_varying (vr); | |
2488 | return; | |
2489 | } | |
2490 | min = double_int_to_tree (expr_type, tmin); | |
2491 | max = double_int_to_tree (expr_type, tmax); | |
2492 | } | |
2493 | } | |
2494 | else | |
2495 | { | |
ac4a8000 | 2496 | /* If overflow does not wrap, saturate to the types min/max |
2497 | value. */ | |
c5faecd5 | 2498 | if (min_ovf == -1) |
ac4a8000 | 2499 | { |
2500 | if (needs_overflow_infinity (expr_type) | |
2501 | && supports_overflow_infinity (expr_type)) | |
2502 | min = negative_overflow_infinity (expr_type); | |
2503 | else | |
2504 | min = double_int_to_tree (expr_type, type_min); | |
2505 | } | |
c5faecd5 | 2506 | else if (min_ovf == 1) |
ac4a8000 | 2507 | { |
2508 | if (needs_overflow_infinity (expr_type) | |
2509 | && supports_overflow_infinity (expr_type)) | |
2510 | min = positive_overflow_infinity (expr_type); | |
2511 | else | |
2512 | min = double_int_to_tree (expr_type, type_max); | |
2513 | } | |
2514 | else | |
2515 | min = double_int_to_tree (expr_type, dmin); | |
2516 | ||
c5faecd5 | 2517 | if (max_ovf == -1) |
ac4a8000 | 2518 | { |
2519 | if (needs_overflow_infinity (expr_type) | |
2520 | && supports_overflow_infinity (expr_type)) | |
2521 | max = negative_overflow_infinity (expr_type); | |
2522 | else | |
2523 | max = double_int_to_tree (expr_type, type_min); | |
2524 | } | |
c5faecd5 | 2525 | else if (max_ovf == 1) |
ac4a8000 | 2526 | { |
2527 | if (needs_overflow_infinity (expr_type) | |
2528 | && supports_overflow_infinity (expr_type)) | |
2529 | max = positive_overflow_infinity (expr_type); | |
2530 | else | |
2531 | max = double_int_to_tree (expr_type, type_max); | |
2532 | } | |
2533 | else | |
2534 | max = double_int_to_tree (expr_type, dmax); | |
2535 | } | |
2536 | if (needs_overflow_infinity (expr_type) | |
2537 | && supports_overflow_infinity (expr_type)) | |
2538 | { | |
2539 | if (is_negative_overflow_infinity (vr0.min) | |
c5faecd5 | 2540 | || (code == PLUS_EXPR |
2541 | ? is_negative_overflow_infinity (vr1.min) | |
2542 | : is_positive_overflow_infinity (vr1.max))) | |
ac4a8000 | 2543 | min = negative_overflow_infinity (expr_type); |
2544 | if (is_positive_overflow_infinity (vr0.max) | |
c5faecd5 | 2545 | || (code == PLUS_EXPR |
2546 | ? is_positive_overflow_infinity (vr1.max) | |
2547 | : is_negative_overflow_infinity (vr1.min))) | |
ac4a8000 | 2548 | max = positive_overflow_infinity (expr_type); |
2549 | } | |
2550 | } | |
2551 | else | |
6285cf63 | 2552 | { |
ac4a8000 | 2553 | /* For other cases, for example if we have a PLUS_EXPR with two |
2554 | VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort | |
2555 | to compute a precise range for such a case. | |
2556 | ??? General even mixed range kind operations can be expressed | |
2557 | by for example transforming ~[3, 5] + [1, 2] to range-only | |
2558 | operations and a union primitive: | |
2559 | [-INF, 2] + [1, 2] U [5, +INF] + [1, 2] | |
2560 | [-INF+1, 4] U [6, +INF(OVF)] | |
2561 | though usually the union is not exactly representable with | |
2562 | a single range or anti-range as the above is | |
2563 | [-INF+1, +INF(OVF)] intersected with ~[5, 5] | |
2564 | but one could use a scheme similar to equivalences for this. */ | |
5360e345 | 2565 | set_value_range_to_varying (vr); |
2566 | return; | |
6285cf63 | 2567 | } |
88dbf20f | 2568 | } |
5360e345 | 2569 | else if (code == MIN_EXPR |
2570 | || code == MAX_EXPR) | |
2571 | { | |
2572 | if (vr0.type == VR_ANTI_RANGE) | |
2573 | { | |
2574 | /* For MIN_EXPR and MAX_EXPR with two VR_ANTI_RANGEs, | |
2575 | the resulting VR_ANTI_RANGE is the same - intersection | |
2576 | of the two ranges. */ | |
2577 | min = vrp_int_const_binop (MAX_EXPR, vr0.min, vr1.min); | |
2578 | max = vrp_int_const_binop (MIN_EXPR, vr0.max, vr1.max); | |
2579 | } | |
2580 | else | |
2581 | { | |
2582 | /* For operations that make the resulting range directly | |
2583 | proportional to the original ranges, apply the operation to | |
2584 | the same end of each range. */ | |
2585 | min = vrp_int_const_binop (code, vr0.min, vr1.min); | |
2586 | max = vrp_int_const_binop (code, vr0.max, vr1.max); | |
2587 | } | |
2588 | } | |
2589 | else if (code == MULT_EXPR) | |
88dbf20f | 2590 | { |
4f5712bd | 2591 | /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not |
2592 | drop to varying. */ | |
2593 | if (range_int_cst_p (&vr0) | |
2594 | && range_int_cst_p (&vr1) | |
2595 | && TYPE_OVERFLOW_WRAPS (expr_type)) | |
2596 | { | |
2597 | double_int min0, max0, min1, max1, sizem1, size; | |
2598 | double_int prod0l, prod0h, prod1l, prod1h, | |
2599 | prod2l, prod2h, prod3l, prod3h; | |
2600 | bool uns0, uns1, uns; | |
2601 | ||
cf8f0e63 | 2602 | sizem1 = double_int::max_value (TYPE_PRECISION (expr_type), true); |
2603 | size = sizem1 + double_int_one; | |
4f5712bd | 2604 | |
2605 | min0 = tree_to_double_int (vr0.min); | |
2606 | max0 = tree_to_double_int (vr0.max); | |
2607 | min1 = tree_to_double_int (vr1.min); | |
2608 | max1 = tree_to_double_int (vr1.max); | |
2609 | ||
2610 | uns0 = TYPE_UNSIGNED (expr_type); | |
2611 | uns1 = uns0; | |
2612 | ||
2613 | /* Canonicalize the intervals. */ | |
2614 | if (TYPE_UNSIGNED (expr_type)) | |
2615 | { | |
cf8f0e63 | 2616 | double_int min2 = size - min0; |
2617 | if (min2.cmp (max0, true) < 0) | |
4f5712bd | 2618 | { |
cf8f0e63 | 2619 | min0 = -min2; |
2620 | max0 -= size; | |
4f5712bd | 2621 | uns0 = false; |
2622 | } | |
2623 | ||
cf8f0e63 | 2624 | min2 = size - min1; |
2625 | if (min2.cmp (max1, true) < 0) | |
4f5712bd | 2626 | { |
cf8f0e63 | 2627 | min1 = -min2; |
2628 | max1 -= size; | |
4f5712bd | 2629 | uns1 = false; |
2630 | } | |
2631 | } | |
2632 | uns = uns0 & uns1; | |
2633 | ||
2634 | mul_double_wide_with_sign (min0.low, min0.high, | |
2635 | min1.low, min1.high, | |
2636 | &prod0l.low, &prod0l.high, | |
2637 | &prod0h.low, &prod0h.high, true); | |
cf8f0e63 | 2638 | if (!uns0 && min0.is_negative ()) |
2639 | prod0h -= min1; | |
2640 | if (!uns1 && min1.is_negative ()) | |
2641 | prod0h -= min0; | |
4f5712bd | 2642 | |
2643 | mul_double_wide_with_sign (min0.low, min0.high, | |
2644 | max1.low, max1.high, | |
2645 | &prod1l.low, &prod1l.high, | |
2646 | &prod1h.low, &prod1h.high, true); | |
cf8f0e63 | 2647 | if (!uns0 && min0.is_negative ()) |
2648 | prod1h -= max1; | |
2649 | if (!uns1 && max1.is_negative ()) | |
2650 | prod1h -= min0; | |
4f5712bd | 2651 | |
2652 | mul_double_wide_with_sign (max0.low, max0.high, | |
2653 | min1.low, min1.high, | |
2654 | &prod2l.low, &prod2l.high, | |
2655 | &prod2h.low, &prod2h.high, true); | |
cf8f0e63 | 2656 | if (!uns0 && max0.is_negative ()) |
2657 | prod2h -= min1; | |
2658 | if (!uns1 && min1.is_negative ()) | |
2659 | prod2h -= max0; | |
4f5712bd | 2660 | |
2661 | mul_double_wide_with_sign (max0.low, max0.high, | |
2662 | max1.low, max1.high, | |
2663 | &prod3l.low, &prod3l.high, | |
2664 | &prod3h.low, &prod3h.high, true); | |
cf8f0e63 | 2665 | if (!uns0 && max0.is_negative ()) |
2666 | prod3h -= max1; | |
2667 | if (!uns1 && max1.is_negative ()) | |
2668 | prod3h -= max0; | |
4f5712bd | 2669 | |
2670 | /* Sort the 4 products. */ | |
2671 | quad_int_pair_sort (&prod0l, &prod0h, &prod3l, &prod3h, uns); | |
2672 | quad_int_pair_sort (&prod1l, &prod1h, &prod2l, &prod2h, uns); | |
2673 | quad_int_pair_sort (&prod0l, &prod0h, &prod1l, &prod1h, uns); | |
2674 | quad_int_pair_sort (&prod2l, &prod2h, &prod3l, &prod3h, uns); | |
2675 | ||
2676 | /* Max - min. */ | |
cf8f0e63 | 2677 | if (prod0l.is_zero ()) |
4f5712bd | 2678 | { |
2679 | prod1l = double_int_zero; | |
cf8f0e63 | 2680 | prod1h = -prod0h; |
4f5712bd | 2681 | } |
2682 | else | |
2683 | { | |
cf8f0e63 | 2684 | prod1l = -prod0l; |
2685 | prod1h = ~prod0h; | |
4f5712bd | 2686 | } |
cf8f0e63 | 2687 | prod2l = prod3l + prod1l; |
2688 | prod2h = prod3h + prod1h; | |
2689 | if (prod2l.ult (prod3l)) | |
2690 | prod2h += double_int_one; /* carry */ | |
4f5712bd | 2691 | |
cf8f0e63 | 2692 | if (!prod2h.is_zero () |
2693 | || prod2l.cmp (sizem1, true) >= 0) | |
4f5712bd | 2694 | { |
2695 | /* the range covers all values. */ | |
2696 | set_value_range_to_varying (vr); | |
2697 | return; | |
2698 | } | |
2699 | ||
2700 | /* The following should handle the wrapping and selecting | |
2701 | VR_ANTI_RANGE for us. */ | |
2702 | min = double_int_to_tree (expr_type, prod0l); | |
2703 | max = double_int_to_tree (expr_type, prod3l); | |
2704 | set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL); | |
2705 | return; | |
2706 | } | |
2707 | ||
6285cf63 | 2708 | /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs, |
2709 | drop to VR_VARYING. It would take more effort to compute a | |
2710 | precise range for such a case. For example, if we have | |
2711 | op0 == 65536 and op1 == 65536 with their ranges both being | |
2712 | ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so | |
2713 | we cannot claim that the product is in ~[0,0]. Note that we | |
2714 | are guaranteed to have vr0.type == vr1.type at this | |
2715 | point. */ | |
5360e345 | 2716 | if (vr0.type == VR_ANTI_RANGE |
c37659ce | 2717 | && !TYPE_OVERFLOW_UNDEFINED (expr_type)) |
6285cf63 | 2718 | { |
2719 | set_value_range_to_varying (vr); | |
2720 | return; | |
2721 | } | |
2722 | ||
5360e345 | 2723 | extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); |
2724 | return; | |
2725 | } | |
e208bd44 | 2726 | else if (code == RSHIFT_EXPR |
2727 | || code == LSHIFT_EXPR) | |
5360e345 | 2728 | { |
6291249b | 2729 | /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1], |
2730 | then drop to VR_VARYING. Outside of this range we get undefined | |
efec32e0 | 2731 | behavior from the shift operation. We cannot even trust |
6291249b | 2732 | SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl |
2733 | shifts, and the operation at the tree level may be widened. */ | |
e208bd44 | 2734 | if (range_int_cst_p (&vr1) |
2735 | && compare_tree_int (vr1.min, 0) >= 0 | |
2736 | && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1) | |
62065c0b | 2737 | { |
e208bd44 | 2738 | if (code == RSHIFT_EXPR) |
2739 | { | |
2740 | extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); | |
2741 | return; | |
2742 | } | |
2743 | /* We can map lshifts by constants to MULT_EXPR handling. */ | |
2744 | else if (code == LSHIFT_EXPR | |
2745 | && range_int_cst_singleton_p (&vr1)) | |
2746 | { | |
2747 | bool saved_flag_wrapv; | |
2748 | value_range_t vr1p = VR_INITIALIZER; | |
2749 | vr1p.type = VR_RANGE; | |
2750 | vr1p.min | |
2751 | = double_int_to_tree (expr_type, | |
cf8f0e63 | 2752 | double_int_one |
2753 | .llshift (TREE_INT_CST_LOW (vr1.min), | |
2754 | TYPE_PRECISION (expr_type))); | |
e208bd44 | 2755 | vr1p.max = vr1p.min; |
2756 | /* We have to use a wrapping multiply though as signed overflow | |
2757 | on lshifts is implementation defined in C89. */ | |
2758 | saved_flag_wrapv = flag_wrapv; | |
2759 | flag_wrapv = 1; | |
2760 | extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type, | |
2761 | &vr0, &vr1p); | |
2762 | flag_wrapv = saved_flag_wrapv; | |
2763 | return; | |
2764 | } | |
e7ea1c21 | 2765 | } |
e7ea1c21 | 2766 | set_value_range_to_varying (vr); |
2767 | return; | |
2768 | } | |
5360e345 | 2769 | else if (code == TRUNC_DIV_EXPR |
2770 | || code == FLOOR_DIV_EXPR | |
2771 | || code == CEIL_DIV_EXPR | |
2772 | || code == EXACT_DIV_EXPR | |
2773 | || code == ROUND_DIV_EXPR) | |
2774 | { | |
2775 | if (vr0.type != VR_RANGE || symbolic_range_p (&vr0)) | |
e52dd258 | 2776 | { |
2777 | /* For division, if op1 has VR_RANGE but op0 does not, something | |
2778 | can be deduced just from that range. Say [min, max] / [4, max] | |
2779 | gives [min / 4, max / 4] range. */ | |
2780 | if (vr1.type == VR_RANGE | |
2781 | && !symbolic_range_p (&vr1) | |
7d48cd66 | 2782 | && range_includes_zero_p (vr1.min, vr1.max) == 0) |
e52dd258 | 2783 | { |
2784 | vr0.type = type = VR_RANGE; | |
c37659ce | 2785 | vr0.min = vrp_val_min (expr_type); |
2786 | vr0.max = vrp_val_max (expr_type); | |
e52dd258 | 2787 | } |
2788 | else | |
2789 | { | |
2790 | set_value_range_to_varying (vr); | |
2791 | return; | |
2792 | } | |
2793 | } | |
2794 | ||
47d397e1 | 2795 | /* For divisions, if flag_non_call_exceptions is true, we must |
2796 | not eliminate a division by zero. */ | |
5360e345 | 2797 | if (cfun->can_throw_non_call_exceptions |
47d397e1 | 2798 | && (vr1.type != VR_RANGE |
7d48cd66 | 2799 | || range_includes_zero_p (vr1.min, vr1.max) != 0)) |
47d397e1 | 2800 | { |
2801 | set_value_range_to_varying (vr); | |
2802 | return; | |
2803 | } | |
2804 | ||
e52dd258 | 2805 | /* For divisions, if op0 is VR_RANGE, we can deduce a range |
2806 | even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can | |
2807 | include 0. */ | |
5360e345 | 2808 | if (vr0.type == VR_RANGE |
e52dd258 | 2809 | && (vr1.type != VR_RANGE |
7d48cd66 | 2810 | || range_includes_zero_p (vr1.min, vr1.max) != 0)) |
e52dd258 | 2811 | { |
2812 | tree zero = build_int_cst (TREE_TYPE (vr0.min), 0); | |
2813 | int cmp; | |
2814 | ||
e52dd258 | 2815 | min = NULL_TREE; |
2816 | max = NULL_TREE; | |
c37659ce | 2817 | if (TYPE_UNSIGNED (expr_type) |
2818 | || value_range_nonnegative_p (&vr1)) | |
e52dd258 | 2819 | { |
2820 | /* For unsigned division or when divisor is known | |
2821 | to be non-negative, the range has to cover | |
2822 | all numbers from 0 to max for positive max | |
2823 | and all numbers from min to 0 for negative min. */ | |
2824 | cmp = compare_values (vr0.max, zero); | |
2825 | if (cmp == -1) | |
2826 | max = zero; | |
2827 | else if (cmp == 0 || cmp == 1) | |
2828 | max = vr0.max; | |
2829 | else | |
2830 | type = VR_VARYING; | |
2831 | cmp = compare_values (vr0.min, zero); | |
2832 | if (cmp == 1) | |
2833 | min = zero; | |
2834 | else if (cmp == 0 || cmp == -1) | |
2835 | min = vr0.min; | |
2836 | else | |
2837 | type = VR_VARYING; | |
2838 | } | |
2839 | else | |
2840 | { | |
2841 | /* Otherwise the range is -max .. max or min .. -min | |
2842 | depending on which bound is bigger in absolute value, | |
2843 | as the division can change the sign. */ | |
2844 | abs_extent_range (vr, vr0.min, vr0.max); | |
2845 | return; | |
2846 | } | |
2847 | if (type == VR_VARYING) | |
2848 | { | |
2849 | set_value_range_to_varying (vr); | |
2850 | return; | |
2851 | } | |
2852 | } | |
c3783c3b | 2853 | else |
2854 | { | |
5360e345 | 2855 | extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); |
2856 | return; | |
eea12c72 | 2857 | } |
2858 | } | |
ebc6c513 | 2859 | else if (code == TRUNC_MOD_EXPR) |
ccab2921 | 2860 | { |
ebc6c513 | 2861 | if (vr1.type != VR_RANGE |
7d48cd66 | 2862 | || range_includes_zero_p (vr1.min, vr1.max) != 0 |
ebc6c513 | 2863 | || vrp_val_is_min (vr1.min)) |
ccab2921 | 2864 | { |
2865 | set_value_range_to_varying (vr); | |
2866 | return; | |
2867 | } | |
2868 | type = VR_RANGE; | |
ebc6c513 | 2869 | /* Compute MAX <|vr1.min|, |vr1.max|> - 1. */ |
c37659ce | 2870 | max = fold_unary_to_constant (ABS_EXPR, expr_type, vr1.min); |
ebc6c513 | 2871 | if (tree_int_cst_lt (max, vr1.max)) |
2872 | max = vr1.max; | |
317e2a67 | 2873 | max = int_const_binop (MINUS_EXPR, max, integer_one_node); |
ebc6c513 | 2874 | /* If the dividend is non-negative the modulus will be |
2875 | non-negative as well. */ | |
c37659ce | 2876 | if (TYPE_UNSIGNED (expr_type) |
2877 | || value_range_nonnegative_p (&vr0)) | |
ebc6c513 | 2878 | min = build_int_cst (TREE_TYPE (max), 0); |
ccab2921 | 2879 | else |
c37659ce | 2880 | min = fold_unary_to_constant (NEGATE_EXPR, expr_type, max); |
ccab2921 | 2881 | } |
6c696748 | 2882 | else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR) |
b3ded9f8 | 2883 | { |
522b9a02 | 2884 | bool int_cst_range0, int_cst_range1; |
2885 | double_int may_be_nonzero0, may_be_nonzero1; | |
2886 | double_int must_be_nonzero0, must_be_nonzero1; | |
bca0860e | 2887 | |
522b9a02 | 2888 | int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, |
2889 | &must_be_nonzero0); | |
2890 | int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, | |
2891 | &must_be_nonzero1); | |
bca0860e | 2892 | |
522b9a02 | 2893 | type = VR_RANGE; |
f26adbc1 | 2894 | if (code == BIT_AND_EXPR) |
0c0b52bd | 2895 | { |
63bb6dcf | 2896 | double_int dmax; |
522b9a02 | 2897 | min = double_int_to_tree (expr_type, |
cf8f0e63 | 2898 | must_be_nonzero0 & must_be_nonzero1); |
2899 | dmax = may_be_nonzero0 & may_be_nonzero1; | |
63bb6dcf | 2900 | /* If both input ranges contain only negative values we can |
2901 | truncate the result range maximum to the minimum of the | |
2902 | input range maxima. */ | |
2903 | if (int_cst_range0 && int_cst_range1 | |
2904 | && tree_int_cst_sgn (vr0.max) < 0 | |
2905 | && tree_int_cst_sgn (vr1.max) < 0) | |
0c0b52bd | 2906 | { |
cf8f0e63 | 2907 | dmax = dmax.min (tree_to_double_int (vr0.max), |
63bb6dcf | 2908 | TYPE_UNSIGNED (expr_type)); |
cf8f0e63 | 2909 | dmax = dmax.min (tree_to_double_int (vr1.max), |
63bb6dcf | 2910 | TYPE_UNSIGNED (expr_type)); |
0c0b52bd | 2911 | } |
63bb6dcf | 2912 | /* If either input range contains only non-negative values |
2913 | we can truncate the result range maximum to the respective | |
2914 | maximum of the input range. */ | |
2915 | if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0) | |
cf8f0e63 | 2916 | dmax = dmax.min (tree_to_double_int (vr0.max), |
63bb6dcf | 2917 | TYPE_UNSIGNED (expr_type)); |
522b9a02 | 2918 | if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0) |
cf8f0e63 | 2919 | dmax = dmax.min (tree_to_double_int (vr1.max), |
63bb6dcf | 2920 | TYPE_UNSIGNED (expr_type)); |
2921 | max = double_int_to_tree (expr_type, dmax); | |
b3ded9f8 | 2922 | } |
f26adbc1 | 2923 | else if (code == BIT_IOR_EXPR) |
e31161b3 | 2924 | { |
63bb6dcf | 2925 | double_int dmin; |
522b9a02 | 2926 | max = double_int_to_tree (expr_type, |
cf8f0e63 | 2927 | may_be_nonzero0 | may_be_nonzero1); |
2928 | dmin = must_be_nonzero0 | must_be_nonzero1; | |
63bb6dcf | 2929 | /* If the input ranges contain only positive values we can |
2930 | truncate the minimum of the result range to the maximum | |
2931 | of the input range minima. */ | |
2932 | if (int_cst_range0 && int_cst_range1 | |
2933 | && tree_int_cst_sgn (vr0.min) >= 0 | |
2934 | && tree_int_cst_sgn (vr1.min) >= 0) | |
f26adbc1 | 2935 | { |
cf8f0e63 | 2936 | dmin = dmin.max (tree_to_double_int (vr0.min), |
2937 | TYPE_UNSIGNED (expr_type)); | |
2938 | dmin = dmin.max (tree_to_double_int (vr1.min), | |
2939 | TYPE_UNSIGNED (expr_type)); | |
f26adbc1 | 2940 | } |
63bb6dcf | 2941 | /* If either input range contains only negative values |
2942 | we can truncate the minimum of the result range to the | |
2943 | respective minimum range. */ | |
2944 | if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0) | |
cf8f0e63 | 2945 | dmin = dmin.max (tree_to_double_int (vr0.min), |
2946 | TYPE_UNSIGNED (expr_type)); | |
63bb6dcf | 2947 | if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0) |
cf8f0e63 | 2948 | dmin = dmin.max (tree_to_double_int (vr1.min), |
2949 | TYPE_UNSIGNED (expr_type)); | |
63bb6dcf | 2950 | min = double_int_to_tree (expr_type, dmin); |
f26adbc1 | 2951 | } |
6c696748 | 2952 | else if (code == BIT_XOR_EXPR) |
2953 | { | |
2954 | double_int result_zero_bits, result_one_bits; | |
cf8f0e63 | 2955 | result_zero_bits = (must_be_nonzero0 & must_be_nonzero1) |
2956 | | ~(may_be_nonzero0 | may_be_nonzero1); | |
2957 | result_one_bits = must_be_nonzero0.and_not (may_be_nonzero1) | |
2958 | | must_be_nonzero1.and_not (may_be_nonzero0); | |
2959 | max = double_int_to_tree (expr_type, ~result_zero_bits); | |
6c696748 | 2960 | min = double_int_to_tree (expr_type, result_one_bits); |
63bb6dcf | 2961 | /* If the range has all positive or all negative values the |
2962 | result is better than VARYING. */ | |
2963 | if (tree_int_cst_sgn (min) < 0 | |
2964 | || tree_int_cst_sgn (max) >= 0) | |
2965 | ; | |
6c696748 | 2966 | else |
6c696748 | 2967 | max = min = NULL_TREE; |
2968 | } | |
e31161b3 | 2969 | } |
eea12c72 | 2970 | else |
2971 | gcc_unreachable (); | |
ed19cf09 | 2972 | |
c25c642e | 2973 | /* If either MIN or MAX overflowed, then set the resulting range to |
c3783c3b | 2974 | VARYING. But we do accept an overflow infinity |
2975 | representation. */ | |
2976 | if (min == NULL_TREE | |
2977 | || !is_gimple_min_invariant (min) | |
2978 | || (TREE_OVERFLOW (min) && !is_overflow_infinity (min)) | |
2979 | || max == NULL_TREE | |
2980 | || !is_gimple_min_invariant (max) | |
2981 | || (TREE_OVERFLOW (max) && !is_overflow_infinity (max))) | |
2982 | { | |
2983 | set_value_range_to_varying (vr); | |
2984 | return; | |
2985 | } | |
2986 | ||
c68b42d2 | 2987 | /* We punt if: |
2988 | 1) [-INF, +INF] | |
2989 | 2) [-INF, +-INF(OVF)] | |
2990 | 3) [+-INF(OVF), +INF] | |
2991 | 4) [+-INF(OVF), +-INF(OVF)] | |
2992 | We learn nothing when we have INF and INF(OVF) on both sides. | |
2993 | Note that we do accept [-INF, -INF] and [+INF, +INF] without | |
2994 | overflow. */ | |
b876a744 | 2995 | if ((vrp_val_is_min (min) || is_overflow_infinity (min)) |
2996 | && (vrp_val_is_max (max) || is_overflow_infinity (max))) | |
eea12c72 | 2997 | { |
c25c642e | 2998 | set_value_range_to_varying (vr); |
2999 | return; | |
ed19cf09 | 3000 | } |
3001 | ||
eea12c72 | 3002 | cmp = compare_values (min, max); |
3003 | if (cmp == -2 || cmp == 1) | |
3004 | { | |
3005 | /* If the new range has its limits swapped around (MIN > MAX), | |
3006 | then the operation caused one of them to wrap around, mark | |
3007 | the new range VARYING. */ | |
3008 | set_value_range_to_varying (vr); | |
3009 | } | |
3010 | else | |
0ed3ba34 | 3011 | set_value_range (vr, type, min, max, NULL); |
ed19cf09 | 3012 | } |
3013 | ||
c37659ce | 3014 | /* Extract range information from a binary expression OP0 CODE OP1 based on |
3015 | the ranges of each of its operands with resulting type EXPR_TYPE. | |
3016 | The resulting range is stored in *VR. */ | |
3017 | ||
3018 | static void | |
3019 | extract_range_from_binary_expr (value_range_t *vr, | |
3020 | enum tree_code code, | |
3021 | tree expr_type, tree op0, tree op1) | |
3022 | { | |
748eb1f9 | 3023 | value_range_t vr0 = VR_INITIALIZER; |
3024 | value_range_t vr1 = VR_INITIALIZER; | |
c37659ce | 3025 | |
3026 | /* Get value ranges for each operand. For constant operands, create | |
3027 | a new value range with the operand to simplify processing. */ | |
3028 | if (TREE_CODE (op0) == SSA_NAME) | |
3029 | vr0 = *(get_value_range (op0)); | |
3030 | else if (is_gimple_min_invariant (op0)) | |
3031 | set_value_range_to_value (&vr0, op0, NULL); | |
3032 | else | |
3033 | set_value_range_to_varying (&vr0); | |
3034 | ||
3035 | if (TREE_CODE (op1) == SSA_NAME) | |
3036 | vr1 = *(get_value_range (op1)); | |
3037 | else if (is_gimple_min_invariant (op1)) | |
3038 | set_value_range_to_value (&vr1, op1, NULL); | |
3039 | else | |
3040 | set_value_range_to_varying (&vr1); | |
3041 | ||
3042 | extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1); | |
3043 | } | |
ed19cf09 | 3044 | |
113fbe09 | 3045 | /* Extract range information from a unary operation CODE based on |
3046 | the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE. | |
 3047 | The resulting range is stored in *VR. */ |
88dbf20f | 3048 | |
3049 | static void | |
113fbe09 | 3050 | extract_range_from_unary_expr_1 (value_range_t *vr, |
3051 | enum tree_code code, tree type, | |
3052 | value_range_t *vr0_, tree op0_type) | |
88dbf20f | 3053 | { |
748eb1f9 | 3054 | value_range_t vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER; |
eea12c72 | 3055 | |
713b2724 | 3056 | /* VRP only operates on integral and pointer types. */ |
3057 | if (!(INTEGRAL_TYPE_P (op0_type) | |
3058 | || POINTER_TYPE_P (op0_type)) | |
3059 | || !(INTEGRAL_TYPE_P (type) | |
3060 | || POINTER_TYPE_P (type))) | |
eea12c72 | 3061 | { |
3062 | set_value_range_to_varying (vr); | |
3063 | return; | |
3064 | } | |
88dbf20f | 3065 | |
713b2724 | 3066 | /* If VR0 is UNDEFINED, so is the result. */ |
3067 | if (vr0.type == VR_UNDEFINED) | |
88dbf20f | 3068 | { |
713b2724 | 3069 | set_value_range_to_undefined (vr); |
88dbf20f | 3070 | return; |
3071 | } | |
3072 | ||
748eb1f9 | 3073 | /* Handle operations that we express in terms of others. */ |
3074 | if (code == PAREN_EXPR) | |
3075 | { | |
3076 | /* PAREN_EXPR is a simple copy. */ | |
3077 | copy_value_range (vr, &vr0); | |
3078 | return; | |
3079 | } | |
3080 | else if (code == NEGATE_EXPR) | |
3081 | { | |
3082 | /* -X is simply 0 - X, so re-use existing code that also handles | |
3083 | anti-ranges fine. */ | |
3084 | value_range_t zero = VR_INITIALIZER; | |
3085 | set_value_range_to_value (&zero, build_int_cst (type, 0), NULL); | |
3086 | extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0); | |
3087 | return; | |
3088 | } | |
3089 | else if (code == BIT_NOT_EXPR) | |
3090 | { | |
3091 | /* ~X is simply -1 - X, so re-use existing code that also handles | |
3092 | anti-ranges fine. */ | |
3093 | value_range_t minusone = VR_INITIALIZER; | |
3094 | set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL); | |
3095 | extract_range_from_binary_expr_1 (vr, MINUS_EXPR, | |
3096 | type, &minusone, &vr0); | |
3097 | return; | |
3098 | } | |
3099 | ||
3100 | /* Now canonicalize anti-ranges to ranges when they are not symbolic | |
3101 | and express op ~[] as (op []') U (op []''). */ | |
3102 | if (vr0.type == VR_ANTI_RANGE | |
3103 | && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1)) | |
3104 | { | |
3105 | extract_range_from_unary_expr_1 (vr, code, type, &vrtem0, op0_type); | |
3106 | if (vrtem1.type != VR_UNDEFINED) | |
3107 | { | |
3108 | value_range_t vrres = VR_INITIALIZER; | |
3109 | extract_range_from_unary_expr_1 (&vrres, code, type, | |
3110 | &vrtem1, op0_type); | |
3111 | vrp_meet (vr, &vrres); | |
3112 | } | |
3113 | return; | |
3114 | } | |
3115 | ||
713b2724 | 3116 | if (CONVERT_EXPR_CODE_P (code)) |
88dbf20f | 3117 | { |
113fbe09 | 3118 | tree inner_type = op0_type; |
93116081 | 3119 | tree outer_type = type; |
80096ea0 | 3120 | |
713b2724 | 3121 | /* If the expression evaluates to a pointer, we are only interested in |
3122 | determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */ | |
3123 | if (POINTER_TYPE_P (type)) | |
3124 | { | |
81e042f9 | 3125 | if (range_is_nonnull (&vr0)) |
3126 | set_value_range_to_nonnull (vr, type); | |
3127 | else if (range_is_null (&vr0)) | |
3128 | set_value_range_to_null (vr, type); | |
713b2724 | 3129 | else |
3130 | set_value_range_to_varying (vr); | |
3131 | return; | |
3132 | } | |
3133 | ||
0d27ac1e | 3134 | /* If VR0 is varying and we increase the type precision, assume |
3135 | a full range for the following transformation. */ | |
3136 | if (vr0.type == VR_VARYING | |
713b2724 | 3137 | && INTEGRAL_TYPE_P (inner_type) |
0d27ac1e | 3138 | && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type)) |
b8f8bd67 | 3139 | { |
0d27ac1e | 3140 | vr0.type = VR_RANGE; |
3141 | vr0.min = TYPE_MIN_VALUE (inner_type); | |
3142 | vr0.max = TYPE_MAX_VALUE (inner_type); | |
b8f8bd67 | 3143 | } |
3144 | ||
0d27ac1e | 3145 | /* If VR0 is a constant range or anti-range and the conversion is |
3146 | not truncating we can convert the min and max values and | |
3147 | canonicalize the resulting range. Otherwise we can do the | |
3148 | conversion if the size of the range is less than what the | |
3149 | precision of the target type can represent and the range is | |
3150 | not an anti-range. */ | |
3151 | if ((vr0.type == VR_RANGE | |
3152 | || vr0.type == VR_ANTI_RANGE) | |
3153 | && TREE_CODE (vr0.min) == INTEGER_CST | |
3154 | && TREE_CODE (vr0.max) == INTEGER_CST | |
33731620 | 3155 | && (!is_overflow_infinity (vr0.min) |
3156 | || (vr0.type == VR_RANGE | |
3157 | && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type) | |
3158 | && needs_overflow_infinity (outer_type) | |
3159 | && supports_overflow_infinity (outer_type))) | |
3160 | && (!is_overflow_infinity (vr0.max) | |
3161 | || (vr0.type == VR_RANGE | |
3162 | && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type) | |
3163 | && needs_overflow_infinity (outer_type) | |
3164 | && supports_overflow_infinity (outer_type))) | |
0d27ac1e | 3165 | && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type) |
3166 | || (vr0.type == VR_RANGE | |
3167 | && integer_zerop (int_const_binop (RSHIFT_EXPR, | |
317e2a67 | 3168 | int_const_binop (MINUS_EXPR, vr0.max, vr0.min), |
3169 | size_int (TYPE_PRECISION (outer_type))))))) | |
80096ea0 | 3170 | { |
0d27ac1e | 3171 | tree new_min, new_max; |
33731620 | 3172 | if (is_overflow_infinity (vr0.min)) |
3173 | new_min = negative_overflow_infinity (outer_type); | |
df8f94d5 | 3174 | else |
3175 | new_min = force_fit_type_double (outer_type, | |
3176 | tree_to_double_int (vr0.min), | |
3177 | 0, false); | |
33731620 | 3178 | if (is_overflow_infinity (vr0.max)) |
3179 | new_max = positive_overflow_infinity (outer_type); | |
df8f94d5 | 3180 | else |
3181 | new_max = force_fit_type_double (outer_type, | |
3182 | tree_to_double_int (vr0.max), | |
3183 | 0, false); | |
0d27ac1e | 3184 | set_and_canonicalize_value_range (vr, vr0.type, |
3185 | new_min, new_max, NULL); | |
80096ea0 | 3186 | return; |
3187 | } | |
0d27ac1e | 3188 | |
3189 | set_value_range_to_varying (vr); | |
3190 | return; | |
88dbf20f | 3191 | } |
713b2724 | 3192 | else if (code == ABS_EXPR) |
eea12c72 | 3193 | { |
713b2724 | 3194 | tree min, max; |
3195 | int cmp; | |
3196 | ||
3197 | /* Pass through vr0 in the easy cases. */ | |
3198 | if (TYPE_UNSIGNED (type) | |
3199 | || value_range_nonnegative_p (&vr0)) | |
3200 | { | |
3201 | copy_value_range (vr, &vr0); | |
3202 | return; | |
3203 | } | |
3204 | ||
3205 | /* For the remaining varying or symbolic ranges we can't do anything | |
3206 | useful. */ | |
3207 | if (vr0.type == VR_VARYING | |
3208 | || symbolic_range_p (&vr0)) | |
3209 | { | |
3210 | set_value_range_to_varying (vr); | |
3211 | return; | |
3212 | } | |
3213 | ||
8b5d7cdf | 3214 | /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a |
3215 | useful range. */ | |
93116081 | 3216 | if (!TYPE_OVERFLOW_UNDEFINED (type) |
8b5d7cdf | 3217 | && ((vr0.type == VR_RANGE |
b876a744 | 3218 | && vrp_val_is_min (vr0.min)) |
8b5d7cdf | 3219 | || (vr0.type == VR_ANTI_RANGE |
713b2724 | 3220 | && !vrp_val_is_min (vr0.min)))) |
8b5d7cdf | 3221 | { |
3222 | set_value_range_to_varying (vr); | |
3223 | return; | |
3224 | } | |
48e1416a | 3225 | |
eea12c72 | 3226 | /* ABS_EXPR may flip the range around, if the original range |
3227 | included negative values. */ | |
c3783c3b | 3228 | if (is_overflow_infinity (vr0.min)) |
93116081 | 3229 | min = positive_overflow_infinity (type); |
b876a744 | 3230 | else if (!vrp_val_is_min (vr0.min)) |
93116081 | 3231 | min = fold_unary_to_constant (code, type, vr0.min); |
3232 | else if (!needs_overflow_infinity (type)) | |
3233 | min = TYPE_MAX_VALUE (type); | |
3234 | else if (supports_overflow_infinity (type)) | |
3235 | min = positive_overflow_infinity (type); | |
c3783c3b | 3236 | else |
3237 | { | |
3238 | set_value_range_to_varying (vr); | |
3239 | return; | |
3240 | } | |
eea12c72 | 3241 | |
c3783c3b | 3242 | if (is_overflow_infinity (vr0.max)) |
93116081 | 3243 | max = positive_overflow_infinity (type); |
b876a744 | 3244 | else if (!vrp_val_is_min (vr0.max)) |
93116081 | 3245 | max = fold_unary_to_constant (code, type, vr0.max); |
3246 | else if (!needs_overflow_infinity (type)) | |
3247 | max = TYPE_MAX_VALUE (type); | |
eaba51b9 | 3248 | else if (supports_overflow_infinity (type) |
3249 | /* We shouldn't generate [+INF, +INF] as set_value_range | |
3250 | doesn't like this and ICEs. */ | |
3251 | && !is_positive_overflow_infinity (min)) | |
93116081 | 3252 | max = positive_overflow_infinity (type); |
c3783c3b | 3253 | else |
3254 | { | |
3255 | set_value_range_to_varying (vr); | |
3256 | return; | |
3257 | } | |
eea12c72 | 3258 | |
8b5d7cdf | 3259 | cmp = compare_values (min, max); |
3260 | ||
3261 | /* If a VR_ANTI_RANGEs contains zero, then we have | |
3262 | ~[-INF, min(MIN, MAX)]. */ | |
3263 | if (vr0.type == VR_ANTI_RANGE) | |
48e1416a | 3264 | { |
7d48cd66 | 3265 | if (range_includes_zero_p (vr0.min, vr0.max) == 1) |
8b5d7cdf | 3266 | { |
8b5d7cdf | 3267 | /* Take the lower of the two values. */ |
3268 | if (cmp != 1) | |
3269 | max = min; | |
3270 | ||
3271 | /* Create ~[-INF, min (abs(MIN), abs(MAX))] | |
3272 | or ~[-INF + 1, min (abs(MIN), abs(MAX))] when | |
3273 | flag_wrapv is set and the original anti-range doesn't include | |
3274 | TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */ | |
93116081 | 3275 | if (TYPE_OVERFLOW_WRAPS (type)) |
c3783c3b | 3276 | { |
93116081 | 3277 | tree type_min_value = TYPE_MIN_VALUE (type); |
c3783c3b | 3278 | |
3279 | min = (vr0.min != type_min_value | |
3280 | ? int_const_binop (PLUS_EXPR, type_min_value, | |
317e2a67 | 3281 | integer_one_node) |
c3783c3b | 3282 | : type_min_value); |
3283 | } | |
3284 | else | |
3285 | { | |
3286 | if (overflow_infinity_range_p (&vr0)) | |
93116081 | 3287 | min = negative_overflow_infinity (type); |
c3783c3b | 3288 | else |
93116081 | 3289 | min = TYPE_MIN_VALUE (type); |
c3783c3b | 3290 | } |
8b5d7cdf | 3291 | } |
3292 | else | |
3293 | { | |
3294 | /* All else has failed, so create the range [0, INF], even for | |
3295 | flag_wrapv since TYPE_MIN_VALUE is in the original | |
3296 | anti-range. */ | |
3297 | vr0.type = VR_RANGE; | |
93116081 | 3298 | min = build_int_cst (type, 0); |
3299 | if (needs_overflow_infinity (type)) | |
c3783c3b | 3300 | { |
93116081 | 3301 | if (supports_overflow_infinity (type)) |
3302 | max = positive_overflow_infinity (type); | |
c3783c3b | 3303 | else |
3304 | { | |
3305 | set_value_range_to_varying (vr); | |
3306 | return; | |
3307 | } | |
3308 | } | |
3309 | else | |
93116081 | 3310 | max = TYPE_MAX_VALUE (type); |
8b5d7cdf | 3311 | } |
3312 | } | |
3313 | ||
3314 | /* If the range contains zero then we know that the minimum value in the | |
3315 | range will be zero. */ | |
7d48cd66 | 3316 | else if (range_includes_zero_p (vr0.min, vr0.max) == 1) |
8b5d7cdf | 3317 | { |
3318 | if (cmp == 1) | |
3319 | max = min; | |
93116081 | 3320 | min = build_int_cst (type, 0); |
8b5d7cdf | 3321 | } |
3322 | else | |
eea12c72 | 3323 | { |
8b5d7cdf | 3324 | /* If the range was reversed, swap MIN and MAX. */ |
3325 | if (cmp == 1) | |
3326 | { | |
3327 | tree t = min; | |
3328 | min = max; | |
3329 | max = t; | |
3330 | } | |
eea12c72 | 3331 | } |
713b2724 | 3332 | |
3333 | cmp = compare_values (min, max); | |
3334 | if (cmp == -2 || cmp == 1) | |
3335 | { | |
3336 | /* If the new range has its limits swapped around (MIN > MAX), | |
3337 | then the operation caused one of them to wrap around, mark | |
3338 | the new range VARYING. */ | |
3339 | set_value_range_to_varying (vr); | |
3340 | } | |
3341 | else | |
3342 | set_value_range (vr, vr0.type, min, max, NULL); | |
3343 | return; | |
eea12c72 | 3344 | } |
88dbf20f | 3345 | |
713b2724 | 3346 | /* For unhandled operations fall back to varying. */ |
3347 | set_value_range_to_varying (vr); | |
3348 | return; | |
eea12c72 | 3349 | } |
3350 | ||
3351 | ||
113fbe09 | 3352 | /* Extract range information from a unary expression CODE OP0 based on |
3353 | the range of its operand with resulting type TYPE. | |
3354 | The resulting range is stored in *VR. */ | |
3355 | ||
3356 | static void | |
3357 | extract_range_from_unary_expr (value_range_t *vr, enum tree_code code, | |
3358 | tree type, tree op0) | |
3359 | { | |
748eb1f9 | 3360 | value_range_t vr0 = VR_INITIALIZER; |
113fbe09 | 3361 | |
3362 | /* Get value ranges for the operand. For constant operands, create | |
3363 | a new value range with the operand to simplify processing. */ | |
3364 | if (TREE_CODE (op0) == SSA_NAME) | |
3365 | vr0 = *(get_value_range (op0)); | |
3366 | else if (is_gimple_min_invariant (op0)) | |
3367 | set_value_range_to_value (&vr0, op0, NULL); | |
3368 | else | |
3369 | set_value_range_to_varying (&vr0); | |
3370 | ||
3371 | extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0)); | |
3372 | } | |
3373 | ||
3374 | ||
8a2caf10 | 3375 | /* Extract range information from a conditional expression STMT based on |
ec0fa513 | 3376 | the ranges of each of its operands and the expression code. */ |
3377 | ||
3378 | static void | |
8a2caf10 | 3379 | extract_range_from_cond_expr (value_range_t *vr, gimple stmt) |
ec0fa513 | 3380 | { |
3381 | tree op0, op1; | |
748eb1f9 | 3382 | value_range_t vr0 = VR_INITIALIZER; |
3383 | value_range_t vr1 = VR_INITIALIZER; | |
ec0fa513 | 3384 | |
3385 | /* Get value ranges for each operand. For constant operands, create | |
3386 | a new value range with the operand to simplify processing. */ | |
8a2caf10 | 3387 | op0 = gimple_assign_rhs2 (stmt); |
ec0fa513 | 3388 | if (TREE_CODE (op0) == SSA_NAME) |
3389 | vr0 = *(get_value_range (op0)); | |
3390 | else if (is_gimple_min_invariant (op0)) | |
4baf1a77 | 3391 | set_value_range_to_value (&vr0, op0, NULL); |
ec0fa513 | 3392 | else |
3393 | set_value_range_to_varying (&vr0); | |
3394 | ||
8a2caf10 | 3395 | op1 = gimple_assign_rhs3 (stmt); |
ec0fa513 | 3396 | if (TREE_CODE (op1) == SSA_NAME) |
3397 | vr1 = *(get_value_range (op1)); | |
3398 | else if (is_gimple_min_invariant (op1)) | |
4baf1a77 | 3399 | set_value_range_to_value (&vr1, op1, NULL); |
ec0fa513 | 3400 | else |
3401 | set_value_range_to_varying (&vr1); | |
3402 | ||
3403 | /* The resulting value range is the union of the operand ranges */ | |
ec0fa513 | 3404 | copy_value_range (vr, &vr0); |
a2623f6b | 3405 | vrp_meet (vr, &vr1); |
ec0fa513 | 3406 | } |
3407 | ||
3408 | ||
eea12c72 | 3409 | /* Extract range information from a comparison expression EXPR based |
3410 | on the range of its operand and the expression code. */ | |
3411 | ||
3412 | static void | |
93116081 | 3413 | extract_range_from_comparison (value_range_t *vr, enum tree_code code, |
3414 | tree type, tree op0, tree op1) | |
eea12c72 | 3415 | { |
c3783c3b | 3416 | bool sop = false; |
75a70cf9 | 3417 | tree val; |
48e1416a | 3418 | |
e0ad89bd | 3419 | val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop, |
3420 | NULL); | |
c3783c3b | 3421 | |
3422 | /* A disadvantage of using a special infinity as an overflow | |
3423 | representation is that we lose the ability to record overflow | |
3424 | when we don't have an infinity. So we have to ignore a result | |
3425 | which relies on overflow. */ | |
3426 | ||
3427 | if (val && !is_overflow_infinity (val) && !sop) | |
eea12c72 | 3428 | { |
3429 | /* Since this expression was found on the RHS of an assignment, | |
3430 | its type may be different from _Bool. Convert VAL to EXPR's | |
3431 | type. */ | |
93116081 | 3432 | val = fold_convert (type, val); |
4baf1a77 | 3433 | if (is_gimple_min_invariant (val)) |
3434 | set_value_range_to_value (vr, val, vr->equiv); | |
3435 | else | |
3436 | set_value_range (vr, VR_RANGE, val, val, vr->equiv); | |
eea12c72 | 3437 | } |
3438 | else | |
b9b64cb7 | 3439 | /* The result of a comparison is always true or false. */ |
93116081 | 3440 | set_value_range_to_truthvalue (vr, type); |
88dbf20f | 3441 | } |
3442 | ||
75a70cf9 | 3443 | /* Try to derive a nonnegative or nonzero range out of STMT relying |
3444 | primarily on generic routines in fold in conjunction with range data. | |
3445 | Store the result in *VR */ | |
88dbf20f | 3446 | |
75a70cf9 | 3447 | static void |
3448 | extract_range_basic (value_range_t *vr, gimple stmt) | |
3449 | { | |
3450 | bool sop = false; | |
3451 | tree type = gimple_expr_type (stmt); | |
3452 | ||
3453 | if (INTEGRAL_TYPE_P (type) | |
3454 | && gimple_stmt_nonnegative_warnv_p (stmt, &sop)) | |
3455 | set_value_range_to_nonnegative (vr, type, | |
3456 | sop || stmt_overflow_infinity (stmt)); | |
3457 | else if (vrp_stmt_computes_nonzero (stmt, &sop) | |
3458 | && !sop) | |
3459 | set_value_range_to_nonnull (vr, type); | |
3460 | else | |
3461 | set_value_range_to_varying (vr); | |
3462 | } | |
3463 | ||
3464 | ||
3465 | /* Try to compute a useful range out of assignment STMT and store it | |
eea12c72 | 3466 | in *VR. */ |
88dbf20f | 3467 | |
3468 | static void | |
75a70cf9 | 3469 | extract_range_from_assignment (value_range_t *vr, gimple stmt) |
88dbf20f | 3470 | { |
75a70cf9 | 3471 | enum tree_code code = gimple_assign_rhs_code (stmt); |
88dbf20f | 3472 | |
3473 | if (code == ASSERT_EXPR) | |
75a70cf9 | 3474 | extract_range_from_assert (vr, gimple_assign_rhs1 (stmt)); |
88dbf20f | 3475 | else if (code == SSA_NAME) |
75a70cf9 | 3476 | extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt)); |
cfd7906e | 3477 | else if (TREE_CODE_CLASS (code) == tcc_binary) |
75a70cf9 | 3478 | extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt), |
3479 | gimple_expr_type (stmt), | |
3480 | gimple_assign_rhs1 (stmt), | |
3481 | gimple_assign_rhs2 (stmt)); | |
88dbf20f | 3482 | else if (TREE_CODE_CLASS (code) == tcc_unary) |
75a70cf9 | 3483 | extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt), |
3484 | gimple_expr_type (stmt), | |
3485 | gimple_assign_rhs1 (stmt)); | |
ec0fa513 | 3486 | else if (code == COND_EXPR) |
8a2caf10 | 3487 | extract_range_from_cond_expr (vr, stmt); |
eea12c72 | 3488 | else if (TREE_CODE_CLASS (code) == tcc_comparison) |
75a70cf9 | 3489 | extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt), |
3490 | gimple_expr_type (stmt), | |
3491 | gimple_assign_rhs1 (stmt), | |
3492 | gimple_assign_rhs2 (stmt)); | |
3493 | else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS | |
3494 | && is_gimple_min_invariant (gimple_assign_rhs1 (stmt))) | |
3495 | set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL); | |
88dbf20f | 3496 | else |
e7d43f99 | 3497 | set_value_range_to_varying (vr); |
8dbf774a | 3498 | |
8dbf774a | 3499 | if (vr->type == VR_VARYING) |
75a70cf9 | 3500 | extract_range_basic (vr, stmt); |
88dbf20f | 3501 | } |
3502 | ||
/* Given a range VR, a LOOP and a variable VAR, determine whether it
   would be profitable to adjust VR using scalar evolution information
   for VAR.  If so, update VR with the new limits.  */

static void
adjust_range_with_scev (value_range_t *vr, struct loop *loop,
			gimple stmt, tree var)
{
  tree init, step, chrec, tmin, tmax, min, max, type, tem;
  enum ev_direction dir;

  /* TODO.  Don't adjust anti-ranges.  An anti-range may provide
     better opportunities than a regular range, but I'm not sure.  */
  if (vr->type == VR_ANTI_RANGE)
    return;

  chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));

  /* Like in PR19590, scev can return a constant function.  */
  if (is_gimple_min_invariant (chrec))
    {
      set_value_range_to_value (vr, chrec, vr->equiv);
      return;
    }

  if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
    return;

  /* Replace INIT and STEP by the single constant from their value
     range, when that range is a singleton — the constant is simpler
     for the comparisons below.  */
  init = initial_condition_in_loop_num (chrec, loop->num);
  tem = op_with_constant_singleton_value_range (init);
  if (tem)
    init = tem;
  step = evolution_part_in_loop_num (chrec, loop->num);
  tem = op_with_constant_singleton_value_range (step);
  if (tem)
    step = tem;

  /* If STEP is symbolic, we can't know whether INIT will be the
     minimum or maximum value in the range.  Also, unless INIT is
     a simple expression, compare_values and possibly other functions
     in tree-vrp won't be able to handle it.  */
  if (step == NULL_TREE
      || !is_gimple_min_invariant (step)
      || !valid_value_p (init))
    return;

  dir = scev_direction (chrec);
  if (/* Do not adjust ranges if we do not know whether the iv increases
	 or decreases,  ... */
      dir == EV_DIR_UNKNOWN
      /* ... or if it may wrap.  */
      || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
				true))
    return;

  /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
     negative_overflow_infinity and positive_overflow_infinity,
     because we have concluded that the loop probably does not
     wrap.  */

  /* Start from the widest bounds the type allows; pointers and types
     without explicit min/max values use the generic bound helpers.  */
  type = TREE_TYPE (var);
  if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
    tmin = lower_bound_in_type (type, type);
  else
    tmin = TYPE_MIN_VALUE (type);
  if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
    tmax = upper_bound_in_type (type, type);
  else
    tmax = TYPE_MAX_VALUE (type);

  /* Try to use estimated number of iterations for the loop to constrain the
     final value in the evolution.  */
  if (TREE_CODE (step) == INTEGER_CST
      && is_gimple_val (init)
      && (TREE_CODE (init) != SSA_NAME
	  || get_value_range (init)->type == VR_RANGE))
    {
      double_int nit;

      /* We are only entering here for loop header PHI nodes, so using
	 the number of latch executions is the correct thing to use.  */
      if (max_loop_iterations (loop, &nit))
	{
	  value_range_t maxvr = VR_INITIALIZER;
	  double_int dtmp;
	  bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (step));
	  bool overflow = false;

	  /* Total evolution over the loop is STEP * NIT.  */
	  dtmp = tree_to_double_int (step)
		 .mul_with_sign (nit, unsigned_p, &overflow);
	  /* If the multiplication overflowed we can't do a meaningful
	     adjustment.  Likewise if the result doesn't fit in the type
	     of the induction variable.  For a signed type we have to
	     check whether the result has the expected signedness which
	     is that of the step as number of iterations is unsigned.  */
	  if (!overflow
	      && double_int_fits_to_tree_p (TREE_TYPE (init), dtmp)
	      && (unsigned_p
		  || ((dtmp.high ^ TREE_INT_CST_HIGH (step)) >= 0)))
	    {
	      tem = double_int_to_tree (TREE_TYPE (init), dtmp);
	      extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
					      TREE_TYPE (init), init, tem);
	      /* Likewise if the addition did.  */
	      if (maxvr.type == VR_RANGE)
		{
		  tmin = maxvr.min;
		  tmax = maxvr.max;
		}
	    }
	}
    }

  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
    {
      min = tmin;
      max = tmax;

      /* For VARYING or UNDEFINED ranges, just about anything we get
	 from scalar evolutions should be better.  */

      /* INIT is one endpoint: the maximum for a decreasing iv, the
	 minimum otherwise.  */
      if (dir == EV_DIR_DECREASES)
	max = init;
      else
	min = init;

      /* If we would create an invalid range, then just assume we
	 know absolutely nothing.  This may be over-conservative,
	 but it's clearly safe, and should happen only in unreachable
	 parts of code, or for invalid programs.  */
      if (compare_values (min, max) == 1)
	return;

      set_value_range (vr, VR_RANGE, min, max, vr->equiv);
    }
  else if (vr->type == VR_RANGE)
    {
      min = vr->min;
      max = vr->max;

      if (dir == EV_DIR_DECREASES)
	{
	  /* INIT is the maximum value.  If INIT is lower than VR->MAX
	     but no smaller than VR->MIN, set VR->MAX to INIT.  */
	  if (compare_values (init, max) == -1)
	    max = init;

	  /* According to the loop information, the variable does not
	     overflow.  If we think it does, probably because of an
	     overflow due to arithmetic on a different INF value,
	     reset now.  */
	  if (is_negative_overflow_infinity (min)
	      || compare_values (min, tmin) == -1)
	    min = tmin;

	}
      else
	{
	  /* If INIT is bigger than VR->MIN, set VR->MIN to INIT.  */
	  if (compare_values (init, min) == 1)
	    min = init;

	  /* Symmetrically, clamp an overflowed or too-large maximum
	     back to the type bound since the iv does not wrap.  */
	  if (is_positive_overflow_infinity (max)
	      || compare_values (tmax, max) == -1)
	    max = tmax;
	}

      /* If we just created an invalid range with the minimum
	 greater than the maximum, we fail conservatively.
	 This should happen only in unreachable
	 parts of code, or for invalid programs.  */
      if (compare_values (min, max) == 1)
	return;

      set_value_range (vr, VR_RANGE, min, max, vr->equiv);
    }
}
3680 | ||
4d1ddfdf | 3681 | /* Return true if VAR may overflow at STMT. This checks any available |
3682 | loop information to see if we can determine that VAR does not | |
3683 | overflow. */ | |
3684 | ||
3685 | static bool | |
75a70cf9 | 3686 | vrp_var_may_overflow (tree var, gimple stmt) |
4d1ddfdf | 3687 | { |
3688 | struct loop *l; | |
3689 | tree chrec, init, step; | |
3690 | ||
3691 | if (current_loops == NULL) | |
3692 | return true; | |
3693 | ||
3694 | l = loop_containing_stmt (stmt); | |
ff486875 | 3695 | if (l == NULL |
3696 | || !loop_outer (l)) | |
4d1ddfdf | 3697 | return true; |
3698 | ||
3699 | chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var)); | |
3700 | if (TREE_CODE (chrec) != POLYNOMIAL_CHREC) | |
3701 | return true; | |
3702 | ||
3703 | init = initial_condition_in_loop_num (chrec, l->num); | |
3704 | step = evolution_part_in_loop_num (chrec, l->num); | |
3705 | ||
3706 | if (step == NULL_TREE | |
3707 | || !is_gimple_min_invariant (step) | |
3708 | || !valid_value_p (init)) | |
3709 | return true; | |
3710 | ||
3711 | /* If we get here, we know something useful about VAR based on the | |
3712 | loop information. If it wraps, it may overflow. */ | |
3713 | ||
3714 | if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec), | |
3715 | true)) | |
3716 | return true; | |
3717 | ||
3718 | if (dump_file && (dump_flags & TDF_DETAILS) != 0) | |
3719 | { | |
3720 | print_generic_expr (dump_file, var, 0); | |
3721 | fprintf (dump_file, ": loop information indicates does not overflow\n"); | |
3722 | } | |
3723 | ||
3724 | return false; | |
3725 | } | |
3726 | ||
88dbf20f | 3727 | |
/* Given two numeric value ranges VR0, VR1 and a comparison code COMP:

   - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
     all the values in the ranges.

   - Return BOOLEAN_FALSE_NODE if the comparison always returns false.

   - Return NULL_TREE if it is not always possible to determine the
     value of the comparison.

   Also set *STRICT_OVERFLOW_P to indicate whether a range with an
   overflow infinity was used in the test.  */


static tree
compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
		bool *strict_overflow_p)
{
  /* VARYING or UNDEFINED ranges cannot be compared.  */
  if (vr0->type == VR_VARYING
      || vr0->type == VR_UNDEFINED
      || vr1->type == VR_VARYING
      || vr1->type == VR_UNDEFINED)
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
    {
      /* If both are anti-ranges, then we cannot compute any
	 comparison.  */
      if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
	return NULL_TREE;

      /* These comparisons are never statically computable.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* Equality can be computed only between a range and an
	 anti-range.  ~[VAL1, VAL2] == [VAL1, VAL2] is always false.  */
      if (vr0->type == VR_RANGE)
	{
	  /* To simplify processing, make VR0 the anti-range.  */
	  value_range_t *tmp = vr0;
	  vr0 = vr1;
	  vr1 = tmp;
	}

      gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);

      /* If the range and anti-range share both endpoints, the sets
	 are disjoint: != is always true, == always false.  */
      if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
	  && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  if (!usable_range_p (vr0, strict_overflow_p)
      || !usable_range_p (vr1, strict_overflow_p))
    return NULL_TREE;

  /* Simplify processing.  If COMP is GT_EXPR or GE_EXPR, switch the
     operands around and change the comparison code.  */
  if (comp == GT_EXPR || comp == GE_EXPR)
    {
      value_range_t *tmp;
      comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
      tmp = vr0;
      vr0 = vr1;
      vr1 = tmp;
    }

  if (comp == EQ_EXPR)
    {
      /* Equality may only be computed if both ranges represent
	 exactly one value.  */
      if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
	  && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
	{
	  int cmp_min = compare_values_warnv (vr0->min, vr1->min,
					      strict_overflow_p);
	  int cmp_max = compare_values_warnv (vr0->max, vr1->max,
					      strict_overflow_p);
	  if (cmp_min == 0 && cmp_max == 0)
	    return boolean_true_node;
	  /* -2 marks values that could not be compared; anything else
	     but 0 means the singletons differ.  */
	  else if (cmp_min != -2 && cmp_max != -2)
	    return boolean_false_node;
	}
      /* If [V0_MIN, V1_MAX] < [V1_MIN, V1_MAX] then V0 != V1.  */
      else if (compare_values_warnv (vr0->min, vr1->max,
				     strict_overflow_p) == 1
	       || compare_values_warnv (vr1->min, vr0->max,
					strict_overflow_p) == 1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      int cmp1, cmp2;

      /* If VR0 is completely to the left or completely to the right
	 of VR1, they are always different.  Notice that we need to
	 make sure that both comparisons yield similar results to
	 avoid comparing values that cannot be compared at
	 compile-time.  */
      cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
      cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
      if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
	return boolean_true_node;

      /* If VR0 and VR1 represent a single value and are identical,
	 return false.  */
      else if (compare_values_warnv (vr0->min, vr0->max,
				     strict_overflow_p) == 0
	       && compare_values_warnv (vr1->min, vr1->max,
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->min, vr1->min,
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->max, vr1->max,
					strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      else
	return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR0 is to the left of VR1, return true.  */
      tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	{
	  /* The answer depends on overflow infinities; record that.  */
	  if (overflow_infinity_range_p (vr0)
	      || overflow_infinity_range_p (vr1))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR0 is to the right of VR1, return false.  */
      tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	{
	  if (overflow_infinity_range_p (vr0)
	      || overflow_infinity_range_p (vr1))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  /* GT/GE were canonicalized to LT/LE above, so every code has been
     handled by now.  */
  gcc_unreachable ();
}
3889 | ||
3890 | ||
/* Given a value range VR, a value VAL and a comparison code COMP, return
   BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
   values in VR.  Return BOOLEAN_FALSE_NODE if the comparison
   always returns false.  Return NULL_TREE if it is not always
   possible to determine the value of the comparison.  Also set
   *STRICT_OVERFLOW_P to indicate whether a range with an overflow
   infinity was used in the test.  */

static tree
compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
			  bool *strict_overflow_p)
{
  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr->type == VR_ANTI_RANGE)
    {
      /* For anti-ranges, the only predicates that we can compute at
	 compile time are equality and inequality.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2:
	 VAL is excluded from the set, so != is true and == false.  */
      if (value_inside_range (val, vr->min, vr->max) == 1)
	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  if (!usable_range_p (vr, strict_overflow_p))
    return NULL_TREE;

  if (comp == EQ_EXPR)
    {
      /* EQ_EXPR may only be computed if VR represents exactly
	 one value.  */
      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
	{
	  int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
	  if (cmp == 0)
	    return boolean_true_node;
	  else if (cmp == -1 || cmp == 1 || cmp == 2)
	    return boolean_false_node;
	}
      /* Or if VAL falls strictly outside [VR->MIN, VR->MAX], equality
	 is impossible.  */
      else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
	       || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      /* If VAL is not inside VR, then they are always different.  */
      if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
	  || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
	return boolean_true_node;

      /* If VR represents exactly one value equal to VAL, then return
	 false.  */
      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
	  && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR is to the left of VAL, return true.  */
      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	{
	  /* The answer depends on an overflow infinity; record that.  */
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR is to the right of VAL, return false.  */
      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }
  else if (comp == GT_EXPR || comp == GE_EXPR)
    {
      int tst;

      /* If VR is to the right of VAL, return true.  */
      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
      if ((comp == GT_EXPR && tst == 1)
	  || (comp == GE_EXPR && (tst == 0 || tst == 1)))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR is to the left of VAL, return false.  */
      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
      if ((comp == GT_EXPR && (tst == -1 || tst == 0))
	  || (comp == GE_EXPR && tst == -1))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  /* All comparison codes handled above.  */
  gcc_unreachable ();
}
4018 | ||
4019 | ||
/* Debugging dumps.  */

/* Forward declarations for the value-range dump/debug helpers defined
   below; presumably kept extern (rather than static) so they can be
   invoked from a debugger.  NOTE(review): dump_vr_equiv and
   debug_vr_equiv are declared here but not defined in this part of
   the file — confirm they exist elsewhere.  */
void dump_value_range (FILE *, value_range_t *);
void debug_value_range (value_range_t *);
void dump_all_value_ranges (FILE *);
void debug_all_value_ranges (void);
void dump_vr_equiv (FILE *, bitmap);
void debug_vr_equiv (bitmap);
4028 | ||
4029 | ||
4030 | /* Dump value range VR to FILE. */ | |
4031 | ||
88dbf20f | 4032 | void |
eea12c72 | 4033 | dump_value_range (FILE *file, value_range_t *vr) |
88dbf20f | 4034 | { |
4035 | if (vr == NULL) | |
4036 | fprintf (file, "[]"); | |
4037 | else if (vr->type == VR_UNDEFINED) | |
4038 | fprintf (file, "UNDEFINED"); | |
4039 | else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE) | |
4040 | { | |
eea12c72 | 4041 | tree type = TREE_TYPE (vr->min); |
4042 | ||
88dbf20f | 4043 | fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : ""); |
eea12c72 | 4044 | |
b876a744 | 4045 | if (is_negative_overflow_infinity (vr->min)) |
c3783c3b | 4046 | fprintf (file, "-INF(OVF)"); |
b876a744 | 4047 | else if (INTEGRAL_TYPE_P (type) |
4048 | && !TYPE_UNSIGNED (type) | |
4049 | && vrp_val_is_min (vr->min)) | |
4050 | fprintf (file, "-INF"); | |
eea12c72 | 4051 | else |
4052 | print_generic_expr (file, vr->min, 0); | |
4053 | ||
88dbf20f | 4054 | fprintf (file, ", "); |
eea12c72 | 4055 | |
b876a744 | 4056 | if (is_positive_overflow_infinity (vr->max)) |
c3783c3b | 4057 | fprintf (file, "+INF(OVF)"); |
b876a744 | 4058 | else if (INTEGRAL_TYPE_P (type) |
4059 | && vrp_val_is_max (vr->max)) | |
4060 | fprintf (file, "+INF"); | |
eea12c72 | 4061 | else |
4062 | print_generic_expr (file, vr->max, 0); | |
4063 | ||
88dbf20f | 4064 | fprintf (file, "]"); |
eea12c72 | 4065 | |
4066 | if (vr->equiv) | |
4067 | { | |
4068 | bitmap_iterator bi; | |
4069 | unsigned i, c = 0; | |
4070 | ||
4071 | fprintf (file, " EQUIVALENCES: { "); | |
4072 | ||
4073 | EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi) | |
4074 | { | |
4075 | print_generic_expr (file, ssa_name (i), 0); | |
4076 | fprintf (file, " "); | |
4077 | c++; | |
4078 | } | |
4079 | ||
4080 | fprintf (file, "} (%u elements)", c); | |
4081 | } | |
88dbf20f | 4082 | } |
4083 | else if (vr->type == VR_VARYING) | |
4084 | fprintf (file, "VARYING"); | |
4085 | else | |
4086 | fprintf (file, "INVALID RANGE"); | |
4087 | } | |
4088 | ||
4089 | ||
4090 | /* Dump value range VR to stderr. */ | |
4091 | ||
4b987fac | 4092 | DEBUG_FUNCTION void |
eea12c72 | 4093 | debug_value_range (value_range_t *vr) |
88dbf20f | 4094 | { |
4095 | dump_value_range (stderr, vr); | |
79f0a894 | 4096 | fprintf (stderr, "\n"); |
88dbf20f | 4097 | } |
4098 | ||
4099 | ||
4100 | /* Dump value ranges of all SSA_NAMEs to FILE. */ | |
4101 | ||
4102 | void | |
4103 | dump_all_value_ranges (FILE *file) | |
4104 | { | |
4105 | size_t i; | |
4106 | ||
e0186710 | 4107 | for (i = 0; i < num_vr_values; i++) |
88dbf20f | 4108 | { |
eea12c72 | 4109 | if (vr_value[i]) |
88dbf20f | 4110 | { |
eea12c72 | 4111 | print_generic_expr (file, ssa_name (i), 0); |
88dbf20f | 4112 | fprintf (file, ": "); |
eea12c72 | 4113 | dump_value_range (file, vr_value[i]); |
88dbf20f | 4114 | fprintf (file, "\n"); |
4115 | } | |
4116 | } | |
4117 | ||
4118 | fprintf (file, "\n"); | |
4119 | } | |
4120 | ||
4121 | ||
/* Dump all value ranges to stderr.  Convenience wrapper, presumably
   for use from a debugger (it is marked DEBUG_FUNCTION).  */

DEBUG_FUNCTION void
debug_all_value_ranges (void)
{
  dump_all_value_ranges (stderr);
}
4129 | ||
4130 | ||
88dbf20f | 4131 | /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V, |
4132 | create a new SSA name N and return the assertion assignment | |
4133 | 'V = ASSERT_EXPR <V, V OP W>'. */ | |
4134 | ||
75a70cf9 | 4135 | static gimple |
88dbf20f | 4136 | build_assert_expr_for (tree cond, tree v) |
4137 | { | |
0dc262e0 | 4138 | tree a; |
75a70cf9 | 4139 | gimple assertion; |
88dbf20f | 4140 | |
0dc262e0 | 4141 | gcc_assert (TREE_CODE (v) == SSA_NAME |
4142 | && COMPARISON_CLASS_P (cond)); | |
88dbf20f | 4143 | |
0dc262e0 | 4144 | a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond); |
4145 | assertion = gimple_build_assign (NULL_TREE, a); | |
88dbf20f | 4146 | |
4147 | /* The new ASSERT_EXPR, creates a new SSA name that replaces the | |
0dc262e0 | 4148 | operand of the ASSERT_EXPR. Create it so the new name and the old one |
4149 | are registered in the replacement table so that we can fix the SSA web | |
4150 | after adding all the ASSERT_EXPRs. */ | |
4151 | create_new_def_for (v, assertion, NULL); | |
88dbf20f | 4152 | |
4153 | return assertion; | |
4154 | } | |
4155 | ||
4156 | ||
/* Return true if STMT is a GIMPLE_COND whose predicate compares
   floating point values.  (Note: the code returns the result of
   FLOAT_TYPE_P, i.e. TRUE for float comparisons; the previous comment
   stated the opposite polarity.)  */

static inline bool
fp_predicate (gimple stmt)
{
  GIMPLE_CHECK (stmt, GIMPLE_COND);

  return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
}
4167 | ||
4168 | ||
/* If the range of values taken by OP can be inferred after STMT executes,
   return the comparison code (COMP_CODE_P) and value (VAL_P) that
   describes the inferred range.  Return true if a range could be
   inferred; on failure *VAL_P is NULL_TREE and *COMP_CODE_P is
   ERROR_MARK.  */

static bool
infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
{
  /* Initialize the outputs to the "nothing inferred" state.  */
  *val_p = NULL_TREE;
  *comp_code_p = ERROR_MARK;

  /* Do not attempt to infer anything in names that flow through
     abnormal edges.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
    return false;

  /* Similarly, don't infer anything from statements that may throw
     exceptions.  */
  if (stmt_could_throw_p (stmt))
    return false;

  /* If STMT is the last statement of a basic block with no
     successors, there is no point inferring anything about any of its
     operands.  We would not be able to find a proper insertion point
     for the assertion, anyway.  */
  if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
    return false;

  /* We can only assume that a pointer dereference will yield
     non-NULL if -fdelete-null-pointer-checks is enabled.  */
  if (flag_delete_null_pointer_checks
      && POINTER_TYPE_P (TREE_TYPE (op))
      && gimple_code (stmt) != GIMPLE_ASM)
    {
      unsigned num_uses, num_loads, num_stores;

      count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
      if (num_loads + num_stores > 0)
	{
	  /* STMT dereferences OP (reads or writes through it), so
	     after STMT executes OP must be non-NULL: record OP != 0.  */
	  *val_p = build_int_cst (TREE_TYPE (op), 0);
	  *comp_code_p = NE_EXPR;
	  return true;
	}
    }

  return false;
}
4216 | ||
4217 | ||
/* Forward declarations for the ASSERT_EXPR dump/debug helpers defined
   below; presumably kept extern so they can be called from a debugger.  */
void dump_asserts_for (FILE *, tree);
void debug_asserts_for (tree);
void dump_all_asserts (FILE *);
void debug_all_asserts (void);
4222 | ||
4223 | /* Dump all the registered assertions for NAME to FILE. */ | |
4224 | ||
4225 | void | |
4226 | dump_asserts_for (FILE *file, tree name) | |
4227 | { | |
4228 | assert_locus_t loc; | |
4229 | ||
4230 | fprintf (file, "Assertions to be inserted for "); | |
4231 | print_generic_expr (file, name, 0); | |
4232 | fprintf (file, "\n"); | |
4233 | ||
4234 | loc = asserts_for[SSA_NAME_VERSION (name)]; | |
4235 | while (loc) | |
4236 | { | |
4237 | fprintf (file, "\t"); | |
75a70cf9 | 4238 | print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0); |
eea12c72 | 4239 | fprintf (file, "\n\tBB #%d", loc->bb->index); |
4240 | if (loc->e) | |
4241 | { | |
4242 | fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index, | |
4243 | loc->e->dest->index); | |
5147ec07 | 4244 | dump_edge_info (file, loc->e, dump_flags, 0); |
eea12c72 | 4245 | } |
4246 | fprintf (file, "\n\tPREDICATE: "); | |
4247 | print_generic_expr (file, name, 0); | |
4248 | fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]); | |
4249 | print_generic_expr (file, loc->val, 0); | |
4250 | fprintf (file, "\n\n"); | |
4251 | loc = loc->next; | |
4252 | } | |
4253 | ||
4254 | fprintf (file, "\n"); | |
4255 | } | |
4256 | ||
4257 | ||
/* Dump all the registered assertions for NAME to stderr.  Convenience
   wrapper, presumably for use from a debugger (DEBUG_FUNCTION).  */

DEBUG_FUNCTION void
debug_asserts_for (tree name)
{
  dump_asserts_for (stderr, name);
}
4265 | ||
4266 | ||
/* Dump all the registered assertions for all the names to FILE.
   Iterates over NEED_ASSERT_FOR, which holds the SSA versions that
   have at least one assertion registered.  */

void
dump_all_asserts (FILE *file)
{
  unsigned i;
  bitmap_iterator bi;

  fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
  EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
    dump_asserts_for (file, ssa_name (i));
  fprintf (file, "\n");
}
4280 | ||
4281 | ||
/* Dump all the registered assertions for all the names to stderr.
   Convenience wrapper, presumably for use from a debugger.  */

DEBUG_FUNCTION void
debug_all_asserts (void)
{
  dump_all_asserts (stderr);
}
4289 | ||
4290 | ||
/* If NAME doesn't have an ASSERT_EXPR registered for asserting
   'EXPR COMP_CODE VAL' at a location that dominates block BB or
   E->DEST, then register this location as a possible insertion point
   for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.

   BB, E and SI provide the exact insertion point for the new
   ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
   on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
   BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
   must not be NULL.  */

static void
register_new_assert_for (tree name, tree expr,
			 enum tree_code comp_code,
			 tree val,
			 basic_block bb,
			 edge e,
			 gimple_stmt_iterator si)
{
  assert_locus_t n, loc, last_loc;
  basic_block dest_bb;

  /* Exactly one of BB and E identifies the insertion point.  */
  gcc_checking_assert (bb == NULL || e == NULL);

  /* Control-flow statements must use an edge insertion; we cannot
     insert after the last statement of a block ending in a cond or
     switch.  */
  if (e == NULL)
    gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);

  /* Never build an assert comparing against an integer constant with
     TREE_OVERFLOW set.  This confuses our undefined overflow warning
     machinery.  Rebuild the constant with the same value but without
     the overflow flag.  */
  if (TREE_CODE (val) == INTEGER_CST
      && TREE_OVERFLOW (val))
    val = build_int_cst_wide (TREE_TYPE (val),
			      TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val));

  /* The new assertion A will be inserted at BB or E.  We need to
     determine if the new location is dominated by a previously
     registered location for A.  If we are doing an edge insertion,
     assume that A will be inserted at E->DEST.  Note that this is not
     necessarily true.

     If E is a critical edge, it will be split.  But even if E is
     split, the new block will dominate the same set of blocks that
     E->DEST dominates.

     The reverse, however, is not true, blocks dominated by E->DEST
     will not be dominated by the new block created to split E.  So,
     if the insertion location is on a critical edge, we will not use
     the new location to move another assertion previously registered
     at a block dominated by E->DEST.  */
  dest_bb = (bb) ? bb : e->dest;

  /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
     VAL at a block dominating DEST_BB, then we don't need to insert a new
     one.  Similarly, if the same assertion already exists at a block
     dominated by DEST_BB and the new location is not on a critical
     edge, then update the existing location for the assertion (i.e.,
     move the assertion up in the dominance tree).

     Note, this is implemented as a simple linked list because there
     should not be more than a handful of assertions registered per
     name.  If this becomes a performance problem, a table hashed by
     COMP_CODE and VAL could be implemented.  */
  loc = asserts_for[SSA_NAME_VERSION (name)];
  last_loc = loc;
  while (loc)
    {
      /* Only locations asserting the same predicate (same code, value
	 and expression) are candidates for merging.  */
      if (loc->comp_code == comp_code
	  && (loc->val == val
	      || operand_equal_p (loc->val, val, 0))
	  && (loc->expr == expr
	      || operand_equal_p (loc->expr, expr, 0)))
	{
	  /* If the assertion NAME COMP_CODE VAL has already been
	     registered at a basic block that dominates DEST_BB, then
	     we don't need to insert the same assertion again.  Note
	     that we don't check strict dominance here to avoid
	     replicating the same assertion inside the same basic
	     block more than once (e.g., when a pointer is
	     dereferenced several times inside a block).

	     An exception to this rule are edge insertions.  If the
	     new assertion is to be inserted on edge E, then it will
	     dominate all the other insertions that we may want to
	     insert in DEST_BB.  So, if we are doing an edge
	     insertion, don't do this dominance check.  */
	  if (e == NULL
	      && dominated_by_p (CDI_DOMINATORS, dest_bb, loc->bb))
	    return;

	  /* Otherwise, if E is not a critical edge and DEST_BB
	     dominates the existing location for the assertion, move
	     the assertion up in the dominance tree by updating its
	     location information.  */
	  if ((e == NULL || !EDGE_CRITICAL_P (e))
	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
	    {
	      loc->bb = dest_bb;
	      loc->e = e;
	      loc->si = si;
	      return;
	    }
	}

      /* Update the last node of the list and move to the next one.  */
      last_loc = loc;
      loc = loc->next;
    }

  /* If we didn't find an assertion already registered for
     NAME COMP_CODE VAL, add a new one at the end of the list of
     assertions associated with NAME.  */
  n = XNEW (struct assert_locus_d);
  n->bb = dest_bb;
  n->e = e;
  n->si = si;
  n->comp_code = comp_code;
  n->val = val;
  n->expr = expr;
  n->next = NULL;

  /* Append to the list, or start a new list if NAME had none.  */
  if (last_loc)
    last_loc->next = n;
  else
    asserts_for[SSA_NAME_VERSION (name)] = n;

  /* Record that NAME has at least one pending assertion.  */
  bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
}
4420 | ||
/* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
   Extract a suitable test code and value and store them into *CODE_P and
   *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.

   If no extraction was possible, return FALSE, otherwise return TRUE.

   If INVERT is true, then we invert the result stored into *CODE_P.  */

static bool
extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
					 tree cond_op0, tree cond_op1,
					 bool invert, enum tree_code *code_p,
					 tree *val_p)
{
  enum tree_code comp_code;
  tree val;

  /* We have a comparison of the form NAME COMP VAL
     or VAL COMP NAME.  */
  if (name == cond_op1)
    {
      /* If the predicate is of the form VAL COMP NAME, flip
	 COMP around because we need to register NAME as the
	 first operand in the predicate.  */
      comp_code = swap_tree_comparison (cond_code);
      val = cond_op0;
    }
  else
    {
      /* The comparison is of the form NAME COMP VAL, so the
	 comparison code remains unchanged.  */
      comp_code = cond_code;
      val = cond_op1;
    }

  /* Invert the comparison code as necessary.  */
  if (invert)
    comp_code = invert_tree_comparison (comp_code, 0);

  /* VRP does not handle float types.  */
  if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
    return false;

  /* Do not register always-false predicates.
     FIXME:  this works around a limitation in fold() when dealing with
     enumerations.  Given 'enum { N1, N2 } x;', fold will not
     fold 'if (x > N2)' to 'if (0)'.  */
  if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (val)))
    {
      tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
      tree max = TYPE_MAX_VALUE (TREE_TYPE (val));

      /* NAME > TYPE_MAX can never hold; bail out when VAL equals the
	 type's maximum, or the maximum is unknown (NULL).  */
      if (comp_code == GT_EXPR
	  && (!max
	      || compare_values (val, max) == 0))
	return false;

      /* Likewise NAME < TYPE_MIN can never hold.  */
      if (comp_code == LT_EXPR
	  && (!min
	      || compare_values (val, min) == 0))
	return false;
    }
  *code_p = comp_code;
  *val_p = val;
  return true;
}
fecf3b39 | 4488 | |
/* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
   (otherwise return VAL).  VAL and MASK must be zero-extended for
   precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
   (to transform signed values into unsigned) and at the end xor
   SGNBIT back.  */

static double_int
masked_increment (double_int val, double_int mask, double_int sgnbit,
		  unsigned int prec)
{
  double_int bit = double_int_one, res;
  unsigned int i;

  /* Map to the unsigned domain so that ugt () comparisons below order
     values correctly even when SGNBIT marks a sign bit.  */
  val ^= sgnbit;
  /* Try each bit position of MASK from least to most significant;
     BIT tracks the current position (doubles every iteration).  */
  for (i = 0; i < prec; i++, bit += bit)
    {
      res = mask;
      /* Only bits that are set in MASK can appear in the result.  */
      if ((res & bit).is_zero ())
	continue;
      /* Candidate: round VAL up to the next multiple of BIT (clear all
	 bits below BIT, carry into BIT), then keep only MASK bits.  */
      res = bit - double_int_one;
      res = (val + bit).and_not (res);
      res &= mask;
      /* The first candidate strictly greater than VAL (unsigned
	 compare) is the smallest such RES; undo the sign transform.  */
      if (res.ugt (val))
	return res ^ sgnbit;
    }
  /* No value satisfying the constraints exists; return VAL unchanged
     (after undoing the sign transform).  */
  return val ^ sgnbit;
}
4516 | ||
bed8bec4 | 4517 | /* Try to register an edge assertion for SSA name NAME on edge E for |
4518 | the condition COND contributing to the conditional jump pointed to by BSI. | |
4519 | Invert the condition COND if INVERT is true. | |
4520 | Return true if an assertion for NAME could be registered. */ | |
4521 | ||
4522 | static bool | |
75a70cf9 | 4523 | register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, |
a00913c7 | 4524 | enum tree_code cond_code, |
4525 | tree cond_op0, tree cond_op1, bool invert) | |
bed8bec4 | 4526 | { |
4527 | tree val; | |
4528 | enum tree_code comp_code; | |
4529 | bool retval = false; | |
4530 | ||
a00913c7 | 4531 | if (!extract_code_and_val_from_cond_with_ops (name, cond_code, |
4532 | cond_op0, | |
4533 | cond_op1, | |
4534 | invert, &comp_code, &val)) | |
bed8bec4 | 4535 | return false; |
4536 | ||
4537 | /* Only register an ASSERT_EXPR if NAME was found in the sub-graph | |
4538 | reachable from E. */ | |
17ed8337 | 4539 | if (live_on_edge (e, name) |
bed8bec4 | 4540 | && !has_single_use (name)) |
4541 | { | |
4542 | register_new_assert_for (name, name, comp_code, val, NULL, e, bsi); | |
4543 | retval = true; | |
4544 | } | |
4545 | ||
4546 | /* In the case of NAME <= CST and NAME being defined as | |
4547 | NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2 | |
4548 | and NAME2 <= CST - CST2. We can do the same for NAME > CST. | |
4549 | This catches range and anti-range tests. */ | |
4550 | if ((comp_code == LE_EXPR | |
4551 | || comp_code == GT_EXPR) | |
4552 | && TREE_CODE (val) == INTEGER_CST | |
4553 | && TYPE_UNSIGNED (TREE_TYPE (val))) | |
4554 | { | |
75a70cf9 | 4555 | gimple def_stmt = SSA_NAME_DEF_STMT (name); |
22cdb855 | 4556 | tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE; |
bed8bec4 | 4557 | |
4558 | /* Extract CST2 from the (optional) addition. */ | |
75a70cf9 | 4559 | if (is_gimple_assign (def_stmt) |
4560 | && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR) | |
bed8bec4 | 4561 | { |
75a70cf9 | 4562 | name2 = gimple_assign_rhs1 (def_stmt); |
4563 | cst2 = gimple_assign_rhs2 (def_stmt); | |
bed8bec4 | 4564 | if (TREE_CODE (name2) == SSA_NAME |
4565 | && TREE_CODE (cst2) == INTEGER_CST) | |
4566 | def_stmt = SSA_NAME_DEF_STMT (name2); | |
4567 | } | |
4568 | ||
22cdb855 | 4569 | /* Extract NAME2 from the (optional) sign-changing cast. */ |
75a70cf9 | 4570 | if (gimple_assign_cast_p (def_stmt)) |
22cdb855 | 4571 | { |
d9659041 | 4572 | if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)) |
75a70cf9 | 4573 | && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt))) |
4574 | && (TYPE_PRECISION (gimple_expr_type (def_stmt)) | |
4575 | == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))) | |
4576 | name3 = gimple_assign_rhs1 (def_stmt); | |
22cdb855 | 4577 | } |
bed8bec4 | 4578 | |
22cdb855 | 4579 | /* If name3 is used later, create an ASSERT_EXPR for it. */ |
4580 | if (name3 != NULL_TREE | |
4581 | && TREE_CODE (name3) == SSA_NAME | |
bed8bec4 | 4582 | && (cst2 == NULL_TREE |
4583 | || TREE_CODE (cst2) == INTEGER_CST) | |
22cdb855 | 4584 | && INTEGRAL_TYPE_P (TREE_TYPE (name3)) |
17ed8337 | 4585 | && live_on_edge (e, name3) |
22cdb855 | 4586 | && !has_single_use (name3)) |
4587 | { | |
4588 | tree tmp; | |
4589 | ||
4590 | /* Build an expression for the range test. */ | |
4591 | tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3); | |
4592 | if (cst2 != NULL_TREE) | |
4593 | tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); | |
4594 | ||
4595 | if (dump_file) | |
4596 | { | |
4597 | fprintf (dump_file, "Adding assert for "); | |
4598 | print_generic_expr (dump_file, name3, 0); | |
4599 | fprintf (dump_file, " from "); | |
4600 | print_generic_expr (dump_file, tmp, 0); | |
4601 | fprintf (dump_file, "\n"); | |
4602 | } | |
4603 | ||
4604 | register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi); | |
4605 | ||
4606 | retval = true; | |
4607 | } | |
4608 | ||
4609 | /* If name2 is used later, create an ASSERT_EXPR for it. */ | |
4610 | if (name2 != NULL_TREE | |
4611 | && TREE_CODE (name2) == SSA_NAME | |
4612 | && TREE_CODE (cst2) == INTEGER_CST | |
4613 | && INTEGRAL_TYPE_P (TREE_TYPE (name2)) | |
17ed8337 | 4614 | && live_on_edge (e, name2) |
bed8bec4 | 4615 | && !has_single_use (name2)) |
4616 | { | |
4617 | tree tmp; | |
4618 | ||
4619 | /* Build an expression for the range test. */ | |
4620 | tmp = name2; | |
4621 | if (TREE_TYPE (name) != TREE_TYPE (name2)) | |
4622 | tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp); | |
4623 | if (cst2 != NULL_TREE) | |
4624 | tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); | |
4625 | ||
4626 | if (dump_file) | |
4627 | { | |
4628 | fprintf (dump_file, "Adding assert for "); | |
4629 | print_generic_expr (dump_file, name2, 0); | |
4630 | fprintf (dump_file, " from "); | |
4631 | print_generic_expr (dump_file, tmp, 0); | |
4632 | fprintf (dump_file, "\n"); | |
4633 | } | |
4634 | ||
4635 | register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi); | |
4636 | ||
4637 | retval = true; | |
4638 | } | |
4639 | } | |
4640 | ||
98f00c5b | 4641 | if (TREE_CODE_CLASS (comp_code) == tcc_comparison |
4642 | && TREE_CODE (val) == INTEGER_CST) | |
4643 | { | |
4644 | gimple def_stmt = SSA_NAME_DEF_STMT (name); | |
7139adf8 | 4645 | tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE; |
98f00c5b | 4646 | tree val2 = NULL_TREE; |
6b48fd25 | 4647 | double_int mask = double_int_zero; |
4648 | unsigned int prec = TYPE_PRECISION (TREE_TYPE (val)); | |
98f00c5b | 4649 | |
22676c8f | 4650 | /* Add asserts for NAME cmp CST and NAME being defined |
4651 | as NAME = (int) NAME2. */ | |
4652 | if (!TYPE_UNSIGNED (TREE_TYPE (val)) | |
4653 | && (comp_code == LE_EXPR || comp_code == LT_EXPR | |
4654 | || comp_code == GT_EXPR || comp_code == GE_EXPR) | |
4655 | && gimple_assign_cast_p (def_stmt)) | |
4656 | { | |
4657 | name2 = gimple_assign_rhs1 (def_stmt); | |
4658 | if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)) | |
4659 | && INTEGRAL_TYPE_P (TREE_TYPE (name2)) | |
4660 | && TYPE_UNSIGNED (TREE_TYPE (name2)) | |
4661 | && prec == TYPE_PRECISION (TREE_TYPE (name2)) | |
4662 | && (comp_code == LE_EXPR || comp_code == GT_EXPR | |
4663 | || !tree_int_cst_equal (val, | |
4664 | TYPE_MIN_VALUE (TREE_TYPE (val)))) | |
4665 | && live_on_edge (e, name2) | |
4666 | && !has_single_use (name2)) | |
4667 | { | |
4668 | tree tmp, cst; | |
4669 | enum tree_code new_comp_code = comp_code; | |
4670 | ||
4671 | cst = fold_convert (TREE_TYPE (name2), | |
4672 | TYPE_MIN_VALUE (TREE_TYPE (val))); | |
4673 | /* Build an expression for the range test. */ | |
4674 | tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst); | |
4675 | cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst, | |
4676 | fold_convert (TREE_TYPE (name2), val)); | |
4677 | if (comp_code == LT_EXPR || comp_code == GE_EXPR) | |
4678 | { | |
4679 | new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR; | |
4680 | cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst, | |
4681 | build_int_cst (TREE_TYPE (name2), 1)); | |
4682 | } | |
4683 | ||
4684 | if (dump_file) | |
4685 | { | |
4686 | fprintf (dump_file, "Adding assert for "); | |
4687 | print_generic_expr (dump_file, name2, 0); | |
4688 | fprintf (dump_file, " from "); | |
4689 | print_generic_expr (dump_file, tmp, 0); | |
4690 | fprintf (dump_file, "\n"); | |
4691 | } | |
4692 | ||
4693 | register_new_assert_for (name2, tmp, new_comp_code, cst, NULL, | |
4694 | e, bsi); | |
4695 | ||
4696 | retval = true; | |
4697 | } | |
4698 | } | |
4699 | ||
4700 | /* Add asserts for NAME cmp CST and NAME being defined as | |
4701 | NAME = NAME2 >> CST2. | |
4702 | ||
4703 | Extract CST2 from the right shift. */ | |
98f00c5b | 4704 | if (is_gimple_assign (def_stmt) |
4705 | && gimple_assign_rhs_code (def_stmt) == RSHIFT_EXPR) | |
4706 | { | |
4707 | name2 = gimple_assign_rhs1 (def_stmt); | |
4708 | cst2 = gimple_assign_rhs2 (def_stmt); | |
4709 | if (TREE_CODE (name2) == SSA_NAME | |
4710 | && host_integerp (cst2, 1) | |
98f00c5b | 4711 | && INTEGRAL_TYPE_P (TREE_TYPE (name2)) |
6b48fd25 | 4712 | && IN_RANGE (tree_low_cst (cst2, 1), 1, prec - 1) |
24cd46a7 | 4713 | && prec <= HOST_BITS_PER_DOUBLE_INT |
27d5d67b | 4714 | && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val))) |
98f00c5b | 4715 | && live_on_edge (e, name2) |
4716 | && !has_single_use (name2)) | |
4717 | { | |
cf8f0e63 | 4718 | mask = double_int::mask (tree_low_cst (cst2, 1)); |
98f00c5b | 4719 | val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2); |
4720 | } | |
4721 | } | |
98f00c5b | 4722 | if (val2 != NULL_TREE |
4723 | && TREE_CODE (val2) == INTEGER_CST | |
4724 | && simple_cst_equal (fold_build2 (RSHIFT_EXPR, | |
4725 | TREE_TYPE (val), | |
4726 | val2, cst2), val)) | |
4727 | { | |
4728 | enum tree_code new_comp_code = comp_code; | |
4729 | tree tmp, new_val; | |
4730 | ||
4731 | tmp = name2; | |
4732 | if (comp_code == EQ_EXPR || comp_code == NE_EXPR) | |
4733 | { | |
4734 | if (!TYPE_UNSIGNED (TREE_TYPE (val))) | |
4735 | { | |
98f00c5b | 4736 | tree type = build_nonstandard_integer_type (prec, 1); |
4737 | tmp = build1 (NOP_EXPR, type, name2); | |
4738 | val2 = fold_convert (type, val2); | |
4739 | } | |
4740 | tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2); | |
6b48fd25 | 4741 | new_val = double_int_to_tree (TREE_TYPE (tmp), mask); |
98f00c5b | 4742 | new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR; |
4743 | } | |
4744 | else if (comp_code == LT_EXPR || comp_code == GE_EXPR) | |
4745 | new_val = val2; | |
4746 | else | |
4747 | { | |
27d5d67b | 4748 | double_int maxval |
cf8f0e63 | 4749 | = double_int::max_value (prec, TYPE_UNSIGNED (TREE_TYPE (val))); |
4750 | mask |= tree_to_double_int (val2); | |
4751 | if (mask == maxval) | |
6b48fd25 | 4752 | new_val = NULL_TREE; |
4753 | else | |
4754 | new_val = double_int_to_tree (TREE_TYPE (val2), mask); | |
98f00c5b | 4755 | } |
4756 | ||
6b48fd25 | 4757 | if (new_val) |
98f00c5b | 4758 | { |
6b48fd25 | 4759 | if (dump_file) |
4760 | { | |
4761 | fprintf (dump_file, "Adding assert for "); | |
4762 | print_generic_expr (dump_file, name2, 0); | |
4763 | fprintf (dump_file, " from "); | |
4764 | print_generic_expr (dump_file, tmp, 0); | |
4765 | fprintf (dump_file, "\n"); | |
4766 | } | |
98f00c5b | 4767 | |
6b48fd25 | 4768 | register_new_assert_for (name2, tmp, new_comp_code, new_val, |
4769 | NULL, e, bsi); | |
4770 | retval = true; | |
4771 | } | |
98f00c5b | 4772 | } |
7139adf8 | 4773 | |
4774 | /* Add asserts for NAME cmp CST and NAME being defined as | |
4775 | NAME = NAME2 & CST2. | |
4776 | ||
4777 | Extract CST2 from the and. */ | |
4778 | names[0] = NULL_TREE; | |
4779 | names[1] = NULL_TREE; | |
4780 | cst2 = NULL_TREE; | |
4781 | if (is_gimple_assign (def_stmt) | |
4782 | && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR) | |
4783 | { | |
4784 | name2 = gimple_assign_rhs1 (def_stmt); | |
4785 | cst2 = gimple_assign_rhs2 (def_stmt); | |
4786 | if (TREE_CODE (name2) == SSA_NAME | |
4787 | && INTEGRAL_TYPE_P (TREE_TYPE (name2)) | |
4788 | && TREE_CODE (cst2) == INTEGER_CST | |
4789 | && !integer_zerop (cst2) | |
24cd46a7 | 4790 | && prec <= HOST_BITS_PER_DOUBLE_INT |
7139adf8 | 4791 | && (prec > 1 |
4792 | || TYPE_UNSIGNED (TREE_TYPE (val)))) | |
4793 | { | |
4794 | gimple def_stmt2 = SSA_NAME_DEF_STMT (name2); | |
4795 | if (gimple_assign_cast_p (def_stmt2)) | |
4796 | { | |
4797 | names[1] = gimple_assign_rhs1 (def_stmt2); | |
4798 | if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2)) | |
4799 | || !INTEGRAL_TYPE_P (TREE_TYPE (names[1])) | |
4800 | || (TYPE_PRECISION (TREE_TYPE (name2)) | |
4801 | != TYPE_PRECISION (TREE_TYPE (names[1]))) | |
4802 | || !live_on_edge (e, names[1]) | |
4803 | || has_single_use (names[1])) | |
4804 | names[1] = NULL_TREE; | |
4805 | } | |
4806 | if (live_on_edge (e, name2) | |
4807 | && !has_single_use (name2)) | |
4808 | names[0] = name2; | |
4809 | } | |
4810 | } | |
4811 | if (names[0] || names[1]) | |
4812 | { | |
4813 | double_int minv, maxv = double_int_zero, valv, cst2v; | |
4814 | double_int tem, sgnbit; | |
4815 | bool valid_p = false, valn = false, cst2n = false; | |
4816 | enum tree_code ccode = comp_code; | |
4817 | ||
cf8f0e63 | 4818 | valv = tree_to_double_int (val).zext (prec); |
4819 | cst2v = tree_to_double_int (cst2).zext (prec); | |
7139adf8 | 4820 | if (!TYPE_UNSIGNED (TREE_TYPE (val))) |
4821 | { | |
cf8f0e63 | 4822 | valn = valv.sext (prec).is_negative (); |
4823 | cst2n = cst2v.sext (prec).is_negative (); | |
7139adf8 | 4824 | } |
4825 | /* If CST2 doesn't have most significant bit set, | |
4826 | but VAL is negative, we have comparison like | |
4827 | if ((x & 0x123) > -4) (always true). Just give up. */ | |
4828 | if (!cst2n && valn) | |
4829 | ccode = ERROR_MARK; | |
4830 | if (cst2n) | |
cf8f0e63 | 4831 | sgnbit = double_int_one.llshift (prec - 1, prec).zext (prec); |
7139adf8 | 4832 | else |
4833 | sgnbit = double_int_zero; | |
cf8f0e63 | 4834 | minv = valv & cst2v; |
7139adf8 | 4835 | switch (ccode) |
4836 | { | |
4837 | case EQ_EXPR: | |
4838 | /* Minimum unsigned value for equality is VAL & CST2 | |
4839 | (should be equal to VAL, otherwise we probably should | |
4840 | have folded the comparison into false) and | |
4841 | maximum unsigned value is VAL | ~CST2. */ | |
cf8f0e63 | 4842 | maxv = valv | ~cst2v; |
4843 | maxv = maxv.zext (prec); | |
7139adf8 | 4844 | valid_p = true; |
4845 | break; | |
4846 | case NE_EXPR: | |
cf8f0e63 | 4847 | tem = valv | ~cst2v; |
4848 | tem = tem.zext (prec); | |
7139adf8 | 4849 | /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */ |
cf8f0e63 | 4850 | if (valv.is_zero ()) |
7139adf8 | 4851 | { |
4852 | cst2n = false; | |
4853 | sgnbit = double_int_zero; | |
4854 | goto gt_expr; | |
4855 | } | |
4856 | /* If (VAL | ~CST2) is all ones, handle it as | |
4857 | (X & CST2) < VAL. */ | |
cf8f0e63 | 4858 | if (tem == double_int::mask (prec)) |
7139adf8 | 4859 | { |
4860 | cst2n = false; | |
4861 | valn = false; | |
4862 | sgnbit = double_int_zero; | |
4863 | goto lt_expr; | |
4864 | } | |
4865 | if (!cst2n | |
cf8f0e63 | 4866 | && cst2v.sext (prec).is_negative ()) |
4867 | sgnbit = double_int_one.llshift (prec - 1, prec).zext (prec); | |
4868 | if (!sgnbit.is_zero ()) | |
7139adf8 | 4869 | { |
cf8f0e63 | 4870 | if (valv == sgnbit) |
7139adf8 | 4871 | { |
4872 | cst2n = true; | |
4873 | valn = true; | |
4874 | goto gt_expr; | |
4875 | } | |
cf8f0e63 | 4876 | if (tem == double_int::mask (prec - 1)) |
7139adf8 | 4877 | { |
4878 | cst2n = true; | |
4879 | goto lt_expr; | |
4880 | } | |
4881 | if (!cst2n) | |
4882 | sgnbit = double_int_zero; | |
4883 | } | |
4884 | break; | |
4885 | case GE_EXPR: | |
4886 | /* Minimum unsigned value for >= if (VAL & CST2) == VAL | |
4887 | is VAL and maximum unsigned value is ~0. For signed | |
4888 | comparison, if CST2 doesn't have most significant bit | |
4889 | set, handle it similarly. If CST2 has MSB set, | |
4890 | the minimum is the same, and maximum is ~0U/2. */ | |
cf8f0e63 | 4891 | if (minv != valv) |
7139adf8 | 4892 | { |
4893 | /* If (VAL & CST2) != VAL, X & CST2 can't be equal to | |
4894 | VAL. */ | |
4895 | minv = masked_increment (valv, cst2v, sgnbit, prec); | |
cf8f0e63 | 4896 | if (minv == valv) |
7139adf8 | 4897 | break; |
4898 | } | |
cf8f0e63 | 4899 | maxv = double_int::mask (prec - (cst2n ? 1 : 0)); |
7139adf8 | 4900 | valid_p = true; |
4901 | break; | |
4902 | case GT_EXPR: | |
4903 | gt_expr: | |
4904 | /* Find out smallest MINV where MINV > VAL | |
4905 | && (MINV & CST2) == MINV, if any. If VAL is signed and | |
4906 | CST2 has MSB set, compute it biased by 1 << (prec - 1). */ | |
4907 | minv = masked_increment (valv, cst2v, sgnbit, prec); | |
cf8f0e63 | 4908 | if (minv == valv) |
7139adf8 | 4909 | break; |
cf8f0e63 | 4910 | maxv = double_int::mask (prec - (cst2n ? 1 : 0)); |
7139adf8 | 4911 | valid_p = true; |
4912 | break; | |
4913 | case LE_EXPR: | |
4914 | /* Minimum unsigned value for <= is 0 and maximum | |
4915 | unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL. | |
4916 | Otherwise, find smallest VAL2 where VAL2 > VAL | |
4917 | && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2 | |
4918 | as maximum. | |
4919 | For signed comparison, if CST2 doesn't have most | |
4920 | significant bit set, handle it similarly. If CST2 has | |
4921 | MSB set, the maximum is the same and minimum is INT_MIN. */ | |
cf8f0e63 | 4922 | if (minv == valv) |
7139adf8 | 4923 | maxv = valv; |
4924 | else | |
4925 | { | |
4926 | maxv = masked_increment (valv, cst2v, sgnbit, prec); | |
cf8f0e63 | 4927 | if (maxv == valv) |
7139adf8 | 4928 | break; |
cf8f0e63 | 4929 | maxv -= double_int_one; |
7139adf8 | 4930 | } |
cf8f0e63 | 4931 | maxv |= ~cst2v; |
4932 | maxv = maxv.zext (prec); | |
7139adf8 | 4933 | minv = sgnbit; |
4934 | valid_p = true; | |
4935 | break; | |
4936 | case LT_EXPR: | |
4937 | lt_expr: | |
4938 | /* Minimum unsigned value for < is 0 and maximum | |
4939 | unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL. | |
4940 | Otherwise, find smallest VAL2 where VAL2 > VAL | |
4941 | && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2 | |
4942 | as maximum. | |
4943 | For signed comparison, if CST2 doesn't have most | |
4944 | significant bit set, handle it similarly. If CST2 has | |
4945 | MSB set, the maximum is the same and minimum is INT_MIN. */ | |
cf8f0e63 | 4946 | if (minv == valv) |
7139adf8 | 4947 | { |
cf8f0e63 | 4948 | if (valv == sgnbit) |
7139adf8 | 4949 | break; |
4950 | maxv = valv; | |
4951 | } | |
4952 | else | |
4953 | { | |
4954 | maxv = masked_increment (valv, cst2v, sgnbit, prec); | |
cf8f0e63 | 4955 | if (maxv == valv) |
7139adf8 | 4956 | break; |
4957 | } | |
cf8f0e63 | 4958 | maxv -= double_int_one; |
4959 | maxv |= ~cst2v; | |
4960 | maxv = maxv.zext (prec); | |
7139adf8 | 4961 | minv = sgnbit; |
4962 | valid_p = true; | |
4963 | break; | |
4964 | default: | |
4965 | break; | |
4966 | } | |
4967 | if (valid_p | |
cf8f0e63 | 4968 | && (maxv - minv).zext (prec) != double_int::mask (prec)) |
7139adf8 | 4969 | { |
4970 | tree tmp, new_val, type; | |
4971 | int i; | |
4972 | ||
4973 | for (i = 0; i < 2; i++) | |
4974 | if (names[i]) | |
4975 | { | |
4976 | double_int maxv2 = maxv; | |
4977 | tmp = names[i]; | |
4978 | type = TREE_TYPE (names[i]); | |
4979 | if (!TYPE_UNSIGNED (type)) | |
4980 | { | |
4981 | type = build_nonstandard_integer_type (prec, 1); | |
4982 | tmp = build1 (NOP_EXPR, type, names[i]); | |
4983 | } | |
cf8f0e63 | 4984 | if (!minv.is_zero ()) |
7139adf8 | 4985 | { |
4986 | tmp = build2 (PLUS_EXPR, type, tmp, | |
cf8f0e63 | 4987 | double_int_to_tree (type, -minv)); |
4988 | maxv2 = maxv - minv; | |
7139adf8 | 4989 | } |
4990 | new_val = double_int_to_tree (type, maxv2); | |
4991 | ||
4992 | if (dump_file) | |
4993 | { | |
4994 | fprintf (dump_file, "Adding assert for "); | |
4995 | print_generic_expr (dump_file, names[i], 0); | |
4996 | fprintf (dump_file, " from "); | |
4997 | print_generic_expr (dump_file, tmp, 0); | |
4998 | fprintf (dump_file, "\n"); | |
4999 | } | |
5000 | ||
5001 | register_new_assert_for (names[i], tmp, LE_EXPR, | |
5002 | new_val, NULL, e, bsi); | |
5003 | retval = true; | |
5004 | } | |
5005 | } | |
5006 | } | |
98f00c5b | 5007 | } |
5008 | ||
bed8bec4 | 5009 | return retval; |
5010 | } | |
5011 | ||
fecf3b39 | 5012 | /* OP is an operand of a truth value expression which is known to have |
5013 | a particular value. Register any asserts for OP and for any | |
48e1416a | 5014 | operands in OP's defining statement. |
fecf3b39 | 5015 | |
5016 | If CODE is EQ_EXPR, then we want to register OP is zero (false), | |
5017 | if CODE is NE_EXPR, then we want to register OP is nonzero (true). */ | |
5018 | ||
5019 | static bool | |
5020 | register_edge_assert_for_1 (tree op, enum tree_code code, | |
75a70cf9 | 5021 | edge e, gimple_stmt_iterator bsi) |
fecf3b39 | 5022 | { |
13f7fd91 | 5023 | bool retval = false; |
75a70cf9 | 5024 | gimple op_def; |
5025 | tree val; | |
a00913c7 | 5026 | enum tree_code rhs_code; |
eea12c72 | 5027 | |
fecf3b39 | 5028 | /* We only care about SSA_NAMEs. */ |
5029 | if (TREE_CODE (op) != SSA_NAME) | |
eea12c72 | 5030 | return false; |
5031 | ||
fecf3b39 | 5032 | /* We know that OP will have a zero or nonzero value. If OP is used |
48e1416a | 5033 | more than once go ahead and register an assert for OP. |
fecf3b39 | 5034 | |
5035 | The FOUND_IN_SUBGRAPH support is not helpful in this situation as | |
5036 | it will always be set for OP (because OP is used in a COND_EXPR in | |
5037 | the subgraph). */ | |
5038 | if (!has_single_use (op)) | |
5039 | { | |
5040 | val = build_int_cst (TREE_TYPE (op), 0); | |
bed8bec4 | 5041 | register_new_assert_for (op, op, code, val, NULL, e, bsi); |
fecf3b39 | 5042 | retval = true; |
5043 | } | |
5044 | ||
5045 | /* Now look at how OP is set. If it's set from a comparison, | |
5046 | a truth operation or some bit operations, then we may be able | |
5047 | to register information about the operands of that assignment. */ | |
5048 | op_def = SSA_NAME_DEF_STMT (op); | |
75a70cf9 | 5049 | if (gimple_code (op_def) != GIMPLE_ASSIGN) |
fecf3b39 | 5050 | return retval; |
5051 | ||
75a70cf9 | 5052 | rhs_code = gimple_assign_rhs_code (op_def); |
fecf3b39 | 5053 | |
75a70cf9 | 5054 | if (TREE_CODE_CLASS (rhs_code) == tcc_comparison) |
eea12c72 | 5055 | { |
13f7fd91 | 5056 | bool invert = (code == EQ_EXPR ? true : false); |
75a70cf9 | 5057 | tree op0 = gimple_assign_rhs1 (op_def); |
5058 | tree op1 = gimple_assign_rhs2 (op_def); | |
eea12c72 | 5059 | |
bed8bec4 | 5060 | if (TREE_CODE (op0) == SSA_NAME) |
a00913c7 | 5061 | retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1, |
5062 | invert); | |
bed8bec4 | 5063 | if (TREE_CODE (op1) == SSA_NAME) |
a00913c7 | 5064 | retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1, |
5065 | invert); | |
fecf3b39 | 5066 | } |
5067 | else if ((code == NE_EXPR | |
cfd7906e | 5068 | && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR) |
fecf3b39 | 5069 | || (code == EQ_EXPR |
cfd7906e | 5070 | && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR)) |
fecf3b39 | 5071 | { |
5072 | /* Recurse on each operand. */ | |
75a70cf9 | 5073 | retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), |
fecf3b39 | 5074 | code, e, bsi); |
75a70cf9 | 5075 | retval |= register_edge_assert_for_1 (gimple_assign_rhs2 (op_def), |
fecf3b39 | 5076 | code, e, bsi); |
5077 | } | |
eea7f7eb | 5078 | else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR |
5079 | && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1) | |
fecf3b39 | 5080 | { |
13f7fd91 | 5081 | /* Recurse, flipping CODE. */ |
5082 | code = invert_tree_comparison (code, false); | |
75a70cf9 | 5083 | retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), |
13f7fd91 | 5084 | code, e, bsi); |
fecf3b39 | 5085 | } |
75a70cf9 | 5086 | else if (gimple_assign_rhs_code (op_def) == SSA_NAME) |
fecf3b39 | 5087 | { |
13f7fd91 | 5088 | /* Recurse through the copy. */ |
75a70cf9 | 5089 | retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), |
5090 | code, e, bsi); | |
fecf3b39 | 5091 | } |
d9659041 | 5092 | else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def))) |
48e1416a | 5093 | { |
13f7fd91 | 5094 | /* Recurse through the type conversion. */ |
75a70cf9 | 5095 | retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), |
fecf3b39 | 5096 | code, e, bsi); |
5097 | } | |
eea12c72 | 5098 | |
fecf3b39 | 5099 | return retval; |
5100 | } | |
072e921b | 5101 | |
fecf3b39 | 5102 | /* Try to register an edge assertion for SSA name NAME on edge E for |
5103 | the condition COND contributing to the conditional jump pointed to by SI. | |
5104 | Return true if an assertion for NAME could be registered. */ | |
072e921b | 5105 | |
fecf3b39 | 5106 | static bool |
75a70cf9 | 5107 | register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si, |
a00913c7 | 5108 | enum tree_code cond_code, tree cond_op0, |
5109 | tree cond_op1) | |
fecf3b39 | 5110 | { |
5111 | tree val; | |
5112 | enum tree_code comp_code; | |
5113 | bool retval = false; | |
5114 | bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0; | |
5115 | ||
5116 | /* Do not attempt to infer anything in names that flow through | |
5117 | abnormal edges. */ | |
5118 | if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name)) | |
5119 | return false; | |
5120 | ||
a00913c7 | 5121 | if (!extract_code_and_val_from_cond_with_ops (name, cond_code, |
5122 | cond_op0, cond_op1, | |
5123 | is_else_edge, | |
5124 | &comp_code, &val)) | |
fecf3b39 | 5125 | return false; |
5126 | ||
bed8bec4 | 5127 | /* Register ASSERT_EXPRs for name. */ |
a00913c7 | 5128 | retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0, |
5129 | cond_op1, is_else_edge); | |
bed8bec4 | 5130 | |
fecf3b39 | 5131 | |
5132 | /* If COND is effectively an equality test of an SSA_NAME against | |
5133 | the value zero or one, then we may be able to assert values | |
5134 | for SSA_NAMEs which flow into COND. */ | |
5135 | ||
cfd7906e | 5136 | /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining |
5137 | statement of NAME we can assert both operands of the BIT_AND_EXPR | |
fa7637bd | 5138 | have nonzero value. */ |
fecf3b39 | 5139 | if (((comp_code == EQ_EXPR && integer_onep (val)) |
5140 | || (comp_code == NE_EXPR && integer_zerop (val)))) | |
5141 | { | |
75a70cf9 | 5142 | gimple def_stmt = SSA_NAME_DEF_STMT (name); |
fecf3b39 | 5143 | |
75a70cf9 | 5144 | if (is_gimple_assign (def_stmt) |
cfd7906e | 5145 | && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR) |
fecf3b39 | 5146 | { |
75a70cf9 | 5147 | tree op0 = gimple_assign_rhs1 (def_stmt); |
5148 | tree op1 = gimple_assign_rhs2 (def_stmt); | |
fecf3b39 | 5149 | retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si); |
5150 | retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si); | |
eea12c72 | 5151 | } |
5152 | } | |
fecf3b39 | 5153 | |
cfd7906e | 5154 | /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining |
5155 | statement of NAME we can assert both operands of the BIT_IOR_EXPR | |
fecf3b39 | 5156 | have zero value. */ |
5157 | if (((comp_code == EQ_EXPR && integer_zerop (val)) | |
5158 | || (comp_code == NE_EXPR && integer_onep (val)))) | |
eea12c72 | 5159 | { |
75a70cf9 | 5160 | gimple def_stmt = SSA_NAME_DEF_STMT (name); |
fecf3b39 | 5161 | |
cfd7906e | 5162 | /* For BIT_IOR_EXPR only if NAME == 0 both operands have |
5163 | necessarily zero value, or if type-precision is one. */ | |
75a70cf9 | 5164 | if (is_gimple_assign (def_stmt) |
cfd7906e | 5165 | && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR |
5166 | && (TYPE_PRECISION (TREE_TYPE (name)) == 1 | |
5167 | || comp_code == EQ_EXPR))) | |
fecf3b39 | 5168 | { |
75a70cf9 | 5169 | tree op0 = gimple_assign_rhs1 (def_stmt); |
5170 | tree op1 = gimple_assign_rhs2 (def_stmt); | |
fecf3b39 | 5171 | retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si); |
5172 | retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si); | |
5173 | } | |
eea12c72 | 5174 | } |
5175 | ||
fecf3b39 | 5176 | return retval; |
eea12c72 | 5177 | } |
5178 | ||
5179 | ||
eea12c72 | 5180 | /* Determine whether the outgoing edges of BB should receive an |
fecf3b39 | 5181 | ASSERT_EXPR for each of the operands of BB's LAST statement. |
f3d56fef | 5182 | The last statement of BB must be a COND_EXPR. |
eea12c72 | 5183 | |
5184 | If any of the sub-graphs rooted at BB have an interesting use of | |
5185 | the predicate operands, an assert location node is added to the | |
5186 | list of assertions for the corresponding operands. */ | |
5187 | ||
5188 | static bool | |
75a70cf9 | 5189 | find_conditional_asserts (basic_block bb, gimple last) |
eea12c72 | 5190 | { |
5191 | bool need_assert; | |
75a70cf9 | 5192 | gimple_stmt_iterator bsi; |
fecf3b39 | 5193 | tree op; |
eea12c72 | 5194 | edge_iterator ei; |
5195 | edge e; | |
5196 | ssa_op_iter iter; | |
5197 | ||
5198 | need_assert = false; | |
75a70cf9 | 5199 | bsi = gsi_for_stmt (last); |
eea12c72 | 5200 | |
5201 | /* Look for uses of the operands in each of the sub-graphs | |
5202 | rooted at BB. We need to check each of the outgoing edges | |
5203 | separately, so that we know what kind of ASSERT_EXPR to | |
5204 | insert. */ | |
5205 | FOR_EACH_EDGE (e, ei, bb->succs) | |
5206 | { | |
5207 | if (e->dest == bb) | |
5208 | continue; | |
5209 | ||
eea12c72 | 5210 | /* Register the necessary assertions for each operand in the |
5211 | conditional predicate. */ | |
5212 | FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE) | |
a00913c7 | 5213 | { |
75a70cf9 | 5214 | need_assert |= register_edge_assert_for (op, e, bsi, |
5215 | gimple_cond_code (last), | |
5216 | gimple_cond_lhs (last), | |
5217 | gimple_cond_rhs (last)); | |
a00913c7 | 5218 | } |
eea12c72 | 5219 | } |
5220 | ||
eea12c72 | 5221 | return need_assert; |
5222 | } | |
5223 | ||
8b318348 | 5224 | struct case_info |
5225 | { | |
5226 | tree expr; | |
5227 | basic_block bb; | |
5228 | }; | |
5229 | ||
5230 | /* Compare two case labels sorting first by the destination bb index | |
f3d56fef | 5231 | and then by the case value. */ |
5232 | ||
5233 | static int | |
5234 | compare_case_labels (const void *p1, const void *p2) | |
5235 | { | |
8b318348 | 5236 | const struct case_info *ci1 = (const struct case_info *) p1; |
5237 | const struct case_info *ci2 = (const struct case_info *) p2; | |
5238 | int idx1 = ci1->bb->index; | |
5239 | int idx2 = ci2->bb->index; | |
f3d56fef | 5240 | |
8b318348 | 5241 | if (idx1 < idx2) |
f3d56fef | 5242 | return -1; |
8b318348 | 5243 | else if (idx1 == idx2) |
f3d56fef | 5244 | { |
5245 | /* Make sure the default label is first in a group. */ | |
8b318348 | 5246 | if (!CASE_LOW (ci1->expr)) |
f3d56fef | 5247 | return -1; |
8b318348 | 5248 | else if (!CASE_LOW (ci2->expr)) |
f3d56fef | 5249 | return 1; |
5250 | else | |
8b318348 | 5251 | return tree_int_cst_compare (CASE_LOW (ci1->expr), |
5252 | CASE_LOW (ci2->expr)); | |
f3d56fef | 5253 | } |
5254 | else | |
5255 | return 1; | |
5256 | } | |
5257 | ||
5258 | /* Determine whether the outgoing edges of BB should receive an | |
5259 | ASSERT_EXPR for each of the operands of BB's LAST statement. | |
5260 | The last statement of BB must be a SWITCH_EXPR. | |
5261 | ||
5262 | If any of the sub-graphs rooted at BB have an interesting use of | |
5263 | the predicate operands, an assert location node is added to the | |
5264 | list of assertions for the corresponding operands. */ | |
5265 | ||
5266 | static bool | |
75a70cf9 | 5267 | find_switch_asserts (basic_block bb, gimple last) |
f3d56fef | 5268 | { |
5269 | bool need_assert; | |
75a70cf9 | 5270 | gimple_stmt_iterator bsi; |
a00913c7 | 5271 | tree op; |
f3d56fef | 5272 | edge e; |
8b318348 | 5273 | struct case_info *ci; |
5274 | size_t n = gimple_switch_num_labels (last); | |
1c7857f5 | 5275 | #if GCC_VERSION >= 4000 |
f3d56fef | 5276 | unsigned int idx; |
1c7857f5 | 5277 | #else |
5278 | /* Work around GCC 3.4 bug (PR 37086). */ | |
5279 | volatile unsigned int idx; | |
5280 | #endif | |
f3d56fef | 5281 | |
5282 | need_assert = false; | |
75a70cf9 | 5283 | bsi = gsi_for_stmt (last); |
5284 | op = gimple_switch_index (last); | |
f3d56fef | 5285 | if (TREE_CODE (op) != SSA_NAME) |
5286 | return false; | |
5287 | ||
5288 | /* Build a vector of case labels sorted by destination label. */ | |
8b318348 | 5289 | ci = XNEWVEC (struct case_info, n); |
f3d56fef | 5290 | for (idx = 0; idx < n; ++idx) |
8b318348 | 5291 | { |
5292 | ci[idx].expr = gimple_switch_label (last, idx); | |
5293 | ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr)); | |
5294 | } | |
5295 | qsort (ci, n, sizeof (struct case_info), compare_case_labels); | |
f3d56fef | 5296 | |
5297 | for (idx = 0; idx < n; ++idx) | |
5298 | { | |
5299 | tree min, max; | |
8b318348 | 5300 | tree cl = ci[idx].expr; |
5301 | basic_block cbb = ci[idx].bb; | |
f3d56fef | 5302 | |
5303 | min = CASE_LOW (cl); | |
5304 | max = CASE_HIGH (cl); | |
5305 | ||
5306 | /* If there are multiple case labels with the same destination | |
5307 | we need to combine them to a single value range for the edge. */ | |
8b318348 | 5308 | if (idx + 1 < n && cbb == ci[idx + 1].bb) |
f3d56fef | 5309 | { |
5310 | /* Skip labels until the last of the group. */ | |
5311 | do { | |
5312 | ++idx; | |
8b318348 | 5313 | } while (idx < n && cbb == ci[idx].bb); |
f3d56fef | 5314 | --idx; |
5315 | ||
5316 | /* Pick up the maximum of the case label range. */ | |
8b318348 | 5317 | if (CASE_HIGH (ci[idx].expr)) |
5318 | max = CASE_HIGH (ci[idx].expr); | |
f3d56fef | 5319 | else |
8b318348 | 5320 | max = CASE_LOW (ci[idx].expr); |
f3d56fef | 5321 | } |
5322 | ||
5323 | /* Nothing to do if the range includes the default label until we | |
5324 | can register anti-ranges. */ | |
5325 | if (min == NULL_TREE) | |
5326 | continue; | |
5327 | ||
5328 | /* Find the edge to register the assert expr on. */ | |
8b318348 | 5329 | e = find_edge (bb, cbb); |
f3d56fef | 5330 | |
f3d56fef | 5331 | /* Register the necessary assertions for the operand in the |
5332 | SWITCH_EXPR. */ | |
a00913c7 | 5333 | need_assert |= register_edge_assert_for (op, e, bsi, |
5334 | max ? GE_EXPR : EQ_EXPR, | |
5335 | op, | |
5336 | fold_convert (TREE_TYPE (op), | |
5337 | min)); | |
f3d56fef | 5338 | if (max) |
5339 | { | |
a00913c7 | 5340 | need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR, |
5341 | op, | |
5342 | fold_convert (TREE_TYPE (op), | |
5343 | max)); | |
f3d56fef | 5344 | } |
5345 | } | |
5346 | ||
8b318348 | 5347 | XDELETEVEC (ci); |
f3d56fef | 5348 | return need_assert; |
5349 | } | |
5350 | ||
eea12c72 | 5351 | |
5352 | /* Traverse all the statements in block BB looking for statements that | |
5353 | may generate useful assertions for the SSA names in their operand. | |
5354 | If a statement produces a useful assertion A for name N_i, then the | |
5355 | list of assertions already generated for N_i is scanned to | |
5356 | determine if A is actually needed. | |
48e1416a | 5357 | |
eea12c72 | 5358 | If N_i already had the assertion A at a location dominating the |
5359 | current location, then nothing needs to be done. Otherwise, the | |
5360 | new location for A is recorded instead. | |
5361 | ||
5362 | 1- For every statement S in BB, all the variables used by S are | |
5363 | added to bitmap FOUND_IN_SUBGRAPH. | |
5364 | ||
5365 | 2- If statement S uses an operand N in a way that exposes a known | |
5366 | value range for N, then if N was not already generated by an | |
5367 | ASSERT_EXPR, create a new assert location for N. For instance, | |
5368 | if N is a pointer and the statement dereferences it, we can | |
5369 | assume that N is not NULL. | |
5370 | ||
5371 | 3- COND_EXPRs are a special case of #2. We can derive range | |
5372 | information from the predicate but need to insert different | |
5373 | ASSERT_EXPRs for each of the sub-graphs rooted at the | |
5374 | conditional block. If the last statement of BB is a conditional | |
5375 | expression of the form 'X op Y', then | |
5376 | ||
5377 | a) Remove X and Y from the set FOUND_IN_SUBGRAPH. | |
5378 | ||
5379 | b) If the conditional is the only entry point to the sub-graph | |
5380 | corresponding to the THEN_CLAUSE, recurse into it. On | |
5381 | return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then | |
5382 | an ASSERT_EXPR is added for the corresponding variable. | |
5383 | ||
5384 | c) Repeat step (b) on the ELSE_CLAUSE. | |
5385 | ||
5386 | d) Mark X and Y in FOUND_IN_SUBGRAPH. | |
5387 | ||
5388 | For instance, | |
5389 | ||
5390 | if (a == 9) | |
5391 | b = a; | |
5392 | else | |
5393 | b = c + 1; | |
5394 | ||
5395 | In this case, an assertion on the THEN clause is useful to | |
5396 | determine that 'a' is always 9 on that edge. However, an assertion | |
5397 | on the ELSE clause would be unnecessary. | |
5398 | ||
5399 | 4- If BB does not end in a conditional expression, then we recurse | |
5400 | into BB's dominator children. | |
48e1416a | 5401 | |
eea12c72 | 5402 | At the end of the recursive traversal, every SSA name will have a |
5403 | list of locations where ASSERT_EXPRs should be added. When a new | |
5404 | location for name N is found, it is registered by calling | |
5405 | register_new_assert_for. That function keeps track of all the | |
5406 | registered assertions to prevent adding unnecessary assertions. | |
5407 | For instance, if a pointer P_4 is dereferenced more than once in a | |
5408 | dominator tree, only the location dominating all the dereference of | |
5409 | P_4 will receive an ASSERT_EXPR. | |
5410 | ||
5411 | If this function returns true, then it means that there are names | |
5412 | for which we need to generate ASSERT_EXPRs. Those assertions are | |
f3d56fef | 5413 | inserted by process_assert_insertions. */ |
eea12c72 | 5414 | |
5415 | static bool | |
17ed8337 | 5416 | find_assert_locations_1 (basic_block bb, sbitmap live) |
eea12c72 | 5417 | { |
75a70cf9 | 5418 | gimple_stmt_iterator si; |
5419 | gimple last; | |
5420 | gimple phi; | |
eea12c72 | 5421 | bool need_assert; |
eea12c72 | 5422 | |
5423 | need_assert = false; | |
17ed8337 | 5424 | last = last_stmt (bb); |
eea12c72 | 5425 | |
17ed8337 | 5426 | /* If BB's last statement is a conditional statement involving integer |
5427 | operands, determine if we need to add ASSERT_EXPRs. */ | |
5428 | if (last | |
5429 | && gimple_code (last) == GIMPLE_COND | |
5430 | && !fp_predicate (last) | |
5431 | && !ZERO_SSA_OPERANDS (last, SSA_OP_USE)) | |
5432 | need_assert |= find_conditional_asserts (bb, last); | |
eea12c72 | 5433 | |
17ed8337 | 5434 | /* If BB's last statement is a switch statement involving integer |
5435 | operands, determine if we need to add ASSERT_EXPRs. */ | |
5436 | if (last | |
5437 | && gimple_code (last) == GIMPLE_SWITCH | |
5438 | && !ZERO_SSA_OPERANDS (last, SSA_OP_USE)) | |
5439 | need_assert |= find_switch_asserts (bb, last); | |
eea12c72 | 5440 | |
5441 | /* Traverse all the statements in BB marking used names and looking | |
5442 | for statements that may infer assertions for their used operands. */ | |
75a70cf9 | 5443 | for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) |
eea12c72 | 5444 | { |
75a70cf9 | 5445 | gimple stmt; |
5446 | tree op; | |
eea12c72 | 5447 | ssa_op_iter i; |
5448 | ||
75a70cf9 | 5449 | stmt = gsi_stmt (si); |
eea12c72 | 5450 | |
9845d120 | 5451 | if (is_gimple_debug (stmt)) |
5452 | continue; | |
5453 | ||
eea12c72 | 5454 | /* See if we can derive an assertion for any of STMT's operands. */ |
5455 | FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE) | |
5456 | { | |
5457 | tree value; | |
5458 | enum tree_code comp_code; | |
5459 | ||
17ed8337 | 5460 | /* Mark OP in our live bitmap. */ |
5461 | SET_BIT (live, SSA_NAME_VERSION (op)); | |
eea12c72 | 5462 | |
eea12c72 | 5463 | /* If OP is used in such a way that we can infer a value |
5464 | range for it, and we don't find a previous assertion for | |
5465 | it, create a new assertion location node for OP. */ | |
5466 | if (infer_value_range (stmt, op, &comp_code, &value)) | |
5467 | { | |
9ca2c29a | 5468 | /* If we are able to infer a nonzero value range for OP, |
581f1885 | 5469 | then walk backwards through the use-def chain to see if OP |
5470 | was set via a typecast. | |
5471 | ||
5472 | If so, then we can also infer a nonzero value range | |
5473 | for the operand of the NOP_EXPR. */ | |
5474 | if (comp_code == NE_EXPR && integer_zerop (value)) | |
5475 | { | |
5476 | tree t = op; | |
75a70cf9 | 5477 | gimple def_stmt = SSA_NAME_DEF_STMT (t); |
48e1416a | 5478 | |
75a70cf9 | 5479 | while (is_gimple_assign (def_stmt) |
5480 | && gimple_assign_rhs_code (def_stmt) == NOP_EXPR | |
35cc02b5 | 5481 | && TREE_CODE |
75a70cf9 | 5482 | (gimple_assign_rhs1 (def_stmt)) == SSA_NAME |
35cc02b5 | 5483 | && POINTER_TYPE_P |
75a70cf9 | 5484 | (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))) |
581f1885 | 5485 | { |
75a70cf9 | 5486 | t = gimple_assign_rhs1 (def_stmt); |
581f1885 | 5487 | def_stmt = SSA_NAME_DEF_STMT (t); |
5488 | ||
5489 | /* Note we want to register the assert for the | |
5490 | operand of the NOP_EXPR after SI, not after the | |
5491 | conversion. */ | |
5492 | if (! has_single_use (t)) | |
5493 | { | |
bed8bec4 | 5494 | register_new_assert_for (t, t, comp_code, value, |
581f1885 | 5495 | bb, NULL, si); |
5496 | need_assert = true; | |
5497 | } | |
5498 | } | |
5499 | } | |
5500 | ||
5501 | /* If OP is used only once, namely in this STMT, don't | |
5502 | bother creating an ASSERT_EXPR for it. Such an | |
5503 | ASSERT_EXPR would do nothing but increase compile time. */ | |
5504 | if (!has_single_use (op)) | |
5505 | { | |
bed8bec4 | 5506 | register_new_assert_for (op, op, comp_code, value, |
5507 | bb, NULL, si); | |
581f1885 | 5508 | need_assert = true; |
5509 | } | |
88dbf20f | 5510 | } |
5511 | } | |
88dbf20f | 5512 | } |
5513 | ||
17ed8337 | 5514 | /* Traverse all PHI nodes in BB marking used operands. */ |
5515 | for (si = gsi_start_phis (bb); !gsi_end_p(si); gsi_next (&si)) | |
5516 | { | |
5517 | use_operand_p arg_p; | |
5518 | ssa_op_iter i; | |
5519 | phi = gsi_stmt (si); | |
f3d56fef | 5520 | |
17ed8337 | 5521 | FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE) |
5522 | { | |
5523 | tree arg = USE_FROM_PTR (arg_p); | |
5524 | if (TREE_CODE (arg) == SSA_NAME) | |
5525 | SET_BIT (live, SSA_NAME_VERSION (arg)); | |
5526 | } | |
5527 | } | |
eea12c72 | 5528 | |
5529 | return need_assert; | |
5530 | } | |
5531 | ||
17ed8337 | 5532 | /* Do an RPO walk over the function computing SSA name liveness |
5533 | on-the-fly and deciding on assert expressions to insert. | |
5534 | Returns true if there are assert expressions to be inserted. */ | |
5535 | ||
5536 | static bool | |
5537 | find_assert_locations (void) | |
5538 | { | |
ed7e2206 | 5539 | int *rpo = XNEWVEC (int, last_basic_block); |
5540 | int *bb_rpo = XNEWVEC (int, last_basic_block); | |
5541 | int *last_rpo = XCNEWVEC (int, last_basic_block); | |
17ed8337 | 5542 | int rpo_cnt, i; |
5543 | bool need_asserts; | |
5544 | ||
ed7e2206 | 5545 | live = XCNEWVEC (sbitmap, last_basic_block); |
17ed8337 | 5546 | rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false); |
5547 | for (i = 0; i < rpo_cnt; ++i) | |
5548 | bb_rpo[rpo[i]] = i; | |
5549 | ||
5550 | need_asserts = false; | |
ed7e2206 | 5551 | for (i = rpo_cnt - 1; i >= 0; --i) |
17ed8337 | 5552 | { |
5553 | basic_block bb = BASIC_BLOCK (rpo[i]); | |
5554 | edge e; | |
5555 | edge_iterator ei; | |
5556 | ||
5557 | if (!live[rpo[i]]) | |
5558 | { | |
5559 | live[rpo[i]] = sbitmap_alloc (num_ssa_names); | |
5560 | sbitmap_zero (live[rpo[i]]); | |
5561 | } | |
5562 | ||
5563 | /* Process BB and update the live information with uses in | |
5564 | this block. */ | |
5565 | need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]); | |
5566 | ||
5567 | /* Merge liveness into the predecessor blocks and free it. */ | |
5568 | if (!sbitmap_empty_p (live[rpo[i]])) | |
5569 | { | |
5570 | int pred_rpo = i; | |
5571 | FOR_EACH_EDGE (e, ei, bb->preds) | |
5572 | { | |
5573 | int pred = e->src->index; | |
706567b8 | 5574 | if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK) |
17ed8337 | 5575 | continue; |
5576 | ||
5577 | if (!live[pred]) | |
5578 | { | |
5579 | live[pred] = sbitmap_alloc (num_ssa_names); | |
5580 | sbitmap_zero (live[pred]); | |
5581 | } | |
5582 | sbitmap_a_or_b (live[pred], live[pred], live[rpo[i]]); | |
5583 | ||
5584 | if (bb_rpo[pred] < pred_rpo) | |
5585 | pred_rpo = bb_rpo[pred]; | |
5586 | } | |
5587 | ||
5588 | /* Record the RPO number of the last visited block that needs | |
5589 | live information from this block. */ | |
5590 | last_rpo[rpo[i]] = pred_rpo; | |
5591 | } | |
5592 | else | |
5593 | { | |
5594 | sbitmap_free (live[rpo[i]]); | |
5595 | live[rpo[i]] = NULL; | |
5596 | } | |
5597 | ||
5598 | /* We can free all successors live bitmaps if all their | |
5599 | predecessors have been visited already. */ | |
5600 | FOR_EACH_EDGE (e, ei, bb->succs) | |
5601 | if (last_rpo[e->dest->index] == i | |
5602 | && live[e->dest->index]) | |
5603 | { | |
5604 | sbitmap_free (live[e->dest->index]); | |
5605 | live[e->dest->index] = NULL; | |
5606 | } | |
5607 | } | |
5608 | ||
5609 | XDELETEVEC (rpo); | |
5610 | XDELETEVEC (bb_rpo); | |
5611 | XDELETEVEC (last_rpo); | |
ed7e2206 | 5612 | for (i = 0; i < last_basic_block; ++i) |
17ed8337 | 5613 | if (live[i]) |
5614 | sbitmap_free (live[i]); | |
5615 | XDELETEVEC (live); | |
5616 | ||
5617 | return need_asserts; | |
5618 | } | |
eea12c72 | 5619 | |
/* Create an ASSERT_EXPR for NAME and insert it in the location
   indicated by LOC.  Return true if we made any edge insertions
   (i.e. the caller must eventually call gsi_commit_edge_inserts),
   false if the assertion was inserted directly into a block.  */

static bool
process_assert_insertions_for (tree name, assert_locus_t loc)
{
  /* Build the comparison expression NAME_i COMP_CODE VAL.  */
  gimple stmt;
  tree cond;
  gimple assert_stmt;
  edge_iterator ei;
  edge e;

  /* If we have X <=> X do not insert an assert expr for that.
     Comparing an SSA name against itself carries no range
     information.  */
  if (loc->expr == loc->val)
    return false;

  cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
  assert_stmt = build_assert_expr_for (cond, name);
  if (loc->e)
    {
      /* We have been asked to insert the assertion on an edge.  This
	 is used only by COND_EXPR and SWITCH_EXPR assertions.  */
      gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
			   || (gimple_code (gsi_stmt (loc->si))
			       == GIMPLE_SWITCH));

      gsi_insert_on_edge (loc->e, assert_stmt);
      return true;
    }

  /* Otherwise, we can insert right after LOC->SI iff the
     statement must not be the last statement in the block.  */
  stmt = gsi_stmt (loc->si);
  if (!stmt_ends_bb_p (stmt))
    {
      gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
      return false;
    }

  /* If STMT must be the last statement in BB, we can only insert new
     assertions on the non-abnormal edge out of BB.  Note that since
     STMT is not control flow, there may only be one non-abnormal edge
     out of BB.  */
  FOR_EACH_EDGE (e, ei, loc->bb->succs)
    if (!(e->flags & EDGE_ABNORMAL))
      {
	gsi_insert_on_edge (e, assert_stmt);
	return true;
      }

  /* A block-ending, non-control statement must have at least one
     non-abnormal successor; reaching here means the CFG is broken.  */
  gcc_unreachable ();
}
88dbf20f | 5673 | |
88dbf20f | 5674 | |
eea12c72 | 5675 | /* Process all the insertions registered for every name N_i registered |
5676 | in NEED_ASSERT_FOR. The list of assertions to be inserted are | |
5677 | found in ASSERTS_FOR[i]. */ | |
88dbf20f | 5678 | |
eea12c72 | 5679 | static void |
5680 | process_assert_insertions (void) | |
5681 | { | |
5682 | unsigned i; | |
5683 | bitmap_iterator bi; | |
5684 | bool update_edges_p = false; | |
5685 | int num_asserts = 0; | |
88dbf20f | 5686 | |
eea12c72 | 5687 | if (dump_file && (dump_flags & TDF_DETAILS)) |
5688 | dump_all_asserts (dump_file); | |
4efa33ae | 5689 | |
eea12c72 | 5690 | EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi) |
5691 | { | |
5692 | assert_locus_t loc = asserts_for[i]; | |
5693 | gcc_assert (loc); | |
5694 | ||
5695 | while (loc) | |
4efa33ae | 5696 | { |
eea12c72 | 5697 | assert_locus_t next = loc->next; |
5698 | update_edges_p |= process_assert_insertions_for (ssa_name (i), loc); | |
5699 | free (loc); | |
5700 | loc = next; | |
5701 | num_asserts++; | |
4efa33ae | 5702 | } |
88dbf20f | 5703 | } |
88dbf20f | 5704 | |
eea12c72 | 5705 | if (update_edges_p) |
75a70cf9 | 5706 | gsi_commit_edge_inserts (); |
88dbf20f | 5707 | |
581f8050 | 5708 | statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted", |
5709 | num_asserts); | |
88dbf20f | 5710 | } |
5711 | ||
5712 | ||
/* Traverse the flowgraph looking for conditional jumps to insert range
   expressions.  These range expressions are meant to provide information
   to optimizations that need to reason in terms of value ranges.  They
   will not be expanded into RTL.  For instance, given:

   x = ...
   y = ...
   if (x < y)
     y = x - 2;
   else
     x = y + 3;

   this pass will transform the code into:

   x = ...
   y = ...
   if (x < y)
    {
      x = ASSERT_EXPR <x, x < y>
      y = x - 2
    }
   else
    {
      y = ASSERT_EXPR <y, x <= y>
      x = y + 3
    }

   The idea is that once copy and constant propagation have run, other
   optimizations will be able to determine what ranges of values can 'x'
   take in different paths of the code, simply by checking the reaching
   definition of 'x'.  */

static void
insert_range_assertions (void)
{
  /* Global side tables consumed by find_assert_locations and
     process_assert_insertions, released at the end of this pass.  */
  need_assert_for = BITMAP_ALLOC (NULL);
  asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);

  calculate_dominance_info (CDI_DOMINATORS);

  if (find_assert_locations ())
    {
      process_assert_insertions ();
      /* Inserting ASSERT_EXPRs created new definitions; rewrite the
	 affected names into SSA form without adding new PHI nodes.  */
      update_ssa (TODO_update_ssa_no_phi);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
      dump_function_to_file (current_function_decl, dump_file, dump_flags);
    }

  free (asserts_for);
  BITMAP_FREE (need_assert_for);
}
5768 | ||
/* Checks one ARRAY_REF in REF, located at LOCATION.  Ignores flexible arrays
   and "struct" hacks.  If VRP can determine that the
   array subscript is a constant, check if it is outside valid
   range.  If the array subscript is a RANGE, warn if it is
   non-overlapping with valid range.
   IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR
   (taking the address of one-past-the-end is valid C).  */

static void
check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
{
  value_range_t* vr = NULL;
  tree low_sub, up_sub;
  tree low_bound, up_bound, up_bound_p1;
  tree base;

  /* Warn about each tree node at most once.  */
  if (TREE_NO_WARNING (ref))
    return;

  low_sub = up_sub = TREE_OPERAND (ref, 1);
  up_bound = array_ref_up_bound (ref);

  /* Can not check flexible arrays.  */
  if (!up_bound
      || TREE_CODE (up_bound) != INTEGER_CST)
    return;

  /* Accesses to trailing arrays via pointers may access storage
     beyond the types array bounds.  */
  base = get_base_address (ref);
  if (base && TREE_CODE (base) == MEM_REF)
    {
      tree cref, next = NULL_TREE;

      if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
	return;

      /* Walk the field chain to see whether any FIELD_DECL follows
	 the accessed member; if none does, the array is trailing.  */
      cref = TREE_OPERAND (ref, 0);
      if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
	for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
	     next && TREE_CODE (next) != FIELD_DECL;
	     next = DECL_CHAIN (next))
	  ;

      /* If this is the last field in a struct type or a field in a
	 union type do not warn.  */
      if (!next)
	return;
    }

  low_bound = array_ref_low_bound (ref);
  up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node);

  if (TREE_CODE (low_sub) == SSA_NAME)
    {
      vr = get_value_range (low_sub);
      if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
        {
	  /* For a VR_RANGE the min/max are deliberately swapped:
	     LOW_SUB gets the range maximum and UP_SUB the minimum, so
	     the checks below only fire when EVERY value in the range
	     is out of bounds (no false positives on partial overlap).  */
	  low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
	  up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
        }
    }

  if (vr && vr->type == VR_ANTI_RANGE)
    {
      /* An anti-range ~[min, max] is fully out of bounds only when
	 the excluded interval covers the whole valid index range.  */
      if (TREE_CODE (up_sub) == INTEGER_CST
          && tree_int_cst_lt (up_bound, up_sub)
          && TREE_CODE (low_sub) == INTEGER_CST
          && tree_int_cst_lt (low_sub, low_bound))
	{
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is outside array bounds");
	  TREE_NO_WARNING (ref) = 1;
	}
    }
  else if (TREE_CODE (up_sub) == INTEGER_CST
	   && (ignore_off_by_one
	       /* Inside an ADDR_EXPR, the one-past-the-end index
		  (UP_BOUND + 1) is allowed; only warn beyond it.  */
	       ? (tree_int_cst_lt (up_bound, up_sub)
		  && !tree_int_cst_equal (up_bound_p1, up_sub))
	       : (tree_int_cst_lt (up_bound, up_sub)
		  || tree_int_cst_equal (up_bound_p1, up_sub))))
    {
      warning_at (location, OPT_Warray_bounds,
		  "array subscript is above array bounds");
      TREE_NO_WARNING (ref) = 1;
    }
  else if (TREE_CODE (low_sub) == INTEGER_CST
           && tree_int_cst_lt (low_sub, low_bound))
    {
      warning_at (location, OPT_Warray_bounds,
		  "array subscript is below array bounds");
      TREE_NO_WARNING (ref) = 1;
    }
}
5862 | ||
/* Searches if the expr T, located at LOCATION computes
   address of an ARRAY_REF, and call check_array_ref on it.  */

static void
search_for_addr_array (tree t, location_t location)
{
  /* Chase single-rhs SSA copies back to the expression that actually
     computed the address; bail out on anything that is not a plain
     copy since we could not prove T equals it.  */
  while (TREE_CODE (t) == SSA_NAME)
    {
      gimple g = SSA_NAME_DEF_STMT (t);

      if (gimple_code (g) != GIMPLE_ASSIGN)
	return;

      if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
	  != GIMPLE_SINGLE_RHS)
	return;

      t = gimple_assign_rhs1 (g);
    }


  /* We are only interested in addresses of ARRAY_REF's.  */
  if (TREE_CODE (t) != ADDR_EXPR)
    return;

  /* Check each ARRAY_REFs in the reference chain. */
  do
    {
      if (TREE_CODE (t) == ARRAY_REF)
	/* Address-taking allows the one-past-the-end index.  */
	check_array_ref (location, t, true /*ignore_off_by_one*/);

      t = TREE_OPERAND (t, 0);
    }
  while (handled_component_p (t));

  /* Additionally check a MEM_REF with a constant offset off the
     address of a one-dimensional array: translate the byte offset
     into an element index and compare against the array domain.  */
  if (TREE_CODE (t) == MEM_REF
      && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
      && !TREE_NO_WARNING (t))
    {
      tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
      tree low_bound, up_bound, el_sz;
      double_int idx;
      if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
	  || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
	  || !TYPE_DOMAIN (TREE_TYPE (tem)))
	return;

      low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
      up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
      el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
      if (!low_bound
	  || TREE_CODE (low_bound) != INTEGER_CST
	  || !up_bound
	  || TREE_CODE (up_bound) != INTEGER_CST
	  || !el_sz
	  || TREE_CODE (el_sz) != INTEGER_CST)
	return;

      /* Byte offset -> element index (signed truncating division).  */
      idx = mem_ref_offset (t);
      idx = idx.sdiv (tree_to_double_int (el_sz), TRUNC_DIV_EXPR);
      if (idx.slt (double_int_zero))
	{
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is below array bounds");
	  TREE_NO_WARNING (t) = 1;
	}
      /* NOTE(review): the comparison is against UP - LOW + 1, so the
	 one-past-the-end element is tolerated here as well —
	 presumably intentional since this path comes from an
	 ADDR_EXPR; confirm against check_array_ref's handling.  */
      else if (idx.sgt (tree_to_double_int (up_bound)
			- tree_to_double_int (low_bound)
			+ double_int_one))
	{
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is above array bounds");
	  TREE_NO_WARNING (t) = 1;
	}
    }
}
5939 | ||
5bc96398 | 5940 | /* walk_tree() callback that checks if *TP is |
5941 | an ARRAY_REF inside an ADDR_EXPR (in which an array | |
5942 | subscript one outside the valid range is allowed). Call | |
48e1416a | 5943 | check_array_ref for each ARRAY_REF found. The location is |
5bc96398 | 5944 | passed in DATA. */ |
5945 | ||
5946 | static tree | |
5947 | check_array_bounds (tree *tp, int *walk_subtree, void *data) | |
5948 | { | |
5949 | tree t = *tp; | |
75a70cf9 | 5950 | struct walk_stmt_info *wi = (struct walk_stmt_info *) data; |
e60a6f7b | 5951 | location_t location; |
5952 | ||
5953 | if (EXPR_HAS_LOCATION (t)) | |
5954 | location = EXPR_LOCATION (t); | |
5955 | else | |
5956 | { | |
5957 | location_t *locp = (location_t *) wi->info; | |
5958 | location = *locp; | |
5959 | } | |
5d19e882 | 5960 | |
5bc96398 | 5961 | *walk_subtree = TRUE; |
5962 | ||
5963 | if (TREE_CODE (t) == ARRAY_REF) | |
e60a6f7b | 5964 | check_array_ref (location, t, false /*ignore_off_by_one*/); |
533a9fbc | 5965 | |
182cf5a9 | 5966 | if (TREE_CODE (t) == MEM_REF |
7988a017 | 5967 | || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0))) |
e60a6f7b | 5968 | search_for_addr_array (TREE_OPERAND (t, 0), location); |
5bc96398 | 5969 | |
7988a017 | 5970 | if (TREE_CODE (t) == ADDR_EXPR) |
5971 | *walk_subtree = FALSE; | |
5972 | ||
5bc96398 | 5973 | return NULL_TREE; |
5974 | } | |
5975 | ||
/* Walk over all statements of all reachable BBs and call check_array_bounds
   on them.  */

static void
check_all_array_refs (void)
{
  basic_block bb;
  gimple_stmt_iterator si;

  FOR_EACH_BB (bb)
    {
      edge_iterator ei;
      edge e;
      bool executable = false;

      /* Skip blocks that were found to be unreachable: a block is
	 reachable iff at least one incoming edge is executable.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
	executable |= !!(e->flags & EDGE_EXECUTABLE);
      if (!executable)
	continue;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple stmt = gsi_stmt (si);
	  struct walk_stmt_info wi;
	  /* Without a location we could not place the warning.  */
	  if (!gimple_has_location (stmt))
	    continue;

	  if (is_gimple_call (stmt))
	    {
	      /* For calls, only address arguments can carry array
		 references of interest.  */
	      size_t i;
	      size_t n = gimple_call_num_args (stmt);
	      for (i = 0; i < n; i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  search_for_addr_array (arg, gimple_location (stmt));
		}
	    }
	  else
	    {
	      /* Pass the statement location through WI.INFO so the
		 callback can use it for expressions lacking one.  */
	      memset (&wi, 0, sizeof (wi));
	      wi.info = CONST_CAST (void *, (const void *)
				    gimple_location_ptr (stmt));

	      walk_gimple_op (gsi_stmt (si),
			      check_array_bounds,
			      &wi);
	    }
	}
    }
}
88dbf20f | 6027 | |
/* Convert range assertion expressions into the implied copies and
   copy propagate away the copies.  Doing the trivial copy propagation
   here avoids the need to run the full copy propagation pass after
   VRP.

   FIXME, this will eventually lead to copy propagation removing the
   names that had useful range information attached to them.  For
   instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
   then N_i will have the range [3, +INF].

   However, by converting the assertion into the implied copy
   operation N_i = N_j, we will then copy-propagate N_j into the uses
   of N_i and lose the range information.  We may want to hold on to
   ASSERT_EXPRs a little while longer as the ranges could be used in
   things like jump threading.

   The problem with keeping ASSERT_EXPRs around is that passes after
   VRP need to handle them appropriately.

   Another approach would be to make the range information a first
   class property of the SSA_NAME so that it can be queried from
   any pass.  This is made somewhat more complex by the need for
   multiple ranges to be associated with one SSA_NAME.  */

static void
remove_range_assertions (void)
{
  basic_block bb;
  gimple_stmt_iterator si;

  /* Note that the BSI iterator bump happens at the bottom of the
     loop and no bump is necessary if we're removing the statement
     referenced by the current BSI: gsi_remove already advances SI.  */
  FOR_EACH_BB (bb)
    for (si = gsi_start_bb (bb); !gsi_end_p (si);)
      {
	gimple stmt = gsi_stmt (si);
	gimple use_stmt;

	if (is_gimple_assign (stmt)
	    && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
	  {
	    tree rhs = gimple_assign_rhs1 (stmt);
	    tree var;
	    tree cond = fold (ASSERT_EXPR_COND (rhs));
	    use_operand_p use_p;
	    imm_use_iterator iter;

	    /* A provably-false assertion would mean VRP inserted an
	       assertion on an unreachable path.  */
	    gcc_assert (cond != boolean_false_node);

	    /* Propagate the RHS into every use of the LHS.  */
	    var = ASSERT_EXPR_VAR (rhs);
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter,
				   gimple_assign_lhs (stmt))
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		{
		  SET_USE (use_p, var);
		  gcc_assert (TREE_CODE (var) == SSA_NAME);
		}

	    /* And finally, remove the copy, it is not needed.  */
	    gsi_remove (&si, true);
	    release_defs (stmt);
	  }
	else
	  gsi_next (&si);
      }
}
6096 | ||
6097 | ||
6098 | /* Return true if STMT is interesting for VRP. */ | |
6099 | ||
6100 | static bool | |
75a70cf9 | 6101 | stmt_interesting_for_vrp (gimple stmt) |
88dbf20f | 6102 | { |
7c782c9b | 6103 | if (gimple_code (stmt) == GIMPLE_PHI) |
6104 | { | |
6105 | tree res = gimple_phi_result (stmt); | |
6106 | return (!virtual_operand_p (res) | |
6107 | && (INTEGRAL_TYPE_P (TREE_TYPE (res)) | |
6108 | || POINTER_TYPE_P (TREE_TYPE (res)))); | |
6109 | } | |
75a70cf9 | 6110 | else if (is_gimple_assign (stmt) || is_gimple_call (stmt)) |
88dbf20f | 6111 | { |
75a70cf9 | 6112 | tree lhs = gimple_get_lhs (stmt); |
88dbf20f | 6113 | |
04dde933 | 6114 | /* In general, assignments with virtual operands are not useful |
6115 | for deriving ranges, with the obvious exception of calls to | |
6116 | builtin functions. */ | |
75a70cf9 | 6117 | if (lhs && TREE_CODE (lhs) == SSA_NAME |
88dbf20f | 6118 | && (INTEGRAL_TYPE_P (TREE_TYPE (lhs)) |
6119 | || POINTER_TYPE_P (TREE_TYPE (lhs))) | |
75a70cf9 | 6120 | && ((is_gimple_call (stmt) |
6121 | && gimple_call_fndecl (stmt) != NULL_TREE | |
446dbdd8 | 6122 | && DECL_BUILT_IN (gimple_call_fndecl (stmt))) |
dd277d48 | 6123 | || !gimple_vuse (stmt))) |
88dbf20f | 6124 | return true; |
6125 | } | |
75a70cf9 | 6126 | else if (gimple_code (stmt) == GIMPLE_COND |
6127 | || gimple_code (stmt) == GIMPLE_SWITCH) | |
88dbf20f | 6128 | return true; |
6129 | ||
6130 | return false; | |
6131 | } | |
6132 | ||
6133 | ||
/* Initialize local data structures for VRP: allocate the lattice of
   value ranges and the PHI edge counters, and mark which statements
   the propagator should simulate.  */

static void
vrp_initialize (void)
{
  basic_block bb;

  values_propagated = false;
  num_vr_values = num_ssa_names;
  vr_value = XCNEWVEC (value_range_t *, num_vr_values);
  vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator si;

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple phi = gsi_stmt (si);
	  if (!stmt_interesting_for_vrp (phi))
	    {
	      /* Uninteresting PHIs get VARYING immediately and are
		 never simulated again.  */
	      tree lhs = PHI_RESULT (phi);
	      set_value_range_to_varying (get_value_range (lhs));
	      prop_set_simulate_again (phi, false);
	    }
	  else
	    prop_set_simulate_again (phi, true);
	}

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple stmt = gsi_stmt (si);

 	  /* If the statement is a control insn, then we do not
 	     want to avoid simulating the statement once.  Failure
 	     to do so means that those edges will never get added.  */
	  if (stmt_ends_bb_p (stmt))
	    prop_set_simulate_again (stmt, true);
	  else if (!stmt_interesting_for_vrp (stmt))
	    {
	      /* Pessimize every definition of an uninteresting
		 statement to VARYING and skip it from then on.  */
	      ssa_op_iter i;
	      tree def;
	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
		set_value_range_to_varying (get_value_range (def));
	      prop_set_simulate_again (stmt, false);
	    }
	  else
	    prop_set_simulate_again (stmt, true);
	}
    }
}
6185 | ||
1d0b727d | 6186 | /* Return the singleton value-range for NAME or NAME. */ |
6187 | ||
6188 | static inline tree | |
6189 | vrp_valueize (tree name) | |
6190 | { | |
6191 | if (TREE_CODE (name) == SSA_NAME) | |
6192 | { | |
6193 | value_range_t *vr = get_value_range (name); | |
6194 | if (vr->type == VR_RANGE | |
6195 | && (vr->min == vr->max | |
6196 | || operand_equal_p (vr->min, vr->max, 0))) | |
6197 | return vr->min; | |
6198 | } | |
6199 | return name; | |
6200 | } | |
88dbf20f | 6201 | |
/* Visit assignment STMT.  If it produces an interesting range, record
   the SSA name in *OUTPUT_P.  Return the propagator verdict:
   SSA_PROP_INTERESTING when the lattice value changed to something
   useful, SSA_PROP_VARYING when it fell to VARYING, and
   SSA_PROP_NOT_INTERESTING otherwise.  */

static enum ssa_prop_result
vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
{
  tree def, lhs;
  ssa_op_iter iter;
  enum gimple_code code = gimple_code (stmt);
  lhs = gimple_get_lhs (stmt);

  /* We only keep track of ranges in integral and pointer types.  */
  if (TREE_CODE (lhs) == SSA_NAME
      && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	   /* It is valid to have NULL MIN/MAX values on a type.  See
	      build_range_type.  */
	   && TYPE_MIN_VALUE (TREE_TYPE (lhs))
	   && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
	  || POINTER_TYPE_P (TREE_TYPE (lhs))))
    {
      value_range_t new_vr = VR_INITIALIZER;

      /* Try folding the statement to a constant first.  */
      tree tem = gimple_fold_stmt_to_constant (stmt, vrp_valueize);
      if (tem && !is_overflow_infinity (tem))
	set_value_range_to (&new_vr, VR_RANGE, tem, tem, NULL);
      /* Then dispatch to value-range extracting functions.  */
      else if (code == GIMPLE_CALL)
	extract_range_basic (&new_vr, stmt);
      else
	extract_range_from_assignment (&new_vr, stmt);

      if (update_value_range (lhs, &new_vr))
	{
	  *output_p = lhs;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Found new range for ");
	      print_generic_expr (dump_file, lhs, 0);
	      fprintf (dump_file, ": ");
	      dump_value_range (dump_file, &new_vr);
	      fprintf (dump_file, "\n\n");
	    }

	  if (new_vr.type == VR_VARYING)
	    return SSA_PROP_VARYING;

	  return SSA_PROP_INTERESTING;
	}

      return SSA_PROP_NOT_INTERESTING;
    }

  /* Every other statement produces no useful ranges.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
    set_value_range_to_varying (get_value_range (def));

  return SSA_PROP_VARYING;
}
6262 | ||
fbcece5e | 6263 | /* Helper that gets the value range of the SSA_NAME with version I |
310d2511 | 6264 | or a symbolic range containing the SSA_NAME only if the value range |
fbcece5e | 6265 | is varying or undefined. */ |
6266 | ||
6267 | static inline value_range_t | |
6268 | get_vr_for_comparison (int i) | |
6269 | { | |
e0186710 | 6270 | value_range_t vr = *get_value_range (ssa_name (i)); |
fbcece5e | 6271 | |
6272 | /* If name N_i does not have a valid range, use N_i as its own | |
6273 | range. This allows us to compare against names that may | |
6274 | have N_i in their ranges. */ | |
6275 | if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED) | |
6276 | { | |
6277 | vr.type = VR_RANGE; | |
6278 | vr.min = ssa_name (i); | |
6279 | vr.max = ssa_name (i); | |
6280 | } | |
6281 | ||
6282 | return vr; | |
6283 | } | |
88dbf20f | 6284 | |
/* Compare all the value ranges for names equivalent to VAR with VAL
   using comparison code COMP.  Return the same value returned by
   compare_range_with_value, including the setting of
   *STRICT_OVERFLOW_P (set when the answer relied on assuming that
   signed overflow is undefined).  */

static tree
compare_name_with_value (enum tree_code comp, tree var, tree val,
			 bool *strict_overflow_p)
{
  bitmap_iterator bi;
  unsigned i;
  bitmap e;
  tree retval, t;
  int used_strict_overflow;
  bool sop;
  value_range_t equiv_vr;

  /* Get the set of equivalences for VAR.  */
  e = get_value_range (var)->equiv;

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Compare vars' value range with val.  */
  equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
  sop = false;
  retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
  if (retval)
    used_strict_overflow = sop ? 1 : 0;

  /* If the equiv set is empty we have done all work we need to do.  */
  if (e == NULL)
    {
      if (retval
	  && used_strict_overflow > 0)
	*strict_overflow_p = true;
      return retval;
    }

  /* Otherwise compare VAL against the range of every name in VAR's
     equivalence set; any member may settle or contradict the answer.  */
  EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
    {
      equiv_vr = get_vr_for_comparison (i);
      sop = false;
      t = compare_range_with_value (comp, &equiv_vr, val, &sop);
      if (t)
	{
	  /* If we get different answers from different members
	     of the equivalence set this check must be in a dead
	     code region.  Folding it to a trap representation
	     would be correct here.  For now just return don't-know.  */
	  if (retval != NULL
	      && t != retval)
	    {
	      retval = NULL_TREE;
	      break;
	    }
	  retval = t;

	  /* Track whether ANY comparison succeeded without relying
	     on overflow; only if all of them relied on it do we
	     report strict overflow.  */
	  if (!sop)
	    used_strict_overflow = 0;
	  else if (used_strict_overflow < 0)
	    used_strict_overflow = 1;
	}
    }

  if (retval
      && used_strict_overflow > 0)
    *strict_overflow_p = true;

  return retval;
}
6357 | ||
6358 | ||
/* Given a comparison code COMP and names N1 and N2, compare all the
   ranges equivalent to N1 against all the ranges equivalent to N2
   to determine the value of N1 COMP N2.  Return the same value
   returned by compare_ranges.  Set *STRICT_OVERFLOW_P to indicate
   whether we relied on an overflow infinity in the comparison.  */


static tree
compare_names (enum tree_code comp, tree n1, tree n2,
	       bool *strict_overflow_p)
{
  tree t, retval;
  bitmap e1, e2;
  bitmap_iterator bi1, bi2;
  unsigned i1, i2;
  int used_strict_overflow;
  /* Lazily-created scratch bitmaps, reused across calls, for names
     that have no equivalence set of their own.  */
  static bitmap_obstack *s_obstack = NULL;
  static bitmap s_e1 = NULL, s_e2 = NULL;

  /* Compare the ranges of every name equivalent to N1 against the
     ranges of every name equivalent to N2.  */
  e1 = get_value_range (n1)->equiv;
  e2 = get_value_range (n2)->equiv;

  /* Use the fake bitmaps if e1 or e2 are not available.  */
  if (s_obstack == NULL)
    {
      s_obstack = XNEW (bitmap_obstack);
      bitmap_obstack_initialize (s_obstack);
      s_e1 = BITMAP_ALLOC (s_obstack);
      s_e2 = BITMAP_ALLOC (s_obstack);
    }
  if (e1 == NULL)
    e1 = s_e1;
  if (e2 == NULL)
    e2 = s_e2;

  /* Add N1 and N2 to their own set of equivalences to avoid
     duplicating the body of the loop just to check N1 and N2
     ranges.  NOTE: these bits are temporary and must be cleared on
     every exit path below, since E1/E2 may be shared lattice state.  */
  bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_set_bit (e2, SSA_NAME_VERSION (n2));

  /* If the equivalence sets have a common intersection, then the two
     names can be compared without checking their ranges.  */
  if (bitmap_intersect_p (e1, e2))
    {
      bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
      bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));

      return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
	     ? boolean_true_node
	     : boolean_false_node;
    }

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Otherwise, compare all the equivalent ranges.  First, add N1 and
     N2 to their own set of equivalences to avoid duplicating the body
     of the loop just to check N1 and N2 ranges.  */
  EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
    {
      value_range_t vr1 = get_vr_for_comparison (i1);

      t = retval = NULL_TREE;
      EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
	{
	  bool sop = false;

	  value_range_t vr2 = get_vr_for_comparison (i2);

	  t = compare_ranges (comp, &vr1, &vr2, &sop);
	  if (t)
	    {
	      /* If we get different answers from different members
		 of the equivalence set this check must be in a dead
		 code region.  Folding it to a trap representation
		 would be correct here.  For now just return don't-know.  */
	      if (retval != NULL
		  && t != retval)
		{
		  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
		  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
		  return NULL_TREE;
		}
	      retval = t;

	      /* Report strict overflow only if every successful
		 comparison depended on it.  */
	      if (!sop)
		used_strict_overflow = 0;
	      else if (used_strict_overflow < 0)
		used_strict_overflow = 1;
	    }
	}

      if (retval)
	{
	  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
	  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
	  if (used_strict_overflow > 0)
	    *strict_overflow_p = true;
	  return retval;
	}
    }

  /* None of the equivalent ranges are useful in computing this
     comparison.  */
  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
  return NULL_TREE;
}
6471 | ||
e0ad89bd | 6472 | /* Helper function for vrp_evaluate_conditional_warnv. */ |
6473 | ||
6474 | static tree | |
6475 | vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code, | |
6476 | tree op0, tree op1, | |
6477 | bool * strict_overflow_p) | |
6478 | { | |
6479 | value_range_t *vr0, *vr1; | |
6480 | ||
6481 | vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL; | |
6482 | vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL; | |
6483 | ||
6484 | if (vr0 && vr1) | |
6485 | return compare_ranges (code, vr0, vr1, strict_overflow_p); | |
6486 | else if (vr0 && vr1 == NULL) | |
6487 | return compare_range_with_value (code, vr0, op1, strict_overflow_p); | |
6488 | else if (vr0 == NULL && vr1) | |
6489 | return (compare_range_with_value | |
6490 | (swap_tree_comparison (code), vr1, op0, strict_overflow_p)); | |
6491 | return NULL; | |
6492 | } | |
6493 | ||
93116081 | 6494 | /* Helper function for vrp_evaluate_conditional_warnv. */ |
6495 | ||
6496 | static tree | |
6497 | vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0, | |
6498 | tree op1, bool use_equiv_p, | |
e0ad89bd | 6499 | bool *strict_overflow_p, bool *only_ranges) |
93116081 | 6500 | { |
e0ad89bd | 6501 | tree ret; |
6502 | if (only_ranges) | |
6503 | *only_ranges = true; | |
6504 | ||
93116081 | 6505 | /* We only deal with integral and pointer types. */ |
6506 | if (!INTEGRAL_TYPE_P (TREE_TYPE (op0)) | |
6507 | && !POINTER_TYPE_P (TREE_TYPE (op0))) | |
6508 | return NULL_TREE; | |
6509 | ||
6510 | if (use_equiv_p) | |
6511 | { | |
e0ad89bd | 6512 | if (only_ranges |
6513 | && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges | |
6514 | (code, op0, op1, strict_overflow_p))) | |
6515 | return ret; | |
6516 | *only_ranges = false; | |
93116081 | 6517 | if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME) |
75a70cf9 | 6518 | return compare_names (code, op0, op1, strict_overflow_p); |
93116081 | 6519 | else if (TREE_CODE (op0) == SSA_NAME) |
75a70cf9 | 6520 | return compare_name_with_value (code, op0, op1, strict_overflow_p); |
93116081 | 6521 | else if (TREE_CODE (op1) == SSA_NAME) |
6522 | return (compare_name_with_value | |
75a70cf9 | 6523 | (swap_tree_comparison (code), op1, op0, strict_overflow_p)); |
93116081 | 6524 | } |
6525 | else | |
e0ad89bd | 6526 | return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1, |
6527 | strict_overflow_p); | |
93116081 | 6528 | return NULL_TREE; |
6529 | } | |
eea12c72 | 6530 | |
ced5bc56 | 6531 | /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range |
a2a1fde2 | 6532 | information. Return NULL if the conditional can not be evaluated. |
6533 | The ranges of all the names equivalent with the operands in COND | |
6534 | will be used when trying to compute the value. If the result is | |
6535 | based on undefined signed overflow, issue a warning if | |
6536 | appropriate. */ | |
6537 | ||
07aee51b | 6538 | static tree |
75a70cf9 | 6539 | vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt) |
a2a1fde2 | 6540 | { |
6541 | bool sop; | |
6542 | tree ret; | |
e0ad89bd | 6543 | bool only_ranges; |
a2a1fde2 | 6544 | |
a000e0d1 | 6545 | /* Some passes and foldings leak constants with overflow flag set |
6546 | into the IL. Avoid doing wrong things with these and bail out. */ | |
6547 | if ((TREE_CODE (op0) == INTEGER_CST | |
6548 | && TREE_OVERFLOW (op0)) | |
6549 | || (TREE_CODE (op1) == INTEGER_CST | |
6550 | && TREE_OVERFLOW (op1))) | |
6551 | return NULL_TREE; | |
6552 | ||
a2a1fde2 | 6553 | sop = false; |
e0ad89bd | 6554 | ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop, |
6555 | &only_ranges); | |
a2a1fde2 | 6556 | |
6557 | if (ret && sop) | |
6558 | { | |
6559 | enum warn_strict_overflow_code wc; | |
6560 | const char* warnmsg; | |
6561 | ||
6562 | if (is_gimple_min_invariant (ret)) | |
6563 | { | |
6564 | wc = WARN_STRICT_OVERFLOW_CONDITIONAL; | |
6565 | warnmsg = G_("assuming signed overflow does not occur when " | |
6566 | "simplifying conditional to constant"); | |
6567 | } | |
6568 | else | |
6569 | { | |
6570 | wc = WARN_STRICT_OVERFLOW_COMPARISON; | |
6571 | warnmsg = G_("assuming signed overflow does not occur when " | |
6572 | "simplifying conditional"); | |
6573 | } | |
6574 | ||
6575 | if (issue_strict_overflow_warning (wc)) | |
6576 | { | |
75a70cf9 | 6577 | location_t location; |
a2a1fde2 | 6578 | |
75a70cf9 | 6579 | if (!gimple_has_location (stmt)) |
6580 | location = input_location; | |
a2a1fde2 | 6581 | else |
75a70cf9 | 6582 | location = gimple_location (stmt); |
5fb6a912 | 6583 | warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg); |
a2a1fde2 | 6584 | } |
6585 | } | |
6586 | ||
100b67da | 6587 | if (warn_type_limits |
e0ad89bd | 6588 | && ret && only_ranges |
ced5bc56 | 6589 | && TREE_CODE_CLASS (code) == tcc_comparison |
6590 | && TREE_CODE (op0) == SSA_NAME) | |
100b67da | 6591 | { |
6592 | /* If the comparison is being folded and the operand on the LHS | |
6593 | is being compared against a constant value that is outside of | |
6594 | the natural range of OP0's type, then the predicate will | |
6595 | always fold regardless of the value of OP0. If -Wtype-limits | |
6596 | was specified, emit a warning. */ | |
100b67da | 6597 | tree type = TREE_TYPE (op0); |
6598 | value_range_t *vr0 = get_value_range (op0); | |
6599 | ||
6600 | if (vr0->type != VR_VARYING | |
6601 | && INTEGRAL_TYPE_P (type) | |
6602 | && vrp_val_is_min (vr0->min) | |
6603 | && vrp_val_is_max (vr0->max) | |
6604 | && is_gimple_min_invariant (op1)) | |
100b67da | 6605 | { |
75a70cf9 | 6606 | location_t location; |
100b67da | 6607 | |
75a70cf9 | 6608 | if (!gimple_has_location (stmt)) |
6609 | location = input_location; | |
100b67da | 6610 | else |
75a70cf9 | 6611 | location = gimple_location (stmt); |
100b67da | 6612 | |
48e1416a | 6613 | warning_at (location, OPT_Wtype_limits, |
5fb6a912 | 6614 | integer_zerop (ret) |
0aec0de8 | 6615 | ? G_("comparison always false " |
6616 | "due to limited range of data type") | |
6617 | : G_("comparison always true " | |
6618 | "due to limited range of data type")); | |
100b67da | 6619 | } |
6620 | } | |
6621 | ||
a2a1fde2 | 6622 | return ret; |
6623 | } | |
6624 | ||
88dbf20f | 6625 | |
6626 | /* Visit conditional statement STMT. If we can determine which edge | |
6627 | will be taken out of STMT's basic block, record it in | |
6628 | *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return | |
6629 | SSA_PROP_VARYING. */ | |
6630 | ||
6631 | static enum ssa_prop_result | |
75a70cf9 | 6632 | vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p) |
88dbf20f | 6633 | { |
75a70cf9 | 6634 | tree val; |
c3783c3b | 6635 | bool sop; |
88dbf20f | 6636 | |
6637 | *taken_edge_p = NULL; | |
88dbf20f | 6638 | |
6639 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
6640 | { | |
6641 | tree use; | |
6642 | ssa_op_iter i; | |
6643 | ||
6644 | fprintf (dump_file, "\nVisiting conditional with predicate: "); | |
75a70cf9 | 6645 | print_gimple_stmt (dump_file, stmt, 0, 0); |
88dbf20f | 6646 | fprintf (dump_file, "\nWith known ranges\n"); |
48e1416a | 6647 | |
88dbf20f | 6648 | FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE) |
6649 | { | |
6650 | fprintf (dump_file, "\t"); | |
6651 | print_generic_expr (dump_file, use, 0); | |
6652 | fprintf (dump_file, ": "); | |
eea12c72 | 6653 | dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]); |
88dbf20f | 6654 | } |
6655 | ||
6656 | fprintf (dump_file, "\n"); | |
6657 | } | |
6658 | ||
6659 | /* Compute the value of the predicate COND by checking the known | |
eea12c72 | 6660 | ranges of each of its operands. |
48e1416a | 6661 | |
eea12c72 | 6662 | Note that we cannot evaluate all the equivalent ranges here |
6663 | because those ranges may not yet be final and with the current | |
6664 | propagation strategy, we cannot determine when the value ranges | |
6665 | of the names in the equivalence set have changed. | |
6666 | ||
6667 | For instance, given the following code fragment | |
6668 | ||
6669 | i_5 = PHI <8, i_13> | |
6670 | ... | |
6671 | i_14 = ASSERT_EXPR <i_5, i_5 != 0> | |
6672 | if (i_14 == 1) | |
6673 | ... | |
6674 | ||
6675 | Assume that on the first visit to i_14, i_5 has the temporary | |
6676 | range [8, 8] because the second argument to the PHI function is | |
6677 | not yet executable. We derive the range ~[0, 0] for i_14 and the | |
6678 | equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for | |
6679 | the first time, since i_14 is equivalent to the range [8, 8], we | |
6680 | determine that the predicate is always false. | |
6681 | ||
6682 | On the next round of propagation, i_13 is determined to be | |
6683 | VARYING, which causes i_5 to drop down to VARYING. So, another | |
6684 | visit to i_14 is scheduled. In this second visit, we compute the | |
6685 | exact same range and equivalence set for i_14, namely ~[0, 0] and | |
6686 | { i_5 }. But we did not have the previous range for i_5 | |
6687 | registered, so vrp_visit_assignment thinks that the range for | |
6688 | i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)' | |
6689 | is not visited again, which stops propagation from visiting | |
6690 | statements in the THEN clause of that if(). | |
6691 | ||
6692 | To properly fix this we would need to keep the previous range | |
6693 | value for the names in the equivalence set. This way we would've | |
6694 | discovered that from one visit to the other i_5 changed from | |
6695 | range [8, 8] to VR_VARYING. | |
6696 | ||
6697 | However, fixing this apparent limitation may not be worth the | |
6698 | additional checking. Testing on several code bases (GCC, DLV, | |
6699 | MICO, TRAMP3D and SPEC2000) showed that doing this results in | |
6700 | 4 more predicates folded in SPEC. */ | |
c3783c3b | 6701 | sop = false; |
ced5bc56 | 6702 | |
75a70cf9 | 6703 | val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt), |
6704 | gimple_cond_lhs (stmt), | |
6705 | gimple_cond_rhs (stmt), | |
e0ad89bd | 6706 | false, &sop, NULL); |
88dbf20f | 6707 | if (val) |
c3783c3b | 6708 | { |
6709 | if (!sop) | |
75a70cf9 | 6710 | *taken_edge_p = find_taken_edge (gimple_bb (stmt), val); |
c3783c3b | 6711 | else |
6712 | { | |
6713 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
6714 | fprintf (dump_file, | |
6715 | "\nIgnoring predicate evaluation because " | |
6716 | "it assumes that signed overflow is undefined"); | |
6717 | val = NULL_TREE; | |
6718 | } | |
6719 | } | |
88dbf20f | 6720 | |
6721 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
6722 | { | |
6723 | fprintf (dump_file, "\nPredicate evaluates to: "); | |
6724 | if (val == NULL_TREE) | |
6725 | fprintf (dump_file, "DON'T KNOW\n"); | |
6726 | else | |
6727 | print_generic_stmt (dump_file, val, 0); | |
6728 | } | |
6729 | ||
6730 | return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING; | |
6731 | } | |
6732 | ||
d31e54f1 | 6733 | /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL |
6734 | that includes the value VAL. The search is restricted to the range | |
75a70cf9 | 6735 | [START_IDX, n - 1] where n is the size of VEC. |
88dbf20f | 6736 | |
d31e54f1 | 6737 | If there is a CASE_LABEL for VAL, its index is placed in IDX and true is |
6738 | returned. | |
6739 | ||
496ffe87 | 6740 | If there is no CASE_LABEL for VAL and there is one that is larger than VAL, |
d31e54f1 | 6741 | it is placed in IDX and false is returned. |
6742 | ||
75a70cf9 | 6743 | If VAL is larger than any CASE_LABEL, n is placed on IDX and false is |
d31e54f1 | 6744 | returned. */ |
b6d7b6c5 | 6745 | |
6746 | static bool | |
75a70cf9 | 6747 | find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx) |
b6d7b6c5 | 6748 | { |
75a70cf9 | 6749 | size_t n = gimple_switch_num_labels (stmt); |
d31e54f1 | 6750 | size_t low, high; |
6751 | ||
6752 | /* Find case label for minimum of the value range or the next one. | |
6753 | At each iteration we are searching in [low, high - 1]. */ | |
b6d7b6c5 | 6754 | |
75a70cf9 | 6755 | for (low = start_idx, high = n; high != low; ) |
b6d7b6c5 | 6756 | { |
6757 | tree t; | |
6758 | int cmp; | |
75a70cf9 | 6759 | /* Note that i != high, so we never ask for n. */ |
d31e54f1 | 6760 | size_t i = (high + low) / 2; |
75a70cf9 | 6761 | t = gimple_switch_label (stmt, i); |
b6d7b6c5 | 6762 | |
6763 | /* Cache the result of comparing CASE_LOW and val. */ | |
6764 | cmp = tree_int_cst_compare (CASE_LOW (t), val); | |
6765 | ||
d31e54f1 | 6766 | if (cmp == 0) |
6767 | { | |
6768 | /* Ranges cannot be empty. */ | |
6769 | *idx = i; | |
6770 | return true; | |
6771 | } | |
6772 | else if (cmp > 0) | |
b6d7b6c5 | 6773 | high = i; |
6774 | else | |
d31e54f1 | 6775 | { |
6776 | low = i + 1; | |
6777 | if (CASE_HIGH (t) != NULL | |
6778 | && tree_int_cst_compare (CASE_HIGH (t), val) >= 0) | |
b6d7b6c5 | 6779 | { |
6780 | *idx = i; | |
6781 | return true; | |
6782 | } | |
6783 | } | |
6784 | } | |
6785 | ||
d31e54f1 | 6786 | *idx = high; |
b6d7b6c5 | 6787 | return false; |
6788 | } | |
6789 | ||
d31e54f1 | 6790 | /* Searches the case label vector VEC for the range of CASE_LABELs that is used |
6791 | for values between MIN and MAX. The first index is placed in MIN_IDX. The | |
6792 | last index is placed in MAX_IDX. If the range of CASE_LABELs is empty | |
6793 | then MAX_IDX < MIN_IDX. | |
6794 | Returns true if the default label is not needed. */ | |
6795 | ||
6796 | static bool | |
75a70cf9 | 6797 | find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx, |
6798 | size_t *max_idx) | |
d31e54f1 | 6799 | { |
6800 | size_t i, j; | |
75a70cf9 | 6801 | bool min_take_default = !find_case_label_index (stmt, 1, min, &i); |
6802 | bool max_take_default = !find_case_label_index (stmt, i, max, &j); | |
d31e54f1 | 6803 | |
6804 | if (i == j | |
6805 | && min_take_default | |
6806 | && max_take_default) | |
6807 | { | |
48e1416a | 6808 | /* Only the default case label reached. |
d31e54f1 | 6809 | Return an empty range. */ |
6810 | *min_idx = 1; | |
6811 | *max_idx = 0; | |
6812 | return false; | |
6813 | } | |
6814 | else | |
6815 | { | |
6816 | bool take_default = min_take_default || max_take_default; | |
6817 | tree low, high; | |
6818 | size_t k; | |
6819 | ||
6820 | if (max_take_default) | |
6821 | j--; | |
6822 | ||
6823 | /* If the case label range is continuous, we do not need | |
6824 | the default case label. Verify that. */ | |
75a70cf9 | 6825 | high = CASE_LOW (gimple_switch_label (stmt, i)); |
6826 | if (CASE_HIGH (gimple_switch_label (stmt, i))) | |
6827 | high = CASE_HIGH (gimple_switch_label (stmt, i)); | |
d31e54f1 | 6828 | for (k = i + 1; k <= j; ++k) |
6829 | { | |
75a70cf9 | 6830 | low = CASE_LOW (gimple_switch_label (stmt, k)); |
317e2a67 | 6831 | if (!integer_onep (int_const_binop (MINUS_EXPR, low, high))) |
d31e54f1 | 6832 | { |
6833 | take_default = true; | |
6834 | break; | |
6835 | } | |
6836 | high = low; | |
75a70cf9 | 6837 | if (CASE_HIGH (gimple_switch_label (stmt, k))) |
6838 | high = CASE_HIGH (gimple_switch_label (stmt, k)); | |
d31e54f1 | 6839 | } |
6840 | ||
6841 | *min_idx = i; | |
6842 | *max_idx = j; | |
6843 | return !take_default; | |
6844 | } | |
6845 | } | |
6846 | ||
98a8539f | 6847 | /* Searches the case label vector VEC for the ranges of CASE_LABELs that are |
6848 | used in range VR. The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and | |
6849 | MAX_IDX2. If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1. | |
6850 | Returns true if the default label is not needed. */ | |
6851 | ||
6852 | static bool | |
6853 | find_case_label_ranges (gimple stmt, value_range_t *vr, size_t *min_idx1, | |
6854 | size_t *max_idx1, size_t *min_idx2, | |
6855 | size_t *max_idx2) | |
6856 | { | |
6857 | size_t i, j, k, l; | |
6858 | unsigned int n = gimple_switch_num_labels (stmt); | |
6859 | bool take_default; | |
6860 | tree case_low, case_high; | |
6861 | tree min = vr->min, max = vr->max; | |
6862 | ||
6863 | gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE); | |
6864 | ||
6865 | take_default = !find_case_label_range (stmt, min, max, &i, &j); | |
6866 | ||
6867 | /* Set second range to emtpy. */ | |
6868 | *min_idx2 = 1; | |
6869 | *max_idx2 = 0; | |
6870 | ||
6871 | if (vr->type == VR_RANGE) | |
6872 | { | |
6873 | *min_idx1 = i; | |
6874 | *max_idx1 = j; | |
6875 | return !take_default; | |
6876 | } | |
6877 | ||
6878 | /* Set first range to all case labels. */ | |
6879 | *min_idx1 = 1; | |
6880 | *max_idx1 = n - 1; | |
6881 | ||
6882 | if (i > j) | |
6883 | return false; | |
6884 | ||
6885 | /* Make sure all the values of case labels [i , j] are contained in | |
6886 | range [MIN, MAX]. */ | |
6887 | case_low = CASE_LOW (gimple_switch_label (stmt, i)); | |
6888 | case_high = CASE_HIGH (gimple_switch_label (stmt, j)); | |
6889 | if (tree_int_cst_compare (case_low, min) < 0) | |
6890 | i += 1; | |
6891 | if (case_high != NULL_TREE | |
6892 | && tree_int_cst_compare (max, case_high) < 0) | |
6893 | j -= 1; | |
6894 | ||
6895 | if (i > j) | |
6896 | return false; | |
6897 | ||
6898 | /* If the range spans case labels [i, j], the corresponding anti-range spans | |
6899 | the labels [1, i - 1] and [j + 1, n - 1]. */ | |
6900 | k = j + 1; | |
6901 | l = n - 1; | |
6902 | if (k > l) | |
6903 | { | |
6904 | k = 1; | |
6905 | l = 0; | |
6906 | } | |
6907 | ||
6908 | j = i - 1; | |
6909 | i = 1; | |
6910 | if (i > j) | |
6911 | { | |
6912 | i = k; | |
6913 | j = l; | |
6914 | k = 1; | |
6915 | l = 0; | |
6916 | } | |
6917 | ||
6918 | *min_idx1 = i; | |
6919 | *max_idx1 = j; | |
6920 | *min_idx2 = k; | |
6921 | *max_idx2 = l; | |
6922 | return false; | |
6923 | } | |
6924 | ||
b6d7b6c5 | 6925 | /* Visit switch statement STMT. If we can determine which edge |
6926 | will be taken out of STMT's basic block, record it in | |
6927 | *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return | |
6928 | SSA_PROP_VARYING. */ | |
6929 | ||
6930 | static enum ssa_prop_result | |
75a70cf9 | 6931 | vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p) |
b6d7b6c5 | 6932 | { |
6933 | tree op, val; | |
6934 | value_range_t *vr; | |
98a8539f | 6935 | size_t i = 0, j = 0, k, l; |
d31e54f1 | 6936 | bool take_default; |
b6d7b6c5 | 6937 | |
6938 | *taken_edge_p = NULL; | |
75a70cf9 | 6939 | op = gimple_switch_index (stmt); |
b6d7b6c5 | 6940 | if (TREE_CODE (op) != SSA_NAME) |
6941 | return SSA_PROP_VARYING; | |
6942 | ||
6943 | vr = get_value_range (op); | |
6944 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
6945 | { | |
6946 | fprintf (dump_file, "\nVisiting switch expression with operand "); | |
6947 | print_generic_expr (dump_file, op, 0); | |
6948 | fprintf (dump_file, " with known range "); | |
6949 | dump_value_range (dump_file, vr); | |
6950 | fprintf (dump_file, "\n"); | |
6951 | } | |
6952 | ||
98a8539f | 6953 | if ((vr->type != VR_RANGE |
6954 | && vr->type != VR_ANTI_RANGE) | |
b6d7b6c5 | 6955 | || symbolic_range_p (vr)) |
6956 | return SSA_PROP_VARYING; | |
6957 | ||
6958 | /* Find the single edge that is taken from the switch expression. */ | |
98a8539f | 6959 | take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l); |
b6d7b6c5 | 6960 | |
d31e54f1 | 6961 | /* Check if the range spans no CASE_LABEL. If so, we only reach the default |
6962 | label */ | |
b6d7b6c5 | 6963 | if (j < i) |
d31e54f1 | 6964 | { |
6965 | gcc_assert (take_default); | |
75a70cf9 | 6966 | val = gimple_switch_default_label (stmt); |
d31e54f1 | 6967 | } |
b6d7b6c5 | 6968 | else |
6969 | { | |
d31e54f1 | 6970 | /* Check if labels with index i to j and maybe the default label |
6971 | are all reaching the same label. */ | |
6972 | ||
75a70cf9 | 6973 | val = gimple_switch_label (stmt, i); |
d31e54f1 | 6974 | if (take_default |
75a70cf9 | 6975 | && CASE_LABEL (gimple_switch_default_label (stmt)) |
6976 | != CASE_LABEL (val)) | |
b6d7b6c5 | 6977 | { |
6978 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
6979 | fprintf (dump_file, " not a single destination for this " | |
6980 | "range\n"); | |
6981 | return SSA_PROP_VARYING; | |
6982 | } | |
6983 | for (++i; i <= j; ++i) | |
6984 | { | |
75a70cf9 | 6985 | if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val)) |
b6d7b6c5 | 6986 | { |
6987 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
6988 | fprintf (dump_file, " not a single destination for this " | |
6989 | "range\n"); | |
6990 | return SSA_PROP_VARYING; | |
6991 | } | |
6992 | } | |
98a8539f | 6993 | for (; k <= l; ++k) |
6994 | { | |
6995 | if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val)) | |
6996 | { | |
6997 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
6998 | fprintf (dump_file, " not a single destination for this " | |
6999 | "range\n"); | |
7000 | return SSA_PROP_VARYING; | |
7001 | } | |
7002 | } | |
b6d7b6c5 | 7003 | } |
7004 | ||
75a70cf9 | 7005 | *taken_edge_p = find_edge (gimple_bb (stmt), |
b6d7b6c5 | 7006 | label_to_block (CASE_LABEL (val))); |
7007 | ||
7008 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
7009 | { | |
7010 | fprintf (dump_file, " will take edge to "); | |
7011 | print_generic_stmt (dump_file, CASE_LABEL (val), 0); | |
7012 | } | |
7013 | ||
7014 | return SSA_PROP_INTERESTING; | |
7015 | } | |
7016 | ||
7017 | ||
88dbf20f | 7018 | /* Evaluate statement STMT. If the statement produces a useful range, |
7019 | return SSA_PROP_INTERESTING and record the SSA name with the | |
7020 | interesting range into *OUTPUT_P. | |
7021 | ||
7022 | If STMT is a conditional branch and we can determine its truth | |
7023 | value, the taken edge is recorded in *TAKEN_EDGE_P. | |
7024 | ||
7025 | If STMT produces a varying value, return SSA_PROP_VARYING. */ | |
7026 | ||
7027 | static enum ssa_prop_result | |
75a70cf9 | 7028 | vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p) |
88dbf20f | 7029 | { |
7030 | tree def; | |
7031 | ssa_op_iter iter; | |
88dbf20f | 7032 | |
7033 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
7034 | { | |
7035 | fprintf (dump_file, "\nVisiting statement:\n"); | |
75a70cf9 | 7036 | print_gimple_stmt (dump_file, stmt, 0, dump_flags); |
88dbf20f | 7037 | fprintf (dump_file, "\n"); |
7038 | } | |
7039 | ||
2193544e | 7040 | if (!stmt_interesting_for_vrp (stmt)) |
7041 | gcc_assert (stmt_ends_bb_p (stmt)); | |
7042 | else if (is_gimple_assign (stmt) || is_gimple_call (stmt)) | |
04dde933 | 7043 | { |
04dde933 | 7044 | /* In general, assignments with virtual operands are not useful |
7045 | for deriving ranges, with the obvious exception of calls to | |
7046 | builtin functions. */ | |
75a70cf9 | 7047 | if ((is_gimple_call (stmt) |
7048 | && gimple_call_fndecl (stmt) != NULL_TREE | |
446dbdd8 | 7049 | && DECL_BUILT_IN (gimple_call_fndecl (stmt))) |
dd277d48 | 7050 | || !gimple_vuse (stmt)) |
75a70cf9 | 7051 | return vrp_visit_assignment_or_call (stmt, output_p); |
04dde933 | 7052 | } |
75a70cf9 | 7053 | else if (gimple_code (stmt) == GIMPLE_COND) |
88dbf20f | 7054 | return vrp_visit_cond_stmt (stmt, taken_edge_p); |
75a70cf9 | 7055 | else if (gimple_code (stmt) == GIMPLE_SWITCH) |
b6d7b6c5 | 7056 | return vrp_visit_switch_stmt (stmt, taken_edge_p); |
88dbf20f | 7057 | |
7058 | /* All other statements produce nothing of interest for VRP, so mark | |
7059 | their outputs varying and prevent further simulation. */ | |
7060 | FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF) | |
e7d43f99 | 7061 | set_value_range_to_varying (get_value_range (def)); |
88dbf20f | 7062 | |
7063 | return SSA_PROP_VARYING; | |
7064 | } | |
7065 | ||
9c0a48ce | 7066 | /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and |
7067 | { VR1TYPE, VR0MIN, VR0MAX } and store the result | |
7068 | in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest | |
7069 | possible such range. The resulting range is not canonicalized. */ | |
7070 | ||
7071 | static void | |
7072 | union_ranges (enum value_range_type *vr0type, | |
7073 | tree *vr0min, tree *vr0max, | |
7074 | enum value_range_type vr1type, | |
7075 | tree vr1min, tree vr1max) | |
7076 | { | |
7077 | bool mineq = operand_equal_p (*vr0min, vr1min, 0); | |
7078 | bool maxeq = operand_equal_p (*vr0max, vr1max, 0); | |
7079 | ||
7080 | /* [] is vr0, () is vr1 in the following classification comments. */ | |
7081 | if (mineq && maxeq) | |
7082 | { | |
7083 | /* [( )] */ | |
7084 | if (*vr0type == vr1type) | |
7085 | /* Nothing to do for equal ranges. */ | |
7086 | ; | |
7087 | else if ((*vr0type == VR_RANGE | |
7088 | && vr1type == VR_ANTI_RANGE) | |
7089 | || (*vr0type == VR_ANTI_RANGE | |
7090 | && vr1type == VR_RANGE)) | |
7091 | { | |
7092 | /* For anti-range with range union the result is varying. */ | |
7093 | goto give_up; | |
7094 | } | |
7095 | else | |
7096 | gcc_unreachable (); | |
7097 | } | |
7098 | else if (operand_less_p (*vr0max, vr1min) == 1 | |
7099 | || operand_less_p (vr1max, *vr0min) == 1) | |
7100 | { | |
7101 | /* [ ] ( ) or ( ) [ ] | |
7102 | If the ranges have an empty intersection, result of the union | |
7103 | operation is the anti-range or if both are anti-ranges | |
7104 | it covers all. */ | |
7105 | if (*vr0type == VR_ANTI_RANGE | |
7106 | && vr1type == VR_ANTI_RANGE) | |
7107 | goto give_up; | |
7108 | else if (*vr0type == VR_ANTI_RANGE | |
7109 | && vr1type == VR_RANGE) | |
7110 | ; | |
7111 | else if (*vr0type == VR_RANGE | |
7112 | && vr1type == VR_ANTI_RANGE) | |
7113 | { | |
7114 | *vr0type = vr1type; | |
7115 | *vr0min = vr1min; | |
7116 | *vr0max = vr1max; | |
7117 | } | |
7118 | else if (*vr0type == VR_RANGE | |
7119 | && vr1type == VR_RANGE) | |
7120 | { | |
7121 | /* The result is the convex hull of both ranges. */ | |
7122 | if (operand_less_p (*vr0max, vr1min) == 1) | |
7123 | { | |
7124 | /* If the result can be an anti-range, create one. */ | |
7125 | if (TREE_CODE (*vr0max) == INTEGER_CST | |
7126 | && TREE_CODE (vr1min) == INTEGER_CST | |
7127 | && vrp_val_is_min (*vr0min) | |
7128 | && vrp_val_is_max (vr1max)) | |
7129 | { | |
7130 | tree min = int_const_binop (PLUS_EXPR, | |
7131 | *vr0max, integer_one_node); | |
7132 | tree max = int_const_binop (MINUS_EXPR, | |
7133 | vr1min, integer_one_node); | |
7134 | if (!operand_less_p (max, min)) | |
7135 | { | |
7136 | *vr0type = VR_ANTI_RANGE; | |
7137 | *vr0min = min; | |
7138 | *vr0max = max; | |
7139 | } | |
7140 | else | |
7141 | *vr0max = vr1max; | |
7142 | } | |
7143 | else | |
7144 | *vr0max = vr1max; | |
7145 | } | |
7146 | else | |
7147 | { | |
7148 | /* If the result can be an anti-range, create one. */ | |
7149 | if (TREE_CODE (vr1max) == INTEGER_CST | |
7150 | && TREE_CODE (*vr0min) == INTEGER_CST | |
7151 | && vrp_val_is_min (vr1min) | |
7152 | && vrp_val_is_max (*vr0max)) | |
7153 | { | |
7154 | tree min = int_const_binop (PLUS_EXPR, | |
7155 | vr1max, integer_one_node); | |
7156 | tree max = int_const_binop (MINUS_EXPR, | |
7157 | *vr0min, integer_one_node); | |
7158 | if (!operand_less_p (max, min)) | |
7159 | { | |
7160 | *vr0type = VR_ANTI_RANGE; | |
7161 | *vr0min = min; | |
7162 | *vr0max = max; | |
7163 | } | |
7164 | else | |
7165 | *vr0min = vr1min; | |
7166 | } | |
7167 | else | |
7168 | *vr0min = vr1min; | |
7169 | } | |
7170 | } | |
7171 | else | |
7172 | gcc_unreachable (); | |
7173 | } | |
7174 | else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1) | |
7175 | && (mineq || operand_less_p (*vr0min, vr1min) == 1)) | |
7176 | { | |
7177 | /* [ ( ) ] or [( ) ] or [ ( )] */ | |
7178 | if (*vr0type == VR_RANGE | |
7179 | && vr1type == VR_RANGE) | |
7180 | ; | |
7181 | else if (*vr0type == VR_ANTI_RANGE | |
7182 | && vr1type == VR_ANTI_RANGE) | |
7183 | { | |
7184 | *vr0type = vr1type; | |
7185 | *vr0min = vr1min; | |
7186 | *vr0max = vr1max; | |
7187 | } | |
7188 | else if (*vr0type == VR_ANTI_RANGE | |
7189 | && vr1type == VR_RANGE) | |
7190 | { | |
7191 | /* Arbitrarily choose the right or left gap. */ | |
7192 | if (!mineq && TREE_CODE (vr1min) == INTEGER_CST) | |
7193 | *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node); | |
7194 | else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST) | |
7195 | *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node); | |
7196 | else | |
7197 | goto give_up; | |
7198 | } | |
7199 | else if (*vr0type == VR_RANGE | |
7200 | && vr1type == VR_ANTI_RANGE) | |
7201 | /* The result covers everything. */ | |
7202 | goto give_up; | |
7203 | else | |
7204 | gcc_unreachable (); | |
7205 | } | |
7206 | else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1) | |
7207 | && (mineq || operand_less_p (vr1min, *vr0min) == 1)) | |
7208 | { | |
7209 | /* ( [ ] ) or ([ ] ) or ( [ ]) */ | |
7210 | if (*vr0type == VR_RANGE | |
7211 | && vr1type == VR_RANGE) | |
7212 | { | |
7213 | *vr0type = vr1type; | |
7214 | *vr0min = vr1min; | |
7215 | *vr0max = vr1max; | |
7216 | } | |
7217 | else if (*vr0type == VR_ANTI_RANGE | |
7218 | && vr1type == VR_ANTI_RANGE) | |
7219 | ; | |
7220 | else if (*vr0type == VR_RANGE | |
7221 | && vr1type == VR_ANTI_RANGE) | |
7222 | { | |
7223 | *vr0type = VR_ANTI_RANGE; | |
7224 | if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST) | |
7225 | { | |
7226 | *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node); | |
7227 | *vr0min = vr1min; | |
7228 | } | |
7229 | else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST) | |
7230 | { | |
7231 | *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node); | |
7232 | *vr0max = vr1max; | |
7233 | } | |
7234 | else | |
7235 | goto give_up; | |
7236 | } | |
7237 | else if (*vr0type == VR_ANTI_RANGE | |
7238 | && vr1type == VR_RANGE) | |
7239 | /* The result covers everything. */ | |
7240 | goto give_up; | |
7241 | else | |
7242 | gcc_unreachable (); | |
7243 | } | |
7244 | else if ((operand_less_p (vr1min, *vr0max) == 1 | |
7245 | || operand_equal_p (vr1min, *vr0max, 0)) | |
7246 | && operand_less_p (*vr0min, vr1min) == 1) | |
7247 | { | |
7248 | /* [ ( ] ) or [ ]( ) */ | |
7249 | if (*vr0type == VR_RANGE | |
7250 | && vr1type == VR_RANGE) | |
7251 | *vr0max = vr1max; | |
7252 | else if (*vr0type == VR_ANTI_RANGE | |
7253 | && vr1type == VR_ANTI_RANGE) | |
7254 | *vr0min = vr1min; | |
7255 | else if (*vr0type == VR_ANTI_RANGE | |
7256 | && vr1type == VR_RANGE) | |
7257 | { | |
7258 | if (TREE_CODE (vr1min) == INTEGER_CST) | |
7259 | *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node); | |
7260 | else | |
7261 | goto give_up; | |
7262 | } | |
7263 | else if (*vr0type == VR_RANGE | |
7264 | && vr1type == VR_ANTI_RANGE) | |
7265 | { | |
7266 | if (TREE_CODE (*vr0max) == INTEGER_CST) | |
7267 | { | |
7268 | *vr0type = vr1type; | |
7269 | *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node); | |
7270 | *vr0max = vr1max; | |
7271 | } | |
7272 | else | |
7273 | goto give_up; | |
7274 | } | |
7275 | else | |
7276 | gcc_unreachable (); | |
7277 | } | |
7278 | else if ((operand_less_p (*vr0min, vr1max) == 1 | |
7279 | || operand_equal_p (*vr0min, vr1max, 0)) | |
7280 | && operand_less_p (vr1min, *vr0min) == 1) | |
7281 | { | |
7282 | /* ( [ ) ] or ( )[ ] */ | |
7283 | if (*vr0type == VR_RANGE | |
7284 | && vr1type == VR_RANGE) | |
7285 | *vr0min = vr1min; | |
7286 | else if (*vr0type == VR_ANTI_RANGE | |
7287 | && vr1type == VR_ANTI_RANGE) | |
7288 | *vr0max = vr1max; | |
7289 | else if (*vr0type == VR_ANTI_RANGE | |
7290 | && vr1type == VR_RANGE) | |
7291 | { | |
7292 | if (TREE_CODE (vr1max) == INTEGER_CST) | |
7293 | *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node); | |
7294 | else | |
7295 | goto give_up; | |
7296 | } | |
7297 | else if (*vr0type == VR_RANGE | |
7298 | && vr1type == VR_ANTI_RANGE) | |
7299 | { | |
7300 | if (TREE_CODE (*vr0min) == INTEGER_CST) | |
7301 | { | |
7302 | *vr0type = vr1type; | |
7303 | *vr0min = vr1min; | |
7304 | *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node); | |
7305 | } | |
7306 | else | |
7307 | goto give_up; | |
7308 | } | |
7309 | else | |
7310 | gcc_unreachable (); | |
7311 | } | |
7312 | else | |
7313 | goto give_up; | |
7314 | ||
7315 | return; | |
7316 | ||
7317 | give_up: | |
7318 | *vr0type = VR_VARYING; | |
7319 | *vr0min = NULL_TREE; | |
7320 | *vr0max = NULL_TREE; | |
7321 | } | |
7322 | ||
/* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
   { VR1TYPE, VR1MIN, VR1MAX } and store the result
   in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
   possible such range.  The resulting range is not canonicalized.

   The function classifies the relative position of the two ranges
   (equal, disjoint, one contained in the other, or partially
   overlapping) and handles each RANGE/ANTI_RANGE type combination
   within each class.  When no useful result can be computed it simply
   leaves *VR0 unchanged, which is always conservatively correct for
   an intersection.  */

static void
intersect_ranges (enum value_range_type *vr0type,
                  tree *vr0min, tree *vr0max,
                  enum value_range_type vr1type,
                  tree vr1min, tree vr1max)
{
  /* Endpoint equality tests shared by several of the cases below.  */
  bool mineq = operand_equal_p (*vr0min, vr1min, 0);
  bool maxeq = operand_equal_p (*vr0max, vr1max, 0);

  /* [] is vr0, () is vr1 in the following classification comments.  */
  if (mineq && maxeq)
    {
      /* [(  )] — the ranges cover exactly the same span.  */
      if (*vr0type == vr1type)
        /* Nothing to do for equal ranges.  */
        ;
      else if ((*vr0type == VR_RANGE
                && vr1type == VR_ANTI_RANGE)
               || (*vr0type == VR_ANTI_RANGE
                   && vr1type == VR_RANGE))
        {
          /* For anti-range with range intersection the result is empty.  */
          *vr0type = VR_UNDEFINED;
          *vr0min = NULL_TREE;
          *vr0max = NULL_TREE;
        }
      else
        gcc_unreachable ();
    }
  else if (operand_less_p (*vr0max, vr1min) == 1
           || operand_less_p (vr1max, *vr0min) == 1)
    {
      /* [ ] ( ) or ( ) [ ]
         If the ranges have an empty intersection, the result of the
         intersect operation is the range for intersecting an
         anti-range with a range or empty when intersecting two ranges.  */
      if (*vr0type == VR_RANGE
          && vr1type == VR_ANTI_RANGE)
        /* VR0 lies entirely inside the anti-range's allowed region;
           keep VR0.  */
        ;
      else if (*vr0type == VR_ANTI_RANGE
               && vr1type == VR_RANGE)
        {
          /* Symmetric case: the range VR1 survives unchanged.  */
          *vr0type = vr1type;
          *vr0min = vr1min;
          *vr0max = vr1max;
        }
      else if (*vr0type == VR_RANGE
               && vr1type == VR_RANGE)
        {
          /* Two disjoint ranges intersect to the empty set.  */
          *vr0type = VR_UNDEFINED;
          *vr0min = NULL_TREE;
          *vr0max = NULL_TREE;
        }
      else if (*vr0type == VR_ANTI_RANGE
               && vr1type == VR_ANTI_RANGE)
        {
          /* If the anti-ranges are adjacent to each other merge them.  */
          if (TREE_CODE (*vr0max) == INTEGER_CST
              && TREE_CODE (vr1min) == INTEGER_CST
              && operand_less_p (*vr0max, vr1min) == 1
              && integer_onep (int_const_binop (MINUS_EXPR,
                                                vr1min, *vr0max)))
            *vr0max = vr1max;
          else if (TREE_CODE (vr1max) == INTEGER_CST
                   && TREE_CODE (*vr0min) == INTEGER_CST
                   && operand_less_p (vr1max, *vr0min) == 1
                   && integer_onep (int_const_binop (MINUS_EXPR,
                                                     *vr0min, vr1max)))
            *vr0min = vr1min;
          /* Else arbitrarily take VR0.  */
        }
    }
  else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
           && (mineq || operand_less_p (*vr0min, vr1min) == 1))
    {
      /* [ (  ) ] or [(  ) ] or [ (  )] — VR1 is contained in VR0.  */
      if (*vr0type == VR_RANGE
          && vr1type == VR_RANGE)
        {
          /* If both are ranges the result is the inner one.  */
          *vr0type = vr1type;
          *vr0min = vr1min;
          *vr0max = vr1max;
        }
      else if (*vr0type == VR_RANGE
               && vr1type == VR_ANTI_RANGE)
        {
          /* Choose the right gap if the left one is empty.  */
          if (mineq)
            {
              if (TREE_CODE (vr1max) == INTEGER_CST)
                *vr0min = int_const_binop (PLUS_EXPR, vr1max,
                                           integer_one_node);
              else
                *vr0min = vr1max;
            }
          /* Choose the left gap if the right one is empty.  */
          else if (maxeq)
            {
              if (TREE_CODE (vr1min) == INTEGER_CST)
                *vr0max = int_const_binop (MINUS_EXPR, vr1min,
                                           integer_one_node);
              else
                *vr0max = vr1min;
            }
          /* Choose the anti-range if the range is effectively varying.  */
          else if (vrp_val_is_min (*vr0min)
                   && vrp_val_is_max (*vr0max))
            {
              *vr0type = vr1type;
              *vr0min = vr1min;
              *vr0max = vr1max;
            }
          /* Else choose the range.  */
        }
      else if (*vr0type == VR_ANTI_RANGE
               && vr1type == VR_ANTI_RANGE)
        /* If both are anti-ranges the result is the outer one.  */
        ;
      else if (*vr0type == VR_ANTI_RANGE
               && vr1type == VR_RANGE)
        {
          /* The intersection is empty.  */
          *vr0type = VR_UNDEFINED;
          *vr0min = NULL_TREE;
          *vr0max = NULL_TREE;
        }
      else
        gcc_unreachable ();
    }
  else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
           && (mineq || operand_less_p (vr1min, *vr0min) == 1))
    {
      /* ( [  ] ) or ([  ] ) or ( [  ]) — VR0 is contained in VR1.  */
      if (*vr0type == VR_RANGE
          && vr1type == VR_RANGE)
        /* Choose the inner range.  */
        ;
      else if (*vr0type == VR_ANTI_RANGE
               && vr1type == VR_RANGE)
        {
          /* Choose the right gap if the left is empty.  */
          if (mineq)
            {
              *vr0type = VR_RANGE;
              if (TREE_CODE (*vr0max) == INTEGER_CST)
                *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
                                           integer_one_node);
              else
                *vr0min = *vr0max;
              *vr0max = vr1max;
            }
          /* Choose the left gap if the right is empty.  */
          else if (maxeq)
            {
              *vr0type = VR_RANGE;
              if (TREE_CODE (*vr0min) == INTEGER_CST)
                *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
                                           integer_one_node);
              else
                *vr0max = *vr0min;
              *vr0min = vr1min;
            }
          /* Choose the anti-range if the range is effectively varying.  */
          else if (vrp_val_is_min (vr1min)
                   && vrp_val_is_max (vr1max))
            ;
          /* Else choose the range.  */
          else
            {
              *vr0type = vr1type;
              *vr0min = vr1min;
              *vr0max = vr1max;
            }
        }
      else if (*vr0type == VR_ANTI_RANGE
               && vr1type == VR_ANTI_RANGE)
        {
          /* If both are anti-ranges the result is the outer one.  */
          *vr0type = vr1type;
          *vr0min = vr1min;
          *vr0max = vr1max;
        }
      else if (vr1type == VR_ANTI_RANGE
               && *vr0type == VR_RANGE)
        {
          /* The intersection is empty.  */
          *vr0type = VR_UNDEFINED;
          *vr0min = NULL_TREE;
          *vr0max = NULL_TREE;
        }
      else
        gcc_unreachable ();
    }
  else if ((operand_less_p (vr1min, *vr0max) == 1
            || operand_equal_p (vr1min, *vr0max, 0))
           && operand_less_p (*vr0min, vr1min) == 1)
    {
      /* [  (  ]  ) or [  ](  ) — partial overlap, VR0 on the left.  */
      if (*vr0type == VR_ANTI_RANGE
          && vr1type == VR_ANTI_RANGE)
        /* The union of the two excluded spans is excluded.  */
        *vr0max = vr1max;
      else if (*vr0type == VR_RANGE
               && vr1type == VR_RANGE)
        /* Keep only the overlapping part.  */
        *vr0min = vr1min;
      else if (*vr0type == VR_RANGE
               && vr1type == VR_ANTI_RANGE)
        {
          /* Clip VR0 just below the start of the excluded span.  */
          if (TREE_CODE (vr1min) == INTEGER_CST)
            *vr0max = int_const_binop (MINUS_EXPR, vr1min,
                                       integer_one_node);
          else
            *vr0max = vr1min;
        }
      else if (*vr0type == VR_ANTI_RANGE
               && vr1type == VR_RANGE)
        {
          /* The surviving values start just above VR0's excluded span.  */
          *vr0type = VR_RANGE;
          if (TREE_CODE (*vr0max) == INTEGER_CST)
            *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
                                       integer_one_node);
          else
            *vr0min = *vr0max;
          *vr0max = vr1max;
        }
      else
        gcc_unreachable ();
    }
  else if ((operand_less_p (*vr0min, vr1max) == 1
            || operand_equal_p (*vr0min, vr1max, 0))
           && operand_less_p (vr1min, *vr0min) == 1)
    {
      /* (  [  )  ] or (  )[  ] — partial overlap, VR1 on the left.  */
      if (*vr0type == VR_ANTI_RANGE
          && vr1type == VR_ANTI_RANGE)
        *vr0min = vr1min;
      else if (*vr0type == VR_RANGE
               && vr1type == VR_RANGE)
        *vr0max = vr1max;
      else if (*vr0type == VR_RANGE
               && vr1type == VR_ANTI_RANGE)
        {
          /* Clip VR0 just above the end of the excluded span.  */
          if (TREE_CODE (vr1max) == INTEGER_CST)
            *vr0min = int_const_binop (PLUS_EXPR, vr1max,
                                       integer_one_node);
          else
            *vr0min = vr1max;
        }
      else if (*vr0type == VR_ANTI_RANGE
               && vr1type == VR_RANGE)
        {
          /* The surviving values end just below VR0's excluded span.  */
          *vr0type = VR_RANGE;
          if (TREE_CODE (*vr0min) == INTEGER_CST)
            *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
                                       integer_one_node);
          else
            *vr0max = *vr0min;
          *vr0min = vr1min;
        }
      else
        gcc_unreachable ();
    }

  /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
     result for the intersection.  That's always a conservative
     correct estimate.  */

  return;
}
7596 | ||
7597 | ||
/* Intersect the two value-ranges *VR0 and *VR1 and store the result
   in *VR0.  This may not be the smallest possible such range.

   Handles the lattice extremes (VR_VARYING, VR_UNDEFINED) directly
   and delegates the interesting cases to intersect_ranges, falling
   back to the original *VR0 when the worker gives up.  */

static void
vrp_intersect_ranges_1 (value_range_t *vr0, value_range_t *vr1)
{
  value_range_t saved;

  /* If either range is VR_VARYING the other one wins.  */
  if (vr1->type == VR_VARYING)
    return;
  if (vr0->type == VR_VARYING)
    {
      copy_value_range (vr0, vr1);
      return;
    }

  /* When either range is VR_UNDEFINED the resulting range is
     VR_UNDEFINED, too.  */
  if (vr0->type == VR_UNDEFINED)
    return;
  if (vr1->type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr0);
      return;
    }

  /* Save the original vr0 so we can return it as conservative intersection
     result when our worker turns things to varying.  */
  saved = *vr0;
  intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
                    vr1->type, vr1->min, vr1->max);
  /* Make sure to canonicalize the result though as the inversion of a
     VR_RANGE can still be a VR_RANGE.  */
  set_and_canonicalize_value_range (vr0, vr0->type,
                                    vr0->min, vr0->max, vr0->equiv);
  /* If that failed, use the saved original VR0.  */
  if (vr0->type == VR_VARYING)
    {
      *vr0 = saved;
      return;
    }
  /* If the result is VR_UNDEFINED there is no need to mess with
     the equivalencies.  */
  if (vr0->type == VR_UNDEFINED)
    return;

  /* The resulting set of equivalences for range intersection is the union of
     the two sets.  */
  if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
    bitmap_ior_into (vr0->equiv, vr1->equiv);
  else if (vr1->equiv && !vr0->equiv)
    /* NOTE(review): this branch guards on vr0->equiv being NULL yet
       passes vr0->equiv as the destination of bitmap_copy, which would
       dereference a NULL bitmap if the branch were ever taken.
       Presumably vr0->equiv is always allocated for ranges reaching
       this point — confirm, or allocate a bitmap here instead.  */
    bitmap_copy (vr0->equiv, vr1->equiv);
}
88dbf20f | 7652 | |
a339107e | 7653 | static void |
7654 | vrp_intersect_ranges (value_range_t *vr0, value_range_t *vr1) | |
7655 | { | |
7656 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
7657 | { | |
7658 | fprintf (dump_file, "Intersecting\n "); | |
7659 | dump_value_range (dump_file, vr0); | |
7660 | fprintf (dump_file, "\nand\n "); | |
7661 | dump_value_range (dump_file, vr1); | |
7662 | fprintf (dump_file, "\n"); | |
7663 | } | |
7664 | vrp_intersect_ranges_1 (vr0, vr1); | |
7665 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
7666 | { | |
7667 | fprintf (dump_file, "to\n "); | |
7668 | dump_value_range (dump_file, vr0); | |
7669 | fprintf (dump_file, "\n"); | |
7670 | } | |
7671 | } | |
7672 | ||
/* Meet operation for value ranges.  Given two value ranges VR0 and
   VR1, store in VR0 a range that contains both VR0 and VR1.  This
   may not be the smallest possible such range.

   Handles the lattice extremes first, then delegates to union_ranges;
   if that fails it tries to salvage a non-NULL/non-zero anti-range
   before conceding VR_VARYING.  */

static void
vrp_meet_1 (value_range_t *vr0, value_range_t *vr1)
{
  value_range_t saved;

  if (vr0->type == VR_UNDEFINED)
    {
      /* Drop equivalences.  See PR53465.  */
      set_value_range (vr0, vr1->type, vr1->min, vr1->max, NULL);
      return;
    }

  if (vr1->type == VR_UNDEFINED)
    {
      /* VR0 already has the resulting range, just drop equivalences.
         See PR53465.  */
      if (vr0->equiv)
        bitmap_clear (vr0->equiv);
      return;
    }

  if (vr0->type == VR_VARYING)
    {
      /* Nothing to do.  VR0 already has the resulting range.  */
      return;
    }

  if (vr1->type == VR_VARYING)
    {
      set_value_range_to_varying (vr0);
      return;
    }

  /* Save VR0 so the anti-range fallback below can inspect the
     pre-union value.  */
  saved = *vr0;
  union_ranges (&vr0->type, &vr0->min, &vr0->max,
                vr1->type, vr1->min, vr1->max);
  if (vr0->type == VR_VARYING)
    {
      /* Failed to find an efficient meet.  Before giving up and setting
         the result to VARYING, see if we can at least derive a useful
         anti-range.  FIXME, all this nonsense about distinguishing
         anti-ranges from ranges is necessary because of the odd
         semantics of range_includes_zero_p and friends.  */
      if (((saved.type == VR_RANGE
            && range_includes_zero_p (saved.min, saved.max) == 0)
           || (saved.type == VR_ANTI_RANGE
               && range_includes_zero_p (saved.min, saved.max) == 1))
          && ((vr1->type == VR_RANGE
               && range_includes_zero_p (vr1->min, vr1->max) == 0)
              || (vr1->type == VR_ANTI_RANGE
                  && range_includes_zero_p (vr1->min, vr1->max) == 1)))
        {
          /* Both inputs exclude zero, so the union does too.  */
          set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));

          /* Since this meet operation did not result from the meeting of
             two equivalent names, VR0 cannot have any equivalences.  */
          if (vr0->equiv)
            bitmap_clear (vr0->equiv);
          return;
        }

      set_value_range_to_varying (vr0);
      return;
    }
  /* Canonicalize the union_ranges result; this may itself degrade the
     range to VR_VARYING, in which case we are done.  */
  set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
                                    vr0->equiv);
  if (vr0->type == VR_VARYING)
    return;

  /* The resulting set of equivalences is always the intersection of
     the two sets.  */
  if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
    bitmap_and_into (vr0->equiv, vr1->equiv);
  else if (vr0->equiv && !vr1->equiv)
    bitmap_clear (vr0->equiv);
}
72940ce4 | 7753 | |
9c0a48ce | 7754 | static void |
7755 | vrp_meet (value_range_t *vr0, value_range_t *vr1) | |
7756 | { | |
7757 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
7758 | { | |
7759 | fprintf (dump_file, "Meeting\n "); | |
7760 | dump_value_range (dump_file, vr0); | |
7761 | fprintf (dump_file, "\nand\n "); | |
7762 | dump_value_range (dump_file, vr1); | |
7763 | fprintf (dump_file, "\n"); | |
7764 | } | |
7765 | vrp_meet_1 (vr0, vr1); | |
7766 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
7767 | { | |
7768 | fprintf (dump_file, "to\n "); | |
7769 | dump_value_range (dump_file, vr0); | |
7770 | fprintf (dump_file, "\n"); | |
052f71a4 | 7771 | } |
88dbf20f | 7772 | } |
7773 | ||
eea12c72 | 7774 | |
/* Visit all arguments for PHI node PHI that flow through executable
   edges.  If a valid value range can be derived from all the incoming
   value ranges, set a new range for the LHS of PHI.

   Returns SSA_PROP_INTERESTING when the LHS range changed,
   SSA_PROP_NOT_INTERESTING when it is unchanged, and SSA_PROP_VARYING
   when the LHS had to be dropped to VR_VARYING.  Contains widening
   logic (jump straight to +-INF) to guarantee the propagation
   terminates.  */

static enum ssa_prop_result
vrp_visit_phi_node (gimple phi)
{
  size_t i;
  tree lhs = PHI_RESULT (phi);
  value_range_t *lhs_vr = get_value_range (lhs);
  value_range_t vr_result = VR_INITIALIZER;
  bool first = true;
  int edges, old_edges;
  struct loop *l;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  /* Meet the ranges of all arguments flowing in over executable
     edges; EDGES counts how many such edges we have seen.  */
  edges = 0;
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      edge e = gimple_phi_arg_edge (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file,
                   "\n    Argument #%d (%d -> %d %sexecutable)\n",
                   (int) i, e->src->index, e->dest->index,
                   (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
        }

      if (e->flags & EDGE_EXECUTABLE)
        {
          tree arg = PHI_ARG_DEF (phi, i);
          value_range_t vr_arg;

          ++edges;

          if (TREE_CODE (arg) == SSA_NAME)
            {
              vr_arg = *(get_value_range (arg));
            }
          else
            {
              /* Constant argument: strip any overflow-infinity marker
                 (a copy so the original tree is not modified) and use
                 the singleton range [arg, arg].  */
              if (is_overflow_infinity (arg))
                {
                  arg = copy_node (arg);
                  TREE_OVERFLOW (arg) = 0;
                }

              vr_arg.type = VR_RANGE;
              vr_arg.min = arg;
              vr_arg.max = arg;
              vr_arg.equiv = NULL;
            }

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "\t");
              print_generic_expr (dump_file, arg, dump_flags);
              fprintf (dump_file, "\n\tValue: ");
              dump_value_range (dump_file, &vr_arg);
              fprintf (dump_file, "\n");
            }

          /* The first executable argument seeds the accumulator; later
             ones are folded in via the meet operation.  */
          if (first)
            copy_value_range (&vr_result, &vr_arg);
          else
            vrp_meet (&vr_result, &vr_arg);
          first = false;

          /* Once we are at VARYING no further argument can improve
             the result.  */
          if (vr_result.type == VR_VARYING)
            break;
        }
    }

  if (vr_result.type == VR_VARYING)
    goto varying;
  else if (vr_result.type == VR_UNDEFINED)
    goto update_range;

  old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
  vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;

  /* To prevent infinite iterations in the algorithm, derive ranges
     when the new value is slightly bigger or smaller than the
     previous one.  We don't do this if we have seen a new executable
     edge; this helps us avoid an overflow infinity for conditionals
     which are not in a loop.  If the old value-range was VR_UNDEFINED
     use the updated range and iterate one more time.  */
  if (edges > 0
      && gimple_phi_num_args (phi) > 1
      && edges == old_edges
      && lhs_vr->type != VR_UNDEFINED)
    {
      int cmp_min = compare_values (lhs_vr->min, vr_result.min);
      int cmp_max = compare_values (lhs_vr->max, vr_result.max);

      /* For non VR_RANGE or for pointers fall back to varying if
         the range changed.  */
      if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
           || POINTER_TYPE_P (TREE_TYPE (lhs)))
          && (cmp_min != 0 || cmp_max != 0))
        goto varying;

      /* If the new minimum is smaller or larger than the previous
         one, go all the way to -INF.  In the first case, to avoid
         iterating millions of times to reach -INF, and in the
         other case to avoid infinite bouncing between different
         minimums.  */
      if (cmp_min > 0 || cmp_min < 0)
        {
          if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
              || !vrp_var_may_overflow (lhs, phi))
            vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
          else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
            vr_result.min =
                negative_overflow_infinity (TREE_TYPE (vr_result.min));
        }

      /* Similarly, if the new maximum is smaller or larger than
         the previous one, go all the way to +INF.  */
      if (cmp_max < 0 || cmp_max > 0)
        {
          if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
              || !vrp_var_may_overflow (lhs, phi))
            vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
          else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
            vr_result.max =
                positive_overflow_infinity (TREE_TYPE (vr_result.max));
        }

      /* If we dropped either bound to +-INF then if this is a loop
         PHI node SCEV may known more about its value-range.  */
      if ((cmp_min > 0 || cmp_min < 0
           || cmp_max < 0 || cmp_max > 0)
          && current_loops
          && (l = loop_containing_stmt (phi))
          && l->header == gimple_bb (phi))
        adjust_range_with_scev (&vr_result, l, phi, lhs);

      /* If we will end up with a (-INF, +INF) range, set it to
         VARYING.  Same if the previous max value was invalid for
         the type and we end up with vr_result.min > vr_result.max.  */
      if ((vrp_val_is_max (vr_result.max)
           && vrp_val_is_min (vr_result.min))
          || compare_values (vr_result.min,
                             vr_result.max) > 0)
        goto varying;
    }

  /* If the new range is different than the previous value, keep
     iterating.  */
update_range:
  if (update_value_range (lhs, &vr_result))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Found new range for ");
          print_generic_expr (dump_file, lhs, 0);
          fprintf (dump_file, ": ");
          dump_value_range (dump_file, &vr_result);
          fprintf (dump_file, "\n\n");
        }

      return SSA_PROP_INTERESTING;
    }

  /* Nothing changed, don't add outgoing edges.  */
  return SSA_PROP_NOT_INTERESTING;

  /* No match found.  Set the LHS to VARYING.  */
varying:
  set_value_range_to_varying (lhs_vr);
  return SSA_PROP_VARYING;
}
7954 | ||
/* Simplify boolean operations if the source is known
   to be already a boolean.

   STMT is an EQ_EXPR/NE_EXPR assignment whose operands both have a
   [0, 1] value range; rewrite it in place (via GSI) as a copy, a
   conversion, or a BIT_XOR_EXPR.  Returns true if a simplification
   was made.  */
static bool
simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
  tree lhs, op0, op1;
  bool need_conversion;

  /* We handle only !=/== case here.  */
  gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);

  /* Both operands must be known to be boolean-valued (range [0, 1]).  */
  op0 = gimple_assign_rhs1 (stmt);
  if (!op_with_boolean_value_range_p (op0))
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!op_with_boolean_value_range_p (op1))
    return false;

  /* Reduce number of cases to handle to NE_EXPR.  As there is no
     BIT_XNOR_EXPR we cannot replace A == B with a single statement.  */
  if (rhs_code == EQ_EXPR)
    {
      /* A == C becomes A != (C ^ 1) for a constant boolean C.  */
      if (TREE_CODE (op1) == INTEGER_CST)
        op1 = int_const_binop (BIT_XOR_EXPR, op1, integer_one_node);
      else
        return false;
    }

  lhs = gimple_assign_lhs (stmt);
  need_conversion
    = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));

  /* Make sure to not sign-extend a 1-bit 1 when converting the result.  */
  if (need_conversion
      && !TYPE_UNSIGNED (TREE_TYPE (op0))
      && TYPE_PRECISION (TREE_TYPE (op0)) == 1
      && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
    return false;

  /* For A != 0 we can substitute A itself.  */
  if (integer_zerop (op1))
    gimple_assign_set_rhs_with_ops (gsi,
                                    need_conversion
                                    ? NOP_EXPR : TREE_CODE (op0),
                                    op0, NULL_TREE);
  /* For A != B we substitute A ^ B.  Either with conversion.  */
  else if (need_conversion)
    {
      /* Compute the XOR into a fresh SSA name, then convert it to the
         LHS type.  */
      tree tem = make_ssa_name (TREE_TYPE (op0), NULL);
      gimple newop = gimple_build_assign_with_ops (BIT_XOR_EXPR, tem, op0, op1);
      gsi_insert_before (gsi, newop, GSI_SAME_STMT);
      gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem, NULL_TREE);
    }
  /* Or without.  */
  else
    gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
  update_stmt (gsi_stmt (*gsi));

  return true;
}
8017 | ||
/* Simplify a division or modulo operator to a right shift or
   bitwise and if the first operand is unsigned or is greater
   than zero and the second operand is an exact power of two.

   NOTE(review): this function itself never checks that OP1 is a power
   of two; presumably the caller has already verified that — confirm
   at the call site.  Returns true if STMT was rewritten in place.  */

static bool
simplify_div_or_mod_using_ranges (gimple stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
  tree val = NULL;
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);
  value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));

  if (TYPE_UNSIGNED (TREE_TYPE (op0)))
    {
      /* Unsigned operands are always non-negative; no range query
         needed.  */
      val = integer_one_node;
    }
  else
    {
      bool sop = false;

      /* VAL is integer_one_node iff the range proves OP0 >= 0; SOP is
         set when that conclusion relies on assuming no signed
         overflow.  */
      val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);

      if (val
          && sop
          && integer_onep (val)
          && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
        {
          location_t location;

          if (!gimple_has_location (stmt))
            location = input_location;
          else
            location = gimple_location (stmt);
          warning_at (location, OPT_Wstrict_overflow,
                      "assuming signed overflow does not occur when "
                      "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
        }
    }

  if (val && integer_onep (val))
    {
      tree t;

      if (rhs_code == TRUNC_DIV_EXPR)
        {
          /* x / 2^k  ->  x >> k.  */
          t = build_int_cst (integer_type_node, tree_log2 (op1));
          gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
          gimple_assign_set_rhs1 (stmt, op0);
          gimple_assign_set_rhs2 (stmt, t);
        }
      else
        {
          /* x % 2^k  ->  x & (2^k - 1).  */
          t = build_int_cst (TREE_TYPE (op1), 1);
          t = int_const_binop (MINUS_EXPR, op1, t);
          t = fold_convert (TREE_TYPE (op0), t);

          gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
          gimple_assign_set_rhs1 (stmt, op0);
          gimple_assign_set_rhs2 (stmt, t);
        }

      update_stmt (stmt);
      return true;
    }

  return false;
}
15ea1735 | 8086 | |
/* If the operand to an ABS_EXPR is >= 0, then eliminate the
   ABS_EXPR.  If the operand is <= 0, then simplify the
   ABS_EXPR into a NEGATE_EXPR.

   STMT is a GIMPLE assignment whose RHS is the ABS_EXPR.  Returns
   true if STMT was changed (and updated), false otherwise.  */

static bool
simplify_abs_using_ranges (gimple stmt)
{
  tree val = NULL;
  tree op = gimple_assign_rhs1 (stmt);
  tree type = TREE_TYPE (op);
  value_range_t *vr = get_value_range (op);

  if (TYPE_UNSIGNED (type))
    {
      /* An unsigned operand is trivially >= 0; VAL == zero selects
	 the identity (drop-the-ABS) rewrite below.  */
      val = integer_zero_node;
    }
  else if (vr)
    {
      bool sop = false;

      /* First ask whether the range is known <= 0.  A "true" (one)
	 answer means ABS must negate.  */
      val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
      if (!val)
	{
	  /* Inconclusive; try >= 0 and invert the answer so that
	     VAL keeps the same meaning as in the LE_EXPR query
	     (one == negate, zero == identity).  */
	  sop = false;
	  val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
					  &sop);

	  if (val)
	    {
	      if (integer_zerop (val))
		val = integer_one_node;
	      else if (integer_onep (val))
		val = integer_zero_node;
	    }
	}

      if (val
	  && (integer_onep (val) || integer_zerop (val)))
	{
	  /* The comparison may only have held by assuming signed
	     overflow is undefined; tell the user if requested.  */
	  if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
	    {
	      location_t location;

	      if (!gimple_has_location (stmt))
		location = input_location;
	      else
		location = gimple_location (stmt);
	      warning_at (location, OPT_Wstrict_overflow,
			  "assuming signed overflow does not occur when "
			  "simplifying %<abs (X)%> to %<X%> or %<-X%>");
	    }

	  /* Rewrite ABS (op) as -op (range <= 0) or plain op
	     (range >= 0).  */
	  gimple_assign_set_rhs1 (stmt, op);
	  if (integer_onep (val))
	    gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
	  else
	    gimple_assign_set_rhs_code (stmt, SSA_NAME);
	  update_stmt (stmt);
	  return true;
	}
    }

  return false;
}
8151 | ||
/* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
   If all the bits that are being cleared by & are already
   known to be zero from VR, or all the bits that are being
   set by | are already known to be one from VR, the bit
   operation is redundant.

   GSI/STMT is the assignment with the bitwise RHS.  Returns true
   and replaces the RHS with the surviving operand when the
   operation is provably a no-op.  */

static bool
simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);
  tree op = NULL_TREE;
  value_range_t vr0 = VR_INITIALIZER;
  value_range_t vr1 = VR_INITIALIZER;
  double_int may_be_nonzero0, may_be_nonzero1;
  double_int must_be_nonzero0, must_be_nonzero1;
  double_int mask;

  /* Fetch a range for each operand: the recorded range for an SSA
     name, a singleton range for a constant; anything else bails.  */
  if (TREE_CODE (op0) == SSA_NAME)
    vr0 = *(get_value_range (op0));
  else if (is_gimple_min_invariant (op0))
    set_value_range_to_value (&vr0, op0, NULL);
  else
    return false;

  if (TREE_CODE (op1) == SSA_NAME)
    vr1 = *(get_value_range (op1));
  else if (is_gimple_min_invariant (op1))
    set_value_range_to_value (&vr1, op1, NULL);
  else
    return false;

  /* Derive may-be-nonzero / must-be-nonzero bit masks from the
     ranges; give up if that is not possible.  */
  if (!zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, &must_be_nonzero0))
    return false;
  if (!zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, &must_be_nonzero1))
    return false;

  switch (gimple_assign_rhs_code (stmt))
    {
    case BIT_AND_EXPR:
      /* op0 & op1 == op0 when every bit op1 might clear (a bit that
	 may be set in op0 but is not guaranteed set in op1) is absent.  */
      mask = may_be_nonzero0.and_not (must_be_nonzero1);
      if (mask.is_zero ())
	{
	  op = op0;
	  break;
	}
      mask = may_be_nonzero1.and_not (must_be_nonzero0);
      if (mask.is_zero ())
	{
	  op = op1;
	  break;
	}
      break;
    case BIT_IOR_EXPR:
      /* op0 | op1 == op1 when op0 cannot contribute any bit that is
	 not already guaranteed set in op1 -- note the surviving
	 operand is the OTHER one compared to the AND case.  */
      mask = may_be_nonzero0.and_not (must_be_nonzero1);
      if (mask.is_zero ())
	{
	  op = op1;
	  break;
	}
      mask = may_be_nonzero1.and_not (must_be_nonzero0);
      if (mask.is_zero ())
	{
	  op = op0;
	  break;
	}
      break;
    default:
      gcc_unreachable ();
    }

  if (op == NULL_TREE)
    return false;

  /* Replace the whole bitwise RHS with the surviving operand.  */
  gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op, NULL);
  update_stmt (gsi_stmt (*gsi));
  return true;
}
8230 | ||
/* We are comparing trees OP0 and OP1 using COND_CODE.  OP0 has
   a known value range VR.

   If there is one and only one value which will satisfy the
   conditional, then return that value.  Else return NULL.  */

static tree
test_for_singularity (enum tree_code cond_code, tree op0,
		      tree op1, value_range_t *vr)
{
  tree min = NULL;
  tree max = NULL;

  /* Extract minimum/maximum values which satisfy the
     conditional as it was written.  */
  if (cond_code == LE_EXPR || cond_code == LT_EXPR)
    {
      /* This should not be negative infinity; there is no overflow
	 here.  */
      min = TYPE_MIN_VALUE (TREE_TYPE (op0));

      max = op1;
      /* For a strict comparison the largest satisfying value is
	 OP1 - 1; skip the adjustment for overflow infinities.  */
      if (cond_code == LT_EXPR && !is_overflow_infinity (max))
	{
	  tree one = build_int_cst (TREE_TYPE (op0), 1);
	  max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
	  /* Suppress spurious warnings on the folded bound.  */
	  if (EXPR_P (max))
	    TREE_NO_WARNING (max) = 1;
	}
    }
  else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
    {
      /* This should not be positive infinity; there is no overflow
	 here.  */
      max = TYPE_MAX_VALUE (TREE_TYPE (op0));

      min = op1;
      /* For a strict comparison the smallest satisfying value is
	 OP1 + 1; skip the adjustment for overflow infinities.  */
      if (cond_code == GT_EXPR && !is_overflow_infinity (min))
	{
	  tree one = build_int_cst (TREE_TYPE (op0), 1);
	  min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
	  /* Suppress spurious warnings on the folded bound.  */
	  if (EXPR_P (min))
	    TREE_NO_WARNING (min) = 1;
	}
    }

  /* Now refine the minimum and maximum values using any
     value range information we have for op0.  */
  if (min && max)
    {
      /* Intersect [min, max] with VR.  */
      if (compare_values (vr->min, min) == 1)
	min = vr->min;
      if (compare_values (vr->max, max) == -1)
	max = vr->max;

      /* If the new min/max values have converged to a single value,
	 then there is only one value which can satisfy the condition,
	 return that value.  */
      if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
	return min;
    }
  return NULL;
}
8294 | ||
96c8d283 | 8295 | /* Simplify a conditional using a relational operator to an equality |
8296 | test if the range information indicates only one value can satisfy | |
8297 | the original conditional. */ | |
8298 | ||
e31161b3 | 8299 | static bool |
75a70cf9 | 8300 | simplify_cond_using_ranges (gimple stmt) |
96c8d283 | 8301 | { |
75a70cf9 | 8302 | tree op0 = gimple_cond_lhs (stmt); |
8303 | tree op1 = gimple_cond_rhs (stmt); | |
8304 | enum tree_code cond_code = gimple_cond_code (stmt); | |
96c8d283 | 8305 | |
8306 | if (cond_code != NE_EXPR | |
8307 | && cond_code != EQ_EXPR | |
8308 | && TREE_CODE (op0) == SSA_NAME | |
8309 | && INTEGRAL_TYPE_P (TREE_TYPE (op0)) | |
8310 | && is_gimple_min_invariant (op1)) | |
8311 | { | |
8312 | value_range_t *vr = get_value_range (op0); | |
48e1416a | 8313 | |
96c8d283 | 8314 | /* If we have range information for OP0, then we might be |
8315 | able to simplify this conditional. */ | |
8316 | if (vr->type == VR_RANGE) | |
8317 | { | |
f4e36c33 | 8318 | tree new_tree = test_for_singularity (cond_code, op0, op1, vr); |
96c8d283 | 8319 | |
f4e36c33 | 8320 | if (new_tree) |
96c8d283 | 8321 | { |
15fb6c2c | 8322 | if (dump_file) |
96c8d283 | 8323 | { |
15fb6c2c | 8324 | fprintf (dump_file, "Simplified relational "); |
75a70cf9 | 8325 | print_gimple_stmt (dump_file, stmt, 0, 0); |
15fb6c2c | 8326 | fprintf (dump_file, " into "); |
15ea1735 | 8327 | } |
8328 | ||
75a70cf9 | 8329 | gimple_cond_set_code (stmt, EQ_EXPR); |
8330 | gimple_cond_set_lhs (stmt, op0); | |
f4e36c33 | 8331 | gimple_cond_set_rhs (stmt, new_tree); |
75a70cf9 | 8332 | |
15fb6c2c | 8333 | update_stmt (stmt); |
8334 | ||
8335 | if (dump_file) | |
15ea1735 | 8336 | { |
75a70cf9 | 8337 | print_gimple_stmt (dump_file, stmt, 0, 0); |
15fb6c2c | 8338 | fprintf (dump_file, "\n"); |
15ea1735 | 8339 | } |
15fb6c2c | 8340 | |
e31161b3 | 8341 | return true; |
15ea1735 | 8342 | } |
8343 | ||
15fb6c2c | 8344 | /* Try again after inverting the condition. We only deal |
8345 | with integral types here, so no need to worry about | |
8346 | issues with inverting FP comparisons. */ | |
8347 | cond_code = invert_tree_comparison (cond_code, false); | |
f4e36c33 | 8348 | new_tree = test_for_singularity (cond_code, op0, op1, vr); |
15fb6c2c | 8349 | |
f4e36c33 | 8350 | if (new_tree) |
96c8d283 | 8351 | { |
15fb6c2c | 8352 | if (dump_file) |
96c8d283 | 8353 | { |
15fb6c2c | 8354 | fprintf (dump_file, "Simplified relational "); |
75a70cf9 | 8355 | print_gimple_stmt (dump_file, stmt, 0, 0); |
15fb6c2c | 8356 | fprintf (dump_file, " into "); |
96c8d283 | 8357 | } |
15fb6c2c | 8358 | |
75a70cf9 | 8359 | gimple_cond_set_code (stmt, NE_EXPR); |
8360 | gimple_cond_set_lhs (stmt, op0); | |
f4e36c33 | 8361 | gimple_cond_set_rhs (stmt, new_tree); |
75a70cf9 | 8362 | |
15fb6c2c | 8363 | update_stmt (stmt); |
8364 | ||
8365 | if (dump_file) | |
8366 | { | |
75a70cf9 | 8367 | print_gimple_stmt (dump_file, stmt, 0, 0); |
15fb6c2c | 8368 | fprintf (dump_file, "\n"); |
8369 | } | |
15fb6c2c | 8370 | |
e31161b3 | 8371 | return true; |
96c8d283 | 8372 | } |
15ea1735 | 8373 | } |
8374 | } | |
e31161b3 | 8375 | |
8376 | return false; | |
15ea1735 | 8377 | } |
8378 | ||
/* Simplify a switch statement using the value range of the switch
   argument.

   Unreachable case labels are dropped: labels [i, j] and [k, l]
   (two index sub-ranges of the label vector) survive.  The stmt
   itself is not rewritten here -- the new label vector is queued in
   TO_UPDATE_SWITCH_STMTS and dead edges in TO_REMOVE_EDGES for
   later processing, so this always returns false.  */

static bool
simplify_switch_using_ranges (gimple stmt)
{
  tree op = gimple_switch_index (stmt);
  value_range_t *vr;
  bool take_default;
  edge e;
  edge_iterator ei;
  size_t i = 0, j = 0, n, n2;
  tree vec2;
  switch_update su;
  size_t k = 1, l = 0;

  if (TREE_CODE (op) == SSA_NAME)
    {
      vr = get_value_range (op);

      /* We can only handle integer ranges.  */
      if ((vr->type != VR_RANGE
	   && vr->type != VR_ANTI_RANGE)
	  || symbolic_range_p (vr))
	return false;

      /* Find case label for min/max of the value range.  */
      take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
    }
  else if (TREE_CODE (op) == INTEGER_CST)
    {
      /* A constant index selects at most one label.  */
      take_default = !find_case_label_index (stmt, 1, op, &i);
      if (take_default)
	{
	  /* Empty label range [1, 0]: only the default is taken.  */
	  i = 1;
	  j = 0;
	}
      else
	{
	  j = i;
	}
    }
  else
    return false;

  n = gimple_switch_num_labels (stmt);

  /* Bail out if this is just all edges taken.  */
  if (i == 1
      && j == n - 1
      && take_default)
    return false;

  /* Build a new vector of taken case labels.  Both [i, j] and
     [k, l] may be empty (size contribution zero).  */
  vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
  n2 = 0;

  /* Add the default edge, if necessary.  */
  if (take_default)
    TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);

  for (; i <= j; ++i, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);

  for (; k <= l; ++k, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);

  /* Mark needed edges.  The sentinel -1 in E->aux flags an edge as
     reachable from a surviving label.  */
  for (i = 0; i < n2; ++i)
    {
      e = find_edge (gimple_bb (stmt),
		     label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
      e->aux = (void *)-1;
    }

  /* Queue not needed edges for later removal.  */
  FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
    {
      if (e->aux == (void *)-1)
	{
	  /* Reachable edge: clear the marker and keep it.  */
	  e->aux = NULL;
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "removing unreachable case label\n");
	}
      VEC_safe_push (edge, heap, to_remove_edges, e);
      e->flags &= ~EDGE_EXECUTABLE;
    }

  /* And queue an update for the stmt.  The actual rewrite happens
     after propagation finishes, hence return false here.  */
  su.stmt = stmt;
  su.vec = vec2;
  VEC_safe_push (switch_update, heap, to_update_switch_stmts, &su);
  return false;
}
8477 | ||
/* Simplify an integral conversion from an SSA name in STMT.

   STMT converts MIDDLEOP to FINALTYPE where MIDDLEOP is itself the
   result of a conversion from INNEROP.  If dropping the middle
   conversion provably produces the same value for the whole range of
   INNEROP, rewrite STMT to convert INNEROP directly.  Returns true
   on success.  */

static bool
simplify_conversion_using_ranges (gimple stmt)
{
  tree innerop, middleop, finaltype;
  gimple def_stmt;
  value_range_t *innervr;
  bool inner_unsigned_p, middle_unsigned_p, final_unsigned_p;
  unsigned inner_prec, middle_prec, final_prec;
  double_int innermin, innermed, innermax, middlemin, middlemed, middlemax;

  finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
  if (!INTEGRAL_TYPE_P (finaltype))
    return false;
  /* Look through the defining statement for a conversion chain
     innerop -> middleop -> lhs.  */
  middleop = gimple_assign_rhs1 (stmt);
  def_stmt = SSA_NAME_DEF_STMT (middleop);
  if (!is_gimple_assign (def_stmt)
      || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
    return false;
  innerop = gimple_assign_rhs1 (def_stmt);
  if (TREE_CODE (innerop) != SSA_NAME)
    return false;

  /* Get the value-range of the inner operand.  */
  innervr = get_value_range (innerop);
  if (innervr->type != VR_RANGE
      || TREE_CODE (innervr->min) != INTEGER_CST
      || TREE_CODE (innervr->max) != INTEGER_CST)
    return false;

  /* Simulate the conversion chain to check if the result is equal if
     the middle conversion is removed.  */
  innermin = tree_to_double_int (innervr->min);
  innermax = tree_to_double_int (innervr->max);

  inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
  middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
  final_prec = TYPE_PRECISION (finaltype);

  /* If the first conversion is not injective, the second must not
     be widening.  */
  if ((innermax - innermin).ugt (double_int::mask (middle_prec))
      && middle_prec < final_prec)
    return false;
  /* We also want a medium value so that we can track the effect that
     narrowing conversions with sign change have.  */
  inner_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (innerop));
  if (inner_unsigned_p)
    innermed = double_int::mask (inner_prec).lrshift (1, inner_prec);
  else
    innermed = double_int_zero;
  /* If the medium value lies outside [innermin, innermax] just reuse
     the minimum as the third probe point.  */
  if (innermin.cmp (innermed, inner_unsigned_p) >= 0
      || innermed.cmp (innermax, inner_unsigned_p) >= 0)
    innermed = innermin;

  /* Apply the middle conversion to all three probe values.  */
  middle_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (middleop));
  middlemin = innermin.ext (middle_prec, middle_unsigned_p);
  middlemed = innermed.ext (middle_prec, middle_unsigned_p);
  middlemax = innermax.ext (middle_prec, middle_unsigned_p);

  /* Require that the final conversion applied to both the original
     and the intermediate range produces the same result.  */
  final_unsigned_p = TYPE_UNSIGNED (finaltype);
  if (middlemin.ext (final_prec, final_unsigned_p)
      != innermin.ext (final_prec, final_unsigned_p)
      || middlemed.ext (final_prec, final_unsigned_p)
      != innermed.ext (final_prec, final_unsigned_p)
      || middlemax.ext (final_prec, final_unsigned_p)
      != innermax.ext (final_prec, final_unsigned_p))
    return false;

  /* The middle conversion is redundant: convert INNEROP directly.  */
  gimple_assign_set_rhs1 (stmt, innerop);
  update_stmt (stmt);
  return true;
}
8554 | ||
/* Return whether the value range *VR fits in an integer type specified
   by PRECISION and UNSIGNED_P.  */

static bool
range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p)
{
  tree src_type;
  unsigned src_precision;
  double_int tem;

  /* We can only handle integral and pointer types.  */
  src_type = TREE_TYPE (vr->min);
  if (!INTEGRAL_TYPE_P (src_type)
      && !POINTER_TYPE_P (src_type))
    return false;

  /* An extension is always fine, so is an identity transform.  */
  src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
  if (src_precision < precision
      || (src_precision == precision
	  && TYPE_UNSIGNED (src_type) == unsigned_p))
    return true;

  /* Now we can only handle ranges with constant bounds.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* For precision-preserving sign-changes the MSB of the double-int
     has to be clear.  */
  if (src_precision == precision
      && (TREE_INT_CST_HIGH (vr->min) | TREE_INT_CST_HIGH (vr->max)) < 0)
    return false;

  /* Then we can perform the conversion on both ends and compare
     the result for equality.  If the bound survives the round-trip
     extension unchanged, it is representable in the target type.  */
  tem = tree_to_double_int (vr->min).ext (precision, unsigned_p);
  if (tree_to_double_int (vr->min) != tem)
    return false;
  tem = tree_to_double_int (vr->max).ext (precision, unsigned_p);
  if (tree_to_double_int (vr->max) != tem)
    return false;

  return true;
}
8601 | ||
/* Simplify a conversion from integral SSA name to float in STMT.

   If the hardware cannot convert the operand's own mode/signedness
   to FLTMODE directly, look for a narrower (or signed) integer mode
   the range fits in and insert an intermediate integer conversion
   before the float conversion.  Returns true if STMT was changed.  */

static bool
simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  value_range_t *vr = get_value_range (rhs1);
  enum machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
  enum machine_mode mode;
  tree tem;
  gimple conv;

  /* We can only handle constant ranges.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* First check if we can use a signed type in place of an unsigned.  */
  if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
      && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
	  != CODE_FOR_nothing)
      && range_fits_type_p (vr, GET_MODE_PRECISION
			    (TYPE_MODE (TREE_TYPE (rhs1))), 0))
    mode = TYPE_MODE (TREE_TYPE (rhs1));
  /* If we can do the conversion in the current input mode do nothing.  */
  else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
			TYPE_UNSIGNED (TREE_TYPE (rhs1))))
    return false;
  /* Otherwise search for a mode we can use, starting from the narrowest
     integer mode available.  */
  else
    {
      mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
      do
	{
	  /* If we cannot do a signed conversion to float from mode
	     or if the value-range does not fit in the signed type
	     try with a wider mode.  */
	  if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
	      && range_fits_type_p (vr, GET_MODE_PRECISION (mode), 0))
	    break;

	  mode = GET_MODE_WIDER_MODE (mode);
	  /* But do not widen the input.  Instead leave that to the
	     optabs expansion code.  */
	  if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
	    return false;
	}
      while (mode != VOIDmode);
      if (mode == VOIDmode)
	return false;
    }

  /* It works, insert a truncation or sign-change before the
     float conversion.  */
  tem = make_ssa_name (build_nonstandard_integer_type
		       (GET_MODE_PRECISION (mode), 0), NULL);
  conv = gimple_build_assign_with_ops (NOP_EXPR, tem, rhs1, NULL_TREE);
  gsi_insert_before (gsi, conv, GSI_SAME_STMT);
  gimple_assign_set_rhs1 (stmt, tem);
  update_stmt (stmt);

  return true;
}
8667 | ||
96c8d283 | 8668 | /* Simplify STMT using ranges if possible. */ |
8669 | ||
07aee51b | 8670 | static bool |
e31161b3 | 8671 | simplify_stmt_using_ranges (gimple_stmt_iterator *gsi) |
96c8d283 | 8672 | { |
e31161b3 | 8673 | gimple stmt = gsi_stmt (*gsi); |
75a70cf9 | 8674 | if (is_gimple_assign (stmt)) |
96c8d283 | 8675 | { |
75a70cf9 | 8676 | enum tree_code rhs_code = gimple_assign_rhs_code (stmt); |
7430df61 | 8677 | tree rhs1 = gimple_assign_rhs1 (stmt); |
96c8d283 | 8678 | |
e31161b3 | 8679 | switch (rhs_code) |
8680 | { | |
8681 | case EQ_EXPR: | |
8682 | case NE_EXPR: | |
eea7f7eb | 8683 | /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity |
8684 | if the RHS is zero or one, and the LHS are known to be boolean | |
8685 | values. */ | |
7430df61 | 8686 | if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) |
e31161b3 | 8687 | return simplify_truth_ops_using_ranges (gsi, stmt); |
8688 | break; | |
8689 | ||
96c8d283 | 8690 | /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR |
8691 | and BIT_AND_EXPR respectively if the first operand is greater | |
8692 | than zero and the second operand is an exact power of two. */ | |
e31161b3 | 8693 | case TRUNC_DIV_EXPR: |
8694 | case TRUNC_MOD_EXPR: | |
7430df61 | 8695 | if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)) |
e31161b3 | 8696 | && integer_pow2p (gimple_assign_rhs2 (stmt))) |
8697 | return simplify_div_or_mod_using_ranges (stmt); | |
8698 | break; | |
96c8d283 | 8699 | |
8700 | /* Transform ABS (X) into X or -X as appropriate. */ | |
e31161b3 | 8701 | case ABS_EXPR: |
7430df61 | 8702 | if (TREE_CODE (rhs1) == SSA_NAME |
8703 | && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) | |
e31161b3 | 8704 | return simplify_abs_using_ranges (stmt); |
8705 | break; | |
8706 | ||
273e780e | 8707 | case BIT_AND_EXPR: |
8708 | case BIT_IOR_EXPR: | |
8709 | /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR | |
8710 | if all the bits being cleared are already cleared or | |
8711 | all the bits being set are already set. */ | |
7430df61 | 8712 | if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) |
273e780e | 8713 | return simplify_bit_ops_using_ranges (gsi, stmt); |
8714 | break; | |
8715 | ||
7430df61 | 8716 | CASE_CONVERT: |
8717 | if (TREE_CODE (rhs1) == SSA_NAME | |
8718 | && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) | |
8719 | return simplify_conversion_using_ranges (stmt); | |
8720 | break; | |
8721 | ||
f0938d2c | 8722 | case FLOAT_EXPR: |
8723 | if (TREE_CODE (rhs1) == SSA_NAME | |
8724 | && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) | |
8725 | return simplify_float_conversion_using_ranges (gsi, stmt); | |
8726 | break; | |
8727 | ||
e31161b3 | 8728 | default: |
8729 | break; | |
8730 | } | |
96c8d283 | 8731 | } |
75a70cf9 | 8732 | else if (gimple_code (stmt) == GIMPLE_COND) |
e31161b3 | 8733 | return simplify_cond_using_ranges (stmt); |
75a70cf9 | 8734 | else if (gimple_code (stmt) == GIMPLE_SWITCH) |
e31161b3 | 8735 | return simplify_switch_using_ranges (stmt); |
8736 | ||
8737 | return false; | |
96c8d283 | 8738 | } |
8739 | ||
/* If the statement pointed by SI has a predicate whose value can be
   computed using the value range information computed by VRP, compute
   its value and return true.  Otherwise, return false.  */

static bool
fold_predicate_in (gimple_stmt_iterator *si)
{
  bool assignment_p = false;
  tree val;
  gimple stmt = gsi_stmt (*si);

  /* Handle both forms of predicates: a comparison on the RHS of an
     assignment, and the condition of a GIMPLE_COND.  */
  if (is_gimple_assign (stmt)
      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
    {
      assignment_p = true;
      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
				      gimple_assign_rhs1 (stmt),
				      gimple_assign_rhs2 (stmt),
				      stmt);
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    val = vrp_evaluate_conditional (gimple_cond_code (stmt),
				    gimple_cond_lhs (stmt),
				    gimple_cond_rhs (stmt),
				    stmt);
  else
    return false;

  if (val)
    {
      /* For an assignment the folded value must match the LHS type.  */
      if (assignment_p)
	val = fold_convert (gimple_expr_type (stmt), val);

      if (dump_file)
	{
	  fprintf (dump_file, "Folding predicate ");
	  print_gimple_expr (dump_file, stmt, 0, 0);
	  fprintf (dump_file, " to ");
	  print_generic_expr (dump_file, val, 0);
	  fprintf (dump_file, "\n");
	}

      if (is_gimple_assign (stmt))
	gimple_assign_set_rhs_from_tree (si, val);
      else
	{
	  /* Replace the condition with a constant true/false.  */
	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
	  if (integer_zerop (val))
	    gimple_cond_make_false (stmt);
	  else if (integer_onep (val))
	    gimple_cond_make_true (stmt);
	  else
	    gcc_unreachable ();
	}

      return true;
    }

  return false;
}
8800 | ||
8801 | /* Callback for substitute_and_fold folding the stmt at *SI. */ | |
8802 | ||
8803 | static bool | |
8804 | vrp_fold_stmt (gimple_stmt_iterator *si) | |
8805 | { | |
8806 | if (fold_predicate_in (si)) | |
8807 | return true; | |
8808 | ||
8809 | return simplify_stmt_using_ranges (si); | |
8810 | } | |
8811 | ||
/* Stack of dest,src equivalency pairs that need to be restored after
   each attempt to thread a block's incoming edge to an outgoing edge.

   A NULL entry is used to mark the end of the pairs recorded for one
   threading attempt.  */
static VEC(tree,heap) *equiv_stack;
62b180e1 | 8818 | |
a2a1fde2 | 8819 | /* A trivial wrapper so that we can present the generic jump threading |
8820 | code with a simple API for simplifying statements. STMT is the | |
8821 | statement we want to simplify, WITHIN_STMT provides the location | |
8822 | for any overflow warnings. */ | |
8823 | ||
62b180e1 | 8824 | static tree |
75a70cf9 | 8825 | simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt) |
62b180e1 | 8826 | { |
8827 | /* We only use VRP information to simplify conditionals. This is | |
8828 | overly conservative, but it's unclear if doing more would be | |
8829 | worth the compile time cost. */ | |
75a70cf9 | 8830 | if (gimple_code (stmt) != GIMPLE_COND) |
62b180e1 | 8831 | return NULL; |
8832 | ||
75a70cf9 | 8833 | return vrp_evaluate_conditional (gimple_cond_code (stmt), |
8834 | gimple_cond_lhs (stmt), | |
8835 | gimple_cond_rhs (stmt), within_stmt); | |
62b180e1 | 8836 | } |
8837 | ||
/* Blocks which have more than one predecessor and more than
   one successor present jump threading opportunities, i.e.,
   when the block is reached from a specific predecessor, we
   may be able to determine which of the outgoing edges will
   be traversed.  When this optimization applies, we are able
   to avoid conditionals at runtime and we may expose secondary
   optimization opportunities.

   This routine is effectively a driver for the generic jump
   threading code.  It basically just presents the generic code
   with edges that may be suitable for jump threading.

   Unlike DOM, we do not iterate VRP if jump threading was successful.
   While iterating may expose new opportunities for VRP, it is expected
   those opportunities would be very limited and the compile time cost
   to expose those opportunities would be significant.

   As jump threading opportunities are discovered, they are registered
   for later realization.  */

static void
identify_jump_threads (void)
{
  basic_block bb;
  gimple dummy;
  int i;
  edge e;

  /* Ugh.  When substituting values earlier in this pass we can
     wipe the dominance information.  So rebuild the dominator
     information as we need it within the jump threading code.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* We do not allow VRP information to be used for jump threading
     across a back edge in the CFG.  Otherwise it becomes too
     difficult to avoid eliminating loop exit tests.  Of course
     EDGE_DFS_BACK is not accurate at this time so we have to
     recompute it.  */
  mark_dfs_back_edges ();

  /* Do not thread across edges we are about to remove.  Just marking
     them as EDGE_DFS_BACK will do.  */
  FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
    e->flags |= EDGE_DFS_BACK;

  /* Allocate our unwinder stack to unwind any temporary equivalences
     that might be recorded.  */
  equiv_stack = VEC_alloc (tree, heap, 20);

  /* To avoid lots of silly node creation, we create a single
     conditional and just modify it in-place when attempting to
     thread jumps.  */
  dummy = gimple_build_cond (EQ_EXPR,
			     integer_zero_node, integer_zero_node,
			     NULL, NULL);

  /* Walk through all the blocks finding those which present a
     potential jump threading opportunity.  We could set this up
     as a dominator walker and record data during the walk, but
     I doubt it's worth the effort for the classes of jump
     threading opportunities we are trying to identify at this
     point in compilation.  */
  FOR_EACH_BB (bb)
    {
      gimple last;

      /* If the generic jump threading code does not find this block
	 interesting, then there is nothing to do.  */
      if (! potentially_threadable_block (bb))
	continue;

      /* We only care about blocks ending in a COND_EXPR.  While there
	 may be some value in handling SWITCH_EXPR here, I doubt it's
	 terribly important.  */
      last = gsi_stmt (gsi_last_bb (bb));

      /* We're basically looking for a switch or any kind of conditional with
	 integral or pointer type arguments.  Note the type of the second
	 argument will be the same as the first argument, so no need to
	 check it explicitly.  */
      if (gimple_code (last) == GIMPLE_SWITCH
	  || (gimple_code (last) == GIMPLE_COND
	      && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
	      && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
		  || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
	      && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
		  || is_gimple_min_invariant (gimple_cond_rhs (last)))))
	{
	  edge_iterator ei;

	  /* We've got a block with multiple predecessors and multiple
	     successors which also ends in a suitable conditional or
	     switch statement.  For each predecessor, see if we can thread
	     it to a specific successor.  */
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      /* Do not thread across back edges or abnormal edges
		 in the CFG.  */
	      if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
		continue;

	      thread_across_edge (dummy, e, true, &equiv_stack,
				  simplify_stmt_for_jump_threading);
	    }
	}
    }

  /* We do not actually update the CFG or SSA graphs at this point as
     ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
     handle ASSERT_EXPRs gracefully.  */
}
8949 | ||
/* We identified all the jump threading opportunities earlier, but could
   not transform the CFG at that time.  This routine transforms the
   CFG and arranges for the dominator tree to be rebuilt if necessary.

   Note the SSA graph update will occur during the normal TODO
   processing by the pass manager.  */

static void
finalize_jump_threads (void)
{
  /* NOTE(review): the boolean argument's meaning is defined by
     thread_through_all_blocks; presumably FALSE disables loop-header
     peeling here -- confirm against its definition.  */
  thread_through_all_blocks (false);
  /* Release the temporary-equivalence unwinder stack that
     identify_jump_threads allocated.  */
  VEC_free (tree, heap, equiv_stack);
}
96c8d283 | 8962 | |
88dbf20f | 8963 | |
/* Traverse all the blocks folding conditionals with known ranges.
   This is the teardown half of the VRP propagation engine: it folds
   statements using the computed ranges, performs range-based
   diagnostics and jump-threading discovery, and then releases all
   per-SSA-name range data.  */

static void
vrp_finalize (void)
{
  size_t i;

  /* Mark propagation as complete so later queries know the recorded
     ranges are final.  */
  values_propagated = true;

  if (dump_file)
    {
      fprintf (dump_file, "\nValue ranges after VRP:\n\n");
      dump_all_value_ranges (dump_file);
      fprintf (dump_file, "\n");
    }

  /* Replace SSA names that have a singleton range with their constant
     value and fold the affected statements.  */
  substitute_and_fold (op_with_constant_singleton_value_range,
		       vrp_fold_stmt, false);

  /* -Warray-bounds diagnostics rely on the ranges computed above, so
     they must run before the range data is freed below.  */
  if (warn_array_bounds)
    check_all_array_refs ();

  /* We must identify jump threading opportunities before we release
     the datastructures built by VRP.  */
  identify_jump_threads ();

  /* Free allocated memory.  */
  for (i = 0; i < num_vr_values; i++)
    if (vr_value[i])
      {
	BITMAP_FREE (vr_value[i]->equiv);
	free (vr_value[i]);
      }

  free (vr_value);
  free (vr_phi_edge_counts);

  /* So that we can distinguish between VRP data being available
     and not available.  */
  vr_value = NULL;
  vr_phi_edge_counts = NULL;
}
9006 | ||
9007 | ||
9008 | /* Main entry point to VRP (Value Range Propagation). This pass is | |
9009 | loosely based on J. R. C. Patterson, ``Accurate Static Branch | |
9010 | Prediction by Value Range Propagation,'' in SIGPLAN Conference on | |
9011 | Programming Language Design and Implementation, pp. 67-78, 1995. | |
9012 | Also available at http://citeseer.ist.psu.edu/patterson95accurate.html | |
9013 | ||
9014 | This is essentially an SSA-CCP pass modified to deal with ranges | |
9015 | instead of constants. | |
9016 | ||
   While propagating ranges, we may find that two or more SSA names
   have equivalent, though distinct, ranges.  For instance,
9019 | ||
9020 | 1 x_9 = p_3->a; | |
9021 | 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0> | |
9022 | 3 if (p_4 == q_2) | |
9023 | 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>; | |
9024 | 5 endif | |
9025 | 6 if (q_2) | |
48e1416a | 9026 | |
eea12c72 | 9027 | In the code above, pointer p_5 has range [q_2, q_2], but from the |
9028 | code we can also determine that p_5 cannot be NULL and, if q_2 had | |
9029 | a non-varying range, p_5's range should also be compatible with it. | |
9030 | ||
9aff9709 | 9031 | These equivalences are created by two expressions: ASSERT_EXPR and |
eea12c72 | 9032 | copy operations. Since p_5 is an assertion on p_4, and p_4 was the |
9033 | result of another assertion, then we can use the fact that p_5 and | |
9034 | p_4 are equivalent when evaluating p_5's range. | |
9035 | ||
9aff9709 | 9036 | Together with value ranges, we also propagate these equivalences |
eea12c72 | 9037 | between names so that we can take advantage of information from |
9038 | multiple ranges when doing final replacement. Note that this | |
9039 | equivalency relation is transitive but not symmetric. | |
48e1416a | 9040 | |
eea12c72 | 9041 | In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we |
9042 | cannot assert that q_2 is equivalent to p_5 because q_2 may be used | |
9043 | in contexts where that assertion does not hold (e.g., in line 6). | |
9044 | ||
88dbf20f | 9045 | TODO, the main difference between this pass and Patterson's is that |
9046 | we do not propagate edge probabilities. We only compute whether | |
9047 | edges can be taken or not. That is, instead of having a spectrum | |
9048 | of jump probabilities between 0 and 1, we only deal with 0, 1 and | |
9049 | DON'T KNOW. In the future, it may be worthwhile to propagate | |
9050 | probabilities to aid branch prediction. */ | |
9051 | ||
/* Driver for the VRP pass.  Sets up loop and SCEV infrastructure,
   inserts ASSERT_EXPRs, runs the SSA propagation engine with the VRP
   callbacks, then carefully unwinds everything in an order that keeps
   the CFG/SSA form consistent (see the inline comments below -- the
   ordering constraints are load-bearing).  Always returns 0 (no extra
   TODO flags beyond those in the pass descriptor).  */

static unsigned int
execute_vrp (void)
{
  int i;
  edge e;
  switch_update *su;

  /* Loop and scalar-evolution data are used to derive ranges for
     induction variables.  */
  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();

  insert_range_assertions ();

  /* Deferred-work vectors: edges proven dead and switch statements
     whose label vectors shrink are recorded here and applied after
     propagation, because the CFG cannot be mutated mid-propagation.  */
  to_remove_edges = VEC_alloc (edge, heap, 10);
  to_update_switch_stmts = VEC_alloc (switch_update, heap, 5);
  threadedge_initialize_values ();

  vrp_initialize ();
  ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
  vrp_finalize ();

  /* Folding may have changed loop bodies; discard stale iteration
     estimates.  */
  free_numbers_of_iterations_estimates ();

  /* ASSERT_EXPRs must be removed before finalizing jump threads
     as finalizing jump threads calls the CFG cleanup code which
     does not properly handle ASSERT_EXPRs.  */
  remove_range_assertions ();

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  finalize_jump_threads ();

  /* Remove dead edges from SWITCH_EXPR optimization.  This leaves the
     CFG in a broken state and requires a cfg_cleanup run.  */
  FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
    remove_edge (e);
  /* Update SWITCH_EXPR case label vector.  */
  FOR_EACH_VEC_ELT (switch_update, to_update_switch_stmts, i, su)
    {
      size_t j;
      size_t n = TREE_VEC_LENGTH (su->vec);
      tree label;
      gimple_switch_set_num_labels (su->stmt, n);
      for (j = 0; j < n; j++)
	gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
      /* As we may have replaced the default label with a regular one
	 make sure to make it a real default label again.  This ensures
	 optimal expansion.  */
      label = gimple_switch_label (su->stmt, 0);
      CASE_LOW (label) = NULL_TREE;
      CASE_HIGH (label) = NULL_TREE;
    }

  /* Removing edges invalidates the dominator tree, so drop it and let
     it be recomputed on demand.  */
  if (VEC_length (edge, to_remove_edges) > 0)
    free_dominance_info (CDI_DOMINATORS);

  VEC_free (edge, heap, to_remove_edges);
  VEC_free (switch_update, heap, to_update_switch_stmts);
  threadedge_finalize_values ();

  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}
9121 | ||
9122 | static bool | |
9123 | gate_vrp (void) | |
9124 | { | |
9125 | return flag_tree_vrp != 0; | |
9126 | } | |
9127 | ||
/* Pass descriptor registering VRP with the GIMPLE pass manager.
   Requires SSA form; after the pass finishes, the pass manager cleans
   up the CFG (SWITCH_EXPR optimization leaves it broken -- see
   execute_vrp), updates and verifies SSA, verifies the flow graph and
   collects garbage.  */

struct gimple_opt_pass pass_vrp =
{
 {
  GIMPLE_PASS,
  "vrp",				/* name */
  gate_vrp,				/* gate */
  execute_vrp,				/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TREE_VRP,				/* tv_id */
  PROP_ssa,				/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_cleanup_cfg
    | TODO_update_ssa
    | TODO_verify_ssa
    | TODO_verify_flow
    | TODO_ggc_collect			/* todo_flags_finish */
 }
};