]>
Commit | Line | Data |
---|---|---|
0bca51f0 | 1 | /* Support routines for Value Range Propagation (VRP). |
66647d44 | 2 | Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc. |
0bca51f0 DN |
3 | Contributed by Diego Novillo <dnovillo@redhat.com>. |
4 | ||
5 | This file is part of GCC. | |
6 | ||
7 | GCC is free software; you can redistribute it and/or modify | |
8 | it under the terms of the GNU General Public License as published by | |
9dcd6f09 | 9 | the Free Software Foundation; either version 3, or (at your option) |
0bca51f0 DN |
10 | any later version. |
11 | ||
12 | GCC is distributed in the hope that it will be useful, | |
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | GNU General Public License for more details. | |
16 | ||
17 | You should have received a copy of the GNU General Public License | |
9dcd6f09 NC |
18 | along with GCC; see the file COPYING3. If not see |
19 | <http://www.gnu.org/licenses/>. */ | |
0bca51f0 DN |
20 | |
21 | #include "config.h" | |
22 | #include "system.h" | |
23 | #include "coretypes.h" | |
24 | #include "tm.h" | |
25 | #include "ggc.h" | |
26 | #include "flags.h" | |
27 | #include "tree.h" | |
28 | #include "basic-block.h" | |
29 | #include "tree-flow.h" | |
30 | #include "tree-pass.h" | |
31 | #include "tree-dump.h" | |
32 | #include "timevar.h" | |
33 | #include "diagnostic.h" | |
590b1f2d | 34 | #include "toplev.h" |
0c948c27 | 35 | #include "intl.h" |
0bca51f0 DN |
36 | #include "cfgloop.h" |
37 | #include "tree-scalar-evolution.h" | |
38 | #include "tree-ssa-propagate.h" | |
39 | #include "tree-chrec.h" | |
40 | ||
726a989a | 41 | |
c4ab2baa RG |
42 | /* Set of SSA names found live during the RPO traversal of the function |
43 | for still active basic-blocks. */ | |
44 | static sbitmap *live; | |
45 | ||
46 | /* Return true if the SSA name NAME is live on the edge E. */ | |
47 | ||
48 | static bool | |
49 | live_on_edge (edge e, tree name) | |
50 | { | |
51 | return (live[e->dest->index] | |
52 | && TEST_BIT (live[e->dest->index], SSA_NAME_VERSION (name))); | |
53 | } | |
0bca51f0 | 54 | |
0bca51f0 DN |
55 | /* Local functions. */ |
56 | static int compare_values (tree val1, tree val2); | |
12df8a7e | 57 | static int compare_values_warnv (tree val1, tree val2, bool *); |
f255541f | 58 | static void vrp_meet (value_range_t *, value_range_t *); |
2d3cd5d5 | 59 | static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code, |
6b99f156 JH |
60 | tree, tree, bool, bool *, |
61 | bool *); | |
0bca51f0 | 62 | |
227858d1 DN |
63 | /* Location information for ASSERT_EXPRs. Each instance of this |
64 | structure describes an ASSERT_EXPR for an SSA name. Since a single | |
65 | SSA name may have more than one assertion associated with it, these | |
66 | locations are kept in a linked list attached to the corresponding | |
67 | SSA name. */ | |
68 | struct assert_locus_d | |
0bca51f0 | 69 | { |
227858d1 DN |
70 | /* Basic block where the assertion would be inserted. */ |
71 | basic_block bb; | |
72 | ||
73 | /* Some assertions need to be inserted on an edge (e.g., assertions | |
74 | generated by COND_EXPRs). In those cases, BB will be NULL. */ | |
75 | edge e; | |
76 | ||
77 | /* Pointer to the statement that generated this assertion. */ | |
726a989a | 78 | gimple_stmt_iterator si; |
227858d1 DN |
79 | |
80 | /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */ | |
81 | enum tree_code comp_code; | |
82 | ||
83 | /* Value being compared against. */ | |
84 | tree val; | |
85 | ||
2ab8dbf4 RG |
86 | /* Expression to compare. */ |
87 | tree expr; | |
88 | ||
227858d1 DN |
89 | /* Next node in the linked list. */ |
90 | struct assert_locus_d *next; | |
91 | }; | |
92 | ||
93 | typedef struct assert_locus_d *assert_locus_t; | |
94 | ||
95 | /* If bit I is present, it means that SSA name N_i has a list of | |
96 | assertions that should be inserted in the IL. */ | |
97 | static bitmap need_assert_for; | |
98 | ||
99 | /* Array of locations lists where to insert assertions. ASSERTS_FOR[I] | |
100 | holds a list of ASSERT_LOCUS_T nodes that describe where | |
101 | ASSERT_EXPRs for SSA name N_I should be inserted. */ | |
102 | static assert_locus_t *asserts_for; | |
103 | ||
227858d1 DN |
104 | /* Value range array. After propagation, VR_VALUE[I] holds the range |
105 | of values that SSA name N_I may take. */ | |
106 | static value_range_t **vr_value; | |
0bca51f0 | 107 | |
fc6827fe ILT |
108 | /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the |
109 | number of executable edges we saw the last time we visited the | |
110 | node. */ | |
111 | static int *vr_phi_edge_counts; | |
112 | ||
b7814a18 | 113 | typedef struct { |
726a989a | 114 | gimple stmt; |
b7814a18 RG |
115 | tree vec; |
116 | } switch_update; | |
117 | ||
118 | static VEC (edge, heap) *to_remove_edges; | |
119 | DEF_VEC_O(switch_update); | |
120 | DEF_VEC_ALLOC_O(switch_update, heap); | |
121 | static VEC (switch_update, heap) *to_update_switch_stmts; | |
122 | ||
0bca51f0 | 123 | |
84fb43a1 | 124 | /* Return the maximum value for TYPE. */ |
70b7b037 RG |
125 | |
126 | static inline tree | |
127 | vrp_val_max (const_tree type) | |
128 | { | |
129 | if (!INTEGRAL_TYPE_P (type)) | |
130 | return NULL_TREE; | |
131 | ||
70b7b037 RG |
132 | return TYPE_MAX_VALUE (type); |
133 | } | |
134 | ||
84fb43a1 | 135 | /* Return the minimum value for TYPE. */ |
70b7b037 RG |
136 | |
137 | static inline tree | |
138 | vrp_val_min (const_tree type) | |
139 | { | |
140 | if (!INTEGRAL_TYPE_P (type)) | |
141 | return NULL_TREE; | |
142 | ||
70b7b037 RG |
143 | return TYPE_MIN_VALUE (type); |
144 | } | |
145 | ||
146 | /* Return whether VAL is equal to the maximum value of its type. This | |
147 | will be true for a positive overflow infinity. We can't do a | |
148 | simple equality comparison with TYPE_MAX_VALUE because C typedefs | |
149 | and Ada subtypes can produce types whose TYPE_MAX_VALUE is not == | |
150 | to the integer constant with the same value in the type. */ | |
151 | ||
152 | static inline bool | |
153 | vrp_val_is_max (const_tree val) | |
154 | { | |
155 | tree type_max = vrp_val_max (TREE_TYPE (val)); | |
156 | return (val == type_max | |
157 | || (type_max != NULL_TREE | |
158 | && operand_equal_p (val, type_max, 0))); | |
159 | } | |
160 | ||
161 | /* Return whether VAL is equal to the minimum value of its type. This | |
162 | will be true for a negative overflow infinity. */ | |
163 | ||
164 | static inline bool | |
165 | vrp_val_is_min (const_tree val) | |
166 | { | |
167 | tree type_min = vrp_val_min (TREE_TYPE (val)); | |
168 | return (val == type_min | |
169 | || (type_min != NULL_TREE | |
170 | && operand_equal_p (val, type_min, 0))); | |
171 | } | |
172 | ||
173 | ||
12df8a7e ILT |
174 | /* Return whether TYPE should use an overflow infinity distinct from |
175 | TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to | |
176 | represent a signed overflow during VRP computations. An infinity | |
177 | is distinct from a half-range, which will go from some number to | |
178 | TYPE_{MIN,MAX}_VALUE. */ | |
179 | ||
180 | static inline bool | |
58f9752a | 181 | needs_overflow_infinity (const_tree type) |
12df8a7e | 182 | { |
84fb43a1 | 183 | return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type); |
12df8a7e ILT |
184 | } |
185 | ||
186 | /* Return whether TYPE can support our overflow infinity | |
187 | representation: we use the TREE_OVERFLOW flag, which only exists | |
188 | for constants. If TYPE doesn't support this, we don't optimize | |
189 | cases which would require signed overflow--we drop them to | |
190 | VARYING. */ | |
191 | ||
192 | static inline bool | |
58f9752a | 193 | supports_overflow_infinity (const_tree type) |
12df8a7e | 194 | { |
70b7b037 | 195 | tree min = vrp_val_min (type), max = vrp_val_max (type); |
12df8a7e ILT |
196 | #ifdef ENABLE_CHECKING |
197 | gcc_assert (needs_overflow_infinity (type)); | |
198 | #endif | |
70b7b037 RG |
199 | return (min != NULL_TREE |
200 | && CONSTANT_CLASS_P (min) | |
201 | && max != NULL_TREE | |
202 | && CONSTANT_CLASS_P (max)); | |
12df8a7e ILT |
203 | } |
204 | ||
205 | /* VAL is the maximum or minimum value of a type. Return a | |
206 | corresponding overflow infinity. */ | |
207 | ||
208 | static inline tree | |
209 | make_overflow_infinity (tree val) | |
210 | { | |
211 | #ifdef ENABLE_CHECKING | |
212 | gcc_assert (val != NULL_TREE && CONSTANT_CLASS_P (val)); | |
213 | #endif | |
214 | val = copy_node (val); | |
215 | TREE_OVERFLOW (val) = 1; | |
216 | return val; | |
217 | } | |
218 | ||
219 | /* Return a negative overflow infinity for TYPE. */ | |
220 | ||
221 | static inline tree | |
222 | negative_overflow_infinity (tree type) | |
223 | { | |
224 | #ifdef ENABLE_CHECKING | |
225 | gcc_assert (supports_overflow_infinity (type)); | |
226 | #endif | |
70b7b037 | 227 | return make_overflow_infinity (vrp_val_min (type)); |
12df8a7e ILT |
228 | } |
229 | ||
230 | /* Return a positive overflow infinity for TYPE. */ | |
231 | ||
232 | static inline tree | |
233 | positive_overflow_infinity (tree type) | |
234 | { | |
235 | #ifdef ENABLE_CHECKING | |
236 | gcc_assert (supports_overflow_infinity (type)); | |
237 | #endif | |
70b7b037 | 238 | return make_overflow_infinity (vrp_val_max (type)); |
12df8a7e ILT |
239 | } |
240 | ||
241 | /* Return whether VAL is a negative overflow infinity. */ | |
242 | ||
243 | static inline bool | |
58f9752a | 244 | is_negative_overflow_infinity (const_tree val) |
12df8a7e ILT |
245 | { |
246 | return (needs_overflow_infinity (TREE_TYPE (val)) | |
247 | && CONSTANT_CLASS_P (val) | |
248 | && TREE_OVERFLOW (val) | |
70b7b037 | 249 | && vrp_val_is_min (val)); |
12df8a7e ILT |
250 | } |
251 | ||
252 | /* Return whether VAL is a positive overflow infinity. */ | |
253 | ||
254 | static inline bool | |
58f9752a | 255 | is_positive_overflow_infinity (const_tree val) |
12df8a7e ILT |
256 | { |
257 | return (needs_overflow_infinity (TREE_TYPE (val)) | |
258 | && CONSTANT_CLASS_P (val) | |
259 | && TREE_OVERFLOW (val) | |
70b7b037 | 260 | && vrp_val_is_max (val)); |
12df8a7e ILT |
261 | } |
262 | ||
263 | /* Return whether VAL is a positive or negative overflow infinity. */ | |
264 | ||
265 | static inline bool | |
58f9752a | 266 | is_overflow_infinity (const_tree val) |
12df8a7e ILT |
267 | { |
268 | return (needs_overflow_infinity (TREE_TYPE (val)) | |
269 | && CONSTANT_CLASS_P (val) | |
270 | && TREE_OVERFLOW (val) | |
70b7b037 | 271 | && (vrp_val_is_min (val) || vrp_val_is_max (val))); |
12df8a7e ILT |
272 | } |
273 | ||
726a989a RB |
274 | /* Return whether STMT has a constant rhs that is_overflow_infinity. */ |
275 | ||
276 | static inline bool | |
277 | stmt_overflow_infinity (gimple stmt) | |
278 | { | |
279 | if (is_gimple_assign (stmt) | |
280 | && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) == | |
281 | GIMPLE_SINGLE_RHS) | |
282 | return is_overflow_infinity (gimple_assign_rhs1 (stmt)); | |
283 | return false; | |
284 | } | |
285 | ||
b80cca7b ILT |
286 | /* If VAL is now an overflow infinity, return VAL. Otherwise, return |
287 | the same value with TREE_OVERFLOW clear. This can be used to avoid | |
288 | confusing a regular value with an overflow value. */ | |
289 | ||
290 | static inline tree | |
291 | avoid_overflow_infinity (tree val) | |
292 | { | |
293 | if (!is_overflow_infinity (val)) | |
294 | return val; | |
295 | ||
70b7b037 RG |
296 | if (vrp_val_is_max (val)) |
297 | return vrp_val_max (TREE_TYPE (val)); | |
b80cca7b ILT |
298 | else |
299 | { | |
300 | #ifdef ENABLE_CHECKING | |
70b7b037 | 301 | gcc_assert (vrp_val_is_min (val)); |
b80cca7b | 302 | #endif |
70b7b037 | 303 | return vrp_val_min (TREE_TYPE (val)); |
b80cca7b ILT |
304 | } |
305 | } | |
306 | ||
12df8a7e | 307 | |
462508dd DN |
308 | /* Return true if ARG is marked with the nonnull attribute in the |
309 | current function signature. */ | |
310 | ||
311 | static bool | |
58f9752a | 312 | nonnull_arg_p (const_tree arg) |
462508dd DN |
313 | { |
314 | tree t, attrs, fntype; | |
315 | unsigned HOST_WIDE_INT arg_num; | |
316 | ||
317 | gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg))); | |
318 | ||
7954dc21 AP |
319 | /* The static chain decl is always non null. */ |
320 | if (arg == cfun->static_chain_decl) | |
321 | return true; | |
322 | ||
462508dd DN |
323 | fntype = TREE_TYPE (current_function_decl); |
324 | attrs = lookup_attribute ("nonnull", TYPE_ATTRIBUTES (fntype)); | |
325 | ||
326 | /* If "nonnull" wasn't specified, we know nothing about the argument. */ | |
327 | if (attrs == NULL_TREE) | |
328 | return false; | |
329 | ||
330 | /* If "nonnull" applies to all the arguments, then ARG is non-null. */ | |
331 | if (TREE_VALUE (attrs) == NULL_TREE) | |
332 | return true; | |
333 | ||
334 | /* Get the position number for ARG in the function signature. */ | |
335 | for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl); | |
336 | t; | |
337 | t = TREE_CHAIN (t), arg_num++) | |
338 | { | |
339 | if (t == arg) | |
340 | break; | |
341 | } | |
342 | ||
343 | gcc_assert (t == arg); | |
344 | ||
345 | /* Now see if ARG_NUM is mentioned in the nonnull list. */ | |
346 | for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t)) | |
347 | { | |
348 | if (compare_tree_int (TREE_VALUE (t), arg_num) == 0) | |
349 | return true; | |
350 | } | |
351 | ||
352 | return false; | |
353 | } | |
354 | ||
355 | ||
2ab8dbf4 RG |
356 | /* Set value range VR to VR_VARYING. */ |
357 | ||
358 | static inline void | |
359 | set_value_range_to_varying (value_range_t *vr) | |
360 | { | |
361 | vr->type = VR_VARYING; | |
362 | vr->min = vr->max = NULL_TREE; | |
363 | if (vr->equiv) | |
364 | bitmap_clear (vr->equiv); | |
365 | } | |
366 | ||
367 | ||
227858d1 DN |
368 | /* Set value range VR to {T, MIN, MAX, EQUIV}. */ |
369 | ||
370 | static void | |
371 | set_value_range (value_range_t *vr, enum value_range_type t, tree min, | |
372 | tree max, bitmap equiv) | |
0bca51f0 DN |
373 | { |
374 | #if defined ENABLE_CHECKING | |
227858d1 | 375 | /* Check the validity of the range. */ |
0bca51f0 DN |
376 | if (t == VR_RANGE || t == VR_ANTI_RANGE) |
377 | { | |
378 | int cmp; | |
379 | ||
380 | gcc_assert (min && max); | |
381 | ||
382 | if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE) | |
e1f28918 | 383 | gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max)); |
0bca51f0 DN |
384 | |
385 | cmp = compare_values (min, max); | |
386 | gcc_assert (cmp == 0 || cmp == -1 || cmp == -2); | |
8cf781f0 ILT |
387 | |
388 | if (needs_overflow_infinity (TREE_TYPE (min))) | |
389 | gcc_assert (!is_overflow_infinity (min) | |
390 | || !is_overflow_infinity (max)); | |
0bca51f0 | 391 | } |
0bca51f0 | 392 | |
227858d1 DN |
393 | if (t == VR_UNDEFINED || t == VR_VARYING) |
394 | gcc_assert (min == NULL_TREE && max == NULL_TREE); | |
395 | ||
396 | if (t == VR_UNDEFINED || t == VR_VARYING) | |
397 | gcc_assert (equiv == NULL || bitmap_empty_p (equiv)); | |
398 | #endif | |
0bca51f0 DN |
399 | |
400 | vr->type = t; | |
401 | vr->min = min; | |
402 | vr->max = max; | |
227858d1 DN |
403 | |
404 | /* Since updating the equivalence set involves deep copying the | |
405 | bitmaps, only do it if absolutely necessary. */ | |
f5052e29 RG |
406 | if (vr->equiv == NULL |
407 | && equiv != NULL) | |
227858d1 DN |
408 | vr->equiv = BITMAP_ALLOC (NULL); |
409 | ||
410 | if (equiv != vr->equiv) | |
411 | { | |
412 | if (equiv && !bitmap_empty_p (equiv)) | |
413 | bitmap_copy (vr->equiv, equiv); | |
414 | else | |
415 | bitmap_clear (vr->equiv); | |
416 | } | |
0bca51f0 DN |
417 | } |
418 | ||
419 | ||
2ab8dbf4 RG |
420 | /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}. |
421 | This means adjusting T, MIN and MAX representing the case of a | |
422 | wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX] | |
423 | as anti-rage ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges. | |
424 | In corner cases where MAX+1 or MIN-1 wraps this will fall back | |
425 | to varying. | |
426 | This routine exists to ease canonicalization in the case where we | |
427 | extract ranges from var + CST op limit. */ | |
0bca51f0 | 428 | |
2ab8dbf4 RG |
429 | static void |
430 | set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t, | |
431 | tree min, tree max, bitmap equiv) | |
0bca51f0 | 432 | { |
70b7b037 | 433 | /* Nothing to canonicalize for symbolic or unknown or varying ranges. */ |
2ab8dbf4 RG |
434 | if ((t != VR_RANGE |
435 | && t != VR_ANTI_RANGE) | |
436 | || TREE_CODE (min) != INTEGER_CST | |
70b7b037 | 437 | || TREE_CODE (max) != INTEGER_CST) |
2ab8dbf4 RG |
438 | { |
439 | set_value_range (vr, t, min, max, equiv); | |
440 | return; | |
441 | } | |
12df8a7e | 442 | |
2ab8dbf4 RG |
443 | /* Wrong order for min and max, to swap them and the VR type we need |
444 | to adjust them. */ | |
2ab8dbf4 RG |
445 | if (tree_int_cst_lt (max, min)) |
446 | { | |
70b7b037 RG |
447 | tree one = build_int_cst (TREE_TYPE (min), 1); |
448 | tree tmp = int_const_binop (PLUS_EXPR, max, one, 0); | |
449 | max = int_const_binop (MINUS_EXPR, min, one, 0); | |
450 | min = tmp; | |
451 | ||
452 | /* There's one corner case, if we had [C+1, C] before we now have | |
453 | that again. But this represents an empty value range, so drop | |
454 | to varying in this case. */ | |
455 | if (tree_int_cst_lt (max, min)) | |
456 | { | |
457 | set_value_range_to_varying (vr); | |
458 | return; | |
459 | } | |
460 | ||
461 | t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE; | |
462 | } | |
463 | ||
464 | /* Anti-ranges that can be represented as ranges should be so. */ | |
465 | if (t == VR_ANTI_RANGE) | |
466 | { | |
467 | bool is_min = vrp_val_is_min (min); | |
468 | bool is_max = vrp_val_is_max (max); | |
469 | ||
470 | if (is_min && is_max) | |
471 | { | |
472 | /* We cannot deal with empty ranges, drop to varying. */ | |
473 | set_value_range_to_varying (vr); | |
474 | return; | |
475 | } | |
476 | else if (is_min | |
477 | /* As a special exception preserve non-null ranges. */ | |
478 | && !(TYPE_UNSIGNED (TREE_TYPE (min)) | |
479 | && integer_zerop (max))) | |
480 | { | |
481 | tree one = build_int_cst (TREE_TYPE (max), 1); | |
482 | min = int_const_binop (PLUS_EXPR, max, one, 0); | |
483 | max = vrp_val_max (TREE_TYPE (max)); | |
484 | t = VR_RANGE; | |
485 | } | |
486 | else if (is_max) | |
487 | { | |
488 | tree one = build_int_cst (TREE_TYPE (min), 1); | |
489 | max = int_const_binop (MINUS_EXPR, min, one, 0); | |
490 | min = vrp_val_min (TREE_TYPE (min)); | |
491 | t = VR_RANGE; | |
492 | } | |
2ab8dbf4 RG |
493 | } |
494 | ||
2ab8dbf4 RG |
495 | set_value_range (vr, t, min, max, equiv); |
496 | } | |
497 | ||
498 | /* Copy value range FROM into value range TO. */ | |
b16caf72 JL |
499 | |
500 | static inline void | |
2ab8dbf4 | 501 | copy_value_range (value_range_t *to, value_range_t *from) |
b16caf72 | 502 | { |
2ab8dbf4 | 503 | set_value_range (to, from->type, from->min, from->max, from->equiv); |
12df8a7e ILT |
504 | } |
505 | ||
8cf781f0 ILT |
506 | /* Set value range VR to a single value. This function is only called |
507 | with values we get from statements, and exists to clear the | |
508 | TREE_OVERFLOW flag so that we don't think we have an overflow | |
509 | infinity when we shouldn't. */ | |
510 | ||
511 | static inline void | |
b60b4711 | 512 | set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv) |
8cf781f0 ILT |
513 | { |
514 | gcc_assert (is_gimple_min_invariant (val)); | |
b80cca7b | 515 | val = avoid_overflow_infinity (val); |
b60b4711 | 516 | set_value_range (vr, VR_RANGE, val, val, equiv); |
8cf781f0 ILT |
517 | } |
518 | ||
12df8a7e | 519 | /* Set value range VR to a non-negative range of type TYPE. |
110abdbc | 520 | OVERFLOW_INFINITY indicates whether to use an overflow infinity |
12df8a7e ILT |
521 | rather than TYPE_MAX_VALUE; this should be true if we determine |
522 | that the range is nonnegative based on the assumption that signed | |
523 | overflow does not occur. */ | |
524 | ||
525 | static inline void | |
526 | set_value_range_to_nonnegative (value_range_t *vr, tree type, | |
527 | bool overflow_infinity) | |
528 | { | |
529 | tree zero; | |
530 | ||
531 | if (overflow_infinity && !supports_overflow_infinity (type)) | |
532 | { | |
533 | set_value_range_to_varying (vr); | |
534 | return; | |
535 | } | |
536 | ||
537 | zero = build_int_cst (type, 0); | |
538 | set_value_range (vr, VR_RANGE, zero, | |
539 | (overflow_infinity | |
540 | ? positive_overflow_infinity (type) | |
541 | : TYPE_MAX_VALUE (type)), | |
542 | vr->equiv); | |
b16caf72 | 543 | } |
227858d1 DN |
544 | |
545 | /* Set value range VR to a non-NULL range of type TYPE. */ | |
546 | ||
547 | static inline void | |
548 | set_value_range_to_nonnull (value_range_t *vr, tree type) | |
549 | { | |
550 | tree zero = build_int_cst (type, 0); | |
551 | set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv); | |
552 | } | |
553 | ||
554 | ||
555 | /* Set value range VR to a NULL range of type TYPE. */ | |
556 | ||
557 | static inline void | |
558 | set_value_range_to_null (value_range_t *vr, tree type) | |
559 | { | |
b60b4711 | 560 | set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv); |
227858d1 DN |
561 | } |
562 | ||
563 | ||
31ab1cc9 RG |
564 | /* Set value range VR to a range of a truthvalue of type TYPE. */ |
565 | ||
566 | static inline void | |
567 | set_value_range_to_truthvalue (value_range_t *vr, tree type) | |
568 | { | |
569 | if (TYPE_PRECISION (type) == 1) | |
570 | set_value_range_to_varying (vr); | |
571 | else | |
572 | set_value_range (vr, VR_RANGE, | |
573 | build_int_cst (type, 0), build_int_cst (type, 1), | |
574 | vr->equiv); | |
575 | } | |
576 | ||
577 | ||
227858d1 DN |
578 | /* Set value range VR to VR_UNDEFINED. */ |
579 | ||
580 | static inline void | |
581 | set_value_range_to_undefined (value_range_t *vr) | |
582 | { | |
583 | vr->type = VR_UNDEFINED; | |
584 | vr->min = vr->max = NULL_TREE; | |
585 | if (vr->equiv) | |
586 | bitmap_clear (vr->equiv); | |
0bca51f0 DN |
587 | } |
588 | ||
589 | ||
193a3681 JJ |
590 | /* If abs (min) < abs (max), set VR to [-max, max], if |
591 | abs (min) >= abs (max), set VR to [-min, min]. */ | |
592 | ||
593 | static void | |
594 | abs_extent_range (value_range_t *vr, tree min, tree max) | |
595 | { | |
596 | int cmp; | |
597 | ||
598 | gcc_assert (TREE_CODE (min) == INTEGER_CST); | |
599 | gcc_assert (TREE_CODE (max) == INTEGER_CST); | |
600 | gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min))); | |
601 | gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min))); | |
602 | min = fold_unary (ABS_EXPR, TREE_TYPE (min), min); | |
603 | max = fold_unary (ABS_EXPR, TREE_TYPE (max), max); | |
604 | if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max)) | |
605 | { | |
606 | set_value_range_to_varying (vr); | |
607 | return; | |
608 | } | |
609 | cmp = compare_values (min, max); | |
610 | if (cmp == -1) | |
611 | min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max); | |
612 | else if (cmp == 0 || cmp == 1) | |
613 | { | |
614 | max = min; | |
615 | min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min); | |
616 | } | |
617 | else | |
618 | { | |
619 | set_value_range_to_varying (vr); | |
620 | return; | |
621 | } | |
622 | set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL); | |
623 | } | |
624 | ||
625 | ||
b16caf72 JL |
626 | /* Return value range information for VAR. |
627 | ||
628 | If we have no values ranges recorded (ie, VRP is not running), then | |
629 | return NULL. Otherwise create an empty range if none existed for VAR. */ | |
0bca51f0 | 630 | |
227858d1 | 631 | static value_range_t * |
58f9752a | 632 | get_value_range (const_tree var) |
0bca51f0 | 633 | { |
227858d1 | 634 | value_range_t *vr; |
0bca51f0 | 635 | tree sym; |
227858d1 | 636 | unsigned ver = SSA_NAME_VERSION (var); |
0bca51f0 | 637 | |
b16caf72 JL |
638 | /* If we have no recorded ranges, then return NULL. */ |
639 | if (! vr_value) | |
640 | return NULL; | |
641 | ||
227858d1 | 642 | vr = vr_value[ver]; |
0bca51f0 DN |
643 | if (vr) |
644 | return vr; | |
645 | ||
646 | /* Create a default value range. */ | |
b9eae1a9 | 647 | vr_value[ver] = vr = XCNEW (value_range_t); |
0bca51f0 | 648 | |
f5052e29 RG |
649 | /* Defer allocating the equivalence set. */ |
650 | vr->equiv = NULL; | |
227858d1 DN |
651 | |
652 | /* If VAR is a default definition, the variable can take any value | |
653 | in VAR's type. */ | |
0bca51f0 | 654 | sym = SSA_NAME_VAR (var); |
cfaab3a9 | 655 | if (SSA_NAME_IS_DEFAULT_DEF (var)) |
462508dd DN |
656 | { |
657 | /* Try to use the "nonnull" attribute to create ~[0, 0] | |
658 | anti-ranges for pointers. Note that this is only valid with | |
659 | default definitions of PARM_DECLs. */ | |
660 | if (TREE_CODE (sym) == PARM_DECL | |
661 | && POINTER_TYPE_P (TREE_TYPE (sym)) | |
662 | && nonnull_arg_p (sym)) | |
663 | set_value_range_to_nonnull (vr, TREE_TYPE (sym)); | |
664 | else | |
665 | set_value_range_to_varying (vr); | |
666 | } | |
0bca51f0 DN |
667 | |
668 | return vr; | |
669 | } | |
670 | ||
1ce35d26 RG |
671 | /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */ |
672 | ||
673 | static inline bool | |
58f9752a | 674 | vrp_operand_equal_p (const_tree val1, const_tree val2) |
1ce35d26 | 675 | { |
12df8a7e ILT |
676 | if (val1 == val2) |
677 | return true; | |
678 | if (!val1 || !val2 || !operand_equal_p (val1, val2, 0)) | |
679 | return false; | |
680 | if (is_overflow_infinity (val1)) | |
681 | return is_overflow_infinity (val2); | |
682 | return true; | |
1ce35d26 RG |
683 | } |
684 | ||
685 | /* Return true, if the bitmaps B1 and B2 are equal. */ | |
686 | ||
687 | static inline bool | |
22ea9ec0 | 688 | vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2) |
1ce35d26 RG |
689 | { |
690 | return (b1 == b2 | |
691 | || (b1 && b2 | |
692 | && bitmap_equal_p (b1, b2))); | |
693 | } | |
0bca51f0 | 694 | |
227858d1 DN |
695 | /* Update the value range and equivalence set for variable VAR to |
696 | NEW_VR. Return true if NEW_VR is different from VAR's previous | |
697 | value. | |
698 | ||
699 | NOTE: This function assumes that NEW_VR is a temporary value range | |
700 | object created for the sole purpose of updating VAR's range. The | |
701 | storage used by the equivalence set from NEW_VR will be freed by | |
702 | this function. Do not call update_value_range when NEW_VR | |
703 | is the range object associated with another SSA name. */ | |
0bca51f0 DN |
704 | |
705 | static inline bool | |
58f9752a | 706 | update_value_range (const_tree var, value_range_t *new_vr) |
0bca51f0 | 707 | { |
227858d1 DN |
708 | value_range_t *old_vr; |
709 | bool is_new; | |
710 | ||
711 | /* Update the value range, if necessary. */ | |
712 | old_vr = get_value_range (var); | |
713 | is_new = old_vr->type != new_vr->type | |
1ce35d26 RG |
714 | || !vrp_operand_equal_p (old_vr->min, new_vr->min) |
715 | || !vrp_operand_equal_p (old_vr->max, new_vr->max) | |
716 | || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv); | |
0bca51f0 | 717 | |
227858d1 DN |
718 | if (is_new) |
719 | set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max, | |
720 | new_vr->equiv); | |
0bca51f0 | 721 | |
227858d1 | 722 | BITMAP_FREE (new_vr->equiv); |
0bca51f0 | 723 | |
227858d1 DN |
724 | return is_new; |
725 | } | |
0bca51f0 | 726 | |
0bca51f0 | 727 | |
f5052e29 RG |
728 | /* Add VAR and VAR's equivalence set to EQUIV. This is the central |
729 | point where equivalence processing can be turned on/off. */ | |
0bca51f0 | 730 | |
227858d1 | 731 | static void |
58f9752a | 732 | add_equivalence (bitmap *equiv, const_tree var) |
227858d1 DN |
733 | { |
734 | unsigned ver = SSA_NAME_VERSION (var); | |
735 | value_range_t *vr = vr_value[ver]; | |
0bca51f0 | 736 | |
f5052e29 RG |
737 | if (*equiv == NULL) |
738 | *equiv = BITMAP_ALLOC (NULL); | |
739 | bitmap_set_bit (*equiv, ver); | |
227858d1 | 740 | if (vr && vr->equiv) |
f5052e29 | 741 | bitmap_ior_into (*equiv, vr->equiv); |
0bca51f0 DN |
742 | } |
743 | ||
744 | ||
745 | /* Return true if VR is ~[0, 0]. */ | |
746 | ||
747 | static inline bool | |
227858d1 | 748 | range_is_nonnull (value_range_t *vr) |
0bca51f0 DN |
749 | { |
750 | return vr->type == VR_ANTI_RANGE | |
751 | && integer_zerop (vr->min) | |
752 | && integer_zerop (vr->max); | |
753 | } | |
754 | ||
755 | ||
756 | /* Return true if VR is [0, 0]. */ | |
757 | ||
758 | static inline bool | |
227858d1 | 759 | range_is_null (value_range_t *vr) |
0bca51f0 DN |
760 | { |
761 | return vr->type == VR_RANGE | |
762 | && integer_zerop (vr->min) | |
763 | && integer_zerop (vr->max); | |
764 | } | |
765 | ||
766 | ||
227858d1 | 767 | /* Return true if value range VR involves at least one symbol. */ |
0bca51f0 | 768 | |
227858d1 DN |
769 | static inline bool |
770 | symbolic_range_p (value_range_t *vr) | |
0bca51f0 | 771 | { |
227858d1 DN |
772 | return (!is_gimple_min_invariant (vr->min) |
773 | || !is_gimple_min_invariant (vr->max)); | |
0bca51f0 DN |
774 | } |
775 | ||
110abdbc | 776 | /* Return true if value range VR uses an overflow infinity. */ |
b16caf72 | 777 | |
12df8a7e ILT |
778 | static inline bool |
779 | overflow_infinity_range_p (value_range_t *vr) | |
b16caf72 | 780 | { |
12df8a7e ILT |
781 | return (vr->type == VR_RANGE |
782 | && (is_overflow_infinity (vr->min) | |
783 | || is_overflow_infinity (vr->max))); | |
784 | } | |
6ac01510 | 785 | |
0c948c27 ILT |
786 | /* Return false if we can not make a valid comparison based on VR; |
787 | this will be the case if it uses an overflow infinity and overflow | |
788 | is not undefined (i.e., -fno-strict-overflow is in effect). | |
789 | Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR | |
790 | uses an overflow infinity. */ | |
791 | ||
792 | static bool | |
793 | usable_range_p (value_range_t *vr, bool *strict_overflow_p) | |
794 | { | |
795 | gcc_assert (vr->type == VR_RANGE); | |
796 | if (is_overflow_infinity (vr->min)) | |
797 | { | |
798 | *strict_overflow_p = true; | |
799 | if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min))) | |
800 | return false; | |
801 | } | |
802 | if (is_overflow_infinity (vr->max)) | |
803 | { | |
804 | *strict_overflow_p = true; | |
805 | if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max))) | |
806 | return false; | |
807 | } | |
808 | return true; | |
809 | } | |
810 | ||
811 | ||
12df8a7e ILT |
812 | /* Like tree_expr_nonnegative_warnv_p, but this function uses value |
813 | ranges obtained so far. */ | |
814 | ||
815 | static bool | |
816 | vrp_expr_computes_nonnegative (tree expr, bool *strict_overflow_p) | |
817 | { | |
39f8a3b0 RG |
818 | return (tree_expr_nonnegative_warnv_p (expr, strict_overflow_p) |
819 | || (TREE_CODE (expr) == SSA_NAME | |
820 | && ssa_name_nonnegative_p (expr))); | |
b16caf72 | 821 | } |
0bca51f0 | 822 | |
726a989a RB |
823 | /* Return true if the result of assignment STMT is know to be non-negative. |
824 | If the return value is based on the assumption that signed overflow is | |
825 | undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change | |
826 | *STRICT_OVERFLOW_P.*/ | |
827 | ||
828 | static bool | |
829 | gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p) | |
830 | { | |
831 | enum tree_code code = gimple_assign_rhs_code (stmt); | |
832 | switch (get_gimple_rhs_class (code)) | |
833 | { | |
834 | case GIMPLE_UNARY_RHS: | |
835 | return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt), | |
836 | gimple_expr_type (stmt), | |
837 | gimple_assign_rhs1 (stmt), | |
838 | strict_overflow_p); | |
839 | case GIMPLE_BINARY_RHS: | |
840 | return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt), | |
841 | gimple_expr_type (stmt), | |
842 | gimple_assign_rhs1 (stmt), | |
843 | gimple_assign_rhs2 (stmt), | |
844 | strict_overflow_p); | |
845 | case GIMPLE_SINGLE_RHS: | |
846 | return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt), | |
847 | strict_overflow_p); | |
848 | case GIMPLE_INVALID_RHS: | |
849 | gcc_unreachable (); | |
850 | default: | |
851 | gcc_unreachable (); | |
852 | } | |
853 | } | |
854 | ||
855 | /* Return true if return value of call STMT is know to be non-negative. | |
856 | If the return value is based on the assumption that signed overflow is | |
857 | undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change | |
858 | *STRICT_OVERFLOW_P.*/ | |
859 | ||
860 | static bool | |
861 | gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p) | |
862 | { | |
863 | tree arg0 = gimple_call_num_args (stmt) > 0 ? | |
864 | gimple_call_arg (stmt, 0) : NULL_TREE; | |
865 | tree arg1 = gimple_call_num_args (stmt) > 1 ? | |
866 | gimple_call_arg (stmt, 1) : NULL_TREE; | |
867 | ||
868 | return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt), | |
869 | gimple_call_fndecl (stmt), | |
870 | arg0, | |
871 | arg1, | |
872 | strict_overflow_p); | |
873 | } | |
874 | ||
/* Return true if STMT is known to compute a non-negative value.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
    case GIMPLE_CALL:
      return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
    default:
      /* Callers are expected to pass only assignments or calls.  */
      gcc_unreachable ();
    }
}
893 | ||
894 | /* Return true if the result of assignment STMT is know to be non-zero. | |
895 | If the return value is based on the assumption that signed overflow is | |
896 | undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change | |
897 | *STRICT_OVERFLOW_P.*/ | |
898 | ||
899 | static bool | |
900 | gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p) | |
901 | { | |
902 | enum tree_code code = gimple_assign_rhs_code (stmt); | |
903 | switch (get_gimple_rhs_class (code)) | |
904 | { | |
905 | case GIMPLE_UNARY_RHS: | |
906 | return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt), | |
907 | gimple_expr_type (stmt), | |
908 | gimple_assign_rhs1 (stmt), | |
909 | strict_overflow_p); | |
910 | case GIMPLE_BINARY_RHS: | |
911 | return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt), | |
912 | gimple_expr_type (stmt), | |
913 | gimple_assign_rhs1 (stmt), | |
914 | gimple_assign_rhs2 (stmt), | |
915 | strict_overflow_p); | |
916 | case GIMPLE_SINGLE_RHS: | |
917 | return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt), | |
918 | strict_overflow_p); | |
919 | case GIMPLE_INVALID_RHS: | |
920 | gcc_unreachable (); | |
921 | default: | |
922 | gcc_unreachable (); | |
923 | } | |
924 | } | |
925 | ||
/* Return true if STMT is known to compute a non-zero value.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
    case GIMPLE_CALL:
      /* The only calls treated as non-zero are calls to alloca, whose
	 result is taken to be a non-NULL pointer.  */
      return gimple_alloca_call_p (stmt);
    default:
      /* Callers are expected to pass only assignments or calls.  */
      gcc_unreachable ();
    }
}
944 | ||
/* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
   obtained so far.  */

static bool
vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
{
  /* First try the generic statement-level test.  */
  if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
    return true;

  /* If we have an expression of the form &X->a, then the expression
     is nonnull if X is nonnull.  */
  if (is_gimple_assign (stmt)
      && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
    {
      tree expr = gimple_assign_rhs1 (stmt);
      tree base = get_base_address (TREE_OPERAND (expr, 0));

      /* The address is nonnull when the base is a dereference of an
	 SSA name whose value range excludes NULL.  */
      if (base != NULL_TREE
	  && TREE_CODE (base) == INDIRECT_REF
	  && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
	{
	  value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
	  if (range_is_nonnull (vr))
	    return true;
	}
    }

  return false;
}
974 | ||
04dce5a4 ZD |
975 | /* Returns true if EXPR is a valid value (as expected by compare_values) -- |
976 | a gimple invariant, or SSA_NAME +- CST. */ | |
977 | ||
978 | static bool | |
979 | valid_value_p (tree expr) | |
980 | { | |
981 | if (TREE_CODE (expr) == SSA_NAME) | |
982 | return true; | |
983 | ||
984 | if (TREE_CODE (expr) == PLUS_EXPR | |
985 | || TREE_CODE (expr) == MINUS_EXPR) | |
986 | return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME | |
987 | && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST); | |
988 | ||
989 | return is_gimple_min_invariant (expr); | |
990 | } | |
b565d777 | 991 | |
6b3c76a9 JH |
/* Return
   1 if VAL < VAL2
   0 if !(VAL < VAL2)
   -2 if those are incomparable.  */
static inline int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    {
      if (TYPE_UNSIGNED (TREE_TYPE (val)))
	return INT_CST_LT_UNSIGNED (val, val2);
      else
	{
	  /* Signed constants fall through to the overflow-infinity
	     checks below when not strictly less.  */
	  if (INT_CST_LT (val, val2))
	    return 1;
	}
    }
  else
    {
      tree tcmp;

      /* This is only a query; suppress any overflow warnings folding
	 the comparison might want to emit.  */
      fold_defer_overflow_warnings ();

      tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

      fold_undefer_and_ignore_overflow_warnings ();

      /* If folding could not reduce the comparison to a constant, the
	 operands are incomparable.  */
      if (!tcmp
	  || TREE_CODE (tcmp) != INTEGER_CST)
	return -2;

      if (!integer_zerop (tcmp))
	return 1;
    }

  /* val >= val2, not considering overflow infinity.  */
  if (is_negative_overflow_infinity (val))
    return is_negative_overflow_infinity (val2) ? 0 : 1;
  else if (is_positive_overflow_infinity (val2))
    return is_positive_overflow_infinity (val) ? 0 : 1;

  return 0;
}
1036 | ||
0bca51f0 DN |
/* Compare two values VAL1 and VAL2.  Return

	-2 if VAL1 and VAL2 cannot be compared at compile-time,
	-1 if VAL1 < VAL2,
	 0 if VAL1 == VAL2,
	+1 if VAL1 > VAL2, and
	+2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

static int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
	      == POINTER_TYPE_P (TREE_TYPE (val2)));
  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  if ((TREE_CODE (val1) == SSA_NAME
       || TREE_CODE (val1) == PLUS_EXPR
       || TREE_CODE (val1) == MINUS_EXPR)
      && (TREE_CODE (val2) == SSA_NAME
	  || TREE_CODE (val2) == PLUS_EXPR
	  || TREE_CODE (val2) == MINUS_EXPR))
    {
      tree n1, c1, n2, c2;
      enum tree_code code1, code2;

      /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
	 return -1 or +1 accordingly.  If VAL1 and VAL2 don't use the
	 same name, return -2.  */
      if (TREE_CODE (val1) == SSA_NAME)
	{
	  code1 = SSA_NAME;
	  n1 = val1;
	  c1 = NULL_TREE;
	}
      else
	{
	  code1 = TREE_CODE (val1);
	  n1 = TREE_OPERAND (val1, 0);
	  c1 = TREE_OPERAND (val1, 1);
	  if (tree_int_cst_sgn (c1) == -1)
	    {
	      /* A negative overflow infinity cannot be negated; give up.  */
	      if (is_negative_overflow_infinity (c1))
		return -2;
	      /* Canonicalize to a positive constant by flipping the
		 operation: NAME + (-CST) becomes NAME - CST and
		 vice versa.  */
	      c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
	      if (!c1)
		return -2;
	      code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
	    }
	}

      if (TREE_CODE (val2) == SSA_NAME)
	{
	  code2 = SSA_NAME;
	  n2 = val2;
	  c2 = NULL_TREE;
	}
      else
	{
	  code2 = TREE_CODE (val2);
	  n2 = TREE_OPERAND (val2, 0);
	  c2 = TREE_OPERAND (val2, 1);
	  if (tree_int_cst_sgn (c2) == -1)
	    {
	      /* Same canonicalization as for VAL1 above.  */
	      if (is_negative_overflow_infinity (c2))
		return -2;
	      c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
	      if (!c2)
		return -2;
	      code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
	    }
	}

      /* Both values must use the same name.  */
      if (n1 != n2)
	return -2;

      if (code1 == SSA_NAME
	  && code2 == SSA_NAME)
	/* NAME == NAME  */
	return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
	return -2;

      /* The conclusions below assume NAME +- CST does not wrap; record
	 that, unless both operands are marked to suppress the warning.  */
      if (strict_overflow_p != NULL
	  && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
	  && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      if (code1 == SSA_NAME)
	{
	  if (code2 == PLUS_EXPR)
	    /* NAME < NAME + CST  */
	    return -1;
	  else if (code2 == MINUS_EXPR)
	    /* NAME > NAME - CST  */
	    return 1;
	}
      else if (code1 == PLUS_EXPR)
	{
	  if (code2 == SSA_NAME)
	    /* NAME + CST > NAME  */
	    return 1;
	  else if (code2 == PLUS_EXPR)
	    /* NAME + CST1 > NAME + CST2, if CST1 > CST2  */
	    return compare_values_warnv (c1, c2, strict_overflow_p);
	  else if (code2 == MINUS_EXPR)
	    /* NAME + CST1 > NAME - CST2  */
	    return 1;
	}
      else if (code1 == MINUS_EXPR)
	{
	  if (code2 == SSA_NAME)
	    /* NAME - CST < NAME  */
	    return -1;
	  else if (code2 == PLUS_EXPR)
	    /* NAME - CST1 < NAME + CST2  */
	    return -1;
	  else if (code2 == MINUS_EXPR)
	    /* NAME - CST1 > NAME - CST2, if CST1 < CST2.  Notice that
	       C1 and C2 are swapped in the call to compare_values.  */
	    return compare_values_warnv (c2, c1, strict_overflow_p);
	}

      gcc_unreachable ();
    }

  /* We cannot compare non-constants.  */
  if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values, except for overflow
	 infinities.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	{
	  if (strict_overflow_p != NULL)
	    *strict_overflow_p = true;
	  /* Overflow infinities compare as extreme values: -INF(OVF)
	     below everything but itself, +INF(OVF) above everything
	     but itself.  */
	  if (is_negative_overflow_infinity (val1))
	    return is_negative_overflow_infinity (val2) ? 0 : -1;
	  else if (is_negative_overflow_infinity (val2))
	    return 1;
	  else if (is_positive_overflow_infinity (val1))
	    return is_positive_overflow_infinity (val2) ? 0 : 1;
	  else if (is_positive_overflow_infinity (val2))
	    return -1;
	  return -2;
	}

      return tree_int_cst_compare (val1, val2);
    }
  else
    {
      tree t;

      /* First see if VAL1 and VAL2 are not the same.  */
      if (val1 == val2 || operand_equal_p (val1, val2, 0))
	return 0;

      /* If VAL1 is a lower address than VAL2, return -1.  */
      if (operand_less_p (val1, val2) == 1)
	return -1;

      /* If VAL1 is a higher address than VAL2, return +1.  */
      if (operand_less_p (val2, val1) == 1)
	return 1;

      /* If VAL1 is different than VAL2, return +2.
	 For integer constants we either have already returned -1 or 1
	 or they are equivalent.  We still might succeed in proving
	 something about non-trivial operands.  */
      if (TREE_CODE (val1) != INTEGER_CST
	  || TREE_CODE (val2) != INTEGER_CST)
	{
	  t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
	  if (t && integer_onep (t))
	    return 2;
	}

      return -2;
    }
}
1236 | ||
0c948c27 ILT |
1237 | /* Compare values like compare_values_warnv, but treat comparisons of |
1238 | nonconstants which rely on undefined overflow as incomparable. */ | |
12df8a7e ILT |
1239 | |
1240 | static int | |
1241 | compare_values (tree val1, tree val2) | |
1242 | { | |
1243 | bool sop; | |
1244 | int ret; | |
1245 | ||
1246 | sop = false; | |
1247 | ret = compare_values_warnv (val1, val2, &sop); | |
0c948c27 ILT |
1248 | if (sop |
1249 | && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))) | |
12df8a7e ILT |
1250 | ret = -2; |
1251 | return ret; | |
1252 | } | |
1253 | ||
0bca51f0 DN |
1254 | |
1255 | /* Return 1 if VAL is inside value range VR (VR->MIN <= VAL <= VR->MAX), | |
1256 | 0 if VAL is not inside VR, | |
c83033e7 DN |
1257 | -2 if we cannot tell either way. |
1258 | ||
1259 | FIXME, the current semantics of this functions are a bit quirky | |
1260 | when taken in the context of VRP. In here we do not care | |
1261 | about VR's type. If VR is the anti-range ~[3, 5] the call | |
1262 | value_inside_range (4, VR) will return 1. | |
1263 | ||
1264 | This is counter-intuitive in a strict sense, but the callers | |
1265 | currently expect this. They are calling the function | |
1266 | merely to determine whether VR->MIN <= VAL <= VR->MAX. The | |
1267 | callers are applying the VR_RANGE/VR_ANTI_RANGE semantics | |
1268 | themselves. | |
1269 | ||
1270 | This also applies to value_ranges_intersect_p and | |
1271 | range_includes_zero_p. The semantics of VR_RANGE and | |
1272 | VR_ANTI_RANGE should be encoded here, but that also means | |
6b3c76a9 JH |
1273 | adapting the users of these functions to the new semantics. |
1274 | ||
1275 | Benchmark compile/20001226-1.c compilation time after changing this | |
1276 | function. */ | |
0bca51f0 DN |
1277 | |
1278 | static inline int | |
6b3c76a9 | 1279 | value_inside_range (tree val, value_range_t * vr) |
0bca51f0 | 1280 | { |
6b3c76a9 | 1281 | int cmp1, cmp2; |
0bca51f0 | 1282 | |
6b3c76a9 JH |
1283 | cmp1 = operand_less_p (val, vr->min); |
1284 | if (cmp1 == -2) | |
0bca51f0 | 1285 | return -2; |
6b3c76a9 JH |
1286 | if (cmp1 == 1) |
1287 | return 0; | |
0bca51f0 | 1288 | |
6b3c76a9 JH |
1289 | cmp2 = operand_less_p (vr->max, val); |
1290 | if (cmp2 == -2) | |
0bca51f0 DN |
1291 | return -2; |
1292 | ||
6b3c76a9 | 1293 | return !cmp2; |
0bca51f0 DN |
1294 | } |
1295 | ||
1296 | ||
1297 | /* Return true if value ranges VR0 and VR1 have a non-empty | |
6b3c76a9 JH |
1298 | intersection. |
1299 | ||
1300 | Benchmark compile/20001226-1.c compilation time after changing this | |
1301 | function. | |
1302 | */ | |
0bca51f0 DN |
1303 | |
1304 | static inline bool | |
227858d1 | 1305 | value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1) |
0bca51f0 | 1306 | { |
5daffcc7 JH |
1307 | /* The value ranges do not intersect if the maximum of the first range is |
1308 | less than the minimum of the second range or vice versa. | |
1309 | When those relations are unknown, we can't do any better. */ | |
1310 | if (operand_less_p (vr0->max, vr1->min) != 0) | |
1311 | return false; | |
1312 | if (operand_less_p (vr1->max, vr0->min) != 0) | |
1313 | return false; | |
1314 | return true; | |
0bca51f0 DN |
1315 | } |
1316 | ||
1317 | ||
c83033e7 DN |
1318 | /* Return true if VR includes the value zero, false otherwise. FIXME, |
1319 | currently this will return false for an anti-range like ~[-4, 3]. | |
1320 | This will be wrong when the semantics of value_inside_range are | |
1321 | modified (currently the users of this function expect these | |
1322 | semantics). */ | |
227858d1 DN |
1323 | |
1324 | static inline bool | |
1325 | range_includes_zero_p (value_range_t *vr) | |
1326 | { | |
1327 | tree zero; | |
1328 | ||
1329 | gcc_assert (vr->type != VR_UNDEFINED | |
1330 | && vr->type != VR_VARYING | |
1331 | && !symbolic_range_p (vr)); | |
1332 | ||
1333 | zero = build_int_cst (TREE_TYPE (vr->min), 0); | |
1334 | return (value_inside_range (zero, vr) == 1); | |
1335 | } | |
1336 | ||
b16caf72 JL |
1337 | /* Return true if T, an SSA_NAME, is known to be nonnegative. Return |
1338 | false otherwise or if no value range information is available. */ | |
1339 | ||
1340 | bool | |
58f9752a | 1341 | ssa_name_nonnegative_p (const_tree t) |
b16caf72 JL |
1342 | { |
1343 | value_range_t *vr = get_value_range (t); | |
1344 | ||
1345 | if (!vr) | |
1346 | return false; | |
1347 | ||
1348 | /* Testing for VR_ANTI_RANGE is not useful here as any anti-range | |
1349 | which would return a useful value should be encoded as a VR_RANGE. */ | |
1350 | if (vr->type == VR_RANGE) | |
1351 | { | |
1352 | int result = compare_values (vr->min, integer_zero_node); | |
1353 | ||
1354 | return (result == 0 || result == 1); | |
1355 | } | |
1356 | return false; | |
1357 | } | |
1358 | ||
73019a42 RG |
1359 | /* If OP has a value range with a single constant value return that, |
1360 | otherwise return NULL_TREE. This returns OP itself if OP is a | |
1361 | constant. */ | |
1362 | ||
1363 | static tree | |
1364 | op_with_constant_singleton_value_range (tree op) | |
1365 | { | |
1366 | value_range_t *vr; | |
1367 | ||
1368 | if (is_gimple_min_invariant (op)) | |
1369 | return op; | |
1370 | ||
1371 | if (TREE_CODE (op) != SSA_NAME) | |
1372 | return NULL_TREE; | |
1373 | ||
1374 | vr = get_value_range (op); | |
1375 | if (vr->type == VR_RANGE | |
1376 | && operand_equal_p (vr->min, vr->max, 0) | |
1377 | && is_gimple_min_invariant (vr->min)) | |
1378 | return vr->min; | |
1379 | ||
1380 | return NULL_TREE; | |
1381 | } | |
1382 | ||
227858d1 | 1383 | |
0bca51f0 DN |
1384 | /* Extract value range information from an ASSERT_EXPR EXPR and store |
1385 | it in *VR_P. */ | |
1386 | ||
1387 | static void | |
227858d1 | 1388 | extract_range_from_assert (value_range_t *vr_p, tree expr) |
0bca51f0 | 1389 | { |
227858d1 DN |
1390 | tree var, cond, limit, min, max, type; |
1391 | value_range_t *var_vr, *limit_vr; | |
b565d777 | 1392 | enum tree_code cond_code; |
0bca51f0 DN |
1393 | |
1394 | var = ASSERT_EXPR_VAR (expr); | |
1395 | cond = ASSERT_EXPR_COND (expr); | |
1396 | ||
7da4bf7d | 1397 | gcc_assert (COMPARISON_CLASS_P (cond)); |
0bca51f0 DN |
1398 | |
1399 | /* Find VAR in the ASSERT_EXPR conditional. */ | |
2ab8dbf4 RG |
1400 | if (var == TREE_OPERAND (cond, 0) |
1401 | || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR | |
1402 | || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR) | |
227858d1 DN |
1403 | { |
1404 | /* If the predicate is of the form VAR COMP LIMIT, then we just | |
1405 | take LIMIT from the RHS and use the same comparison code. */ | |
227858d1 | 1406 | cond_code = TREE_CODE (cond); |
2ab8dbf4 RG |
1407 | limit = TREE_OPERAND (cond, 1); |
1408 | cond = TREE_OPERAND (cond, 0); | |
227858d1 DN |
1409 | } |
1410 | else | |
1411 | { | |
1412 | /* If the predicate is of the form LIMIT COMP VAR, then we need | |
1413 | to flip around the comparison code to create the proper range | |
1414 | for VAR. */ | |
09b2f9e8 | 1415 | cond_code = swap_tree_comparison (TREE_CODE (cond)); |
2ab8dbf4 RG |
1416 | limit = TREE_OPERAND (cond, 0); |
1417 | cond = TREE_OPERAND (cond, 1); | |
227858d1 | 1418 | } |
0bca51f0 | 1419 | |
b80cca7b ILT |
1420 | limit = avoid_overflow_infinity (limit); |
1421 | ||
227858d1 | 1422 | type = TREE_TYPE (limit); |
0bca51f0 DN |
1423 | gcc_assert (limit != var); |
1424 | ||
227858d1 DN |
1425 | /* For pointer arithmetic, we only keep track of pointer equality |
1426 | and inequality. */ | |
1427 | if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR) | |
0bca51f0 | 1428 | { |
b565d777 | 1429 | set_value_range_to_varying (vr_p); |
0bca51f0 DN |
1430 | return; |
1431 | } | |
1432 | ||
227858d1 DN |
1433 | /* If LIMIT is another SSA name and LIMIT has a range of its own, |
1434 | try to use LIMIT's range to avoid creating symbolic ranges | |
1435 | unnecessarily. */ | |
1436 | limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL; | |
1437 | ||
1438 | /* LIMIT's range is only interesting if it has any useful information. */ | |
1439 | if (limit_vr | |
1440 | && (limit_vr->type == VR_UNDEFINED | |
1441 | || limit_vr->type == VR_VARYING | |
1442 | || symbolic_range_p (limit_vr))) | |
1443 | limit_vr = NULL; | |
1444 | ||
db3d5328 DN |
1445 | /* Initially, the new range has the same set of equivalences of |
1446 | VAR's range. This will be revised before returning the final | |
1447 | value. Since assertions may be chained via mutually exclusive | |
1448 | predicates, we will need to trim the set of equivalences before | |
1449 | we are done. */ | |
227858d1 | 1450 | gcc_assert (vr_p->equiv == NULL); |
f5052e29 | 1451 | add_equivalence (&vr_p->equiv, var); |
227858d1 DN |
1452 | |
1453 | /* Extract a new range based on the asserted comparison for VAR and | |
1454 | LIMIT's value range. Notice that if LIMIT has an anti-range, we | |
1455 | will only use it for equality comparisons (EQ_EXPR). For any | |
1456 | other kind of assertion, we cannot derive a range from LIMIT's | |
1457 | anti-range that can be used to describe the new range. For | |
1458 | instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10], | |
1459 | then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is | |
1460 | no single range for x_2 that could describe LE_EXPR, so we might | |
2ab8dbf4 RG |
1461 | as well build the range [b_4, +INF] for it. |
1462 | One special case we handle is extracting a range from a | |
1463 | range test encoded as (unsigned)var + CST <= limit. */ | |
1464 | if (TREE_CODE (cond) == NOP_EXPR | |
1465 | || TREE_CODE (cond) == PLUS_EXPR) | |
1466 | { | |
2ab8dbf4 RG |
1467 | if (TREE_CODE (cond) == PLUS_EXPR) |
1468 | { | |
70b7b037 RG |
1469 | min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)), |
1470 | TREE_OPERAND (cond, 1)); | |
1471 | max = int_const_binop (PLUS_EXPR, limit, min, 0); | |
2ab8dbf4 RG |
1472 | cond = TREE_OPERAND (cond, 0); |
1473 | } | |
1474 | else | |
70b7b037 RG |
1475 | { |
1476 | min = build_int_cst (TREE_TYPE (var), 0); | |
1477 | max = limit; | |
1478 | } | |
2ab8dbf4 | 1479 | |
70b7b037 RG |
1480 | /* Make sure to not set TREE_OVERFLOW on the final type |
1481 | conversion. We are willingly interpreting large positive | |
1482 | unsigned values as negative singed values here. */ | |
1483 | min = force_fit_type_double (TREE_TYPE (var), TREE_INT_CST_LOW (min), | |
1484 | TREE_INT_CST_HIGH (min), 0, false); | |
1485 | max = force_fit_type_double (TREE_TYPE (var), TREE_INT_CST_LOW (max), | |
1486 | TREE_INT_CST_HIGH (max), 0, false); | |
2ab8dbf4 RG |
1487 | |
1488 | /* We can transform a max, min range to an anti-range or | |
1489 | vice-versa. Use set_and_canonicalize_value_range which does | |
1490 | this for us. */ | |
1491 | if (cond_code == LE_EXPR) | |
1492 | set_and_canonicalize_value_range (vr_p, VR_RANGE, | |
1493 | min, max, vr_p->equiv); | |
1494 | else if (cond_code == GT_EXPR) | |
1495 | set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE, | |
1496 | min, max, vr_p->equiv); | |
1497 | else | |
1498 | gcc_unreachable (); | |
1499 | } | |
1500 | else if (cond_code == EQ_EXPR) | |
227858d1 DN |
1501 | { |
1502 | enum value_range_type range_type; | |
1503 | ||
1504 | if (limit_vr) | |
1505 | { | |
1506 | range_type = limit_vr->type; | |
1507 | min = limit_vr->min; | |
1508 | max = limit_vr->max; | |
1509 | } | |
1510 | else | |
1511 | { | |
1512 | range_type = VR_RANGE; | |
1513 | min = limit; | |
1514 | max = limit; | |
1515 | } | |
1516 | ||
1517 | set_value_range (vr_p, range_type, min, max, vr_p->equiv); | |
1518 | ||
1519 | /* When asserting the equality VAR == LIMIT and LIMIT is another | |
1520 | SSA name, the new range will also inherit the equivalence set | |
1521 | from LIMIT. */ | |
1522 | if (TREE_CODE (limit) == SSA_NAME) | |
f5052e29 | 1523 | add_equivalence (&vr_p->equiv, limit); |
227858d1 DN |
1524 | } |
1525 | else if (cond_code == NE_EXPR) | |
1526 | { | |
1527 | /* As described above, when LIMIT's range is an anti-range and | |
1528 | this assertion is an inequality (NE_EXPR), then we cannot | |
1529 | derive anything from the anti-range. For instance, if | |
1530 | LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does | |
1531 | not imply that VAR's range is [0, 0]. So, in the case of | |
1532 | anti-ranges, we just assert the inequality using LIMIT and | |
fde5c44c JM |
1533 | not its anti-range. |
1534 | ||
1535 | If LIMIT_VR is a range, we can only use it to build a new | |
1536 | anti-range if LIMIT_VR is a single-valued range. For | |
1537 | instance, if LIMIT_VR is [0, 1], the predicate | |
1538 | VAR != [0, 1] does not mean that VAR's range is ~[0, 1]. | |
1539 | Rather, it means that for value 0 VAR should be ~[0, 0] | |
1540 | and for value 1, VAR should be ~[1, 1]. We cannot | |
1541 | represent these ranges. | |
1542 | ||
1543 | The only situation in which we can build a valid | |
1544 | anti-range is when LIMIT_VR is a single-valued range | |
1545 | (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case, | |
1546 | build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */ | |
1547 | if (limit_vr | |
1548 | && limit_vr->type == VR_RANGE | |
1549 | && compare_values (limit_vr->min, limit_vr->max) == 0) | |
227858d1 | 1550 | { |
fde5c44c JM |
1551 | min = limit_vr->min; |
1552 | max = limit_vr->max; | |
227858d1 DN |
1553 | } |
1554 | else | |
1555 | { | |
fde5c44c JM |
1556 | /* In any other case, we cannot use LIMIT's range to build a |
1557 | valid anti-range. */ | |
1558 | min = max = limit; | |
227858d1 DN |
1559 | } |
1560 | ||
1561 | /* If MIN and MAX cover the whole range for their type, then | |
1562 | just use the original LIMIT. */ | |
1563 | if (INTEGRAL_TYPE_P (type) | |
e1f28918 ILT |
1564 | && vrp_val_is_min (min) |
1565 | && vrp_val_is_max (max)) | |
227858d1 DN |
1566 | min = max = limit; |
1567 | ||
1568 | set_value_range (vr_p, VR_ANTI_RANGE, min, max, vr_p->equiv); | |
1569 | } | |
1570 | else if (cond_code == LE_EXPR || cond_code == LT_EXPR) | |
0bca51f0 | 1571 | { |
227858d1 DN |
1572 | min = TYPE_MIN_VALUE (type); |
1573 | ||
1574 | if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE) | |
1575 | max = limit; | |
1576 | else | |
1577 | { | |
1578 | /* If LIMIT_VR is of the form [N1, N2], we need to build the | |
1579 | range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for | |
1580 | LT_EXPR. */ | |
1581 | max = limit_vr->max; | |
1582 | } | |
1583 | ||
9d6eefd5 EB |
1584 | /* If the maximum value forces us to be out of bounds, simply punt. |
1585 | It would be pointless to try and do anything more since this | |
1586 | all should be optimized away above us. */ | |
7343ff45 ILT |
1587 | if ((cond_code == LT_EXPR |
1588 | && compare_values (max, min) == 0) | |
a9d386a1 | 1589 | || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max))) |
9d6eefd5 EB |
1590 | set_value_range_to_varying (vr_p); |
1591 | else | |
227858d1 | 1592 | { |
9d6eefd5 | 1593 | /* For LT_EXPR, we create the range [MIN, MAX - 1]. */ |
7343ff45 | 1594 | if (cond_code == LT_EXPR) |
9d6eefd5 EB |
1595 | { |
1596 | tree one = build_int_cst (type, 1); | |
1597 | max = fold_build2 (MINUS_EXPR, type, max, one); | |
3fe5bcaf ILT |
1598 | if (EXPR_P (max)) |
1599 | TREE_NO_WARNING (max) = 1; | |
9d6eefd5 | 1600 | } |
227858d1 | 1601 | |
9d6eefd5 EB |
1602 | set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv); |
1603 | } | |
0bca51f0 | 1604 | } |
227858d1 | 1605 | else if (cond_code == GE_EXPR || cond_code == GT_EXPR) |
0bca51f0 | 1606 | { |
227858d1 DN |
1607 | max = TYPE_MAX_VALUE (type); |
1608 | ||
1609 | if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE) | |
1610 | min = limit; | |
1611 | else | |
1612 | { | |
1613 | /* If LIMIT_VR is of the form [N1, N2], we need to build the | |
1614 | range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for | |
1615 | GT_EXPR. */ | |
1616 | min = limit_vr->min; | |
1617 | } | |
1618 | ||
9d6eefd5 EB |
1619 | /* If the minimum value forces us to be out of bounds, simply punt. |
1620 | It would be pointless to try and do anything more since this | |
1621 | all should be optimized away above us. */ | |
7343ff45 ILT |
1622 | if ((cond_code == GT_EXPR |
1623 | && compare_values (min, max) == 0) | |
a9d386a1 | 1624 | || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min))) |
9d6eefd5 EB |
1625 | set_value_range_to_varying (vr_p); |
1626 | else | |
227858d1 | 1627 | { |
9d6eefd5 | 1628 | /* For GT_EXPR, we create the range [MIN + 1, MAX]. */ |
7343ff45 | 1629 | if (cond_code == GT_EXPR) |
9d6eefd5 EB |
1630 | { |
1631 | tree one = build_int_cst (type, 1); | |
1632 | min = fold_build2 (PLUS_EXPR, type, min, one); | |
3fe5bcaf ILT |
1633 | if (EXPR_P (min)) |
1634 | TREE_NO_WARNING (min) = 1; | |
9d6eefd5 | 1635 | } |
227858d1 | 1636 | |
9d6eefd5 EB |
1637 | set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv); |
1638 | } | |
0bca51f0 DN |
1639 | } |
1640 | else | |
1641 | gcc_unreachable (); | |
1642 | ||
012a7a78 DN |
1643 | /* If VAR already had a known range, it may happen that the new |
1644 | range we have computed and VAR's range are not compatible. For | |
1645 | instance, | |
1646 | ||
1647 | if (p_5 == NULL) | |
1648 | p_6 = ASSERT_EXPR <p_5, p_5 == NULL>; | |
1649 | x_7 = p_6->fld; | |
1650 | p_8 = ASSERT_EXPR <p_6, p_6 != NULL>; | |
1651 | ||
1652 | While the above comes from a faulty program, it will cause an ICE | |
1653 | later because p_8 and p_6 will have incompatible ranges and at | |
1654 | the same time will be considered equivalent. A similar situation | |
1655 | would arise from | |
1656 | ||
1657 | if (i_5 > 10) | |
1658 | i_6 = ASSERT_EXPR <i_5, i_5 > 10>; | |
1659 | if (i_5 < 5) | |
1660 | i_7 = ASSERT_EXPR <i_6, i_6 < 5>; | |
1661 | ||
1662 | Again i_6 and i_7 will have incompatible ranges. It would be | |
1663 | pointless to try and do anything with i_7's range because | |
1664 | anything dominated by 'if (i_5 < 5)' will be optimized away. | |
1665 | Note, due to the wa in which simulation proceeds, the statement | |
1666 | i_7 = ASSERT_EXPR <...> we would never be visited because the | |
c83eecad | 1667 | conditional 'if (i_5 < 5)' always evaluates to false. However, |
012a7a78 DN |
1668 | this extra check does not hurt and may protect against future |
1669 | changes to VRP that may get into a situation similar to the | |
1670 | NULL pointer dereference example. | |
1671 | ||
1672 | Note that these compatibility tests are only needed when dealing | |
1673 | with ranges or a mix of range and anti-range. If VAR_VR and VR_P | |
1674 | are both anti-ranges, they will always be compatible, because two | |
1675 | anti-ranges will always have a non-empty intersection. */ | |
1676 | ||
0bca51f0 | 1677 | var_vr = get_value_range (var); |
0bca51f0 | 1678 | |
012a7a78 DN |
1679 | /* We may need to make adjustments when VR_P and VAR_VR are numeric |
1680 | ranges or anti-ranges. */ | |
1681 | if (vr_p->type == VR_VARYING | |
1682 | || vr_p->type == VR_UNDEFINED | |
1683 | || var_vr->type == VR_VARYING | |
1684 | || var_vr->type == VR_UNDEFINED | |
1685 | || symbolic_range_p (vr_p) | |
1686 | || symbolic_range_p (var_vr)) | |
96644aba | 1687 | return; |
012a7a78 DN |
1688 | |
1689 | if (var_vr->type == VR_RANGE && vr_p->type == VR_RANGE) | |
1690 | { | |
1691 | /* If the two ranges have a non-empty intersection, we can | |
1692 | refine the resulting range. Since the assert expression | |
1693 | creates an equivalency and at the same time it asserts a | |
1694 | predicate, we can take the intersection of the two ranges to | |
1695 | get better precision. */ | |
1696 | if (value_ranges_intersect_p (var_vr, vr_p)) | |
1697 | { | |
1698 | /* Use the larger of the two minimums. */ | |
1699 | if (compare_values (vr_p->min, var_vr->min) == -1) | |
1700 | min = var_vr->min; | |
1701 | else | |
1702 | min = vr_p->min; | |
1703 | ||
1704 | /* Use the smaller of the two maximums. */ | |
1705 | if (compare_values (vr_p->max, var_vr->max) == 1) | |
1706 | max = var_vr->max; | |
1707 | else | |
1708 | max = vr_p->max; | |
1709 | ||
1710 | set_value_range (vr_p, vr_p->type, min, max, vr_p->equiv); | |
1711 | } | |
0bca51f0 | 1712 | else |
012a7a78 DN |
1713 | { |
1714 | /* The two ranges do not intersect, set the new range to | |
1715 | VARYING, because we will not be able to do anything | |
1716 | meaningful with it. */ | |
1717 | set_value_range_to_varying (vr_p); | |
1718 | } | |
1719 | } | |
1720 | else if ((var_vr->type == VR_RANGE && vr_p->type == VR_ANTI_RANGE) | |
1721 | || (var_vr->type == VR_ANTI_RANGE && vr_p->type == VR_RANGE)) | |
1722 | { | |
1723 | /* A range and an anti-range will cancel each other only if | |
1724 | their ends are the same. For instance, in the example above, | |
1725 | p_8's range ~[0, 0] and p_6's range [0, 0] are incompatible, | |
1726 | so VR_P should be set to VR_VARYING. */ | |
1727 | if (compare_values (var_vr->min, vr_p->min) == 0 | |
1728 | && compare_values (var_vr->max, vr_p->max) == 0) | |
1729 | set_value_range_to_varying (vr_p); | |
307d19fe JL |
1730 | else |
1731 | { | |
1732 | tree min, max, anti_min, anti_max, real_min, real_max; | |
b881887e | 1733 | int cmp; |
307d19fe JL |
1734 | |
1735 | /* We want to compute the logical AND of the two ranges; | |
1736 | there are three cases to consider. | |
1737 | ||
1738 | ||
c0220ea4 | 1739 | 1. The VR_ANTI_RANGE range is completely within the |
307d19fe JL |
1740 | VR_RANGE and the endpoints of the ranges are |
1741 | different. In that case the resulting range | |
4f67dfcf JL |
1742 | should be whichever range is more precise. |
1743 | Typically that will be the VR_RANGE. | |
307d19fe JL |
1744 | |
1745 | 2. The VR_ANTI_RANGE is completely disjoint from | |
1746 | the VR_RANGE. In this case the resulting range | |
1747 | should be the VR_RANGE. | |
1748 | ||
1749 | 3. There is some overlap between the VR_ANTI_RANGE | |
1750 | and the VR_RANGE. | |
1751 | ||
1752 | 3a. If the high limit of the VR_ANTI_RANGE resides | |
1753 | within the VR_RANGE, then the result is a new | |
1754 | VR_RANGE starting at the high limit of the | |
fa10beec | 1755 | VR_ANTI_RANGE + 1 and extending to the |
307d19fe JL |
1756 | high limit of the original VR_RANGE. |
1757 | ||
1758 | 3b. If the low limit of the VR_ANTI_RANGE resides | |
1759 | within the VR_RANGE, then the result is a new | |
1760 | VR_RANGE starting at the low limit of the original | |
1761 | VR_RANGE and extending to the low limit of the | |
1762 | VR_ANTI_RANGE - 1. */ | |
1763 | if (vr_p->type == VR_ANTI_RANGE) | |
1764 | { | |
1765 | anti_min = vr_p->min; | |
1766 | anti_max = vr_p->max; | |
1767 | real_min = var_vr->min; | |
1768 | real_max = var_vr->max; | |
1769 | } | |
1770 | else | |
1771 | { | |
1772 | anti_min = var_vr->min; | |
1773 | anti_max = var_vr->max; | |
1774 | real_min = vr_p->min; | |
1775 | real_max = vr_p->max; | |
1776 | } | |
1777 | ||
1778 | ||
1779 | /* Case 1, VR_ANTI_RANGE completely within VR_RANGE, | |
1780 | not including any endpoints. */ | |
1781 | if (compare_values (anti_max, real_max) == -1 | |
1782 | && compare_values (anti_min, real_min) == 1) | |
1783 | { | |
70b7b037 RG |
1784 | /* If the range is covering the whole valid range of |
1785 | the type keep the anti-range. */ | |
1786 | if (!vrp_val_is_min (real_min) | |
1787 | || !vrp_val_is_max (real_max)) | |
1788 | set_value_range (vr_p, VR_RANGE, real_min, | |
1789 | real_max, vr_p->equiv); | |
307d19fe JL |
1790 | } |
1791 | /* Case 2, VR_ANTI_RANGE completely disjoint from | |
1792 | VR_RANGE. */ | |
1793 | else if (compare_values (anti_min, real_max) == 1 | |
1794 | || compare_values (anti_max, real_min) == -1) | |
1795 | { | |
1796 | set_value_range (vr_p, VR_RANGE, real_min, | |
1797 | real_max, vr_p->equiv); | |
1798 | } | |
1799 | /* Case 3a, the anti-range extends into the low | |
1800 | part of the real range. Thus creating a new | |
917f1b7e | 1801 | low for the real range. */ |
b881887e RG |
1802 | else if (((cmp = compare_values (anti_max, real_min)) == 1 |
1803 | || cmp == 0) | |
307d19fe JL |
1804 | && compare_values (anti_max, real_max) == -1) |
1805 | { | |
12df8a7e ILT |
1806 | gcc_assert (!is_positive_overflow_infinity (anti_max)); |
1807 | if (needs_overflow_infinity (TREE_TYPE (anti_max)) | |
e1f28918 | 1808 | && vrp_val_is_max (anti_max)) |
12df8a7e ILT |
1809 | { |
1810 | if (!supports_overflow_infinity (TREE_TYPE (var_vr->min))) | |
1811 | { | |
1812 | set_value_range_to_varying (vr_p); | |
1813 | return; | |
1814 | } | |
1815 | min = positive_overflow_infinity (TREE_TYPE (var_vr->min)); | |
1816 | } | |
42289977 | 1817 | else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min))) |
12df8a7e ILT |
1818 | min = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min), |
1819 | anti_max, | |
1820 | build_int_cst (TREE_TYPE (var_vr->min), 1)); | |
42289977 RG |
1821 | else |
1822 | min = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (var_vr->min), | |
1823 | anti_max, size_int (1)); | |
307d19fe JL |
1824 | max = real_max; |
1825 | set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv); | |
1826 | } | |
1827 | /* Case 3b, the anti-range extends into the high | |
1828 | part of the real range. Thus creating a new | |
917f1b7e | 1829 | higher for the real range. */ |
307d19fe | 1830 | else if (compare_values (anti_min, real_min) == 1 |
b881887e RG |
1831 | && ((cmp = compare_values (anti_min, real_max)) == -1 |
1832 | || cmp == 0)) | |
307d19fe | 1833 | { |
12df8a7e ILT |
1834 | gcc_assert (!is_negative_overflow_infinity (anti_min)); |
1835 | if (needs_overflow_infinity (TREE_TYPE (anti_min)) | |
e1f28918 | 1836 | && vrp_val_is_min (anti_min)) |
12df8a7e ILT |
1837 | { |
1838 | if (!supports_overflow_infinity (TREE_TYPE (var_vr->min))) | |
1839 | { | |
1840 | set_value_range_to_varying (vr_p); | |
1841 | return; | |
1842 | } | |
1843 | max = negative_overflow_infinity (TREE_TYPE (var_vr->min)); | |
1844 | } | |
5be014d5 | 1845 | else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min))) |
12df8a7e ILT |
1846 | max = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min), |
1847 | anti_min, | |
1848 | build_int_cst (TREE_TYPE (var_vr->min), 1)); | |
5be014d5 AP |
1849 | else |
1850 | max = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (var_vr->min), | |
1851 | anti_min, | |
1852 | size_int (-1)); | |
307d19fe JL |
1853 | min = real_min; |
1854 | set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv); | |
1855 | } | |
1856 | } | |
0bca51f0 DN |
1857 | } |
1858 | } | |
1859 | ||
1860 | ||
1861 | /* Extract range information from SSA name VAR and store it in VR. If | |
1862 | VAR has an interesting range, use it. Otherwise, create the | |
1863 | range [VAR, VAR] and return it. This is useful in situations where | |
1864 | we may have conditionals testing values of VARYING names. For | |
1865 | instance, | |
1866 | ||
1867 | x_3 = y_5; | |
1868 | if (x_3 > y_5) | |
1869 | ... | |
1870 | ||
1871 | Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is | |
1872 | always false. */ | |
1873 | ||
1874 | static void | |
227858d1 | 1875 | extract_range_from_ssa_name (value_range_t *vr, tree var) |
0bca51f0 | 1876 | { |
227858d1 | 1877 | value_range_t *var_vr = get_value_range (var); |
0bca51f0 DN |
1878 | |
1879 | if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING) | |
227858d1 | 1880 | copy_value_range (vr, var_vr); |
0bca51f0 | 1881 | else |
227858d1 DN |
1882 | set_value_range (vr, VR_RANGE, var, var, NULL); |
1883 | ||
f5052e29 | 1884 | add_equivalence (&vr->equiv, var); |
0bca51f0 DN |
1885 | } |
1886 | ||
1887 | ||
9983270b DN |
1888 | /* Wrapper around int_const_binop. If the operation overflows and we |
1889 | are not using wrapping arithmetic, then adjust the result to be | |
12df8a7e ILT |
1890 | -INF or +INF depending on CODE, VAL1 and VAL2. This can return |
1891 | NULL_TREE if we need to use an overflow infinity representation but | |
1892 | the type does not support it. */ | |
9983270b | 1893 | |
12df8a7e | 1894 | static tree |
9983270b DN |
1895 | vrp_int_const_binop (enum tree_code code, tree val1, tree val2) |
1896 | { | |
1897 | tree res; | |
1898 | ||
377d569b | 1899 | res = int_const_binop (code, val1, val2, 0); |
9983270b DN |
1900 | |
1901 | /* If we are not using wrapping arithmetic, operate symbolically | |
1902 | on -INF and +INF. */ | |
eeef0e45 | 1903 | if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1))) |
b17775ab JM |
1904 | { |
1905 | int checkz = compare_values (res, val1); | |
26ef4301 | 1906 | bool overflow = false; |
b17775ab | 1907 | |
7dc32197 | 1908 | /* Ensure that res = val1 [+*] val2 >= val1 |
b17775ab | 1909 | or that res = val1 - val2 <= val1. */ |
26ef4301 | 1910 | if ((code == PLUS_EXPR |
7dc32197 DN |
1911 | && !(checkz == 1 || checkz == 0)) |
1912 | || (code == MINUS_EXPR | |
1913 | && !(checkz == 0 || checkz == -1))) | |
26ef4301 JL |
1914 | { |
1915 | overflow = true; | |
1916 | } | |
1917 | /* Checking for multiplication overflow is done by dividing the | |
1918 | output of the multiplication by the first input of the | |
1919 | multiplication. If the result of that division operation is | |
1920 | not equal to the second input of the multiplication, then the | |
1921 | multiplication overflowed. */ | |
1922 | else if (code == MULT_EXPR && !integer_zerop (val1)) | |
1923 | { | |
1924 | tree tmp = int_const_binop (TRUNC_DIV_EXPR, | |
3ea0e1e4 | 1925 | res, |
26ef4301 JL |
1926 | val1, 0); |
1927 | int check = compare_values (tmp, val2); | |
1928 | ||
1929 | if (check != 0) | |
1930 | overflow = true; | |
1931 | } | |
1932 | ||
1933 | if (overflow) | |
b17775ab JM |
1934 | { |
1935 | res = copy_node (res); | |
1936 | TREE_OVERFLOW (res) = 1; | |
1937 | } | |
26ef4301 | 1938 | |
b17775ab | 1939 | } |
12df8a7e ILT |
1940 | else if ((TREE_OVERFLOW (res) |
1941 | && !TREE_OVERFLOW (val1) | |
1942 | && !TREE_OVERFLOW (val2)) | |
1943 | || is_overflow_infinity (val1) | |
1944 | || is_overflow_infinity (val2)) | |
9983270b | 1945 | { |
7dc32197 DN |
1946 | /* If the operation overflowed but neither VAL1 nor VAL2 are |
1947 | overflown, return -INF or +INF depending on the operation | |
1948 | and the combination of signs of the operands. */ | |
9983270b DN |
1949 | int sgn1 = tree_int_cst_sgn (val1); |
1950 | int sgn2 = tree_int_cst_sgn (val2); | |
1951 | ||
12df8a7e ILT |
1952 | if (needs_overflow_infinity (TREE_TYPE (res)) |
1953 | && !supports_overflow_infinity (TREE_TYPE (res))) | |
1954 | return NULL_TREE; | |
1955 | ||
d7419dec ILT |
1956 | /* We have to punt on adding infinities of different signs, |
1957 | since we can't tell what the sign of the result should be. | |
1958 | Likewise for subtracting infinities of the same sign. */ | |
1959 | if (((code == PLUS_EXPR && sgn1 != sgn2) | |
1960 | || (code == MINUS_EXPR && sgn1 == sgn2)) | |
12df8a7e ILT |
1961 | && is_overflow_infinity (val1) |
1962 | && is_overflow_infinity (val2)) | |
1963 | return NULL_TREE; | |
1964 | ||
d7419dec ILT |
1965 | /* Don't try to handle division or shifting of infinities. */ |
1966 | if ((code == TRUNC_DIV_EXPR | |
1967 | || code == FLOOR_DIV_EXPR | |
1968 | || code == CEIL_DIV_EXPR | |
1969 | || code == EXACT_DIV_EXPR | |
1970 | || code == ROUND_DIV_EXPR | |
1971 | || code == RSHIFT_EXPR) | |
1972 | && (is_overflow_infinity (val1) | |
1973 | || is_overflow_infinity (val2))) | |
1974 | return NULL_TREE; | |
1975 | ||
0d22e81f EB |
1976 | /* Notice that we only need to handle the restricted set of |
1977 | operations handled by extract_range_from_binary_expr. | |
1978 | Among them, only multiplication, addition and subtraction | |
1979 | can yield overflow without overflown operands because we | |
1980 | are working with integral types only... except in the | |
1981 | case VAL1 = -INF and VAL2 = -1 which overflows to +INF | |
1982 | for division too. */ | |
1983 | ||
1984 | /* For multiplication, the sign of the overflow is given | |
1985 | by the comparison of the signs of the operands. */ | |
1986 | if ((code == MULT_EXPR && sgn1 == sgn2) | |
1987 | /* For addition, the operands must be of the same sign | |
1988 | to yield an overflow. Its sign is therefore that | |
d7419dec ILT |
1989 | of one of the operands, for example the first. For |
1990 | infinite operands X + -INF is negative, not positive. */ | |
1991 | || (code == PLUS_EXPR | |
1992 | && (sgn1 >= 0 | |
1993 | ? !is_negative_overflow_infinity (val2) | |
1994 | : is_positive_overflow_infinity (val2))) | |
12df8a7e ILT |
1995 | /* For subtraction, non-infinite operands must be of |
1996 | different signs to yield an overflow. Its sign is | |
1997 | therefore that of the first operand or the opposite of | |
1998 | that of the second operand. A first operand of 0 counts | |
1999 | as positive here, for the corner case 0 - (-INF), which | |
2000 | overflows, but must yield +INF. For infinite operands 0 | |
2001 | - INF is negative, not positive. */ | |
2002 | || (code == MINUS_EXPR | |
2003 | && (sgn1 >= 0 | |
2004 | ? !is_positive_overflow_infinity (val2) | |
2005 | : is_negative_overflow_infinity (val2))) | |
13338552 RG |
2006 | /* We only get in here with positive shift count, so the |
2007 | overflow direction is the same as the sign of val1. | |
2008 | Actually rshift does not overflow at all, but we only | |
2009 | handle the case of shifting overflowed -INF and +INF. */ | |
2010 | || (code == RSHIFT_EXPR | |
2011 | && sgn1 >= 0) | |
0d22e81f EB |
2012 | /* For division, the only case is -INF / -1 = +INF. */ |
2013 | || code == TRUNC_DIV_EXPR | |
2014 | || code == FLOOR_DIV_EXPR | |
2015 | || code == CEIL_DIV_EXPR | |
2016 | || code == EXACT_DIV_EXPR | |
2017 | || code == ROUND_DIV_EXPR) | |
12df8a7e ILT |
2018 | return (needs_overflow_infinity (TREE_TYPE (res)) |
2019 | ? positive_overflow_infinity (TREE_TYPE (res)) | |
2020 | : TYPE_MAX_VALUE (TREE_TYPE (res))); | |
9983270b | 2021 | else |
12df8a7e ILT |
2022 | return (needs_overflow_infinity (TREE_TYPE (res)) |
2023 | ? negative_overflow_infinity (TREE_TYPE (res)) | |
2024 | : TYPE_MIN_VALUE (TREE_TYPE (res))); | |
9983270b DN |
2025 | } |
2026 | ||
2027 | return res; | |
2028 | } | |
2029 | ||
2030 | ||
0bca51f0 DN |
2031 | /* Extract range information from a binary expression EXPR based on |
2032 | the ranges of each of its operands and the expression code. */ | |
2033 | ||
2034 | static void | |
2d3cd5d5 RAE |
2035 | extract_range_from_binary_expr (value_range_t *vr, |
2036 | enum tree_code code, | |
2037 | tree expr_type, tree op0, tree op1) | |
0bca51f0 | 2038 | { |
4e2d94a9 | 2039 | enum value_range_type type; |
2d3cd5d5 | 2040 | tree min, max; |
0bca51f0 | 2041 | int cmp; |
227858d1 DN |
2042 | value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; |
2043 | value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; | |
0bca51f0 DN |
2044 | |
2045 | /* Not all binary expressions can be applied to ranges in a | |
2046 | meaningful way. Handle only arithmetic operations. */ | |
2047 | if (code != PLUS_EXPR | |
2048 | && code != MINUS_EXPR | |
5be014d5 | 2049 | && code != POINTER_PLUS_EXPR |
0bca51f0 DN |
2050 | && code != MULT_EXPR |
2051 | && code != TRUNC_DIV_EXPR | |
2052 | && code != FLOOR_DIV_EXPR | |
2053 | && code != CEIL_DIV_EXPR | |
2054 | && code != EXACT_DIV_EXPR | |
2055 | && code != ROUND_DIV_EXPR | |
6569e716 | 2056 | && code != RSHIFT_EXPR |
0bca51f0 | 2057 | && code != MIN_EXPR |
227858d1 | 2058 | && code != MAX_EXPR |
29c8f8c2 | 2059 | && code != BIT_AND_EXPR |
30821654 | 2060 | && code != BIT_IOR_EXPR |
227858d1 | 2061 | && code != TRUTH_AND_EXPR |
74290e83 | 2062 | && code != TRUTH_OR_EXPR) |
0bca51f0 | 2063 | { |
73019a42 | 2064 | /* We can still do constant propagation here. */ |
a4b93060 RG |
2065 | tree const_op0 = op_with_constant_singleton_value_range (op0); |
2066 | tree const_op1 = op_with_constant_singleton_value_range (op1); | |
2067 | if (const_op0 || const_op1) | |
73019a42 | 2068 | { |
a4b93060 RG |
2069 | tree tem = fold_binary (code, expr_type, |
2070 | const_op0 ? const_op0 : op0, | |
2071 | const_op1 ? const_op1 : op1); | |
08298a8c RG |
2072 | if (tem |
2073 | && is_gimple_min_invariant (tem) | |
73019a42 RG |
2074 | && !is_overflow_infinity (tem)) |
2075 | { | |
2076 | set_value_range (vr, VR_RANGE, tem, tem, NULL); | |
2077 | return; | |
2078 | } | |
2079 | } | |
b565d777 | 2080 | set_value_range_to_varying (vr); |
0bca51f0 DN |
2081 | return; |
2082 | } | |
2083 | ||
2084 | /* Get value ranges for each operand. For constant operands, create | |
2085 | a new value range with the operand to simplify processing. */ | |
0bca51f0 DN |
2086 | if (TREE_CODE (op0) == SSA_NAME) |
2087 | vr0 = *(get_value_range (op0)); | |
227858d1 | 2088 | else if (is_gimple_min_invariant (op0)) |
b60b4711 | 2089 | set_value_range_to_value (&vr0, op0, NULL); |
0bca51f0 | 2090 | else |
227858d1 | 2091 | set_value_range_to_varying (&vr0); |
0bca51f0 | 2092 | |
0bca51f0 DN |
2093 | if (TREE_CODE (op1) == SSA_NAME) |
2094 | vr1 = *(get_value_range (op1)); | |
227858d1 | 2095 | else if (is_gimple_min_invariant (op1)) |
b60b4711 | 2096 | set_value_range_to_value (&vr1, op1, NULL); |
0bca51f0 | 2097 | else |
227858d1 | 2098 | set_value_range_to_varying (&vr1); |
0bca51f0 DN |
2099 | |
2100 | /* If either range is UNDEFINED, so is the result. */ | |
2101 | if (vr0.type == VR_UNDEFINED || vr1.type == VR_UNDEFINED) | |
2102 | { | |
227858d1 | 2103 | set_value_range_to_undefined (vr); |
0bca51f0 DN |
2104 | return; |
2105 | } | |
2106 | ||
4e2d94a9 KH |
2107 | /* The type of the resulting value range defaults to VR0.TYPE. */ |
2108 | type = vr0.type; | |
2109 | ||
227858d1 | 2110 | /* Refuse to operate on VARYING ranges, ranges of different kinds |
29c8f8c2 KH |
2111 | and symbolic ranges. As an exception, we allow BIT_AND_EXPR |
2112 | because we may be able to derive a useful range even if one of | |
193a3681 JJ |
2113 | the operands is VR_VARYING or symbolic range. Similarly for |
2114 | divisions. TODO, we may be able to derive anti-ranges in | |
2115 | some cases. */ | |
29c8f8c2 | 2116 | if (code != BIT_AND_EXPR |
9b61327b KH |
2117 | && code != TRUTH_AND_EXPR |
2118 | && code != TRUTH_OR_EXPR | |
193a3681 JJ |
2119 | && code != TRUNC_DIV_EXPR |
2120 | && code != FLOOR_DIV_EXPR | |
2121 | && code != CEIL_DIV_EXPR | |
2122 | && code != EXACT_DIV_EXPR | |
2123 | && code != ROUND_DIV_EXPR | |
29c8f8c2 KH |
2124 | && (vr0.type == VR_VARYING |
2125 | || vr1.type == VR_VARYING | |
2126 | || vr0.type != vr1.type | |
2127 | || symbolic_range_p (&vr0) | |
2128 | || symbolic_range_p (&vr1))) | |
0bca51f0 | 2129 | { |
b565d777 | 2130 | set_value_range_to_varying (vr); |
0bca51f0 DN |
2131 | return; |
2132 | } | |
2133 | ||
2134 | /* Now evaluate the expression to determine the new range. */ | |
2d3cd5d5 | 2135 | if (POINTER_TYPE_P (expr_type) |
0bca51f0 DN |
2136 | || POINTER_TYPE_P (TREE_TYPE (op0)) |
2137 | || POINTER_TYPE_P (TREE_TYPE (op1))) | |
2138 | { | |
5be014d5 | 2139 | if (code == MIN_EXPR || code == MAX_EXPR) |
e57f2b41 | 2140 | { |
5be014d5 AP |
2141 | /* For MIN/MAX expressions with pointers, we only care about |
2142 | nullness, if both are non null, then the result is nonnull. | |
2143 | If both are null, then the result is null. Otherwise they | |
2144 | are varying. */ | |
2145 | if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1)) | |
2d3cd5d5 | 2146 | set_value_range_to_nonnull (vr, expr_type); |
e57f2b41 | 2147 | else if (range_is_null (&vr0) && range_is_null (&vr1)) |
2d3cd5d5 | 2148 | set_value_range_to_null (vr, expr_type); |
e57f2b41 KH |
2149 | else |
2150 | set_value_range_to_varying (vr); | |
5be014d5 AP |
2151 | |
2152 | return; | |
e57f2b41 | 2153 | } |
5be014d5 AP |
2154 | gcc_assert (code == POINTER_PLUS_EXPR); |
2155 | /* For pointer types, we are really only interested in asserting | |
2156 | whether the expression evaluates to non-NULL. */ | |
2157 | if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1)) | |
2d3cd5d5 | 2158 | set_value_range_to_nonnull (vr, expr_type); |
5be014d5 | 2159 | else if (range_is_null (&vr0) && range_is_null (&vr1)) |
2d3cd5d5 | 2160 | set_value_range_to_null (vr, expr_type); |
0bca51f0 | 2161 | else |
5be014d5 | 2162 | set_value_range_to_varying (vr); |
0bca51f0 DN |
2163 | |
2164 | return; | |
2165 | } | |
2166 | ||
2167 | /* For integer ranges, apply the operation to each end of the | |
2168 | range and see what we end up with. */ | |
2893f753 | 2169 | if (code == TRUTH_AND_EXPR |
74290e83 | 2170 | || code == TRUTH_OR_EXPR) |
227858d1 | 2171 | { |
9b61327b KH |
2172 | /* If one of the operands is zero, we know that the whole |
2173 | expression evaluates zero. */ | |
2174 | if (code == TRUTH_AND_EXPR | |
2175 | && ((vr0.type == VR_RANGE | |
2176 | && integer_zerop (vr0.min) | |
2177 | && integer_zerop (vr0.max)) | |
2178 | || (vr1.type == VR_RANGE | |
2179 | && integer_zerop (vr1.min) | |
2180 | && integer_zerop (vr1.max)))) | |
2181 | { | |
2182 | type = VR_RANGE; | |
2d3cd5d5 | 2183 | min = max = build_int_cst (expr_type, 0); |
9b61327b KH |
2184 | } |
2185 | /* If one of the operands is one, we know that the whole | |
2186 | expression evaluates one. */ | |
2187 | else if (code == TRUTH_OR_EXPR | |
2188 | && ((vr0.type == VR_RANGE | |
2189 | && integer_onep (vr0.min) | |
2190 | && integer_onep (vr0.max)) | |
2191 | || (vr1.type == VR_RANGE | |
2192 | && integer_onep (vr1.min) | |
2193 | && integer_onep (vr1.max)))) | |
2194 | { | |
2195 | type = VR_RANGE; | |
2d3cd5d5 | 2196 | min = max = build_int_cst (expr_type, 1); |
9b61327b KH |
2197 | } |
2198 | else if (vr0.type != VR_VARYING | |
2199 | && vr1.type != VR_VARYING | |
2200 | && vr0.type == vr1.type | |
2201 | && !symbolic_range_p (&vr0) | |
12df8a7e ILT |
2202 | && !overflow_infinity_range_p (&vr0) |
2203 | && !symbolic_range_p (&vr1) | |
2204 | && !overflow_infinity_range_p (&vr1)) | |
9b61327b KH |
2205 | { |
2206 | /* Boolean expressions cannot be folded with int_const_binop. */ | |
2d3cd5d5 RAE |
2207 | min = fold_binary (code, expr_type, vr0.min, vr1.min); |
2208 | max = fold_binary (code, expr_type, vr0.max, vr1.max); | |
9b61327b KH |
2209 | } |
2210 | else | |
2211 | { | |
31ab1cc9 | 2212 | /* The result of a TRUTH_*_EXPR is always true or false. */ |
2d3cd5d5 | 2213 | set_value_range_to_truthvalue (vr, expr_type); |
9b61327b KH |
2214 | return; |
2215 | } | |
227858d1 DN |
2216 | } |
2217 | else if (code == PLUS_EXPR | |
227858d1 DN |
2218 | || code == MIN_EXPR |
2219 | || code == MAX_EXPR) | |
0bca51f0 | 2220 | { |
567fb660 KH |
2221 | /* If we have a PLUS_EXPR with two VR_ANTI_RANGEs, drop to |
2222 | VR_VARYING. It would take more effort to compute a precise | |
2223 | range for such a case. For example, if we have op0 == 1 and | |
2224 | op1 == -1 with their ranges both being ~[0,0], we would have | |
2225 | op0 + op1 == 0, so we cannot claim that the sum is in ~[0,0]. | |
2226 | Note that we are guaranteed to have vr0.type == vr1.type at | |
2227 | this point. */ | |
2228 | if (code == PLUS_EXPR && vr0.type == VR_ANTI_RANGE) | |
2229 | { | |
2230 | set_value_range_to_varying (vr); | |
2231 | return; | |
2232 | } | |
2233 | ||
0bca51f0 DN |
2234 | /* For operations that make the resulting range directly |
2235 | proportional to the original ranges, apply the operation to | |
2236 | the same end of each range. */ | |
9983270b DN |
2237 | min = vrp_int_const_binop (code, vr0.min, vr1.min); |
2238 | max = vrp_int_const_binop (code, vr0.max, vr1.max); | |
77a30264 RG |
2239 | |
2240 | /* If both additions overflowed the range kind is still correct. | |
2241 | This happens regularly with subtracting something in unsigned | |
2242 | arithmetic. | |
2243 | ??? See PR30318 for all the cases we do not handle. */ | |
2244 | if (code == PLUS_EXPR | |
2245 | && (TREE_OVERFLOW (min) && !is_overflow_infinity (min)) | |
2246 | && (TREE_OVERFLOW (max) && !is_overflow_infinity (max))) | |
2247 | { | |
2248 | min = build_int_cst_wide (TREE_TYPE (min), | |
2249 | TREE_INT_CST_LOW (min), | |
2250 | TREE_INT_CST_HIGH (min)); | |
2251 | max = build_int_cst_wide (TREE_TYPE (max), | |
2252 | TREE_INT_CST_LOW (max), | |
2253 | TREE_INT_CST_HIGH (max)); | |
2254 | } | |
0bca51f0 | 2255 | } |
9983270b DN |
2256 | else if (code == MULT_EXPR |
2257 | || code == TRUNC_DIV_EXPR | |
227858d1 DN |
2258 | || code == FLOOR_DIV_EXPR |
2259 | || code == CEIL_DIV_EXPR | |
2260 | || code == EXACT_DIV_EXPR | |
6569e716 RG |
2261 | || code == ROUND_DIV_EXPR |
2262 | || code == RSHIFT_EXPR) | |
0bca51f0 | 2263 | { |
9983270b DN |
2264 | tree val[4]; |
2265 | size_t i; | |
12df8a7e | 2266 | bool sop; |
9983270b | 2267 | |
567fb660 KH |
2268 | /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs, |
2269 | drop to VR_VARYING. It would take more effort to compute a | |
2270 | precise range for such a case. For example, if we have | |
2271 | op0 == 65536 and op1 == 65536 with their ranges both being | |
2272 | ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so | |
2273 | we cannot claim that the product is in ~[0,0]. Note that we | |
2274 | are guaranteed to have vr0.type == vr1.type at this | |
2275 | point. */ | |
2276 | if (code == MULT_EXPR | |
2277 | && vr0.type == VR_ANTI_RANGE | |
eeef0e45 | 2278 | && !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0))) |
567fb660 KH |
2279 | { |
2280 | set_value_range_to_varying (vr); | |
2281 | return; | |
2282 | } | |
2283 | ||
af33044f RH |
2284 | /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1], |
2285 | then drop to VR_VARYING. Outside of this range we get undefined | |
7fa7289d | 2286 | behavior from the shift operation. We cannot even trust |
af33044f RH |
2287 | SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl |
2288 | shifts, and the operation at the tree level may be widened. */ | |
2289 | if (code == RSHIFT_EXPR) | |
13338552 | 2290 | { |
af33044f RH |
2291 | if (vr1.type == VR_ANTI_RANGE |
2292 | || !vrp_expr_computes_nonnegative (op1, &sop) | |
2293 | || (operand_less_p | |
2294 | (build_int_cst (TREE_TYPE (vr1.max), | |
2d3cd5d5 | 2295 | TYPE_PRECISION (expr_type) - 1), |
af33044f RH |
2296 | vr1.max) != 0)) |
2297 | { | |
2298 | set_value_range_to_varying (vr); | |
2299 | return; | |
2300 | } | |
13338552 RG |
2301 | } |
2302 | ||
193a3681 JJ |
2303 | else if ((code == TRUNC_DIV_EXPR |
2304 | || code == FLOOR_DIV_EXPR | |
2305 | || code == CEIL_DIV_EXPR | |
2306 | || code == EXACT_DIV_EXPR | |
2307 | || code == ROUND_DIV_EXPR) | |
2308 | && (vr0.type != VR_RANGE || symbolic_range_p (&vr0))) | |
2309 | { | |
2310 | /* For division, if op1 has VR_RANGE but op0 does not, something | |
2311 | can be deduced just from that range. Say [min, max] / [4, max] | |
2312 | gives [min / 4, max / 4] range. */ | |
2313 | if (vr1.type == VR_RANGE | |
2314 | && !symbolic_range_p (&vr1) | |
2315 | && !range_includes_zero_p (&vr1)) | |
2316 | { | |
2317 | vr0.type = type = VR_RANGE; | |
2318 | vr0.min = vrp_val_min (TREE_TYPE (op0)); | |
2319 | vr0.max = vrp_val_max (TREE_TYPE (op1)); | |
2320 | } | |
2321 | else | |
2322 | { | |
2323 | set_value_range_to_varying (vr); | |
2324 | return; | |
2325 | } | |
2326 | } | |
2327 | ||
2328 | /* For divisions, if op0 is VR_RANGE, we can deduce a range | |
2329 | even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can | |
2330 | include 0. */ | |
2331 | if ((code == TRUNC_DIV_EXPR | |
2332 | || code == FLOOR_DIV_EXPR | |
2333 | || code == CEIL_DIV_EXPR | |
2334 | || code == EXACT_DIV_EXPR | |
2335 | || code == ROUND_DIV_EXPR) | |
2336 | && vr0.type == VR_RANGE | |
2337 | && (vr1.type != VR_RANGE | |
2338 | || symbolic_range_p (&vr1) | |
2339 | || range_includes_zero_p (&vr1))) | |
2340 | { | |
2341 | tree zero = build_int_cst (TREE_TYPE (vr0.min), 0); | |
2342 | int cmp; | |
2343 | ||
2344 | sop = false; | |
2345 | min = NULL_TREE; | |
2346 | max = NULL_TREE; | |
2347 | if (vrp_expr_computes_nonnegative (op1, &sop) && !sop) | |
2348 | { | |
2349 | /* For unsigned division or when divisor is known | |
2350 | to be non-negative, the range has to cover | |
2351 | all numbers from 0 to max for positive max | |
2352 | and all numbers from min to 0 for negative min. */ | |
2353 | cmp = compare_values (vr0.max, zero); | |
2354 | if (cmp == -1) | |
2355 | max = zero; | |
2356 | else if (cmp == 0 || cmp == 1) | |
2357 | max = vr0.max; | |
2358 | else | |
2359 | type = VR_VARYING; | |
2360 | cmp = compare_values (vr0.min, zero); | |
2361 | if (cmp == 1) | |
2362 | min = zero; | |
2363 | else if (cmp == 0 || cmp == -1) | |
2364 | min = vr0.min; | |
2365 | else | |
2366 | type = VR_VARYING; | |
2367 | } | |
2368 | else | |
2369 | { | |
2370 | /* Otherwise the range is -max .. max or min .. -min | |
2371 | depending on which bound is bigger in absolute value, | |
2372 | as the division can change the sign. */ | |
2373 | abs_extent_range (vr, vr0.min, vr0.max); | |
2374 | return; | |
2375 | } | |
2376 | if (type == VR_VARYING) | |
2377 | { | |
2378 | set_value_range_to_varying (vr); | |
2379 | return; | |
2380 | } | |
2381 | } | |
2382 | ||
9983270b DN |
2383 | /* Multiplications and divisions are a bit tricky to handle, |
2384 | depending on the mix of signs we have in the two ranges, we | |
2385 | need to operate on different values to get the minimum and | |
2386 | maximum values for the new range. One approach is to figure | |
2387 | out all the variations of range combinations and do the | |
2388 | operations. | |
2389 | ||
2390 | However, this involves several calls to compare_values and it | |
2391 | is pretty convoluted. It's simpler to do the 4 operations | |
2392 | (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP | |
2393 | MAX1) and then figure the smallest and largest values to form | |
2394 | the new range. */ | |
12df8a7e ILT |
2395 | else |
2396 | { | |
193a3681 JJ |
2397 | gcc_assert ((vr0.type == VR_RANGE |
2398 | || (code == MULT_EXPR && vr0.type == VR_ANTI_RANGE)) | |
2399 | && vr0.type == vr1.type); | |
2400 | ||
2401 | /* Compute the 4 cross operations. */ | |
2402 | sop = false; | |
2403 | val[0] = vrp_int_const_binop (code, vr0.min, vr1.min); | |
2404 | if (val[0] == NULL_TREE) | |
12df8a7e | 2405 | sop = true; |
9983270b | 2406 | |
193a3681 JJ |
2407 | if (vr1.max == vr1.min) |
2408 | val[1] = NULL_TREE; | |
2409 | else | |
2410 | { | |
2411 | val[1] = vrp_int_const_binop (code, vr0.min, vr1.max); | |
2412 | if (val[1] == NULL_TREE) | |
2413 | sop = true; | |
2414 | } | |
9983270b | 2415 | |
193a3681 JJ |
2416 | if (vr0.max == vr0.min) |
2417 | val[2] = NULL_TREE; | |
2418 | else | |
2419 | { | |
2420 | val[2] = vrp_int_const_binop (code, vr0.max, vr1.min); | |
2421 | if (val[2] == NULL_TREE) | |
2422 | sop = true; | |
2423 | } | |
9983270b | 2424 | |
193a3681 JJ |
2425 | if (vr0.min == vr0.max || vr1.min == vr1.max) |
2426 | val[3] = NULL_TREE; | |
2427 | else | |
2428 | { | |
2429 | val[3] = vrp_int_const_binop (code, vr0.max, vr1.max); | |
2430 | if (val[3] == NULL_TREE) | |
2431 | sop = true; | |
2432 | } | |
9983270b | 2433 | |
193a3681 JJ |
2434 | if (sop) |
2435 | { | |
2436 | set_value_range_to_varying (vr); | |
2437 | return; | |
2438 | } | |
9983270b | 2439 | |
193a3681 JJ |
2440 | /* Set MIN to the minimum of VAL[i] and MAX to the maximum |
2441 | of VAL[i]. */ | |
2442 | min = val[0]; | |
2443 | max = val[0]; | |
2444 | for (i = 1; i < 4; i++) | |
227858d1 | 2445 | { |
193a3681 JJ |
2446 | if (!is_gimple_min_invariant (min) |
2447 | || (TREE_OVERFLOW (min) && !is_overflow_infinity (min)) | |
2448 | || !is_gimple_min_invariant (max) | |
2449 | || (TREE_OVERFLOW (max) && !is_overflow_infinity (max))) | |
2450 | break; | |
2451 | ||
2452 | if (val[i]) | |
9983270b | 2453 | { |
193a3681 JJ |
2454 | if (!is_gimple_min_invariant (val[i]) |
2455 | || (TREE_OVERFLOW (val[i]) | |
2456 | && !is_overflow_infinity (val[i]))) | |
2457 | { | |
2458 | /* If we found an overflowed value, set MIN and MAX | |
2459 | to it so that we set the resulting range to | |
2460 | VARYING. */ | |
2461 | min = max = val[i]; | |
2462 | break; | |
2463 | } | |
9983270b | 2464 | |
193a3681 JJ |
2465 | if (compare_values (val[i], min) == -1) |
2466 | min = val[i]; | |
9983270b | 2467 | |
193a3681 JJ |
2468 | if (compare_values (val[i], max) == 1) |
2469 | max = val[i]; | |
2470 | } | |
227858d1 DN |
2471 | } |
2472 | } | |
2473 | } | |
2474 | else if (code == MINUS_EXPR) | |
2475 | { | |
567fb660 KH |
2476 | /* If we have a MINUS_EXPR with two VR_ANTI_RANGEs, drop to |
2477 | VR_VARYING. It would take more effort to compute a precise | |
2478 | range for such a case. For example, if we have op0 == 1 and | |
2479 | op1 == 1 with their ranges both being ~[0,0], we would have | |
2480 | op0 - op1 == 0, so we cannot claim that the difference is in | |
2481 | ~[0,0]. Note that we are guaranteed to have | |
2482 | vr0.type == vr1.type at this point. */ | |
2483 | if (vr0.type == VR_ANTI_RANGE) | |
2484 | { | |
2485 | set_value_range_to_varying (vr); | |
2486 | return; | |
2487 | } | |
2488 | ||
227858d1 DN |
2489 | /* For MINUS_EXPR, apply the operation to the opposite ends of |
2490 | each range. */ | |
9983270b DN |
2491 | min = vrp_int_const_binop (code, vr0.min, vr1.max); |
2492 | max = vrp_int_const_binop (code, vr0.max, vr1.min); | |
227858d1 | 2493 | } |
29c8f8c2 KH |
2494 | else if (code == BIT_AND_EXPR) |
2495 | { | |
2496 | if (vr0.type == VR_RANGE | |
2497 | && vr0.min == vr0.max | |
12df8a7e ILT |
2498 | && TREE_CODE (vr0.max) == INTEGER_CST |
2499 | && !TREE_OVERFLOW (vr0.max) | |
2500 | && tree_int_cst_sgn (vr0.max) >= 0) | |
29c8f8c2 | 2501 | { |
2d3cd5d5 | 2502 | min = build_int_cst (expr_type, 0); |
29c8f8c2 KH |
2503 | max = vr0.max; |
2504 | } | |
2505 | else if (vr1.type == VR_RANGE | |
12df8a7e ILT |
2506 | && vr1.min == vr1.max |
2507 | && TREE_CODE (vr1.max) == INTEGER_CST | |
2508 | && !TREE_OVERFLOW (vr1.max) | |
2509 | && tree_int_cst_sgn (vr1.max) >= 0) | |
29c8f8c2 | 2510 | { |
4e2d94a9 | 2511 | type = VR_RANGE; |
2d3cd5d5 | 2512 | min = build_int_cst (expr_type, 0); |
29c8f8c2 KH |
2513 | max = vr1.max; |
2514 | } | |
2515 | else | |
2516 | { | |
2517 | set_value_range_to_varying (vr); | |
2518 | return; | |
2519 | } | |
2520 | } | |
30821654 PB |
2521 | else if (code == BIT_IOR_EXPR) |
2522 | { | |
2523 | if (vr0.type == VR_RANGE | |
2524 | && vr1.type == VR_RANGE | |
2525 | && TREE_CODE (vr0.min) == INTEGER_CST | |
2526 | && TREE_CODE (vr1.min) == INTEGER_CST | |
2527 | && TREE_CODE (vr0.max) == INTEGER_CST | |
2528 | && TREE_CODE (vr1.max) == INTEGER_CST | |
2529 | && tree_int_cst_sgn (vr0.min) >= 0 | |
2530 | && tree_int_cst_sgn (vr1.min) >= 0) | |
2531 | { | |
2532 | double_int vr0_max = tree_to_double_int (vr0.max); | |
2533 | double_int vr1_max = tree_to_double_int (vr1.max); | |
2534 | double_int ior_max; | |
2535 | ||
2536 | /* Set all bits to the right of the most significant one to 1. | |
2537 | For example, [0, 4] | [4, 4] = [4, 7]. */ | |
2538 | ior_max.low = vr0_max.low | vr1_max.low; | |
2539 | ior_max.high = vr0_max.high | vr1_max.high; | |
2540 | if (ior_max.high != 0) | |
2541 | { | |
26e4f1ba | 2542 | ior_max.low = ~(unsigned HOST_WIDE_INT)0u; |
30821654 PB |
2543 | ior_max.high |= ((HOST_WIDE_INT) 1 |
2544 | << floor_log2 (ior_max.high)) - 1; | |
2545 | } | |
26e4f1ba | 2546 | else if (ior_max.low != 0) |
30821654 PB |
2547 | ior_max.low |= ((unsigned HOST_WIDE_INT) 1u |
2548 | << floor_log2 (ior_max.low)) - 1; | |
2549 | ||
2550 | /* Both of these endpoints are conservative. */ | |
2551 | min = vrp_int_const_binop (MAX_EXPR, vr0.min, vr1.min); | |
2552 | max = double_int_to_tree (expr_type, ior_max); | |
2553 | } | |
2554 | else | |
2555 | { | |
2556 | set_value_range_to_varying (vr); | |
2557 | return; | |
2558 | } | |
2559 | } | |
227858d1 DN |
2560 | else |
2561 | gcc_unreachable (); | |
fda05890 | 2562 | |
9983270b | 2563 | /* If either MIN or MAX overflowed, then set the resulting range to |
12df8a7e ILT |
2564 | VARYING. But we do accept an overflow infinity |
2565 | representation. */ | |
2566 | if (min == NULL_TREE | |
2567 | || !is_gimple_min_invariant (min) | |
2568 | || (TREE_OVERFLOW (min) && !is_overflow_infinity (min)) | |
2569 | || max == NULL_TREE | |
2570 | || !is_gimple_min_invariant (max) | |
2571 | || (TREE_OVERFLOW (max) && !is_overflow_infinity (max))) | |
2572 | { | |
2573 | set_value_range_to_varying (vr); | |
2574 | return; | |
2575 | } | |
2576 | ||
fa633851 ILT |
2577 | /* We punt if: |
2578 | 1) [-INF, +INF] | |
2579 | 2) [-INF, +-INF(OVF)] | |
2580 | 3) [+-INF(OVF), +INF] | |
2581 | 4) [+-INF(OVF), +-INF(OVF)] | |
2582 | We learn nothing when we have INF and INF(OVF) on both sides. | |
2583 | Note that we do accept [-INF, -INF] and [+INF, +INF] without | |
2584 | overflow. */ | |
e1f28918 ILT |
2585 | if ((vrp_val_is_min (min) || is_overflow_infinity (min)) |
2586 | && (vrp_val_is_max (max) || is_overflow_infinity (max))) | |
227858d1 | 2587 | { |
9983270b DN |
2588 | set_value_range_to_varying (vr); |
2589 | return; | |
fda05890 KH |
2590 | } |
2591 | ||
227858d1 DN |
2592 | cmp = compare_values (min, max); |
2593 | if (cmp == -2 || cmp == 1) | |
2594 | { | |
2595 | /* If the new range has its limits swapped around (MIN > MAX), | |
2596 | then the operation caused one of them to wrap around, mark | |
2597 | the new range VARYING. */ | |
2598 | set_value_range_to_varying (vr); | |
2599 | } | |
2600 | else | |
4e2d94a9 | 2601 | set_value_range (vr, type, min, max, NULL); |
fda05890 KH |
2602 | } |
2603 | ||
2604 | ||
0bca51f0 DN |
2605 | /* Extract range information from a unary expression EXPR based on |
2606 | the range of its operand and the expression code. */ | |
2607 | ||
2608 | static void | |
2d3cd5d5 RAE |
2609 | extract_range_from_unary_expr (value_range_t *vr, enum tree_code code, |
2610 | tree type, tree op0) | |
0bca51f0 | 2611 | { |
2d3cd5d5 | 2612 | tree min, max; |
0bca51f0 | 2613 | int cmp; |
227858d1 DN |
2614 | value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; |
2615 | ||
2616 | /* Refuse to operate on certain unary expressions for which we | |
2617 | cannot easily determine a resulting range. */ | |
2618 | if (code == FIX_TRUNC_EXPR | |
227858d1 DN |
2619 | || code == FLOAT_EXPR |
2620 | || code == BIT_NOT_EXPR | |
227858d1 DN |
2621 | || code == CONJ_EXPR) |
2622 | { | |
73019a42 RG |
2623 | /* We can still do constant propagation here. */ |
2624 | if ((op0 = op_with_constant_singleton_value_range (op0)) != NULL_TREE) | |
2625 | { | |
2626 | tree tem = fold_unary (code, type, op0); | |
08298a8c RG |
2627 | if (tem |
2628 | && is_gimple_min_invariant (tem) | |
73019a42 RG |
2629 | && !is_overflow_infinity (tem)) |
2630 | { | |
2631 | set_value_range (vr, VR_RANGE, tem, tem, NULL); | |
2632 | return; | |
2633 | } | |
2634 | } | |
227858d1 DN |
2635 | set_value_range_to_varying (vr); |
2636 | return; | |
2637 | } | |
0bca51f0 DN |
2638 | |
2639 | /* Get value ranges for the operand. For constant operands, create | |
2640 | a new value range with the operand to simplify processing. */ | |
0bca51f0 DN |
2641 | if (TREE_CODE (op0) == SSA_NAME) |
2642 | vr0 = *(get_value_range (op0)); | |
227858d1 | 2643 | else if (is_gimple_min_invariant (op0)) |
b60b4711 | 2644 | set_value_range_to_value (&vr0, op0, NULL); |
0bca51f0 | 2645 | else |
227858d1 | 2646 | set_value_range_to_varying (&vr0); |
0bca51f0 DN |
2647 | |
2648 | /* If VR0 is UNDEFINED, so is the result. */ | |
2649 | if (vr0.type == VR_UNDEFINED) | |
2650 | { | |
227858d1 | 2651 | set_value_range_to_undefined (vr); |
0bca51f0 DN |
2652 | return; |
2653 | } | |
2654 | ||
a3b196e3 JL |
2655 | /* Refuse to operate on symbolic ranges, or if neither operand is |
2656 | a pointer or integral type. */ | |
2657 | if ((!INTEGRAL_TYPE_P (TREE_TYPE (op0)) | |
2658 | && !POINTER_TYPE_P (TREE_TYPE (op0))) | |
2659 | || (vr0.type != VR_VARYING | |
2660 | && symbolic_range_p (&vr0))) | |
0bca51f0 | 2661 | { |
b565d777 | 2662 | set_value_range_to_varying (vr); |
0bca51f0 DN |
2663 | return; |
2664 | } | |
2665 | ||
2666 | /* If the expression involves pointers, we are only interested in | |
2667 | determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */ | |
2d3cd5d5 | 2668 | if (POINTER_TYPE_P (type) || POINTER_TYPE_P (TREE_TYPE (op0))) |
0bca51f0 | 2669 | { |
12df8a7e | 2670 | bool sop; |
6ac01510 | 2671 | |
12df8a7e ILT |
2672 | sop = false; |
2673 | if (range_is_nonnull (&vr0) | |
2d3cd5d5 | 2674 | || (tree_unary_nonzero_warnv_p (code, type, op0, &sop) |
12df8a7e | 2675 | && !sop)) |
2d3cd5d5 | 2676 | set_value_range_to_nonnull (vr, type); |
0bca51f0 | 2677 | else if (range_is_null (&vr0)) |
2d3cd5d5 | 2678 | set_value_range_to_null (vr, type); |
0bca51f0 | 2679 | else |
b565d777 | 2680 | set_value_range_to_varying (vr); |
0bca51f0 DN |
2681 | |
2682 | return; | |
2683 | } | |
2684 | ||
2685 | /* Handle unary expressions on integer ranges. */ | |
1a87cf0c | 2686 | if (CONVERT_EXPR_CODE_P (code) |
b47ee386 RG |
2687 | && INTEGRAL_TYPE_P (type) |
2688 | && INTEGRAL_TYPE_P (TREE_TYPE (op0))) | |
0bca51f0 | 2689 | { |
441e96b5 | 2690 | tree inner_type = TREE_TYPE (op0); |
2d3cd5d5 | 2691 | tree outer_type = type; |
441e96b5 | 2692 | |
b47ee386 RG |
2693 | /* If VR0 is varying and we increase the type precision, assume |
2694 | a full range for the following transformation. */ | |
2695 | if (vr0.type == VR_VARYING | |
2696 | && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type)) | |
2735e93e | 2697 | { |
b47ee386 RG |
2698 | vr0.type = VR_RANGE; |
2699 | vr0.min = TYPE_MIN_VALUE (inner_type); | |
2700 | vr0.max = TYPE_MAX_VALUE (inner_type); | |
2735e93e JL |
2701 | } |
2702 | ||
b47ee386 RG |
2703 | /* If VR0 is a constant range or anti-range and the conversion is |
2704 | not truncating we can convert the min and max values and | |
2705 | canonicalize the resulting range. Otherwise we can do the | |
2706 | conversion if the size of the range is less than what the | |
2707 | precision of the target type can represent and the range is | |
2708 | not an anti-range. */ | |
2709 | if ((vr0.type == VR_RANGE | |
2710 | || vr0.type == VR_ANTI_RANGE) | |
2711 | && TREE_CODE (vr0.min) == INTEGER_CST | |
2712 | && TREE_CODE (vr0.max) == INTEGER_CST | |
2713 | && !is_overflow_infinity (vr0.min) | |
2714 | && !is_overflow_infinity (vr0.max) | |
2715 | && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type) | |
2716 | || (vr0.type == VR_RANGE | |
2717 | && integer_zerop (int_const_binop (RSHIFT_EXPR, | |
2718 | int_const_binop (MINUS_EXPR, vr0.max, vr0.min, 0), | |
2719 | size_int (TYPE_PRECISION (outer_type)), 0))))) | |
441e96b5 | 2720 | { |
b47ee386 RG |
2721 | tree new_min, new_max; |
2722 | new_min = force_fit_type_double (outer_type, | |
2723 | TREE_INT_CST_LOW (vr0.min), | |
2724 | TREE_INT_CST_HIGH (vr0.min), 0, 0); | |
2725 | new_max = force_fit_type_double (outer_type, | |
2726 | TREE_INT_CST_LOW (vr0.max), | |
2727 | TREE_INT_CST_HIGH (vr0.max), 0, 0); | |
2728 | set_and_canonicalize_value_range (vr, vr0.type, | |
2729 | new_min, new_max, NULL); | |
441e96b5 DN |
2730 | return; |
2731 | } | |
b47ee386 RG |
2732 | |
2733 | set_value_range_to_varying (vr); | |
2734 | return; | |
0bca51f0 DN |
2735 | } |
2736 | ||
a3b196e3 JL |
2737 | /* Conversion of a VR_VARYING value to a wider type can result |
2738 | in a usable range. So wait until after we've handled conversions | |
2739 | before dropping the result to VR_VARYING if we had a source | |
2740 | operand that is VR_VARYING. */ | |
2741 | if (vr0.type == VR_VARYING) | |
2742 | { | |
2743 | set_value_range_to_varying (vr); | |
2744 | return; | |
2745 | } | |
2746 | ||
0bca51f0 DN |
2747 | /* Apply the operation to each end of the range and see what we end |
2748 | up with. */ | |
227858d1 | 2749 | if (code == NEGATE_EXPR |
2d3cd5d5 | 2750 | && !TYPE_UNSIGNED (type)) |
227858d1 | 2751 | { |
96b2034b | 2752 | /* NEGATE_EXPR flips the range around. We need to treat |
12df8a7e ILT |
2753 | TYPE_MIN_VALUE specially. */ |
2754 | if (is_positive_overflow_infinity (vr0.max)) | |
2d3cd5d5 | 2755 | min = negative_overflow_infinity (type); |
12df8a7e | 2756 | else if (is_negative_overflow_infinity (vr0.max)) |
2d3cd5d5 | 2757 | min = positive_overflow_infinity (type); |
e1f28918 | 2758 | else if (!vrp_val_is_min (vr0.max)) |
2d3cd5d5 RAE |
2759 | min = fold_unary_to_constant (code, type, vr0.max); |
2760 | else if (needs_overflow_infinity (type)) | |
12df8a7e | 2761 | { |
2d3cd5d5 | 2762 | if (supports_overflow_infinity (type) |
8cf781f0 | 2763 | && !is_overflow_infinity (vr0.min) |
e1f28918 | 2764 | && !vrp_val_is_min (vr0.min)) |
2d3cd5d5 | 2765 | min = positive_overflow_infinity (type); |
12df8a7e ILT |
2766 | else |
2767 | { | |
2768 | set_value_range_to_varying (vr); | |
2769 | return; | |
2770 | } | |
2771 | } | |
2772 | else | |
2d3cd5d5 | 2773 | min = TYPE_MIN_VALUE (type); |
12df8a7e ILT |
2774 | |
2775 | if (is_positive_overflow_infinity (vr0.min)) | |
2d3cd5d5 | 2776 | max = negative_overflow_infinity (type); |
12df8a7e | 2777 | else if (is_negative_overflow_infinity (vr0.min)) |
2d3cd5d5 | 2778 | max = positive_overflow_infinity (type); |
e1f28918 | 2779 | else if (!vrp_val_is_min (vr0.min)) |
2d3cd5d5 RAE |
2780 | max = fold_unary_to_constant (code, type, vr0.min); |
2781 | else if (needs_overflow_infinity (type)) | |
12df8a7e | 2782 | { |
2d3cd5d5 RAE |
2783 | if (supports_overflow_infinity (type)) |
2784 | max = positive_overflow_infinity (type); | |
12df8a7e ILT |
2785 | else |
2786 | { | |
2787 | set_value_range_to_varying (vr); | |
2788 | return; | |
2789 | } | |
2790 | } | |
2791 | else | |
2d3cd5d5 | 2792 | max = TYPE_MIN_VALUE (type); |
c1a70a3c RS |
2793 | } |
2794 | else if (code == NEGATE_EXPR | |
2d3cd5d5 | 2795 | && TYPE_UNSIGNED (type)) |
c1a70a3c RS |
2796 | { |
2797 | if (!range_includes_zero_p (&vr0)) | |
2798 | { | |
2d3cd5d5 RAE |
2799 | max = fold_unary_to_constant (code, type, vr0.min); |
2800 | min = fold_unary_to_constant (code, type, vr0.max); | |
c1a70a3c RS |
2801 | } |
2802 | else | |
2803 | { | |
2804 | if (range_is_null (&vr0)) | |
2d3cd5d5 | 2805 | set_value_range_to_null (vr, type); |
c1a70a3c RS |
2806 | else |
2807 | set_value_range_to_varying (vr); | |
2808 | return; | |
2809 | } | |
227858d1 DN |
2810 | } |
2811 | else if (code == ABS_EXPR | |
2d3cd5d5 | 2812 | && !TYPE_UNSIGNED (type)) |
227858d1 | 2813 | { |
ff08cbee JM |
2814 | /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a |
2815 | useful range. */ | |
2d3cd5d5 | 2816 | if (!TYPE_OVERFLOW_UNDEFINED (type) |
ff08cbee | 2817 | && ((vr0.type == VR_RANGE |
e1f28918 | 2818 | && vrp_val_is_min (vr0.min)) |
ff08cbee | 2819 | || (vr0.type == VR_ANTI_RANGE |
e1f28918 | 2820 | && !vrp_val_is_min (vr0.min) |
ff08cbee JM |
2821 | && !range_includes_zero_p (&vr0)))) |
2822 | { | |
2823 | set_value_range_to_varying (vr); | |
2824 | return; | |
2825 | } | |
2826 | ||
227858d1 DN |
2827 | /* ABS_EXPR may flip the range around, if the original range |
2828 | included negative values. */ | |
12df8a7e | 2829 | if (is_overflow_infinity (vr0.min)) |
2d3cd5d5 | 2830 | min = positive_overflow_infinity (type); |
e1f28918 | 2831 | else if (!vrp_val_is_min (vr0.min)) |
2d3cd5d5 RAE |
2832 | min = fold_unary_to_constant (code, type, vr0.min); |
2833 | else if (!needs_overflow_infinity (type)) | |
2834 | min = TYPE_MAX_VALUE (type); | |
2835 | else if (supports_overflow_infinity (type)) | |
2836 | min = positive_overflow_infinity (type); | |
12df8a7e ILT |
2837 | else |
2838 | { | |
2839 | set_value_range_to_varying (vr); | |
2840 | return; | |
2841 | } | |
227858d1 | 2842 | |
12df8a7e | 2843 | if (is_overflow_infinity (vr0.max)) |
2d3cd5d5 | 2844 | max = positive_overflow_infinity (type); |
e1f28918 | 2845 | else if (!vrp_val_is_min (vr0.max)) |
2d3cd5d5 RAE |
2846 | max = fold_unary_to_constant (code, type, vr0.max); |
2847 | else if (!needs_overflow_infinity (type)) | |
2848 | max = TYPE_MAX_VALUE (type); | |
d3cbd7de RG |
2849 | else if (supports_overflow_infinity (type) |
2850 | /* We shouldn't generate [+INF, +INF] as set_value_range | |
2851 | doesn't like this and ICEs. */ | |
2852 | && !is_positive_overflow_infinity (min)) | |
2d3cd5d5 | 2853 | max = positive_overflow_infinity (type); |
12df8a7e ILT |
2854 | else |
2855 | { | |
2856 | set_value_range_to_varying (vr); | |
2857 | return; | |
2858 | } | |
227858d1 | 2859 | |
ff08cbee JM |
2860 | cmp = compare_values (min, max); |
2861 | ||
2862 | /* If a VR_ANTI_RANGEs contains zero, then we have | |
2863 | ~[-INF, min(MIN, MAX)]. */ | |
2864 | if (vr0.type == VR_ANTI_RANGE) | |
2865 | { | |
2866 | if (range_includes_zero_p (&vr0)) | |
2867 | { | |
ff08cbee JM |
2868 | /* Take the lower of the two values. */ |
2869 | if (cmp != 1) | |
2870 | max = min; | |
2871 | ||
2872 | /* Create ~[-INF, min (abs(MIN), abs(MAX))] | |
2873 | or ~[-INF + 1, min (abs(MIN), abs(MAX))] when | |
2874 | flag_wrapv is set and the original anti-range doesn't include | |
2875 | TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */ | |
2d3cd5d5 | 2876 | if (TYPE_OVERFLOW_WRAPS (type)) |
12df8a7e | 2877 | { |
2d3cd5d5 | 2878 | tree type_min_value = TYPE_MIN_VALUE (type); |
12df8a7e ILT |
2879 | |
2880 | min = (vr0.min != type_min_value | |
2881 | ? int_const_binop (PLUS_EXPR, type_min_value, | |
2882 | integer_one_node, 0) | |
2883 | : type_min_value); | |
2884 | } | |
2885 | else | |
2886 | { | |
2887 | if (overflow_infinity_range_p (&vr0)) | |
2d3cd5d5 | 2888 | min = negative_overflow_infinity (type); |
12df8a7e | 2889 | else |
2d3cd5d5 | 2890 | min = TYPE_MIN_VALUE (type); |
12df8a7e | 2891 | } |
ff08cbee JM |
2892 | } |
2893 | else | |
2894 | { | |
2895 | /* All else has failed, so create the range [0, INF], even for | |
2896 | flag_wrapv since TYPE_MIN_VALUE is in the original | |
2897 | anti-range. */ | |
2898 | vr0.type = VR_RANGE; | |
2d3cd5d5 RAE |
2899 | min = build_int_cst (type, 0); |
2900 | if (needs_overflow_infinity (type)) | |
12df8a7e | 2901 | { |
2d3cd5d5 RAE |
2902 | if (supports_overflow_infinity (type)) |
2903 | max = positive_overflow_infinity (type); | |
12df8a7e ILT |
2904 | else |
2905 | { | |
2906 | set_value_range_to_varying (vr); | |
2907 | return; | |
2908 | } | |
2909 | } | |
2910 | else | |
2d3cd5d5 | 2911 | max = TYPE_MAX_VALUE (type); |
ff08cbee JM |
2912 | } |
2913 | } | |
2914 | ||
2915 | /* If the range contains zero then we know that the minimum value in the | |
2916 | range will be zero. */ | |
2917 | else if (range_includes_zero_p (&vr0)) | |
2918 | { | |
2919 | if (cmp == 1) | |
2920 | max = min; | |
2d3cd5d5 | 2921 | min = build_int_cst (type, 0); |
ff08cbee JM |
2922 | } |
2923 | else | |
227858d1 | 2924 | { |
ff08cbee JM |
2925 | /* If the range was reversed, swap MIN and MAX. */ |
2926 | if (cmp == 1) | |
2927 | { | |
2928 | tree t = min; | |
2929 | min = max; | |
2930 | max = t; | |
2931 | } | |
227858d1 DN |
2932 | } |
2933 | } | |
2934 | else | |
2935 | { | |
2936 | /* Otherwise, operate on each end of the range. */ | |
2d3cd5d5 RAE |
2937 | min = fold_unary_to_constant (code, type, vr0.min); |
2938 | max = fold_unary_to_constant (code, type, vr0.max); | |
12df8a7e | 2939 | |
2d3cd5d5 | 2940 | if (needs_overflow_infinity (type)) |
12df8a7e ILT |
2941 | { |
2942 | gcc_assert (code != NEGATE_EXPR && code != ABS_EXPR); | |
8cf781f0 ILT |
2943 | |
2944 | /* If both sides have overflowed, we don't know | |
2945 | anything. */ | |
2946 | if ((is_overflow_infinity (vr0.min) | |
2947 | || TREE_OVERFLOW (min)) | |
2948 | && (is_overflow_infinity (vr0.max) | |
2949 | || TREE_OVERFLOW (max))) | |
2950 | { | |
2951 | set_value_range_to_varying (vr); | |
2952 | return; | |
2953 | } | |
2954 | ||
12df8a7e ILT |
2955 | if (is_overflow_infinity (vr0.min)) |
2956 | min = vr0.min; | |
2957 | else if (TREE_OVERFLOW (min)) | |
2958 | { | |
2d3cd5d5 | 2959 | if (supports_overflow_infinity (type)) |
12df8a7e ILT |
2960 | min = (tree_int_cst_sgn (min) >= 0 |
2961 | ? positive_overflow_infinity (TREE_TYPE (min)) | |
2962 | : negative_overflow_infinity (TREE_TYPE (min))); | |
2963 | else | |
2964 | { | |
2965 | set_value_range_to_varying (vr); | |
2966 | return; | |
2967 | } | |
2968 | } | |
2969 | ||
2970 | if (is_overflow_infinity (vr0.max)) | |
2971 | max = vr0.max; | |
2972 | else if (TREE_OVERFLOW (max)) | |
2973 | { | |
2d3cd5d5 | 2974 | if (supports_overflow_infinity (type)) |
12df8a7e ILT |
2975 | max = (tree_int_cst_sgn (max) >= 0 |
2976 | ? positive_overflow_infinity (TREE_TYPE (max)) | |
2977 | : negative_overflow_infinity (TREE_TYPE (max))); | |
2978 | else | |
2979 | { | |
2980 | set_value_range_to_varying (vr); | |
2981 | return; | |
2982 | } | |
2983 | } | |
2984 | } | |
227858d1 | 2985 | } |
0bca51f0 DN |
2986 | |
2987 | cmp = compare_values (min, max); | |
2988 | if (cmp == -2 || cmp == 1) | |
2989 | { | |
2990 | /* If the new range has its limits swapped around (MIN > MAX), | |
2991 | then the operation caused one of them to wrap around, mark | |
2992 | the new range VARYING. */ | |
b565d777 | 2993 | set_value_range_to_varying (vr); |
0bca51f0 DN |
2994 | } |
2995 | else | |
227858d1 DN |
2996 | set_value_range (vr, vr0.type, min, max, NULL); |
2997 | } | |
2998 | ||
2999 | ||
f255541f RC |
3000 | /* Extract range information from a conditional expression EXPR based on |
3001 | the ranges of each of its operands and the expression code. */ | |
3002 | ||
3003 | static void | |
3004 | extract_range_from_cond_expr (value_range_t *vr, tree expr) | |
3005 | { | |
3006 | tree op0, op1; | |
3007 | value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; | |
3008 | value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; | |
3009 | ||
3010 | /* Get value ranges for each operand. For constant operands, create | |
3011 | a new value range with the operand to simplify processing. */ | |
3012 | op0 = COND_EXPR_THEN (expr); | |
3013 | if (TREE_CODE (op0) == SSA_NAME) | |
3014 | vr0 = *(get_value_range (op0)); | |
3015 | else if (is_gimple_min_invariant (op0)) | |
b60b4711 | 3016 | set_value_range_to_value (&vr0, op0, NULL); |
f255541f RC |
3017 | else |
3018 | set_value_range_to_varying (&vr0); | |
3019 | ||
3020 | op1 = COND_EXPR_ELSE (expr); | |
3021 | if (TREE_CODE (op1) == SSA_NAME) | |
3022 | vr1 = *(get_value_range (op1)); | |
3023 | else if (is_gimple_min_invariant (op1)) | |
b60b4711 | 3024 | set_value_range_to_value (&vr1, op1, NULL); |
f255541f RC |
3025 | else |
3026 | set_value_range_to_varying (&vr1); | |
3027 | ||
3028 | /* The resulting value range is the union of the operand ranges */ | |
3029 | vrp_meet (&vr0, &vr1); | |
3030 | copy_value_range (vr, &vr0); | |
3031 | } | |
3032 | ||
3033 | ||
227858d1 DN |
3034 | /* Extract range information from a comparison expression EXPR based |
3035 | on the range of its operand and the expression code. */ | |
3036 | ||
3037 | static void | |
2d3cd5d5 RAE |
3038 | extract_range_from_comparison (value_range_t *vr, enum tree_code code, |
3039 | tree type, tree op0, tree op1) | |
227858d1 | 3040 | { |
12df8a7e | 3041 | bool sop = false; |
726a989a RB |
3042 | tree val; |
3043 | ||
6b99f156 JH |
3044 | val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop, |
3045 | NULL); | |
12df8a7e ILT |
3046 | |
3047 | /* A disadvantage of using a special infinity as an overflow | |
3048 | representation is that we lose the ability to record overflow | |
3049 | when we don't have an infinity. So we have to ignore a result | |
3050 | which relies on overflow. */ | |
3051 | ||
3052 | if (val && !is_overflow_infinity (val) && !sop) | |
227858d1 DN |
3053 | { |
3054 | /* Since this expression was found on the RHS of an assignment, | |
3055 | its type may be different from _Bool. Convert VAL to EXPR's | |
3056 | type. */ | |
2d3cd5d5 | 3057 | val = fold_convert (type, val); |
b60b4711 ILT |
3058 | if (is_gimple_min_invariant (val)) |
3059 | set_value_range_to_value (vr, val, vr->equiv); | |
3060 | else | |
3061 | set_value_range (vr, VR_RANGE, val, val, vr->equiv); | |
227858d1 DN |
3062 | } |
3063 | else | |
31ab1cc9 | 3064 | /* The result of a comparison is always true or false. */ |
2d3cd5d5 | 3065 | set_value_range_to_truthvalue (vr, type); |
0bca51f0 DN |
3066 | } |
3067 | ||
726a989a RB |
3068 | /* Try to derive a nonnegative or nonzero range out of STMT relying |
3069 | primarily on generic routines in fold in conjunction with range data. | |
3070 | Store the result in *VR */ | |
0bca51f0 | 3071 | |
726a989a RB |
3072 | static void |
3073 | extract_range_basic (value_range_t *vr, gimple stmt) | |
3074 | { | |
3075 | bool sop = false; | |
3076 | tree type = gimple_expr_type (stmt); | |
3077 | ||
3078 | if (INTEGRAL_TYPE_P (type) | |
3079 | && gimple_stmt_nonnegative_warnv_p (stmt, &sop)) | |
3080 | set_value_range_to_nonnegative (vr, type, | |
3081 | sop || stmt_overflow_infinity (stmt)); | |
3082 | else if (vrp_stmt_computes_nonzero (stmt, &sop) | |
3083 | && !sop) | |
3084 | set_value_range_to_nonnull (vr, type); | |
3085 | else | |
3086 | set_value_range_to_varying (vr); | |
3087 | } | |
3088 | ||
3089 | ||
3090 | /* Try to compute a useful range out of assignment STMT and store it | |
227858d1 | 3091 | in *VR. */ |
0bca51f0 DN |
3092 | |
3093 | static void | |
726a989a | 3094 | extract_range_from_assignment (value_range_t *vr, gimple stmt) |
0bca51f0 | 3095 | { |
726a989a | 3096 | enum tree_code code = gimple_assign_rhs_code (stmt); |
0bca51f0 DN |
3097 | |
3098 | if (code == ASSERT_EXPR) | |
726a989a | 3099 | extract_range_from_assert (vr, gimple_assign_rhs1 (stmt)); |
0bca51f0 | 3100 | else if (code == SSA_NAME) |
726a989a | 3101 | extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt)); |
227858d1 | 3102 | else if (TREE_CODE_CLASS (code) == tcc_binary |
227858d1 DN |
3103 | || code == TRUTH_AND_EXPR |
3104 | || code == TRUTH_OR_EXPR | |
3105 | || code == TRUTH_XOR_EXPR) | |
726a989a RB |
3106 | extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt), |
3107 | gimple_expr_type (stmt), | |
3108 | gimple_assign_rhs1 (stmt), | |
3109 | gimple_assign_rhs2 (stmt)); | |
0bca51f0 | 3110 | else if (TREE_CODE_CLASS (code) == tcc_unary) |
726a989a RB |
3111 | extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt), |
3112 | gimple_expr_type (stmt), | |
3113 | gimple_assign_rhs1 (stmt)); | |
f255541f | 3114 | else if (code == COND_EXPR) |
726a989a | 3115 | extract_range_from_cond_expr (vr, gimple_assign_rhs1 (stmt)); |
227858d1 | 3116 | else if (TREE_CODE_CLASS (code) == tcc_comparison) |
726a989a RB |
3117 | extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt), |
3118 | gimple_expr_type (stmt), | |
3119 | gimple_assign_rhs1 (stmt), | |
3120 | gimple_assign_rhs2 (stmt)); | |
3121 | else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS | |
3122 | && is_gimple_min_invariant (gimple_assign_rhs1 (stmt))) | |
3123 | set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL); | |
0bca51f0 | 3124 | else |
b565d777 | 3125 | set_value_range_to_varying (vr); |
b16caf72 | 3126 | |
b16caf72 | 3127 | if (vr->type == VR_VARYING) |
726a989a | 3128 | extract_range_basic (vr, stmt); |
0bca51f0 DN |
3129 | } |
3130 | ||
1e8552eb | 3131 | /* Given a range VR, a LOOP and a variable VAR, determine whether it |
0bca51f0 DN |
3132 | would be profitable to adjust VR using scalar evolution information |
3133 | for VAR. If so, update VR with the new limits. */ | |
3134 | ||
3135 | static void | |
726a989a RB |
3136 | adjust_range_with_scev (value_range_t *vr, struct loop *loop, |
3137 | gimple stmt, tree var) | |
0bca51f0 | 3138 | { |
20527215 | 3139 | tree init, step, chrec, tmin, tmax, min, max, type; |
d7f5de76 | 3140 | enum ev_direction dir; |
0bca51f0 DN |
3141 | |
3142 | /* TODO. Don't adjust anti-ranges. An anti-range may provide | |
3143 | better opportunities than a regular range, but I'm not sure. */ | |
3144 | if (vr->type == VR_ANTI_RANGE) | |
3145 | return; | |
3146 | ||
d7770457 | 3147 | chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var)); |
6f1c9cd0 SP |
3148 | |
3149 | /* Like in PR19590, scev can return a constant function. */ | |
3150 | if (is_gimple_min_invariant (chrec)) | |
3151 | { | |
cdc64612 | 3152 | set_value_range_to_value (vr, chrec, vr->equiv); |
6f1c9cd0 SP |
3153 | return; |
3154 | } | |
3155 | ||
0bca51f0 DN |
3156 | if (TREE_CODE (chrec) != POLYNOMIAL_CHREC) |
3157 | return; | |
3158 | ||
d7770457 SP |
3159 | init = initial_condition_in_loop_num (chrec, loop->num); |
3160 | step = evolution_part_in_loop_num (chrec, loop->num); | |
0bca51f0 DN |
3161 | |
3162 | /* If STEP is symbolic, we can't know whether INIT will be the | |
04dce5a4 ZD |
3163 | minimum or maximum value in the range. Also, unless INIT is |
3164 | a simple expression, compare_values and possibly other functions | |
3165 | in tree-vrp won't be able to handle it. */ | |
d7770457 | 3166 | if (step == NULL_TREE |
04dce5a4 ZD |
3167 | || !is_gimple_min_invariant (step) |
3168 | || !valid_value_p (init)) | |
0bca51f0 DN |
3169 | return; |
3170 | ||
d7f5de76 ZD |
3171 | dir = scev_direction (chrec); |
3172 | if (/* Do not adjust ranges if we do not know whether the iv increases | |
3173 | or decreases, ... */ | |
3174 | dir == EV_DIR_UNKNOWN | |
3175 | /* ... or if it may wrap. */ | |
42fd6772 | 3176 | || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec), |
d7f5de76 | 3177 | true)) |
227858d1 DN |
3178 | return; |
3179 | ||
12df8a7e ILT |
3180 | /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of |
3181 | negative_overflow_infinity and positive_overflow_infinity, | |
3182 | because we have concluded that the loop probably does not | |
3183 | wrap. */ | |
3184 | ||
20527215 ZD |
3185 | type = TREE_TYPE (var); |
3186 | if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type)) | |
3187 | tmin = lower_bound_in_type (type, type); | |
3188 | else | |
3189 | tmin = TYPE_MIN_VALUE (type); | |
3190 | if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type)) | |
3191 | tmax = upper_bound_in_type (type, type); | |
3192 | else | |
3193 | tmax = TYPE_MAX_VALUE (type); | |
3194 | ||
3195 | if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED) | |
0bca51f0 | 3196 | { |
20527215 ZD |
3197 | min = tmin; |
3198 | max = tmax; | |
3199 | ||
0bca51f0 DN |
3200 | /* For VARYING or UNDEFINED ranges, just about anything we get |
3201 | from scalar evolutions should be better. */ | |
4f67dfcf | 3202 | |
d7f5de76 | 3203 | if (dir == EV_DIR_DECREASES) |
4f67dfcf | 3204 | max = init; |
0bca51f0 | 3205 | else |
4f67dfcf JL |
3206 | min = init; |
3207 | ||
3208 | /* If we would create an invalid range, then just assume we | |
3209 | know absolutely nothing. This may be over-conservative, | |
20527215 ZD |
3210 | but it's clearly safe, and should happen only in unreachable |
3211 | parts of code, or for invalid programs. */ | |
4f67dfcf JL |
3212 | if (compare_values (min, max) == 1) |
3213 | return; | |
3214 | ||
3215 | set_value_range (vr, VR_RANGE, min, max, vr->equiv); | |
0bca51f0 DN |
3216 | } |
3217 | else if (vr->type == VR_RANGE) | |
3218 | { | |
20527215 ZD |
3219 | min = vr->min; |
3220 | max = vr->max; | |
d5448566 | 3221 | |
d7f5de76 | 3222 | if (dir == EV_DIR_DECREASES) |
0bca51f0 | 3223 | { |
d5448566 KH |
3224 | /* INIT is the maximum value. If INIT is lower than VR->MAX |
3225 | but no smaller than VR->MIN, set VR->MAX to INIT. */ | |
3226 | if (compare_values (init, max) == -1) | |
3227 | { | |
3228 | max = init; | |
3229 | ||
3230 | /* If we just created an invalid range with the minimum | |
20527215 ZD |
3231 | greater than the maximum, we fail conservatively. |
3232 | This should happen only in unreachable | |
3233 | parts of code, or for invalid programs. */ | |
d5448566 | 3234 | if (compare_values (min, max) == 1) |
20527215 | 3235 | return; |
d5448566 | 3236 | } |
9a46cc16 ILT |
3237 | |
3238 | /* According to the loop information, the variable does not | |
3239 | overflow. If we think it does, probably because of an | |
3240 | overflow due to arithmetic on a different INF value, | |
3241 | reset now. */ | |
3242 | if (is_negative_overflow_infinity (min)) | |
3243 | min = tmin; | |
0bca51f0 DN |
3244 | } |
3245 | else | |
3246 | { | |
3247 | /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */ | |
d5448566 KH |
3248 | if (compare_values (init, min) == 1) |
3249 | { | |
3250 | min = init; | |
3251 | ||
20527215 | 3252 | /* Again, avoid creating invalid range by failing. */ |
d5448566 | 3253 | if (compare_values (min, max) == 1) |
20527215 | 3254 | return; |
d5448566 | 3255 | } |
9a46cc16 ILT |
3256 | |
3257 | if (is_positive_overflow_infinity (max)) | |
3258 | max = tmax; | |
0bca51f0 | 3259 | } |
d5448566 | 3260 | |
227858d1 | 3261 | set_value_range (vr, VR_RANGE, min, max, vr->equiv); |
0bca51f0 DN |
3262 | } |
3263 | } | |
3264 | ||
9a46cc16 ILT |
3265 | /* Return true if VAR may overflow at STMT. This checks any available |
3266 | loop information to see if we can determine that VAR does not | |
3267 | overflow. */ | |
3268 | ||
3269 | static bool | |
726a989a | 3270 | vrp_var_may_overflow (tree var, gimple stmt) |
9a46cc16 ILT |
3271 | { |
3272 | struct loop *l; | |
3273 | tree chrec, init, step; | |
3274 | ||
3275 | if (current_loops == NULL) | |
3276 | return true; | |
3277 | ||
3278 | l = loop_containing_stmt (stmt); | |
3279 | if (l == NULL) | |
3280 | return true; | |
3281 | ||
3282 | chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var)); | |
3283 | if (TREE_CODE (chrec) != POLYNOMIAL_CHREC) | |
3284 | return true; | |
3285 | ||
3286 | init = initial_condition_in_loop_num (chrec, l->num); | |
3287 | step = evolution_part_in_loop_num (chrec, l->num); | |
3288 | ||
3289 | if (step == NULL_TREE | |
3290 | || !is_gimple_min_invariant (step) | |
3291 | || !valid_value_p (init)) | |
3292 | return true; | |
3293 | ||
3294 | /* If we get here, we know something useful about VAR based on the | |
3295 | loop information. If it wraps, it may overflow. */ | |
3296 | ||
3297 | if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec), | |
3298 | true)) | |
3299 | return true; | |
3300 | ||
3301 | if (dump_file && (dump_flags & TDF_DETAILS) != 0) | |
3302 | { | |
3303 | print_generic_expr (dump_file, var, 0); | |
3304 | fprintf (dump_file, ": loop information indicates does not overflow\n"); | |
3305 | } | |
3306 | ||
3307 | return false; | |
3308 | } | |
3309 | ||
0bca51f0 DN |
3310 | |
3311 | /* Given two numeric value ranges VR0, VR1 and a comparison code COMP: | |
3312 | ||
227858d1 DN |
3313 | - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for |
3314 | all the values in the ranges. | |
0bca51f0 DN |
3315 | |
3316 | - Return BOOLEAN_FALSE_NODE if the comparison always returns false. | |
3317 | ||
227858d1 | 3318 | - Return NULL_TREE if it is not always possible to determine the |
12df8a7e ILT |
3319 | value of the comparison. |
3320 | ||
3321 | Also set *STRICT_OVERFLOW_P to indicate whether a range with an | |
3322 | overflow infinity was used in the test. */ | |
227858d1 | 3323 | |
0bca51f0 DN |
3324 | |
3325 | static tree | |
12df8a7e ILT |
3326 | compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1, |
3327 | bool *strict_overflow_p) | |
0bca51f0 DN |
3328 | { |
3329 | /* VARYING or UNDEFINED ranges cannot be compared. */ | |
3330 | if (vr0->type == VR_VARYING | |
3331 | || vr0->type == VR_UNDEFINED | |
3332 | || vr1->type == VR_VARYING | |
3333 | || vr1->type == VR_UNDEFINED) | |
3334 | return NULL_TREE; | |
3335 | ||
3336 | /* Anti-ranges need to be handled separately. */ | |
3337 | if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE) | |
3338 | { | |
3339 | /* If both are anti-ranges, then we cannot compute any | |
3340 | comparison. */ | |
3341 | if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE) | |
3342 | return NULL_TREE; | |
3343 | ||
3344 | /* These comparisons are never statically computable. */ | |
3345 | if (comp == GT_EXPR | |
3346 | || comp == GE_EXPR | |
3347 | || comp == LT_EXPR | |
3348 | || comp == LE_EXPR) | |
3349 | return NULL_TREE; | |
3350 | ||
3351 | /* Equality can be computed only between a range and an | |
3352 | anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */ | |
3353 | if (vr0->type == VR_RANGE) | |
3354 | { | |
3355 | /* To simplify processing, make VR0 the anti-range. */ | |
227858d1 | 3356 | value_range_t *tmp = vr0; |
0bca51f0 DN |
3357 | vr0 = vr1; |
3358 | vr1 = tmp; | |
3359 | } | |
3360 | ||
3361 | gcc_assert (comp == NE_EXPR || comp == EQ_EXPR); | |
3362 | ||
12df8a7e ILT |
3363 | if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0 |
3364 | && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0) | |
0bca51f0 DN |
3365 | return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node; |
3366 | ||
3367 | return NULL_TREE; | |
3368 | } | |
3369 | ||
0c948c27 ILT |
3370 | if (!usable_range_p (vr0, strict_overflow_p) |
3371 | || !usable_range_p (vr1, strict_overflow_p)) | |
3372 | return NULL_TREE; | |
3373 | ||
0bca51f0 DN |
3374 | /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the |
3375 | operands around and change the comparison code. */ | |
3376 | if (comp == GT_EXPR || comp == GE_EXPR) | |
3377 | { | |
227858d1 | 3378 | value_range_t *tmp; |
0bca51f0 DN |
3379 | comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR; |
3380 | tmp = vr0; | |
3381 | vr0 = vr1; | |
3382 | vr1 = tmp; | |
3383 | } | |
3384 | ||
3385 | if (comp == EQ_EXPR) | |
3386 | { | |
3387 | /* Equality may only be computed if both ranges represent | |
3388 | exactly one value. */ | |
12df8a7e ILT |
3389 | if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0 |
3390 | && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0) | |
0bca51f0 | 3391 | { |
12df8a7e ILT |
3392 | int cmp_min = compare_values_warnv (vr0->min, vr1->min, |
3393 | strict_overflow_p); | |
3394 | int cmp_max = compare_values_warnv (vr0->max, vr1->max, | |
3395 | strict_overflow_p); | |
0bca51f0 DN |
3396 | if (cmp_min == 0 && cmp_max == 0) |
3397 | return boolean_true_node; | |
3398 | else if (cmp_min != -2 && cmp_max != -2) | |
3399 | return boolean_false_node; | |
3400 | } | |
7ab1122a | 3401 | /* If [V0_MIN, V1_MAX] < [V1_MIN, V1_MAX] then V0 != V1. */ |
12df8a7e ILT |
3402 | else if (compare_values_warnv (vr0->min, vr1->max, |
3403 | strict_overflow_p) == 1 | |
3404 | || compare_values_warnv (vr1->min, vr0->max, | |
3405 | strict_overflow_p) == 1) | |
7ab1122a | 3406 | return boolean_false_node; |
0bca51f0 DN |
3407 | |
3408 | return NULL_TREE; | |
3409 | } | |
3410 | else if (comp == NE_EXPR) | |
3411 | { | |
3412 | int cmp1, cmp2; | |
3413 | ||
3414 | /* If VR0 is completely to the left or completely to the right | |
3415 | of VR1, they are always different. Notice that we need to | |
3416 | make sure that both comparisons yield similar results to | |
3417 | avoid comparing values that cannot be compared at | |
3418 | compile-time. */ | |
12df8a7e ILT |
3419 | cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p); |
3420 | cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p); | |
0bca51f0 DN |
3421 | if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1)) |
3422 | return boolean_true_node; | |
3423 | ||
3424 | /* If VR0 and VR1 represent a single value and are identical, | |
3425 | return false. */ | |
12df8a7e ILT |
3426 | else if (compare_values_warnv (vr0->min, vr0->max, |
3427 | strict_overflow_p) == 0 | |
3428 | && compare_values_warnv (vr1->min, vr1->max, | |
3429 | strict_overflow_p) == 0 | |
3430 | && compare_values_warnv (vr0->min, vr1->min, | |
3431 | strict_overflow_p) == 0 | |
3432 | && compare_values_warnv (vr0->max, vr1->max, | |
3433 | strict_overflow_p) == 0) | |
0bca51f0 DN |
3434 | return boolean_false_node; |
3435 | ||
3436 | /* Otherwise, they may or may not be different. */ | |
3437 | else | |
3438 | return NULL_TREE; | |
3439 | } | |
3440 | else if (comp == LT_EXPR || comp == LE_EXPR) | |
3441 | { | |
3442 | int tst; | |
3443 | ||
3444 | /* If VR0 is to the left of VR1, return true. */ | |
12df8a7e | 3445 | tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p); |
0bca51f0 DN |
3446 | if ((comp == LT_EXPR && tst == -1) |
3447 | || (comp == LE_EXPR && (tst == -1 || tst == 0))) | |
12df8a7e ILT |
3448 | { |
3449 | if (overflow_infinity_range_p (vr0) | |
3450 | || overflow_infinity_range_p (vr1)) | |
3451 | *strict_overflow_p = true; | |
3452 | return boolean_true_node; | |
3453 | } | |
0bca51f0 DN |
3454 | |
3455 | /* If VR0 is to the right of VR1, return false. */ | |
12df8a7e | 3456 | tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p); |
0bca51f0 DN |
3457 | if ((comp == LT_EXPR && (tst == 0 || tst == 1)) |
3458 | || (comp == LE_EXPR && tst == 1)) | |
12df8a7e ILT |
3459 | { |
3460 | if (overflow_infinity_range_p (vr0) | |
3461 | || overflow_infinity_range_p (vr1)) | |
3462 | *strict_overflow_p = true; | |
3463 | return boolean_false_node; | |
3464 | } | |
0bca51f0 DN |
3465 | |
3466 | /* Otherwise, we don't know. */ | |
3467 | return NULL_TREE; | |
3468 | } | |
3469 | ||
3470 | gcc_unreachable (); | |
3471 | } | |
3472 | ||
3473 | ||
3474 | /* Given a value range VR, a value VAL and a comparison code COMP, return | |
227858d1 | 3475 | BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the |
0bca51f0 DN |
3476 | values in VR. Return BOOLEAN_FALSE_NODE if the comparison |
3477 | always returns false. Return NULL_TREE if it is not always | |
12df8a7e ILT |
3478 | possible to determine the value of the comparison. Also set |
3479 | *STRICT_OVERFLOW_P to indicate whether a range with an overflow | |
3480 | infinity was used in the test. */ | |
0bca51f0 DN |
3481 | |
3482 | static tree | |
12df8a7e ILT |
3483 | compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val, |
3484 | bool *strict_overflow_p) | |
0bca51f0 DN |
3485 | { |
3486 | if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED) | |
3487 | return NULL_TREE; | |
3488 | ||
3489 | /* Anti-ranges need to be handled separately. */ | |
3490 | if (vr->type == VR_ANTI_RANGE) | |
3491 | { | |
3492 | /* For anti-ranges, the only predicates that we can compute at | |
3493 | compile time are equality and inequality. */ | |
3494 | if (comp == GT_EXPR | |
3495 | || comp == GE_EXPR | |
3496 | || comp == LT_EXPR | |
3497 | || comp == LE_EXPR) | |
3498 | return NULL_TREE; | |
3499 | ||
d2f3ffba JM |
3500 | /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */ |
3501 | if (value_inside_range (val, vr) == 1) | |
0bca51f0 DN |
3502 | return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node; |
3503 | ||
3504 | return NULL_TREE; | |
3505 | } | |
3506 | ||
0c948c27 ILT |
3507 | if (!usable_range_p (vr, strict_overflow_p)) |
3508 | return NULL_TREE; | |
3509 | ||
0bca51f0 DN |
3510 | if (comp == EQ_EXPR) |
3511 | { | |
3512 | /* EQ_EXPR may only be computed if VR represents exactly | |
3513 | one value. */ | |
12df8a7e | 3514 | if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0) |
0bca51f0 | 3515 | { |
12df8a7e | 3516 | int cmp = compare_values_warnv (vr->min, val, strict_overflow_p); |
0bca51f0 DN |
3517 | if (cmp == 0) |
3518 | return boolean_true_node; | |
3519 | else if (cmp == -1 || cmp == 1 || cmp == 2) | |
3520 | return boolean_false_node; | |
3521 | } | |
12df8a7e ILT |
3522 | else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1 |
3523 | || compare_values_warnv (vr->max, val, strict_overflow_p) == -1) | |
5de2df7b | 3524 | return boolean_false_node; |
0bca51f0 DN |
3525 | |
3526 | return NULL_TREE; | |
3527 | } | |
3528 | else if (comp == NE_EXPR) | |
3529 | { | |
3530 | /* If VAL is not inside VR, then they are always different. */ | |
12df8a7e ILT |
3531 | if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1 |
3532 | || compare_values_warnv (vr->min, val, strict_overflow_p) == 1) | |
0bca51f0 DN |
3533 | return boolean_true_node; |
3534 | ||
3535 | /* If VR represents exactly one value equal to VAL, then return | |
3536 | false. */ | |
12df8a7e ILT |
3537 | if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0 |
3538 | && compare_values_warnv (vr->min, val, strict_overflow_p) == 0) | |
0bca51f0 DN |
3539 | return boolean_false_node; |
3540 | ||
3541 | /* Otherwise, they may or may not be different. */ | |
3542 | return NULL_TREE; | |
3543 | } | |
3544 | else if (comp == LT_EXPR || comp == LE_EXPR) | |
3545 | { | |
3546 | int tst; | |
3547 | ||
3548 | /* If VR is to the left of VAL, return true. */ | |
12df8a7e | 3549 | tst = compare_values_warnv (vr->max, val, strict_overflow_p); |
0bca51f0 DN |
3550 | if ((comp == LT_EXPR && tst == -1) |
3551 | || (comp == LE_EXPR && (tst == -1 || tst == 0))) | |
12df8a7e ILT |
3552 | { |
3553 | if (overflow_infinity_range_p (vr)) | |
3554 | *strict_overflow_p = true; | |
3555 | return boolean_true_node; | |
3556 | } | |
0bca51f0 DN |
3557 | |
3558 | /* If VR is to the right of VAL, return false. */ | |
12df8a7e | 3559 | tst = compare_values_warnv (vr->min, val, strict_overflow_p); |
0bca51f0 DN |
3560 | if ((comp == LT_EXPR && (tst == 0 || tst == 1)) |
3561 | || (comp == LE_EXPR && tst == 1)) | |
12df8a7e ILT |
3562 | { |
3563 | if (overflow_infinity_range_p (vr)) | |
3564 | *strict_overflow_p = true; | |
3565 | return boolean_false_node; | |
3566 | } | |
0bca51f0 DN |
3567 | |
3568 | /* Otherwise, we don't know. */ | |
3569 | return NULL_TREE; | |
3570 | } | |
3571 | else if (comp == GT_EXPR || comp == GE_EXPR) | |
3572 | { | |
3573 | int tst; | |
3574 | ||
3575 | /* If VR is to the right of VAL, return true. */ | |
12df8a7e | 3576 | tst = compare_values_warnv (vr->min, val, strict_overflow_p); |
0bca51f0 DN |
3577 | if ((comp == GT_EXPR && tst == 1) |
3578 | || (comp == GE_EXPR && (tst == 0 || tst == 1))) | |
12df8a7e ILT |
3579 | { |
3580 | if (overflow_infinity_range_p (vr)) | |
3581 | *strict_overflow_p = true; | |
3582 | return boolean_true_node; | |
3583 | } | |
0bca51f0 DN |
3584 | |
3585 | /* If VR is to the left of VAL, return false. */ | |
12df8a7e | 3586 | tst = compare_values_warnv (vr->max, val, strict_overflow_p); |
0bca51f0 DN |
3587 | if ((comp == GT_EXPR && (tst == -1 || tst == 0)) |
3588 | || (comp == GE_EXPR && tst == -1)) | |
12df8a7e ILT |
3589 | { |
3590 | if (overflow_infinity_range_p (vr)) | |
3591 | *strict_overflow_p = true; | |
3592 | return boolean_false_node; | |
3593 | } | |
0bca51f0 DN |
3594 | |
3595 | /* Otherwise, we don't know. */ | |
3596 | return NULL_TREE; | |
3597 | } | |
3598 | ||
3599 | gcc_unreachable (); | |
3600 | } | |
3601 | ||
3602 | ||
3603 | /* Debugging dumps. */ | |
3604 | ||
227858d1 DN |
3605 | void dump_value_range (FILE *, value_range_t *); |
3606 | void debug_value_range (value_range_t *); | |
3607 | void dump_all_value_ranges (FILE *); | |
3608 | void debug_all_value_ranges (void); | |
3609 | void dump_vr_equiv (FILE *, bitmap); | |
3610 | void debug_vr_equiv (bitmap); | |
3611 | ||
3612 | ||
3613 | /* Dump value range VR to FILE. */ | |
3614 | ||
0bca51f0 | 3615 | void |
227858d1 | 3616 | dump_value_range (FILE *file, value_range_t *vr) |
0bca51f0 DN |
3617 | { |
3618 | if (vr == NULL) | |
3619 | fprintf (file, "[]"); | |
3620 | else if (vr->type == VR_UNDEFINED) | |
3621 | fprintf (file, "UNDEFINED"); | |
3622 | else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE) | |
3623 | { | |
227858d1 DN |
3624 | tree type = TREE_TYPE (vr->min); |
3625 | ||
0bca51f0 | 3626 | fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : ""); |
227858d1 | 3627 | |
e1f28918 | 3628 | if (is_negative_overflow_infinity (vr->min)) |
12df8a7e | 3629 | fprintf (file, "-INF(OVF)"); |
e1f28918 ILT |
3630 | else if (INTEGRAL_TYPE_P (type) |
3631 | && !TYPE_UNSIGNED (type) | |
3632 | && vrp_val_is_min (vr->min)) | |
3633 | fprintf (file, "-INF"); | |
227858d1 DN |
3634 | else |
3635 | print_generic_expr (file, vr->min, 0); | |
3636 | ||
0bca51f0 | 3637 | fprintf (file, ", "); |
227858d1 | 3638 | |
e1f28918 | 3639 | if (is_positive_overflow_infinity (vr->max)) |
12df8a7e | 3640 | fprintf (file, "+INF(OVF)"); |
e1f28918 ILT |
3641 | else if (INTEGRAL_TYPE_P (type) |
3642 | && vrp_val_is_max (vr->max)) | |
3643 | fprintf (file, "+INF"); | |
227858d1 DN |
3644 | else |
3645 | print_generic_expr (file, vr->max, 0); | |
3646 | ||
0bca51f0 | 3647 | fprintf (file, "]"); |
227858d1 DN |
3648 | |
3649 | if (vr->equiv) | |
3650 | { | |
3651 | bitmap_iterator bi; | |
3652 | unsigned i, c = 0; | |
3653 | ||
3654 | fprintf (file, " EQUIVALENCES: { "); | |
3655 | ||
3656 | EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi) | |
3657 | { | |
3658 | print_generic_expr (file, ssa_name (i), 0); | |
3659 | fprintf (file, " "); | |
3660 | c++; | |
3661 | } | |
3662 | ||
3663 | fprintf (file, "} (%u elements)", c); | |
3664 | } | |
0bca51f0 DN |
3665 | } |
3666 | else if (vr->type == VR_VARYING) | |
3667 | fprintf (file, "VARYING"); | |
3668 | else | |
3669 | fprintf (file, "INVALID RANGE"); | |
3670 | } | |
3671 | ||
3672 | ||
3673 | /* Dump value range VR to stderr. */ | |
3674 | ||
3675 | void | |
227858d1 | 3676 | debug_value_range (value_range_t *vr) |
0bca51f0 DN |
3677 | { |
3678 | dump_value_range (stderr, vr); | |
96644aba | 3679 | fprintf (stderr, "\n"); |
0bca51f0 DN |
3680 | } |
3681 | ||
3682 | ||
3683 | /* Dump value ranges of all SSA_NAMEs to FILE. */ | |
3684 | ||
3685 | void | |
3686 | dump_all_value_ranges (FILE *file) | |
3687 | { | |
3688 | size_t i; | |
3689 | ||
3690 | for (i = 0; i < num_ssa_names; i++) | |
3691 | { | |
227858d1 | 3692 | if (vr_value[i]) |
0bca51f0 | 3693 | { |
227858d1 | 3694 | print_generic_expr (file, ssa_name (i), 0); |
0bca51f0 | 3695 | fprintf (file, ": "); |
227858d1 | 3696 | dump_value_range (file, vr_value[i]); |
0bca51f0 DN |
3697 | fprintf (file, "\n"); |
3698 | } | |
3699 | } | |
3700 | ||
3701 | fprintf (file, "\n"); | |
3702 | } | |
3703 | ||
3704 | ||
3705 | /* Dump all value ranges to stderr. */ | |
3706 | ||
3707 | void | |
3708 | debug_all_value_ranges (void) | |
3709 | { | |
3710 | dump_all_value_ranges (stderr); | |
3711 | } | |
3712 | ||
3713 | ||
0bca51f0 DN |
3714 | /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V, |
3715 | create a new SSA name N and return the assertion assignment | |
3716 | 'V = ASSERT_EXPR <V, V OP W>'. */ | |
3717 | ||
726a989a | 3718 | static gimple |
0bca51f0 DN |
3719 | build_assert_expr_for (tree cond, tree v) |
3720 | { | |
726a989a RB |
3721 | tree n; |
3722 | gimple assertion; | |
0bca51f0 DN |
3723 | |
3724 | gcc_assert (TREE_CODE (v) == SSA_NAME); | |
726a989a | 3725 | n = duplicate_ssa_name (v, NULL); |
0bca51f0 | 3726 | |
7da4bf7d | 3727 | if (COMPARISON_CLASS_P (cond)) |
0bca51f0 | 3728 | { |
0d451405 | 3729 | tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond); |
726a989a | 3730 | assertion = gimple_build_assign (n, a); |
0bca51f0 DN |
3731 | } |
3732 | else if (TREE_CODE (cond) == TRUTH_NOT_EXPR) | |
3733 | { | |
3734 | /* Given !V, build the assignment N = false. */ | |
3735 | tree op0 = TREE_OPERAND (cond, 0); | |
3736 | gcc_assert (op0 == v); | |
726a989a | 3737 | assertion = gimple_build_assign (n, boolean_false_node); |
0bca51f0 DN |
3738 | } |
3739 | else if (TREE_CODE (cond) == SSA_NAME) | |
3740 | { | |
3741 | /* Given V, build the assignment N = true. */ | |
3742 | gcc_assert (v == cond); | |
726a989a | 3743 | assertion = gimple_build_assign (n, boolean_true_node); |
0bca51f0 DN |
3744 | } |
3745 | else | |
3746 | gcc_unreachable (); | |
3747 | ||
3748 | SSA_NAME_DEF_STMT (n) = assertion; | |
3749 | ||
3750 | /* The new ASSERT_EXPR, creates a new SSA name that replaces the | |
3751 | operand of the ASSERT_EXPR. Register the new name and the old one | |
3752 | in the replacement table so that we can fix the SSA web after | |
3753 | adding all the ASSERT_EXPRs. */ | |
3754 | register_new_name_mapping (n, v); | |
3755 | ||
3756 | return assertion; | |
3757 | } | |
3758 | ||
3759 | ||
3760 | /* Return false if EXPR is a predicate expression involving floating | |
3761 | point values. */ | |
3762 | ||
3763 | static inline bool | |
726a989a | 3764 | fp_predicate (gimple stmt) |
0bca51f0 | 3765 | { |
726a989a RB |
3766 | GIMPLE_CHECK (stmt, GIMPLE_COND); |
3767 | ||
3768 | return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt))); | |
0bca51f0 DN |
3769 | } |
3770 | ||
3771 | ||
227858d1 DN |
3772 | /* If the range of values taken by OP can be inferred after STMT executes, |
3773 | return the comparison code (COMP_CODE_P) and value (VAL_P) that | |
3774 | describes the inferred range. Return true if a range could be | |
3775 | inferred. */ | |
0bca51f0 | 3776 | |
227858d1 | 3777 | static bool |
726a989a | 3778 | infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p) |
0bca51f0 | 3779 | { |
227858d1 DN |
3780 | *val_p = NULL_TREE; |
3781 | *comp_code_p = ERROR_MARK; | |
3782 | ||
9fabf0d4 DN |
3783 | /* Do not attempt to infer anything in names that flow through |
3784 | abnormal edges. */ | |
3785 | if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op)) | |
227858d1 DN |
3786 | return false; |
3787 | ||
3788 | /* Similarly, don't infer anything from statements that may throw | |
3789 | exceptions. */ | |
726a989a | 3790 | if (stmt_could_throw_p (stmt)) |
227858d1 | 3791 | return false; |
9fabf0d4 | 3792 | |
8c5285e1 DN |
3793 | /* If STMT is the last statement of a basic block with no |
3794 | successors, there is no point inferring anything about any of its | |
3795 | operands. We would not be able to find a proper insertion point | |
3796 | for the assertion, anyway. */ | |
726a989a | 3797 | if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0) |
8c5285e1 DN |
3798 | return false; |
3799 | ||
60c9ad46 JL |
3800 | /* We can only assume that a pointer dereference will yield |
3801 | non-NULL if -fdelete-null-pointer-checks is enabled. */ | |
11522353 ILT |
3802 | if (flag_delete_null_pointer_checks |
3803 | && POINTER_TYPE_P (TREE_TYPE (op)) | |
726a989a | 3804 | && gimple_code (stmt) != GIMPLE_ASM) |
0bca51f0 | 3805 | { |
e9e0aa2c | 3806 | unsigned num_uses, num_loads, num_stores; |
0bca51f0 | 3807 | |
e9e0aa2c DN |
3808 | count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores); |
3809 | if (num_loads + num_stores > 0) | |
0bca51f0 | 3810 | { |
227858d1 DN |
3811 | *val_p = build_int_cst (TREE_TYPE (op), 0); |
3812 | *comp_code_p = NE_EXPR; | |
3813 | return true; | |
0bca51f0 DN |
3814 | } |
3815 | } | |
3816 | ||
227858d1 | 3817 | return false; |
0bca51f0 DN |
3818 | } |
3819 | ||
3820 | ||
227858d1 DN |
3821 | void dump_asserts_for (FILE *, tree); |
3822 | void debug_asserts_for (tree); | |
3823 | void dump_all_asserts (FILE *); | |
3824 | void debug_all_asserts (void); | |
3825 | ||
3826 | /* Dump all the registered assertions for NAME to FILE. */ | |
3827 | ||
3828 | void | |
3829 | dump_asserts_for (FILE *file, tree name) | |
3830 | { | |
3831 | assert_locus_t loc; | |
3832 | ||
3833 | fprintf (file, "Assertions to be inserted for "); | |
3834 | print_generic_expr (file, name, 0); | |
3835 | fprintf (file, "\n"); | |
3836 | ||
3837 | loc = asserts_for[SSA_NAME_VERSION (name)]; | |
3838 | while (loc) | |
3839 | { | |
3840 | fprintf (file, "\t"); | |
726a989a | 3841 | print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0); |
227858d1 DN |
3842 | fprintf (file, "\n\tBB #%d", loc->bb->index); |
3843 | if (loc->e) | |
3844 | { | |
3845 | fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index, | |
3846 | loc->e->dest->index); | |
3847 | dump_edge_info (file, loc->e, 0); | |
3848 | } | |
3849 | fprintf (file, "\n\tPREDICATE: "); | |
3850 | print_generic_expr (file, name, 0); | |
3851 | fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]); | |
3852 | print_generic_expr (file, loc->val, 0); | |
3853 | fprintf (file, "\n\n"); | |
3854 | loc = loc->next; | |
3855 | } | |
3856 | ||
3857 | fprintf (file, "\n"); | |
3858 | } | |
3859 | ||
3860 | ||
3861 | /* Dump all the registered assertions for NAME to stderr. */ | |
3862 | ||
3863 | void | |
3864 | debug_asserts_for (tree name) | |
3865 | { | |
3866 | dump_asserts_for (stderr, name); | |
3867 | } | |
3868 | ||
3869 | ||
3870 | /* Dump all the registered assertions for all the names to FILE. */ | |
3871 | ||
3872 | void | |
3873 | dump_all_asserts (FILE *file) | |
3874 | { | |
3875 | unsigned i; | |
3876 | bitmap_iterator bi; | |
3877 | ||
3878 | fprintf (file, "\nASSERT_EXPRs to be inserted\n\n"); | |
3879 | EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi) | |
3880 | dump_asserts_for (file, ssa_name (i)); | |
3881 | fprintf (file, "\n"); | |
3882 | } | |
3883 | ||
3884 | ||
3885 | /* Dump all the registered assertions for all the names to stderr. */ | |
3886 | ||
3887 | void | |
3888 | debug_all_asserts (void) | |
3889 | { | |
3890 | dump_all_asserts (stderr); | |
3891 | } | |
3892 | ||
3893 | ||
3894 | /* If NAME doesn't have an ASSERT_EXPR registered for asserting | |
2ab8dbf4 | 3895 | 'EXPR COMP_CODE VAL' at a location that dominates block BB or |
227858d1 | 3896 | E->DEST, then register this location as a possible insertion point |
2ab8dbf4 | 3897 | for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>. |
227858d1 DN |
3898 | |
3899 | BB, E and SI provide the exact insertion point for the new | |
3900 | ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted | |
3901 | on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on | |
3902 | BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E | |
3903 | must not be NULL. */ | |
3904 | ||
3905 | static void | |
2ab8dbf4 | 3906 | register_new_assert_for (tree name, tree expr, |
227858d1 DN |
3907 | enum tree_code comp_code, |
3908 | tree val, | |
3909 | basic_block bb, | |
3910 | edge e, | |
726a989a | 3911 | gimple_stmt_iterator si) |
227858d1 DN |
3912 | { |
3913 | assert_locus_t n, loc, last_loc; | |
3914 | bool found; | |
3915 | basic_block dest_bb; | |
3916 | ||
3917 | #if defined ENABLE_CHECKING | |
3918 | gcc_assert (bb == NULL || e == NULL); | |
3919 | ||
3920 | if (e == NULL) | |
726a989a RB |
3921 | gcc_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND |
3922 | && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH); | |
227858d1 DN |
3923 | #endif |
3924 | ||
a1b969a0 RG |
3925 | /* Never build an assert comparing against an integer constant with |
3926 | TREE_OVERFLOW set. This confuses our undefined overflow warning | |
3927 | machinery. */ | |
3928 | if (TREE_CODE (val) == INTEGER_CST | |
3929 | && TREE_OVERFLOW (val)) | |
3930 | val = build_int_cst_wide (TREE_TYPE (val), | |
3931 | TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val)); | |
3932 | ||
227858d1 DN |
3933 | /* The new assertion A will be inserted at BB or E. We need to |
3934 | determine if the new location is dominated by a previously | |
3935 | registered location for A. If we are doing an edge insertion, | |
3936 | assume that A will be inserted at E->DEST. Note that this is not | |
3937 | necessarily true. | |
3938 | ||
3939 | If E is a critical edge, it will be split. But even if E is | |
3940 | split, the new block will dominate the same set of blocks that | |
3941 | E->DEST dominates. | |
3942 | ||
3943 | The reverse, however, is not true, blocks dominated by E->DEST | |
3944 | will not be dominated by the new block created to split E. So, | |
3945 | if the insertion location is on a critical edge, we will not use | |
3946 | the new location to move another assertion previously registered | |
3947 | at a block dominated by E->DEST. */ | |
3948 | dest_bb = (bb) ? bb : e->dest; | |
3949 | ||
3950 | /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and | |
3951 | VAL at a block dominating DEST_BB, then we don't need to insert a new | |
3952 | one. Similarly, if the same assertion already exists at a block | |
3953 | dominated by DEST_BB and the new location is not on a critical | |
3954 | edge, then update the existing location for the assertion (i.e., | |
3955 | move the assertion up in the dominance tree). | |
3956 | ||
3957 | Note, this is implemented as a simple linked list because there | |
3958 | should not be more than a handful of assertions registered per | |
3959 | name. If this becomes a performance problem, a table hashed by | |
3960 | COMP_CODE and VAL could be implemented. */ | |
3961 | loc = asserts_for[SSA_NAME_VERSION (name)]; | |
3962 | last_loc = loc; | |
3963 | found = false; | |
3964 | while (loc) | |
3965 | { | |
3966 | if (loc->comp_code == comp_code | |
3967 | && (loc->val == val | |
2ab8dbf4 RG |
3968 | || operand_equal_p (loc->val, val, 0)) |
3969 | && (loc->expr == expr | |
3970 | || operand_equal_p (loc->expr, expr, 0))) | |
227858d1 DN |
3971 | { |
3972 | /* If the assertion NAME COMP_CODE VAL has already been | |
3973 | registered at a basic block that dominates DEST_BB, then | |
3974 | we don't need to insert the same assertion again. Note | |
3975 | that we don't check strict dominance here to avoid | |
3976 | replicating the same assertion inside the same basic | |
3977 | block more than once (e.g., when a pointer is | |
3978 | dereferenced several times inside a block). | |
3979 | ||
3980 | An exception to this rule are edge insertions. If the | |
3981 | new assertion is to be inserted on edge E, then it will | |
3982 | dominate all the other insertions that we may want to | |
3983 | insert in DEST_BB. So, if we are doing an edge | |
3984 | insertion, don't do this dominance check. */ | |
3985 | if (e == NULL | |
3986 | && dominated_by_p (CDI_DOMINATORS, dest_bb, loc->bb)) | |
3987 | return; | |
3988 | ||
3989 | /* Otherwise, if E is not a critical edge and DEST_BB | |
3990 | dominates the existing location for the assertion, move | |
3991 | the assertion up in the dominance tree by updating its | |
3992 | location information. */ | |
3993 | if ((e == NULL || !EDGE_CRITICAL_P (e)) | |
3994 | && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb)) | |
3995 | { | |
3996 | loc->bb = dest_bb; | |
3997 | loc->e = e; | |
3998 | loc->si = si; | |
3999 | return; | |
4000 | } | |
4001 | } | |
4002 | ||
4003 | /* Update the last node of the list and move to the next one. */ | |
4004 | last_loc = loc; | |
4005 | loc = loc->next; | |
4006 | } | |
4007 | ||
4008 | /* If we didn't find an assertion already registered for | |
4009 | NAME COMP_CODE VAL, add a new one at the end of the list of | |
4010 | assertions associated with NAME. */ | |
5ed6ace5 | 4011 | n = XNEW (struct assert_locus_d); |
227858d1 DN |
4012 | n->bb = dest_bb; |
4013 | n->e = e; | |
4014 | n->si = si; | |
4015 | n->comp_code = comp_code; | |
4016 | n->val = val; | |
2ab8dbf4 | 4017 | n->expr = expr; |
227858d1 DN |
4018 | n->next = NULL; |
4019 | ||
4020 | if (last_loc) | |
4021 | last_loc->next = n; | |
4022 | else | |
4023 | asserts_for[SSA_NAME_VERSION (name)] = n; | |
4024 | ||
4025 | bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name)); | |
4026 | } | |
4027 | ||
a26a02d7 RAE |
4028 | /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME. |
4029 | Extract a suitable test code and value and store them into *CODE_P and | |
4030 | *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P. | |
4031 | ||
4032 | If no extraction was possible, return FALSE, otherwise return TRUE. | |
4033 | ||
4034 | If INVERT is true, then we invert the result stored into *CODE_P. */ | |
764a79ed RAE |
4035 | |
4036 | static bool | |
4037 | extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code, | |
4038 | tree cond_op0, tree cond_op1, | |
4039 | bool invert, enum tree_code *code_p, | |
4040 | tree *val_p) | |
4041 | { | |
4042 | enum tree_code comp_code; | |
4043 | tree val; | |
4044 | ||
4045 | /* Otherwise, we have a comparison of the form NAME COMP VAL | |
4046 | or VAL COMP NAME. */ | |
4047 | if (name == cond_op1) | |
4048 | { | |
4049 | /* If the predicate is of the form VAL COMP NAME, flip | |
4050 | COMP around because we need to register NAME as the | |
4051 | first operand in the predicate. */ | |
4052 | comp_code = swap_tree_comparison (cond_code); | |
4053 | val = cond_op0; | |
4054 | } | |
4055 | else | |
4056 | { | |
4057 | /* The comparison is of the form NAME COMP VAL, so the | |
4058 | comparison code remains unchanged. */ | |
4059 | comp_code = cond_code; | |
4060 | val = cond_op1; | |
4061 | } | |
4062 | ||
4063 | /* Invert the comparison code as necessary. */ | |
4064 | if (invert) | |
4065 | comp_code = invert_tree_comparison (comp_code, 0); | |
4066 | ||
4067 | /* VRP does not handle float types. */ | |
4068 | if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val))) | |
4069 | return false; | |
4070 | ||
4071 | /* Do not register always-false predicates. | |
4072 | FIXME: this works around a limitation in fold() when dealing with | |
4073 | enumerations. Given 'enum { N1, N2 } x;', fold will not | |
4074 | fold 'if (x > N2)' to 'if (0)'. */ | |
4075 | if ((comp_code == GT_EXPR || comp_code == LT_EXPR) | |
4076 | && INTEGRAL_TYPE_P (TREE_TYPE (val))) | |
4077 | { | |
4078 | tree min = TYPE_MIN_VALUE (TREE_TYPE (val)); | |
4079 | tree max = TYPE_MAX_VALUE (TREE_TYPE (val)); | |
4080 | ||
4081 | if (comp_code == GT_EXPR | |
4082 | && (!max | |
4083 | || compare_values (val, max) == 0)) | |
4084 | return false; | |
4085 | ||
4086 | if (comp_code == LT_EXPR | |
4087 | && (!min | |
4088 | || compare_values (val, min) == 0)) | |
4089 | return false; | |
4090 | } | |
4091 | *code_p = comp_code; | |
4092 | *val_p = val; | |
4093 | return true; | |
4094 | } | |
279f3eb5 | 4095 | |
2ab8dbf4 RG |
4096 | /* Try to register an edge assertion for SSA name NAME on edge E for |
4097 | the condition COND contributing to the conditional jump pointed to by BSI. | |
4098 | Invert the condition COND if INVERT is true. | |
4099 | Return true if an assertion for NAME could be registered. */ | |
4100 | ||
4101 | static bool | |
726a989a | 4102 | register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, |
a26a02d7 RAE |
4103 | enum tree_code cond_code, |
4104 | tree cond_op0, tree cond_op1, bool invert) | |
2ab8dbf4 RG |
4105 | { |
4106 | tree val; | |
4107 | enum tree_code comp_code; | |
4108 | bool retval = false; | |
4109 | ||
a26a02d7 RAE |
4110 | if (!extract_code_and_val_from_cond_with_ops (name, cond_code, |
4111 | cond_op0, | |
4112 | cond_op1, | |
4113 | invert, &comp_code, &val)) | |
2ab8dbf4 RG |
4114 | return false; |
4115 | ||
4116 | /* Only register an ASSERT_EXPR if NAME was found in the sub-graph | |
4117 | reachable from E. */ | |
c4ab2baa | 4118 | if (live_on_edge (e, name) |
2ab8dbf4 RG |
4119 | && !has_single_use (name)) |
4120 | { | |
4121 | register_new_assert_for (name, name, comp_code, val, NULL, e, bsi); | |
4122 | retval = true; | |
4123 | } | |
4124 | ||
4125 | /* In the case of NAME <= CST and NAME being defined as | |
4126 | NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2 | |
4127 | and NAME2 <= CST - CST2. We can do the same for NAME > CST. | |
4128 | This catches range and anti-range tests. */ | |
4129 | if ((comp_code == LE_EXPR | |
4130 | || comp_code == GT_EXPR) | |
4131 | && TREE_CODE (val) == INTEGER_CST | |
4132 | && TYPE_UNSIGNED (TREE_TYPE (val))) | |
4133 | { | |
726a989a | 4134 | gimple def_stmt = SSA_NAME_DEF_STMT (name); |
70b7b037 | 4135 | tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE; |
2ab8dbf4 RG |
4136 | |
4137 | /* Extract CST2 from the (optional) addition. */ | |
726a989a RB |
4138 | if (is_gimple_assign (def_stmt) |
4139 | && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR) | |
2ab8dbf4 | 4140 | { |
726a989a RB |
4141 | name2 = gimple_assign_rhs1 (def_stmt); |
4142 | cst2 = gimple_assign_rhs2 (def_stmt); | |
2ab8dbf4 RG |
4143 | if (TREE_CODE (name2) == SSA_NAME |
4144 | && TREE_CODE (cst2) == INTEGER_CST) | |
4145 | def_stmt = SSA_NAME_DEF_STMT (name2); | |
4146 | } | |
4147 | ||
70b7b037 | 4148 | /* Extract NAME2 from the (optional) sign-changing cast. */ |
726a989a | 4149 | if (gimple_assign_cast_p (def_stmt)) |
70b7b037 | 4150 | { |
1a87cf0c | 4151 | if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)) |
726a989a RB |
4152 | && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt))) |
4153 | && (TYPE_PRECISION (gimple_expr_type (def_stmt)) | |
4154 | == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))) | |
4155 | name3 = gimple_assign_rhs1 (def_stmt); | |
70b7b037 | 4156 | } |
2ab8dbf4 | 4157 | |
70b7b037 RG |
4158 | /* If name3 is used later, create an ASSERT_EXPR for it. */ |
4159 | if (name3 != NULL_TREE | |
4160 | && TREE_CODE (name3) == SSA_NAME | |
2ab8dbf4 RG |
4161 | && (cst2 == NULL_TREE |
4162 | || TREE_CODE (cst2) == INTEGER_CST) | |
70b7b037 | 4163 | && INTEGRAL_TYPE_P (TREE_TYPE (name3)) |
c4ab2baa | 4164 | && live_on_edge (e, name3) |
70b7b037 RG |
4165 | && !has_single_use (name3)) |
4166 | { | |
4167 | tree tmp; | |
4168 | ||
4169 | /* Build an expression for the range test. */ | |
4170 | tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3); | |
4171 | if (cst2 != NULL_TREE) | |
4172 | tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); | |
4173 | ||
4174 | if (dump_file) | |
4175 | { | |
4176 | fprintf (dump_file, "Adding assert for "); | |
4177 | print_generic_expr (dump_file, name3, 0); | |
4178 | fprintf (dump_file, " from "); | |
4179 | print_generic_expr (dump_file, tmp, 0); | |
4180 | fprintf (dump_file, "\n"); | |
4181 | } | |
4182 | ||
4183 | register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi); | |
4184 | ||
4185 | retval = true; | |
4186 | } | |
4187 | ||
4188 | /* If name2 is used later, create an ASSERT_EXPR for it. */ | |
4189 | if (name2 != NULL_TREE | |
4190 | && TREE_CODE (name2) == SSA_NAME | |
4191 | && TREE_CODE (cst2) == INTEGER_CST | |
4192 | && INTEGRAL_TYPE_P (TREE_TYPE (name2)) | |
c4ab2baa | 4193 | && live_on_edge (e, name2) |
2ab8dbf4 RG |
4194 | && !has_single_use (name2)) |
4195 | { | |
4196 | tree tmp; | |
4197 | ||
4198 | /* Build an expression for the range test. */ | |
4199 | tmp = name2; | |
4200 | if (TREE_TYPE (name) != TREE_TYPE (name2)) | |
4201 | tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp); | |
4202 | if (cst2 != NULL_TREE) | |
4203 | tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); | |
4204 | ||
4205 | if (dump_file) | |
4206 | { | |
4207 | fprintf (dump_file, "Adding assert for "); | |
4208 | print_generic_expr (dump_file, name2, 0); | |
4209 | fprintf (dump_file, " from "); | |
4210 | print_generic_expr (dump_file, tmp, 0); | |
4211 | fprintf (dump_file, "\n"); | |
4212 | } | |
4213 | ||
4214 | register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi); | |
4215 | ||
4216 | retval = true; | |
4217 | } | |
4218 | } | |
4219 | ||
4220 | return retval; | |
4221 | } | |
4222 | ||
279f3eb5 JL |
4223 | /* OP is an operand of a truth value expression which is known to have |
4224 | a particular value. Register any asserts for OP and for any | |
4225 | operands in OP's defining statement. | |
4226 | ||
4227 | If CODE is EQ_EXPR, then we want to register OP is zero (false), | |
4228 | if CODE is NE_EXPR, then we want to register OP is nonzero (true). */ | |
4229 | ||
4230 | static bool | |
4231 | register_edge_assert_for_1 (tree op, enum tree_code code, | |
726a989a | 4232 | edge e, gimple_stmt_iterator bsi) |
279f3eb5 | 4233 | { |
34fc5065 | 4234 | bool retval = false; |
726a989a RB |
4235 | gimple op_def; |
4236 | tree val; | |
a26a02d7 | 4237 | enum tree_code rhs_code; |
227858d1 | 4238 | |
279f3eb5 JL |
4239 | /* We only care about SSA_NAMEs. */ |
4240 | if (TREE_CODE (op) != SSA_NAME) | |
227858d1 DN |
4241 | return false; |
4242 | ||
279f3eb5 JL |
4243 | /* We know that OP will have a zero or nonzero value. If OP is used |
4244 | more than once go ahead and register an assert for OP. | |
4245 | ||
4246 | The FOUND_IN_SUBGRAPH support is not helpful in this situation as | |
4247 | it will always be set for OP (because OP is used in a COND_EXPR in | |
4248 | the subgraph). */ | |
4249 | if (!has_single_use (op)) | |
4250 | { | |
4251 | val = build_int_cst (TREE_TYPE (op), 0); | |
2ab8dbf4 | 4252 | register_new_assert_for (op, op, code, val, NULL, e, bsi); |
279f3eb5 JL |
4253 | retval = true; |
4254 | } | |
4255 | ||
4256 | /* Now look at how OP is set. If it's set from a comparison, | |
4257 | a truth operation or some bit operations, then we may be able | |
4258 | to register information about the operands of that assignment. */ | |
4259 | op_def = SSA_NAME_DEF_STMT (op); | |
726a989a | 4260 | if (gimple_code (op_def) != GIMPLE_ASSIGN) |
279f3eb5 JL |
4261 | return retval; |
4262 | ||
726a989a | 4263 | rhs_code = gimple_assign_rhs_code (op_def); |
279f3eb5 | 4264 | |
726a989a | 4265 | if (TREE_CODE_CLASS (rhs_code) == tcc_comparison) |
227858d1 | 4266 | { |
34fc5065 | 4267 | bool invert = (code == EQ_EXPR ? true : false); |
726a989a RB |
4268 | tree op0 = gimple_assign_rhs1 (op_def); |
4269 | tree op1 = gimple_assign_rhs2 (op_def); | |
227858d1 | 4270 | |
2ab8dbf4 | 4271 | if (TREE_CODE (op0) == SSA_NAME) |
a26a02d7 RAE |
4272 | retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1, |
4273 | invert); | |
2ab8dbf4 | 4274 | if (TREE_CODE (op1) == SSA_NAME) |
a26a02d7 RAE |
4275 | retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1, |
4276 | invert); | |
279f3eb5 JL |
4277 | } |
4278 | else if ((code == NE_EXPR | |
726a989a RB |
4279 | && (gimple_assign_rhs_code (op_def) == TRUTH_AND_EXPR |
4280 | || gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)) | |
279f3eb5 | 4281 | || (code == EQ_EXPR |
726a989a RB |
4282 | && (gimple_assign_rhs_code (op_def) == TRUTH_OR_EXPR |
4283 | || gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))) | |
279f3eb5 JL |
4284 | { |
4285 | /* Recurse on each operand. */ | |
726a989a | 4286 | retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), |
279f3eb5 | 4287 | code, e, bsi); |
726a989a | 4288 | retval |= register_edge_assert_for_1 (gimple_assign_rhs2 (op_def), |
279f3eb5 JL |
4289 | code, e, bsi); |
4290 | } | |
726a989a | 4291 | else if (gimple_assign_rhs_code (op_def) == TRUTH_NOT_EXPR) |
279f3eb5 | 4292 | { |
34fc5065 RG |
4293 | /* Recurse, flipping CODE. */ |
4294 | code = invert_tree_comparison (code, false); | |
726a989a | 4295 | retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), |
34fc5065 | 4296 | code, e, bsi); |
279f3eb5 | 4297 | } |
726a989a | 4298 | else if (gimple_assign_rhs_code (op_def) == SSA_NAME) |
279f3eb5 | 4299 | { |
34fc5065 | 4300 | /* Recurse through the copy. */ |
726a989a RB |
4301 | retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), |
4302 | code, e, bsi); | |
279f3eb5 | 4303 | } |
1a87cf0c | 4304 | else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def))) |
279f3eb5 | 4305 | { |
34fc5065 | 4306 | /* Recurse through the type conversion. */ |
726a989a | 4307 | retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), |
279f3eb5 JL |
4308 | code, e, bsi); |
4309 | } | |
227858d1 | 4310 | |
279f3eb5 JL |
4311 | return retval; |
4312 | } | |
da11c5d2 | 4313 | |
279f3eb5 JL |
4314 | /* Try to register an edge assertion for SSA name NAME on edge E for |
4315 | the condition COND contributing to the conditional jump pointed to by SI. | |
4316 | Return true if an assertion for NAME could be registered. */ | |
da11c5d2 | 4317 | |
279f3eb5 | 4318 | static bool |
726a989a | 4319 | register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si, |
a26a02d7 RAE |
4320 | enum tree_code cond_code, tree cond_op0, |
4321 | tree cond_op1) | |
279f3eb5 JL |
4322 | { |
4323 | tree val; | |
4324 | enum tree_code comp_code; | |
4325 | bool retval = false; | |
4326 | bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0; | |
4327 | ||
4328 | /* Do not attempt to infer anything in names that flow through | |
4329 | abnormal edges. */ | |
4330 | if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name)) | |
4331 | return false; | |
4332 | ||
a26a02d7 RAE |
4333 | if (!extract_code_and_val_from_cond_with_ops (name, cond_code, |
4334 | cond_op0, cond_op1, | |
4335 | is_else_edge, | |
4336 | &comp_code, &val)) | |
279f3eb5 JL |
4337 | return false; |
4338 | ||
2ab8dbf4 | 4339 | /* Register ASSERT_EXPRs for name. */ |
a26a02d7 RAE |
4340 | retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0, |
4341 | cond_op1, is_else_edge); | |
2ab8dbf4 | 4342 | |
279f3eb5 JL |
4343 | |
4344 | /* If COND is effectively an equality test of an SSA_NAME against | |
4345 | the value zero or one, then we may be able to assert values | |
4346 | for SSA_NAMEs which flow into COND. */ | |
4347 | ||
4348 | /* In the case of NAME == 1 or NAME != 0, for TRUTH_AND_EXPR defining | |
4349 | statement of NAME we can assert both operands of the TRUTH_AND_EXPR | |
2f8e468b | 4350 | have nonzero value. */ |
279f3eb5 JL |
4351 | if (((comp_code == EQ_EXPR && integer_onep (val)) |
4352 | || (comp_code == NE_EXPR && integer_zerop (val)))) | |
4353 | { | |
726a989a | 4354 | gimple def_stmt = SSA_NAME_DEF_STMT (name); |
279f3eb5 | 4355 | |
726a989a RB |
4356 | if (is_gimple_assign (def_stmt) |
4357 | && (gimple_assign_rhs_code (def_stmt) == TRUTH_AND_EXPR | |
4358 | || gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)) | |
279f3eb5 | 4359 | { |
726a989a RB |
4360 | tree op0 = gimple_assign_rhs1 (def_stmt); |
4361 | tree op1 = gimple_assign_rhs2 (def_stmt); | |
279f3eb5 JL |
4362 | retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si); |
4363 | retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si); | |
227858d1 DN |
4364 | } |
4365 | } | |
279f3eb5 JL |
4366 | |
4367 | /* In the case of NAME == 0 or NAME != 1, for TRUTH_OR_EXPR defining | |
4368 | statement of NAME we can assert both operands of the TRUTH_OR_EXPR | |
4369 | have zero value. */ | |
4370 | if (((comp_code == EQ_EXPR && integer_zerop (val)) | |
4371 | || (comp_code == NE_EXPR && integer_onep (val)))) | |
227858d1 | 4372 | { |
726a989a | 4373 | gimple def_stmt = SSA_NAME_DEF_STMT (name); |
279f3eb5 | 4374 | |
726a989a RB |
4375 | if (is_gimple_assign (def_stmt) |
4376 | && (gimple_assign_rhs_code (def_stmt) == TRUTH_OR_EXPR | |
e09deb14 RG |
4377 | /* For BIT_IOR_EXPR only if NAME == 0 both operands have |
4378 | necessarily zero value. */ | |
4379 | || (comp_code == EQ_EXPR | |
726a989a | 4380 | && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)))) |
279f3eb5 | 4381 | { |
726a989a RB |
4382 | tree op0 = gimple_assign_rhs1 (def_stmt); |
4383 | tree op1 = gimple_assign_rhs2 (def_stmt); | |
279f3eb5 JL |
4384 | retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si); |
4385 | retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si); | |
4386 | } | |
227858d1 DN |
4387 | } |
4388 | ||
279f3eb5 | 4389 | return retval; |
227858d1 DN |
4390 | } |
4391 | ||
4392 | ||
227858d1 | 4393 | /* Determine whether the outgoing edges of BB should receive an |
279f3eb5 | 4394 | ASSERT_EXPR for each of the operands of BB's LAST statement. |
9bb6aa43 | 4395 | The last statement of BB must be a COND_EXPR. |
227858d1 DN |
4396 | |
4397 | If any of the sub-graphs rooted at BB have an interesting use of | |
4398 | the predicate operands, an assert location node is added to the | |
4399 | list of assertions for the corresponding operands. */ | |
4400 | ||
4401 | static bool | |
726a989a | 4402 | find_conditional_asserts (basic_block bb, gimple last) |
227858d1 DN |
4403 | { |
4404 | bool need_assert; | |
726a989a | 4405 | gimple_stmt_iterator bsi; |
279f3eb5 | 4406 | tree op; |
227858d1 DN |
4407 | edge_iterator ei; |
4408 | edge e; | |
4409 | ssa_op_iter iter; | |
4410 | ||
4411 | need_assert = false; | |
726a989a | 4412 | bsi = gsi_for_stmt (last); |
227858d1 DN |
4413 | |
4414 | /* Look for uses of the operands in each of the sub-graphs | |
4415 | rooted at BB. We need to check each of the outgoing edges | |
4416 | separately, so that we know what kind of ASSERT_EXPR to | |
4417 | insert. */ | |
4418 | FOR_EACH_EDGE (e, ei, bb->succs) | |
4419 | { | |
4420 | if (e->dest == bb) | |
4421 | continue; | |
4422 | ||
227858d1 DN |
4423 | /* Register the necessary assertions for each operand in the |
4424 | conditional predicate. */ | |
4425 | FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE) | |
a26a02d7 | 4426 | { |
726a989a RB |
4427 | need_assert |= register_edge_assert_for (op, e, bsi, |
4428 | gimple_cond_code (last), | |
4429 | gimple_cond_lhs (last), | |
4430 | gimple_cond_rhs (last)); | |
a26a02d7 | 4431 | } |
227858d1 DN |
4432 | } |
4433 | ||
227858d1 DN |
4434 | return need_assert; |
4435 | } | |
4436 | ||
9bb6aa43 RG |
4437 | /* Compare two case labels sorting first by the destination label uid |
4438 | and then by the case value. */ | |
4439 | ||
4440 | static int | |
4441 | compare_case_labels (const void *p1, const void *p2) | |
4442 | { | |
741ac903 KG |
4443 | const_tree const case1 = *(const_tree const*)p1; |
4444 | const_tree const case2 = *(const_tree const*)p2; | |
9bb6aa43 RG |
4445 | unsigned int uid1 = DECL_UID (CASE_LABEL (case1)); |
4446 | unsigned int uid2 = DECL_UID (CASE_LABEL (case2)); | |
4447 | ||
4448 | if (uid1 < uid2) | |
4449 | return -1; | |
4450 | else if (uid1 == uid2) | |
4451 | { | |
4452 | /* Make sure the default label is first in a group. */ | |
4453 | if (!CASE_LOW (case1)) | |
4454 | return -1; | |
4455 | else if (!CASE_LOW (case2)) | |
4456 | return 1; | |
4457 | else | |
4458 | return tree_int_cst_compare (CASE_LOW (case1), CASE_LOW (case2)); | |
4459 | } | |
4460 | else | |
4461 | return 1; | |
4462 | } | |
4463 | ||
4464 | /* Determine whether the outgoing edges of BB should receive an | |
4465 | ASSERT_EXPR for each of the operands of BB's LAST statement. | |
4466 | The last statement of BB must be a SWITCH_EXPR. | |
4467 | ||
4468 | If any of the sub-graphs rooted at BB have an interesting use of | |
4469 | the predicate operands, an assert location node is added to the | |
4470 | list of assertions for the corresponding operands. */ | |
4471 | ||
4472 | static bool | |
726a989a | 4473 | find_switch_asserts (basic_block bb, gimple last) |
9bb6aa43 RG |
4474 | { |
4475 | bool need_assert; | |
726a989a | 4476 | gimple_stmt_iterator bsi; |
a26a02d7 | 4477 | tree op; |
9bb6aa43 | 4478 | edge e; |
726a989a RB |
4479 | tree vec2; |
4480 | size_t n = gimple_switch_num_labels(last); | |
109e637b | 4481 | #if GCC_VERSION >= 4000 |
9bb6aa43 | 4482 | unsigned int idx; |
109e637b JM |
4483 | #else |
4484 | /* Work around GCC 3.4 bug (PR 37086). */ | |
4485 | volatile unsigned int idx; | |
4486 | #endif | |
9bb6aa43 RG |
4487 | |
4488 | need_assert = false; | |
726a989a RB |
4489 | bsi = gsi_for_stmt (last); |
4490 | op = gimple_switch_index (last); | |
9bb6aa43 RG |
4491 | if (TREE_CODE (op) != SSA_NAME) |
4492 | return false; | |
4493 | ||
4494 | /* Build a vector of case labels sorted by destination label. */ | |
4495 | vec2 = make_tree_vec (n); | |
4496 | for (idx = 0; idx < n; ++idx) | |
726a989a | 4497 | TREE_VEC_ELT (vec2, idx) = gimple_switch_label (last, idx); |
9bb6aa43 RG |
4498 | qsort (&TREE_VEC_ELT (vec2, 0), n, sizeof (tree), compare_case_labels); |
4499 | ||
4500 | for (idx = 0; idx < n; ++idx) | |
4501 | { | |
4502 | tree min, max; | |
4503 | tree cl = TREE_VEC_ELT (vec2, idx); | |
4504 | ||
4505 | min = CASE_LOW (cl); | |
4506 | max = CASE_HIGH (cl); | |
4507 | ||
4508 | /* If there are multiple case labels with the same destination | |
4509 | we need to combine them to a single value range for the edge. */ | |
4510 | if (idx + 1 < n | |
4511 | && CASE_LABEL (cl) == CASE_LABEL (TREE_VEC_ELT (vec2, idx + 1))) | |
4512 | { | |
4513 | /* Skip labels until the last of the group. */ | |
4514 | do { | |
4515 | ++idx; | |
4516 | } while (idx < n | |
4517 | && CASE_LABEL (cl) == CASE_LABEL (TREE_VEC_ELT (vec2, idx))); | |
4518 | --idx; | |
4519 | ||
4520 | /* Pick up the maximum of the case label range. */ | |
4521 | if (CASE_HIGH (TREE_VEC_ELT (vec2, idx))) | |
4522 | max = CASE_HIGH (TREE_VEC_ELT (vec2, idx)); | |
4523 | else | |
4524 | max = CASE_LOW (TREE_VEC_ELT (vec2, idx)); | |
4525 | } | |
4526 | ||
4527 | /* Nothing to do if the range includes the default label until we | |
4528 | can register anti-ranges. */ | |
4529 | if (min == NULL_TREE) | |
4530 | continue; | |
4531 | ||
4532 | /* Find the edge to register the assert expr on. */ | |
4533 | e = find_edge (bb, label_to_block (CASE_LABEL (cl))); | |
4534 | ||
9bb6aa43 RG |
4535 | /* Register the necessary assertions for the operand in the |
4536 | SWITCH_EXPR. */ | |
a26a02d7 RAE |
4537 | need_assert |= register_edge_assert_for (op, e, bsi, |
4538 | max ? GE_EXPR : EQ_EXPR, | |
4539 | op, | |
4540 | fold_convert (TREE_TYPE (op), | |
4541 | min)); | |
9bb6aa43 RG |
4542 | if (max) |
4543 | { | |
a26a02d7 RAE |
4544 | need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR, |
4545 | op, | |
4546 | fold_convert (TREE_TYPE (op), | |
4547 | max)); | |
9bb6aa43 RG |
4548 | } |
4549 | } | |
4550 | ||
9bb6aa43 RG |
4551 | return need_assert; |
4552 | } | |
4553 | ||
227858d1 DN |
4554 | |
4555 | /* Traverse all the statements in block BB looking for statements that | |
4556 | may generate useful assertions for the SSA names in their operand. | |
4557 | If a statement produces a useful assertion A for name N_i, then the | |
4558 | list of assertions already generated for N_i is scanned to | |
4559 | determine if A is actually needed. | |
4560 | ||
4561 | If N_i already had the assertion A at a location dominating the | |
4562 | current location, then nothing needs to be done. Otherwise, the | |
4563 | new location for A is recorded instead. | |
4564 | ||
4565 | 1- For every statement S in BB, all the variables used by S are | |
4566 | added to bitmap FOUND_IN_SUBGRAPH. | |
4567 | ||
4568 | 2- If statement S uses an operand N in a way that exposes a known | |
4569 | value range for N, then if N was not already generated by an | |
4570 | ASSERT_EXPR, create a new assert location for N. For instance, | |
4571 | if N is a pointer and the statement dereferences it, we can | |
4572 | assume that N is not NULL. | |
4573 | ||
4574 | 3- COND_EXPRs are a special case of #2. We can derive range | |
4575 | information from the predicate but need to insert different | |
4576 | ASSERT_EXPRs for each of the sub-graphs rooted at the | |
4577 | conditional block. If the last statement of BB is a conditional | |
4578 | expression of the form 'X op Y', then | |
4579 | ||
4580 | a) Remove X and Y from the set FOUND_IN_SUBGRAPH. | |
4581 | ||
4582 | b) If the conditional is the only entry point to the sub-graph | |
4583 | corresponding to the THEN_CLAUSE, recurse into it. On | |
4584 | return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then | |
4585 | an ASSERT_EXPR is added for the corresponding variable. | |
4586 | ||
4587 | c) Repeat step (b) on the ELSE_CLAUSE. | |
4588 | ||
4589 | d) Mark X and Y in FOUND_IN_SUBGRAPH. | |
4590 | ||
4591 | For instance, | |
4592 | ||
4593 | if (a == 9) | |
4594 | b = a; | |
4595 | else | |
4596 | b = c + 1; | |
4597 | ||
4598 | In this case, an assertion on the THEN clause is useful to | |
4599 | determine that 'a' is always 9 on that edge. However, an assertion | |
4600 | on the ELSE clause would be unnecessary. | |
4601 | ||
4602 | 4- If BB does not end in a conditional expression, then we recurse | |
4603 | into BB's dominator children. | |
4604 | ||
4605 | At the end of the recursive traversal, every SSA name will have a | |
4606 | list of locations where ASSERT_EXPRs should be added. When a new | |
4607 | location for name N is found, it is registered by calling | |
4608 | register_new_assert_for. That function keeps track of all the | |
4609 | registered assertions to prevent adding unnecessary assertions. | |
4610 | For instance, if a pointer P_4 is dereferenced more than once in a | |
4611 | dominator tree, only the location dominating all the dereference of | |
4612 | P_4 will receive an ASSERT_EXPR. | |
4613 | ||
4614 | If this function returns true, then it means that there are names | |
4615 | for which we need to generate ASSERT_EXPRs. Those assertions are | |
9bb6aa43 | 4616 | inserted by process_assert_insertions. */ |
227858d1 DN |
4617 | |
4618 | static bool | |
c4ab2baa | 4619 | find_assert_locations_1 (basic_block bb, sbitmap live) |
227858d1 | 4620 | { |
726a989a RB |
4621 | gimple_stmt_iterator si; |
4622 | gimple last; | |
4623 | gimple phi; | |
227858d1 | 4624 | bool need_assert; |
227858d1 DN |
4625 | |
4626 | need_assert = false; | |
c4ab2baa | 4627 | last = last_stmt (bb); |
227858d1 | 4628 | |
c4ab2baa RG |
4629 | /* If BB's last statement is a conditional statement involving integer |
4630 | operands, determine if we need to add ASSERT_EXPRs. */ | |
4631 | if (last | |
4632 | && gimple_code (last) == GIMPLE_COND | |
4633 | && !fp_predicate (last) | |
4634 | && !ZERO_SSA_OPERANDS (last, SSA_OP_USE)) | |
4635 | need_assert |= find_conditional_asserts (bb, last); | |
227858d1 | 4636 | |
c4ab2baa RG |
4637 | /* If BB's last statement is a switch statement involving integer |
4638 | operands, determine if we need to add ASSERT_EXPRs. */ | |
4639 | if (last | |
4640 | && gimple_code (last) == GIMPLE_SWITCH | |
4641 | && !ZERO_SSA_OPERANDS (last, SSA_OP_USE)) | |
4642 | need_assert |= find_switch_asserts (bb, last); | |
227858d1 DN |
4643 | |
4644 | /* Traverse all the statements in BB marking used names and looking | |
4645 | for statements that may infer assertions for their used operands. */ | |
726a989a | 4646 | for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) |
227858d1 | 4647 | { |
726a989a RB |
4648 | gimple stmt; |
4649 | tree op; | |
227858d1 DN |
4650 | ssa_op_iter i; |
4651 | ||
726a989a | 4652 | stmt = gsi_stmt (si); |
227858d1 DN |
4653 | |
4654 | /* See if we can derive an assertion for any of STMT's operands. */ | |
4655 | FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE) | |
4656 | { | |
4657 | tree value; | |
4658 | enum tree_code comp_code; | |
4659 | ||
c4ab2baa RG |
4660 | /* Mark OP in our live bitmap. */ |
4661 | SET_BIT (live, SSA_NAME_VERSION (op)); | |
227858d1 | 4662 | |
227858d1 DN |
4663 | /* If OP is used in such a way that we can infer a value |
4664 | range for it, and we don't find a previous assertion for | |
4665 | it, create a new assertion location node for OP. */ | |
4666 | if (infer_value_range (stmt, op, &comp_code, &value)) | |
4667 | { | |
917f1b7e | 4668 | /* If we are able to infer a nonzero value range for OP, |
60c9ad46 JL |
4669 | then walk backwards through the use-def chain to see if OP |
4670 | was set via a typecast. | |
4671 | ||
4672 | If so, then we can also infer a nonzero value range | |
4673 | for the operand of the NOP_EXPR. */ | |
4674 | if (comp_code == NE_EXPR && integer_zerop (value)) | |
4675 | { | |
4676 | tree t = op; | |
726a989a | 4677 | gimple def_stmt = SSA_NAME_DEF_STMT (t); |
60c9ad46 | 4678 | |
726a989a RB |
4679 | while (is_gimple_assign (def_stmt) |
4680 | && gimple_assign_rhs_code (def_stmt) == NOP_EXPR | |
07beea0d | 4681 | && TREE_CODE |
726a989a | 4682 | (gimple_assign_rhs1 (def_stmt)) == SSA_NAME |
07beea0d | 4683 | && POINTER_TYPE_P |
726a989a | 4684 | (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))) |
60c9ad46 | 4685 | { |
726a989a | 4686 | t = gimple_assign_rhs1 (def_stmt); |
60c9ad46 JL |
4687 | def_stmt = SSA_NAME_DEF_STMT (t); |
4688 | ||
4689 | /* Note we want to register the assert for the | |
4690 | operand of the NOP_EXPR after SI, not after the | |
4691 | conversion. */ | |
4692 | if (! has_single_use (t)) | |
4693 | { | |
2ab8dbf4 | 4694 | register_new_assert_for (t, t, comp_code, value, |
60c9ad46 JL |
4695 | bb, NULL, si); |
4696 | need_assert = true; | |
4697 | } | |
4698 | } | |
4699 | } | |
4700 | ||
4701 | /* If OP is used only once, namely in this STMT, don't | |
4702 | bother creating an ASSERT_EXPR for it. Such an | |
4703 | ASSERT_EXPR would do nothing but increase compile time. */ | |
4704 | if (!has_single_use (op)) | |
4705 | { | |
2ab8dbf4 RG |
4706 | register_new_assert_for (op, op, comp_code, value, |
4707 | bb, NULL, si); | |
60c9ad46 JL |
4708 | need_assert = true; |
4709 | } | |
0bca51f0 DN |
4710 | } |
4711 | } | |
0bca51f0 DN |
4712 | } |
4713 | ||
c4ab2baa RG |
4714 | /* Traverse all PHI nodes in BB marking used operands. */ |
4715 | for (si = gsi_start_phis (bb); !gsi_end_p(si); gsi_next (&si)) | |
4716 | { | |
4717 | use_operand_p arg_p; | |
4718 | ssa_op_iter i; | |
4719 | phi = gsi_stmt (si); | |
9bb6aa43 | 4720 | |
c4ab2baa RG |
4721 | FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE) |
4722 | { | |
4723 | tree arg = USE_FROM_PTR (arg_p); | |
4724 | if (TREE_CODE (arg) == SSA_NAME) | |
4725 | SET_BIT (live, SSA_NAME_VERSION (arg)); | |
4726 | } | |
4727 | } | |
227858d1 DN |
4728 | |
4729 | return need_assert; | |
4730 | } | |
4731 | ||
/* Do an RPO walk over the function computing SSA name liveness
   on-the-fly and deciding on assert expressions to insert.
   Returns true if there are assert expressions to be inserted.

   Liveness is computed backwards: blocks are visited in reverse
   RPO order (i.e. from the exits towards the entry), and each
   block's live set is merged into its predecessors'.  Back edges
   (EDGE_DFS_BACK) are skipped so the single sweep terminates.
   The global LIVE array is indexed by basic block index; entries
   are allocated lazily and freed as soon as no earlier-RPO block
   still needs them (tracked via LAST_RPO).  */

static bool
find_assert_locations (void)
{
  /* RPO maps position -> block index; BB_RPO is the inverse map.  */
  int *rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
  int *bb_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
  int *last_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
  int rpo_cnt, i;
  bool need_asserts;

  live = XCNEWVEC (sbitmap, last_basic_block + NUM_FIXED_BLOCKS);
  rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
  for (i = 0; i < rpo_cnt; ++i)
    bb_rpo[rpo[i]] = i;

  need_asserts = false;
  /* Walk blocks in reverse RPO order.  */
  for (i = rpo_cnt-1; i >= 0; --i)
    {
      basic_block bb = BASIC_BLOCK (rpo[i]);
      edge e;
      edge_iterator ei;

      /* Lazily allocate the live set for this block.  */
      if (!live[rpo[i]])
	{
	  live[rpo[i]] = sbitmap_alloc (num_ssa_names);
	  sbitmap_zero (live[rpo[i]]);
	}

      /* Process BB and update the live information with uses in
	 this block.  */
      need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);

      /* Merge liveness into the predecessor blocks and free it.  */
      if (!sbitmap_empty_p (live[rpo[i]]))
	{
	  int pred_rpo = i;
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      int pred = e->src->index;
	      /* Ignore back edges; the sweep only propagates
		 liveness forward in RPO order.  */
	      if (e->flags & EDGE_DFS_BACK)
		continue;

	      if (!live[pred])
		{
		  live[pred] = sbitmap_alloc (num_ssa_names);
		  sbitmap_zero (live[pred]);
		}
	      sbitmap_a_or_b (live[pred], live[pred], live[rpo[i]]);

	      if (bb_rpo[pred] < pred_rpo)
		pred_rpo = bb_rpo[pred];
	    }

	  /* Record the RPO number of the last visited block that needs
	     live information from this block.  */
	  last_rpo[rpo[i]] = pred_rpo;
	}
      else
	{
	  /* Nothing live out of this block; release its set now.  */
	  sbitmap_free (live[rpo[i]]);
	  live[rpo[i]] = NULL;
	}

      /* We can free all successors live bitmaps if all their
	 predecessors have been visited already.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (last_rpo[e->dest->index] == i
	    && live[e->dest->index])
	  {
	    sbitmap_free (live[e->dest->index]);
	    live[e->dest->index] = NULL;
	  }
    }

  XDELETEVEC (rpo);
  XDELETEVEC (bb_rpo);
  XDELETEVEC (last_rpo);
  /* Free any live sets that survived the sweep.  */
  for (i = 0; i < last_basic_block + NUM_FIXED_BLOCKS; ++i)
    if (live[i])
      sbitmap_free (live[i]);
  XDELETEVEC (live);

  return need_asserts;
}
227858d1 DN |
4819 | |
4820 | /* Create an ASSERT_EXPR for NAME and insert it in the location | |
4821 | indicated by LOC. Return true if we made any edge insertions. */ | |
4822 | ||
4823 | static bool | |
4824 | process_assert_insertions_for (tree name, assert_locus_t loc) | |
4825 | { | |
4826 | /* Build the comparison expression NAME_i COMP_CODE VAL. */ | |
726a989a RB |
4827 | gimple stmt; |
4828 | tree cond; | |
4829 | gimple assert_stmt; | |
227858d1 DN |
4830 | edge_iterator ei; |
4831 | edge e; | |
4832 | ||
2ab8dbf4 | 4833 | cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val); |
726a989a | 4834 | assert_stmt = build_assert_expr_for (cond, name); |
227858d1 | 4835 | if (loc->e) |
0bca51f0 | 4836 | { |
227858d1 DN |
4837 | /* We have been asked to insert the assertion on an edge. This |
4838 | is used only by COND_EXPR and SWITCH_EXPR assertions. */ | |
4839 | #if defined ENABLE_CHECKING | |
726a989a RB |
4840 | gcc_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND |
4841 | || gimple_code (gsi_stmt (loc->si)) == GIMPLE_SWITCH); | |
227858d1 | 4842 | #endif |
0bca51f0 | 4843 | |
726a989a | 4844 | gsi_insert_on_edge (loc->e, assert_stmt); |
227858d1 DN |
4845 | return true; |
4846 | } | |
9fabf0d4 | 4847 | |
227858d1 DN |
4848 | /* Otherwise, we can insert right after LOC->SI iff the |
4849 | statement must not be the last statement in the block. */ | |
726a989a | 4850 | stmt = gsi_stmt (loc->si); |
227858d1 DN |
4851 | if (!stmt_ends_bb_p (stmt)) |
4852 | { | |
726a989a | 4853 | gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT); |
227858d1 DN |
4854 | return false; |
4855 | } | |
9fabf0d4 | 4856 | |
227858d1 DN |
4857 | /* If STMT must be the last statement in BB, we can only insert new |
4858 | assertions on the non-abnormal edge out of BB. Note that since | |
4859 | STMT is not control flow, there may only be one non-abnormal edge | |
4860 | out of BB. */ | |
4861 | FOR_EACH_EDGE (e, ei, loc->bb->succs) | |
4862 | if (!(e->flags & EDGE_ABNORMAL)) | |
4863 | { | |
726a989a | 4864 | gsi_insert_on_edge (e, assert_stmt); |
227858d1 DN |
4865 | return true; |
4866 | } | |
0bca51f0 | 4867 | |
227858d1 DN |
4868 | gcc_unreachable (); |
4869 | } | |
0bca51f0 | 4870 | |
0bca51f0 | 4871 | |
/* Process all the insertions registered for every name N_i registered
   in NEED_ASSERT_FOR.  The list of assertions to be inserted are
   found in ASSERTS_FOR[i].  Each assert_locus_t node is freed as it
   is processed; edge insertions are committed in one batch at the
   end.  */

static void
process_assert_insertions (void)
{
  unsigned i;
  bitmap_iterator bi;
  bool update_edges_p = false;
  int num_asserts = 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_all_asserts (dump_file);

  EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
    {
      assert_locus_t loc = asserts_for[i];
      gcc_assert (loc);

      /* Walk the linked list of assert locations for SSA name I,
	 freeing each node after its assertion is inserted.  */
      while (loc)
	{
	  assert_locus_t next = loc->next;
	  update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
	  free (loc);
	  loc = next;
	  num_asserts++;
	}
    }

  /* Commit any pending edge insertions in one pass.  */
  if (update_edges_p)
    gsi_commit_edge_inserts ();

  statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
			    num_asserts);
}
4908 | ||
4909 | ||
/* Traverse the flowgraph looking for conditional jumps to insert range
   expressions.  These range expressions are meant to provide information
   to optimizations that need to reason in terms of value ranges.  They
   will not be expanded into RTL.  For instance, given:

   x = ...
   y = ...
   if (x < y)
     y = x - 2;
   else
     x = y + 3;

   this pass will transform the code into:

   x = ...
   y = ...
   if (x < y)
    {
      x = ASSERT_EXPR <x, x < y>
      y = x - 2
    }
   else
    {
      y = ASSERT_EXPR <y, x <= y>
      x = y + 3
    }

   The idea is that once copy and constant propagation have run, other
   optimizations will be able to determine what ranges of values can 'x'
   take in different paths of the code, simply by checking the reaching
   definition of 'x'.  */

static void
insert_range_assertions (void)
{
  /* NEED_ASSERT_FOR / ASSERTS_FOR are filled by find_assert_locations
     and consumed by process_assert_insertions.  */
  need_assert_for = BITMAP_ALLOC (NULL);
  asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);

  calculate_dominance_info (CDI_DOMINATORS);

  if (find_assert_locations ())
    {
      process_assert_insertions ();
      /* The inserted ASSERT_EXPRs define new SSA names; renumber.  */
      update_ssa (TODO_update_ssa_no_phi);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
      dump_function_to_file (current_function_decl, dump_file, dump_flags);
    }

  free (asserts_for);
  BITMAP_FREE (need_assert_for);
}
4965 | ||
/* Checks one ARRAY_REF in REF, located at LOCATION.  Ignores flexible
   arrays and "struct" hacks.  If VRP can determine that the
   array subscript is a constant, check if it is outside valid
   range.  If the array subscript is a RANGE, warn if it is
   non-overlapping with valid range.
   IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR
   (taking the address one past the end of an array is valid).
   Warnings are suppressed on subsequent visits by setting
   TREE_NO_WARNING on REF.  */

static void
check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
{
  value_range_t* vr = NULL;
  tree low_sub, up_sub;
  tree low_bound, up_bound = array_ref_up_bound (ref);

  /* The subscript is operand 1 of the ARRAY_REF.  */
  low_sub = up_sub = TREE_OPERAND (ref, 1);

  if (!up_bound || TREE_NO_WARNING (ref)
      || TREE_CODE (up_bound) != INTEGER_CST
      /* Can not check flexible arrays.  */
      || (TYPE_SIZE (TREE_TYPE (ref)) == NULL_TREE
	  && TYPE_DOMAIN (TREE_TYPE (ref)) != NULL_TREE
	  && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (ref))) == NULL_TREE)
      /* Accesses after the end of arrays of size 0 (gcc
	 extension) and 1 are likely intentional ("struct
	 hack").  */
      || compare_tree_int (up_bound, 1) <= 0)
    return;

  low_bound = array_ref_low_bound (ref);

  /* If the subscript is an SSA name with a known range, check the
     range endpoints instead of the name itself.  Note that for a
     VR_RANGE the roles are swapped: LOW_SUB gets the max and UP_SUB
     the min, so the comparisons below test the worst case.  */
  if (TREE_CODE (low_sub) == SSA_NAME)
    {
      vr = get_value_range (low_sub);
      if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
	{
	  low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
	  up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
	}
    }

  if (vr && vr->type == VR_ANTI_RANGE)
    {
      /* An anti-range warns only if the excluded interval covers the
	 whole valid index range on both sides.  */
      if (TREE_CODE (up_sub) == INTEGER_CST
	  && tree_int_cst_lt (up_bound, up_sub)
	  && TREE_CODE (low_sub) == INTEGER_CST
	  && tree_int_cst_lt (low_sub, low_bound))
	{
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is outside array bounds");
	  TREE_NO_WARNING (ref) = 1;
	}
    }
  else if (TREE_CODE (up_sub) == INTEGER_CST
	   && tree_int_cst_lt (up_bound, up_sub)
	   && !tree_int_cst_equal (up_bound, up_sub)
	   /* With IGNORE_OFF_BY_ONE, bound+1 is still acceptable.  */
	   && (!ignore_off_by_one
	       || !tree_int_cst_equal (int_const_binop (PLUS_EXPR,
							up_bound,
							integer_one_node,
							0),
				       up_sub)))
    {
      warning_at (location, OPT_Warray_bounds,
		  "array subscript is above array bounds");
      TREE_NO_WARNING (ref) = 1;
    }
  else if (TREE_CODE (low_sub) == INTEGER_CST
	   && tree_int_cst_lt (low_sub, low_bound))
    {
      warning_at (location, OPT_Warray_bounds,
		  "array subscript is below array bounds");
      TREE_NO_WARNING (ref) = 1;
    }
}
5040 | ||
/* Searches if the expr T, located at LOCATION computes
   address of an ARRAY_REF, and call check_array_ref on it.
   T is first traced back through trivial SSA copy chains
   (single-RHS GIMPLE assignments) to its defining ADDR_EXPR,
   if any.  */

static void
search_for_addr_array (tree t, location_t location)
{
  /* Follow SSA name definitions back through plain copies.  */
  while (TREE_CODE (t) == SSA_NAME)
    {
      gimple g = SSA_NAME_DEF_STMT (t);

      if (gimple_code (g) != GIMPLE_ASSIGN)
	return;

      if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
	  != GIMPLE_SINGLE_RHS)
	return;

      t = gimple_assign_rhs1 (g);
    }


  /* We are only interested in addresses of ARRAY_REF's.  */
  if (TREE_CODE (t) != ADDR_EXPR)
    return;

  /* Check each ARRAY_REFs in the reference chain.  */
  do
    {
      if (TREE_CODE (t) == ARRAY_REF)
	check_array_ref (location, t, true /*ignore_off_by_one*/);

      t = TREE_OPERAND (t, 0);
    }
  while (handled_component_p (t));
}
5076 | ||
/* walk_tree() callback that checks if *TP is
   an ARRAY_REF inside an ADDR_EXPR (in which an array
   subscript one outside the valid range is allowed).  Call
   check_array_ref for each ARRAY_REF found.  The location is
   passed in DATA (a location_t* inside the walk_stmt_info),
   used as a fallback when the expression carries none of its
   own.  Always returns NULL_TREE so the walk continues.  */

static tree
check_array_bounds (tree *tp, int *walk_subtree, void *data)
{
  tree t = *tp;
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  location_t location;

  /* Prefer the expression's own location; fall back to the
     statement location supplied by the caller.  */
  if (EXPR_HAS_LOCATION (t))
    location = EXPR_LOCATION (t);
  else
    {
      location_t *locp = (location_t *) wi->info;
      location = *locp;
    }

  *walk_subtree = TRUE;

  if (TREE_CODE (t) == ARRAY_REF)
    check_array_ref (location, t, false /*ignore_off_by_one*/);

  if (TREE_CODE (t) == INDIRECT_REF
      || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
    search_for_addr_array (TREE_OPERAND (t, 0), location);

  /* Addresses are handled by search_for_addr_array above with the
     off-by-one allowance, so do not recurse into ADDR_EXPRs.  */
  if (TREE_CODE (t) == ADDR_EXPR)
    *walk_subtree = FALSE;

  return NULL_TREE;
}
5112 | ||
/* Walk over all statements of all reachable BBs and call check_array_bounds
   on them.  Blocks whose single incoming edge is scheduled for removal,
   or is the statically-dead arm of a constant GIMPLE_COND, are skipped
   to avoid warning about unreachable code.  */

static void
check_all_array_refs (void)
{
  basic_block bb;
  gimple_stmt_iterator si;

  FOR_EACH_BB (bb)
    {
      /* Skip bb's that are clearly unreachable.  */
      if (single_pred_p (bb))
	{
	  int i;
	  bool reachable = true;
	  edge e2;
	  edge e = EDGE_PRED (bb, 0);
	  basic_block pred_bb = e->src;
	  gimple ls = NULL;

	  /* The edge may already be queued for removal by VRP.  */
	  for (i = 0; VEC_iterate (edge, to_remove_edges, i, e2); ++i)
	    if (e == e2)
	      {
		reachable = false;
		break;
	      }

	  if (!reachable)
	    continue;

	  if (!gsi_end_p (gsi_last_bb (pred_bb)))
	    ls = gsi_stmt (gsi_last_bb (pred_bb));

	  /* A constant condition in the predecessor makes the edge
	     taken into this block statically dead.  */
	  if (ls && gimple_code (ls) == GIMPLE_COND
	      && ((gimple_cond_false_p (ls)
		   && (EDGE_PRED (bb, 0)->flags & EDGE_TRUE_VALUE))
		  || (gimple_cond_true_p (ls)
		      && (EDGE_PRED (bb, 0)->flags & EDGE_FALSE_VALUE))))
	    continue;
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple stmt = gsi_stmt (si);
	  struct walk_stmt_info wi;
	  /* Statements without a location cannot produce a usable
	     diagnostic; skip them.  */
	  if (!gimple_has_location (stmt))
	    continue;

	  if (is_gimple_call (stmt))
	    {
	      /* For calls, look for array addresses in each argument.  */
	      size_t i;
	      size_t n = gimple_call_num_args (stmt);
	      for (i = 0; i < n; i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  search_for_addr_array (arg, gimple_location (stmt));
		}
	    }
	  else
	    {
	      /* Otherwise walk every operand of the statement, passing
		 the statement's location as fallback via wi.info.  */
	      memset (&wi, 0, sizeof (wi));
	      wi.info = CONST_CAST (void *, (const void *)
				    gimple_location_ptr (stmt));

	      walk_gimple_op (gsi_stmt (si),
			      check_array_bounds,
			      &wi);
	    }
	}
    }
}
0bca51f0 | 5184 | |
/* Convert range assertion expressions into the implied copies and
   copy propagate away the copies.  Doing the trivial copy propagation
   here avoids the need to run the full copy propagation pass after
   VRP.

   FIXME, this will eventually lead to copy propagation removing the
   names that had useful range information attached to them.  For
   instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
   then N_i will have the range [3, +INF].

   However, by converting the assertion into the implied copy
   operation N_i = N_j, we will then copy-propagate N_j into the uses
   of N_i and lose the range information.  We may want to hold on to
   ASSERT_EXPRs a little while longer as the ranges could be used in
   things like jump threading.

   The problem with keeping ASSERT_EXPRs around is that passes after
   VRP need to handle them appropriately.

   Another approach would be to make the range information a first
   class property of the SSA_NAME so that it can be queried from
   any pass.  This is made somewhat more complex by the need for
   multiple ranges to be associated with one SSA_NAME.  */

static void
remove_range_assertions (void)
{
  basic_block bb;
  gimple_stmt_iterator si;

  /* Note that the BSI iterator bump happens at the bottom of the
     loop and no bump is necessary if we're removing the statement
     referenced by the current BSI.  */
  FOR_EACH_BB (bb)
    for (si = gsi_start_bb (bb); !gsi_end_p (si);)
      {
	gimple stmt = gsi_stmt (si);
	gimple use_stmt;

	if (is_gimple_assign (stmt)
	    && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
	  {
	    tree rhs = gimple_assign_rhs1 (stmt);
	    tree var;
	    tree cond = fold (ASSERT_EXPR_COND (rhs));
	    use_operand_p use_p;
	    imm_use_iterator iter;

	    /* A condition folding to false would mean the assertion
	       is unsatisfiable, which must not happen here.  */
	    gcc_assert (cond != boolean_false_node);

	    /* Propagate the RHS into every use of the LHS.  */
	    var = ASSERT_EXPR_VAR (rhs);
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter,
				   gimple_assign_lhs (stmt))
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		{
		  SET_USE (use_p, var);
		  gcc_assert (TREE_CODE (var) == SSA_NAME);
		}

	    /* And finally, remove the copy, it is not needed.  */
	    gsi_remove (&si, true);
	    release_defs (stmt);
	  }
	else
	  gsi_next (&si);
      }
}
5253 | ||
5254 | ||
/* Return true if STMT is interesting for VRP, i.e. it can produce a
   useful value range: a PHI or assignment/call defining an integral
   or pointer SSA name, or a conditional/switch whose outcome VRP can
   refine.  */

static bool
stmt_interesting_for_vrp (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI
      && is_gimple_reg (gimple_phi_result (stmt))
      && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))
	  || POINTER_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))))
    return true;
  else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
    {
      tree lhs = gimple_get_lhs (stmt);

      /* In general, assignments with virtual operands are not useful
	 for deriving ranges, with the obvious exception of calls to
	 builtin functions.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME
	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
	  && ((is_gimple_call (stmt)
	       && gimple_call_fndecl (stmt) != NULL_TREE
	       && DECL_IS_BUILTIN (gimple_call_fndecl (stmt)))
	      || !gimple_vuse (stmt)))
	return true;
    }
  else if (gimple_code (stmt) == GIMPLE_COND
	   || gimple_code (stmt) == GIMPLE_SWITCH)
    return true;

  return false;
}
5287 | ||
5288 | ||
/* Initialize local data structures for VRP: allocate the VR_VALUE and
   VR_PHI_EDGE_COUNTS arrays (indexed by SSA version) and mark every
   PHI and statement for simulation by the SSA propagator.  Statements
   that are not interesting for VRP get their definitions set to
   VARYING up front and are excluded from simulation.  */

static void
vrp_initialize (void)
{
  basic_block bb;

  vr_value = XCNEWVEC (value_range_t *, num_ssa_names);
  vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator si;

      /* Classify PHI nodes.  */
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple phi = gsi_stmt (si);
	  if (!stmt_interesting_for_vrp (phi))
	    {
	      tree lhs = PHI_RESULT (phi);
	      set_value_range_to_varying (get_value_range (lhs));
	      prop_set_simulate_again (phi, false);
	    }
	  else
	    prop_set_simulate_again (phi, true);
	}

      /* Classify ordinary statements.  */
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple stmt = gsi_stmt (si);

	  if (!stmt_interesting_for_vrp (stmt))
	    {
	      ssa_op_iter i;
	      tree def;
	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
		set_value_range_to_varying (get_value_range (def));
	      prop_set_simulate_again (stmt, false);
	    }
	  else
	    {
	      prop_set_simulate_again (stmt, true);
	    }
	}
    }
}
5335 | ||
5336 | ||
/* Visit assignment STMT.  If it produces an interesting range, record
   the SSA name in *OUTPUT_P.  Returns SSA_PROP_INTERESTING when a new
   (non-varying) range was recorded for the LHS, SSA_PROP_VARYING when
   the range degraded to varying (or the statement is not trackable),
   and SSA_PROP_NOT_INTERESTING when the range did not change.  */

static enum ssa_prop_result
vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
{
  tree def, lhs;
  ssa_op_iter iter;
  enum gimple_code code = gimple_code (stmt);
  lhs = gimple_get_lhs (stmt);

  /* We only keep track of ranges in integral and pointer types.  */
  if (TREE_CODE (lhs) == SSA_NAME
      && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	   /* It is valid to have NULL MIN/MAX values on a type.  See
	      build_range_type.  */
	   && TYPE_MIN_VALUE (TREE_TYPE (lhs))
	   && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
	  || POINTER_TYPE_P (TREE_TYPE (lhs))))
    {
      struct loop *l;
      value_range_t new_vr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };

      /* Calls get ranges from known builtin semantics; plain
	 assignments from their RHS expression.  */
      if (code == GIMPLE_CALL)
	extract_range_basic (&new_vr, stmt);
      else
	extract_range_from_assignment (&new_vr, stmt);

      /* If STMT is inside a loop, we may be able to know something
	 else about the range of LHS by examining scalar evolution
	 information.  */
      if (current_loops && (l = loop_containing_stmt (stmt)))
	adjust_range_with_scev (&new_vr, l, stmt, lhs);

      if (update_value_range (lhs, &new_vr))
	{
	  *output_p = lhs;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Found new range for ");
	      print_generic_expr (dump_file, lhs, 0);
	      fprintf (dump_file, ": ");
	      dump_value_range (dump_file, &new_vr);
	      fprintf (dump_file, "\n\n");
	    }

	  if (new_vr.type == VR_VARYING)
	    return SSA_PROP_VARYING;

	  return SSA_PROP_INTERESTING;
	}

      return SSA_PROP_NOT_INTERESTING;
    }

  /* Every other statement produces no useful ranges.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
    set_value_range_to_varying (get_value_range (def));

  return SSA_PROP_VARYING;
}
5399 | ||
/* Helper that gets the value range of the SSA_NAME with version I
   or a symbolic range containing the SSA_NAME only if the value range
   is varying or undefined.  Returns the range by value; the stored
   range in VR_VALUE is not modified.  */

static inline value_range_t
get_vr_for_comparison (int i)
{
  value_range_t vr = *(vr_value[i]);

  /* If name N_i does not have a valid range, use N_i as its own
     range.  This allows us to compare against names that may
     have N_i in their ranges.  */
  if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
    {
      vr.type = VR_RANGE;
      vr.min = ssa_name (i);
      vr.max = ssa_name (i);
    }

  return vr;
}
0bca51f0 | 5421 | |
/* Compare all the value ranges for names equivalent to VAR with VAL
   using comparison code COMP.  Return the same value returned by
   compare_range_with_value, including the setting of
   *STRICT_OVERFLOW_P.  *STRICT_OVERFLOW_P is set to true only when a
   definite answer was produced and every contributing comparison
   relied on an overflow infinity.  Returns NULL_TREE when members of
   the equivalence set disagree (treated as don't-know).  */

static tree
compare_name_with_value (enum tree_code comp, tree var, tree val,
			 bool *strict_overflow_p)
{
  bitmap_iterator bi;
  unsigned i;
  bitmap e;
  tree retval, t;
  int used_strict_overflow;
  bool sop;
  value_range_t equiv_vr;

  /* Get the set of equivalences for VAR.  */
  e = get_value_range (var)->equiv;

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Compare vars' value range with val.  */
  equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
  sop = false;
  retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
  if (retval)
    used_strict_overflow = sop ? 1 : 0;

  /* If the equiv set is empty we have done all work we need to do.  */
  if (e == NULL)
    {
      if (retval
	  && used_strict_overflow > 0)
	*strict_overflow_p = true;
      return retval;
    }

  /* Also compare every member of VAR's equivalence set; all answers
     must agree.  */
  EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
    {
      equiv_vr = get_vr_for_comparison (i);
      sop = false;
      t = compare_range_with_value (comp, &equiv_vr, val, &sop);
      if (t)
	{
	  /* If we get different answers from different members
	     of the equivalence set this check must be in a dead
	     code region.  Folding it to a trap representation
	     would be correct here.  For now just return don't-know.  */
	  if (retval != NULL
	      && t != retval)
	    {
	      retval = NULL_TREE;
	      break;
	    }
	  retval = t;

	  if (!sop)
	    used_strict_overflow = 0;
	  else if (used_strict_overflow < 0)
	    used_strict_overflow = 1;
	}
    }

  if (retval
      && used_strict_overflow > 0)
    *strict_overflow_p = true;

  return retval;
}
5494 | ||
5495 | ||
/* Given a comparison code COMP and names N1 and N2, compare all the
   ranges equivalent to N1 against all the ranges equivalent to N2
   to determine the value of N1 COMP N2.  Return the same value
   returned by compare_ranges.  Set *STRICT_OVERFLOW_P to indicate
   whether we relied on an overflow infinity in the comparison.
   Returns NULL_TREE if no definite, consistent answer exists.
   N1 and N2 are temporarily added to their own equivalence sets
   for the duration of the comparison and removed again on every
   exit path.  */

static tree
compare_names (enum tree_code comp, tree n1, tree n2,
	       bool *strict_overflow_p)
{
  tree t, retval;
  bitmap e1, e2;
  bitmap_iterator bi1, bi2;
  unsigned i1, i2;
  int used_strict_overflow;
  /* Persistent scratch bitmaps reused across calls when a name has
     no equivalence set of its own.  */
  static bitmap_obstack *s_obstack = NULL;
  static bitmap s_e1 = NULL, s_e2 = NULL;

  /* Compare the ranges of every name equivalent to N1 against the
     ranges of every name equivalent to N2.  */
  e1 = get_value_range (n1)->equiv;
  e2 = get_value_range (n2)->equiv;

  /* Use the fake bitmaps if e1 or e2 are not available.  */
  if (s_obstack == NULL)
    {
      s_obstack = XNEW (bitmap_obstack);
      bitmap_obstack_initialize (s_obstack);
      s_e1 = BITMAP_ALLOC (s_obstack);
      s_e2 = BITMAP_ALLOC (s_obstack);
    }
  if (e1 == NULL)
    e1 = s_e1;
  if (e2 == NULL)
    e2 = s_e2;

  /* Add N1 and N2 to their own set of equivalences to avoid
     duplicating the body of the loop just to check N1 and N2
     ranges.  */
  bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_set_bit (e2, SSA_NAME_VERSION (n2));

  /* If the equivalence sets have a common intersection, then the two
     names can be compared without checking their ranges.  */
  if (bitmap_intersect_p (e1, e2))
    {
      bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
      bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));

      /* Equivalent names satisfy exactly the reflexive comparisons.  */
      return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
	     ? boolean_true_node
	     : boolean_false_node;
    }

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Otherwise, compare all the equivalent ranges.  First, add N1 and
     N2 to their own set of equivalences to avoid duplicating the body
     of the loop just to check N1 and N2 ranges.  */
  EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
    {
      value_range_t vr1 = get_vr_for_comparison (i1);

      t = retval = NULL_TREE;
      EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
	{
	  bool sop = false;

	  value_range_t vr2 = get_vr_for_comparison (i2);

	  t = compare_ranges (comp, &vr1, &vr2, &sop);
	  if (t)
	    {
	      /* If we get different answers from different members
		 of the equivalence set this check must be in a dead
		 code region.  Folding it to a trap representation
		 would be correct here.  For now just return don't-know.  */
	      if (retval != NULL
		  && t != retval)
		{
		  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
		  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
		  return NULL_TREE;
		}
	      retval = t;

	      if (!sop)
		used_strict_overflow = 0;
	      else if (used_strict_overflow < 0)
		used_strict_overflow = 1;
	    }
	}

      if (retval)
	{
	  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
	  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
	  if (used_strict_overflow > 0)
	    *strict_overflow_p = true;
	  return retval;
	}
    }

  /* None of the equivalent ranges are useful in computing this
     comparison.  */
  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
  return NULL_TREE;
}
5608 | ||
6b99f156 JH |
5609 | /* Helper function for vrp_evaluate_conditional_warnv. */ |
5610 | ||
5611 | static tree | |
5612 | vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code, | |
5613 | tree op0, tree op1, | |
5614 | bool * strict_overflow_p) | |
5615 | { | |
5616 | value_range_t *vr0, *vr1; | |
5617 | ||
5618 | vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL; | |
5619 | vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL; | |
5620 | ||
5621 | if (vr0 && vr1) | |
5622 | return compare_ranges (code, vr0, vr1, strict_overflow_p); | |
5623 | else if (vr0 && vr1 == NULL) | |
5624 | return compare_range_with_value (code, vr0, op1, strict_overflow_p); | |
5625 | else if (vr0 == NULL && vr1) | |
5626 | return (compare_range_with_value | |
5627 | (swap_tree_comparison (code), vr1, op0, strict_overflow_p)); | |
5628 | return NULL; | |
5629 | } | |
5630 | ||
2d3cd5d5 RAE |
5631 | /* Helper function for vrp_evaluate_conditional_warnv. */ |
5632 | ||
5633 | static tree | |
5634 | vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0, | |
5635 | tree op1, bool use_equiv_p, | |
6b99f156 | 5636 | bool *strict_overflow_p, bool *only_ranges) |
2d3cd5d5 | 5637 | { |
6b99f156 JH |
5638 | tree ret; |
5639 | if (only_ranges) | |
5640 | *only_ranges = true; | |
5641 | ||
2d3cd5d5 RAE |
5642 | /* We only deal with integral and pointer types. */ |
5643 | if (!INTEGRAL_TYPE_P (TREE_TYPE (op0)) | |
5644 | && !POINTER_TYPE_P (TREE_TYPE (op0))) | |
5645 | return NULL_TREE; | |
5646 | ||
5647 | if (use_equiv_p) | |
5648 | { | |
6b99f156 JH |
5649 | if (only_ranges |
5650 | && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges | |
5651 | (code, op0, op1, strict_overflow_p))) | |
5652 | return ret; | |
5653 | *only_ranges = false; | |
2d3cd5d5 | 5654 | if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME) |
726a989a | 5655 | return compare_names (code, op0, op1, strict_overflow_p); |
2d3cd5d5 | 5656 | else if (TREE_CODE (op0) == SSA_NAME) |
726a989a | 5657 | return compare_name_with_value (code, op0, op1, strict_overflow_p); |
2d3cd5d5 RAE |
5658 | else if (TREE_CODE (op1) == SSA_NAME) |
5659 | return (compare_name_with_value | |
726a989a | 5660 | (swap_tree_comparison (code), op1, op0, strict_overflow_p)); |
2d3cd5d5 RAE |
5661 | } |
5662 | else | |
6b99f156 JH |
5663 | return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1, |
5664 | strict_overflow_p); | |
2d3cd5d5 RAE |
5665 | return NULL_TREE; |
5666 | } | |
227858d1 | 5667 | |
/* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
   information.  Return NULL if the conditional can not be evaluated.
   The ranges of all the names equivalent with the operands in COND
   will be used when trying to compute the value.  If the result is
   based on undefined signed overflow, issue a warning if
   appropriate.  */

tree
vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
{
  bool sop;
  tree ret;
  bool only_ranges;

  /* Fold the predicate, consulting equivalence sets as well as plain
     ranges.  ONLY_RANGES records whether the ranges alone sufficed;
     SOP records reliance on undefined signed overflow.  */
  sop = false;
  ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
  						 &only_ranges);

  if (ret && sop)
    {
      enum warn_strict_overflow_code wc;
      const char* warnmsg;

      /* Pick the warning category: folding all the way to a constant
	 is more aggressive than merely simplifying the comparison.  */
      if (is_gimple_min_invariant (ret))
	{
	  wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
	  warnmsg = G_("assuming signed overflow does not occur when "
		       "simplifying conditional to constant");
	}
      else
	{
	  wc = WARN_STRICT_OVERFLOW_COMPARISON;
	  warnmsg = G_("assuming signed overflow does not occur when "
		       "simplifying conditional");
	}

      if (issue_strict_overflow_warning (wc))
	{
	  location_t location;

	  /* Fall back to the global input location when the statement
	     carries none.  */
	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);
	  warning (OPT_Wstrict_overflow, "%H%s", &location, warnmsg);
	}
    }

  /* Only warn about type limits when the fold came from ranges alone;
     equivalence-based folds do not fit the -Wtype-limits wording.  */
  if (warn_type_limits
      && ret && only_ranges
      && TREE_CODE_CLASS (code) == tcc_comparison
      && TREE_CODE (op0) == SSA_NAME)
    {
      /* If the comparison is being folded and the operand on the LHS
	 is being compared against a constant value that is outside of
	 the natural range of OP0's type, then the predicate will
	 always fold regardless of the value of OP0.  If -Wtype-limits
	 was specified, emit a warning.  */
      const char *warnmsg = NULL;
      tree type = TREE_TYPE (op0);
      value_range_t *vr0 = get_value_range (op0);

      /* OP0 must cover its type's full [min, max] span — i.e. the
	 range gave no information — for the fold to be attributable
	 purely to the limited range of the data type.  */
      if (vr0->type != VR_VARYING
	  && INTEGRAL_TYPE_P (type)
	  && vrp_val_is_min (vr0->min)
	  && vrp_val_is_max (vr0->max)
	  && is_gimple_min_invariant (op1))
	{
	  if (integer_zerop (ret))
	    warnmsg = G_("comparison always false due to limited range of "
			 "data type");
	  else
	    warnmsg = G_("comparison always true due to limited range of "
			 "data type");
	}

      if (warnmsg)
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);

	  warning (OPT_Wtype_limits, "%H%s", &location, warnmsg);
	}
    }

  return ret;
}
5759 | ||
/* Visit conditional statement STMT.  If we can determine which edge
   will be taken out of STMT's basic block, record it in
   *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
{
  tree val;
  bool sop;

  *taken_edge_p = NULL;

  /* Debug dump: show the predicate and the currently known range of
     every SSA name it uses.  */
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      tree use;
      ssa_op_iter i;

      fprintf (dump_file, "\nVisiting conditional with predicate: ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
      fprintf (dump_file, "\nWith known ranges\n");

      FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
	{
	  fprintf (dump_file, "\t");
	  print_generic_expr (dump_file, use, 0);
	  fprintf (dump_file, ": ");
	  dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
	}

      fprintf (dump_file, "\n");
    }

  /* Compute the value of the predicate COND by checking the known
     ranges of each of its operands.

     Note that we cannot evaluate all the equivalent ranges here
     because those ranges may not yet be final and with the current
     propagation strategy, we cannot determine when the value ranges
     of the names in the equivalence set have changed.

     For instance, given the following code fragment

        i_5 = PHI <8, i_13>
	...
     	i_14 = ASSERT_EXPR <i_5, i_5 != 0>
	if (i_14 == 1)
	  ...

     Assume that on the first visit to i_14, i_5 has the temporary
     range [8, 8] because the second argument to the PHI function is
     not yet executable.  We derive the range ~[0, 0] for i_14 and the
     equivalence set { i_5 }.  So, when we visit 'if (i_14 == 1)' for
     the first time, since i_14 is equivalent to the range [8, 8], we
     determine that the predicate is always false.

     On the next round of propagation, i_13 is determined to be
     VARYING, which causes i_5 to drop down to VARYING.  So, another
     visit to i_14 is scheduled.  In this second visit, we compute the
     exact same range and equivalence set for i_14, namely ~[0, 0] and
     { i_5 }.  But we did not have the previous range for i_5
     registered, so vrp_visit_assignment thinks that the range for
     i_14 has not changed.  Therefore, the predicate 'if (i_14 == 1)'
     is not visited again, which stops propagation from visiting
     statements in the THEN clause of that if().

     To properly fix this we would need to keep the previous range
     value for the names in the equivalence set.  This way we would've
     discovered that from one visit to the other i_5 changed from
     range [8, 8] to VR_VARYING.

     However, fixing this apparent limitation may not be worth the
     additional checking.  Testing on several code bases (GCC, DLV,
     MICO, TRAMP3D and SPEC2000) showed that doing this results in
     4 more predicates folded in SPEC.  */
  sop = false;

  /* use_equiv_p is false here for the reasons explained above;
     only_ranges is NULL because the distinction is not needed.  */
  val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
						 gimple_cond_lhs (stmt),
						 gimple_cond_rhs (stmt),
						 false, &sop, NULL);
  if (val)
    {
      if (!sop)
	*taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
      else
	{
	  /* The fold relied on undefined signed overflow — discard it
	     rather than steer the propagator down a possibly wrong
	     edge.  */
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "\nIgnoring predicate evaluation because "
		     "it assumes that signed overflow is undefined");
	  val = NULL_TREE;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nPredicate evaluates to: ");
      if (val == NULL_TREE)
	fprintf (dump_file, "DON'T KNOW\n");
      else
	print_generic_stmt (dump_file, val, 0);
    }

  return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
}
5867 | ||
b7d8d447 RAE |
5868 | /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL |
5869 | that includes the value VAL. The search is restricted to the range | |
726a989a | 5870 | [START_IDX, n - 1] where n is the size of VEC. |
0bca51f0 | 5871 | |
b7d8d447 RAE |
5872 | If there is a CASE_LABEL for VAL, its index is placed in IDX and true is |
5873 | returned. | |
5874 | ||
92ef7fb1 | 5875 | If there is no CASE_LABEL for VAL and there is one that is larger than VAL, |
b7d8d447 RAE |
5876 | it is placed in IDX and false is returned. |
5877 | ||
726a989a | 5878 | If VAL is larger than any CASE_LABEL, n is placed on IDX and false is |
b7d8d447 | 5879 | returned. */ |
8aea0bf0 RG |
5880 | |
5881 | static bool | |
726a989a | 5882 | find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx) |
8aea0bf0 | 5883 | { |
726a989a | 5884 | size_t n = gimple_switch_num_labels (stmt); |
b7d8d447 RAE |
5885 | size_t low, high; |
5886 | ||
5887 | /* Find case label for minimum of the value range or the next one. | |
5888 | At each iteration we are searching in [low, high - 1]. */ | |
8aea0bf0 | 5889 | |
726a989a | 5890 | for (low = start_idx, high = n; high != low; ) |
8aea0bf0 RG |
5891 | { |
5892 | tree t; | |
5893 | int cmp; | |
726a989a | 5894 | /* Note that i != high, so we never ask for n. */ |
b7d8d447 | 5895 | size_t i = (high + low) / 2; |
726a989a | 5896 | t = gimple_switch_label (stmt, i); |
8aea0bf0 RG |
5897 | |
5898 | /* Cache the result of comparing CASE_LOW and val. */ | |
5899 | cmp = tree_int_cst_compare (CASE_LOW (t), val); | |
5900 | ||
b7d8d447 RAE |
5901 | if (cmp == 0) |
5902 | { | |
5903 | /* Ranges cannot be empty. */ | |
5904 | *idx = i; | |
5905 | return true; | |
5906 | } | |
5907 | else if (cmp > 0) | |
8aea0bf0 RG |
5908 | high = i; |
5909 | else | |
b7d8d447 RAE |
5910 | { |
5911 | low = i + 1; | |
5912 | if (CASE_HIGH (t) != NULL | |
5913 | && tree_int_cst_compare (CASE_HIGH (t), val) >= 0) | |
8aea0bf0 RG |
5914 | { |
5915 | *idx = i; | |
5916 | return true; | |
5917 | } | |
5918 | } | |
5919 | } | |
5920 | ||
b7d8d447 | 5921 | *idx = high; |
8aea0bf0 RG |
5922 | return false; |
5923 | } | |
5924 | ||
/* Searches the case label vector VEC for the range of CASE_LABELs that is used
   for values between MIN and MAX. The first index is placed in MIN_IDX. The
   last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
   then MAX_IDX < MIN_IDX.
   Returns true if the default label is not needed. */

static bool
find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
		       size_t *max_idx)
{
  size_t i, j;
  /* Look up MIN starting at label 1 (label 0 is the default), then MAX
     starting where MIN landed; a false return means the exact value has
     no label and the default may be reachable.  */
  bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
  bool max_take_default = !find_case_label_index (stmt, i, max, &j);

  if (i == j
      && min_take_default
      && max_take_default)
    {
      /* Only the default case label reached.
         Return an empty range. */
      *min_idx = 1;
      *max_idx = 0;
      return false;
    }
  else
    {
      bool take_default = min_take_default || max_take_default;
      tree low, high;
      size_t k;

      /* When MAX itself has no label, J points one past the last label
	 that can be reached — step back to it.  */
      if (max_take_default)
	j--;

      /* If the case label range is continuous, we do not need
	 the default case label.  Verify that.  */
      high = CASE_LOW (gimple_switch_label (stmt, i));
      if (CASE_HIGH (gimple_switch_label (stmt, i)))
	high = CASE_HIGH (gimple_switch_label (stmt, i));
      for (k = i + 1; k <= j; ++k)
	{
	  low = CASE_LOW (gimple_switch_label (stmt, k));
	  /* A gap exists unless this label starts exactly one past the
	     previous label's upper bound.  */
	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high, 0)))
	    {
	      take_default = true;
	      break;
	    }
	  high = low;
	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
	    high = CASE_HIGH (gimple_switch_label (stmt, k));
	}

      *min_idx = i;
      *max_idx = j;
      return !take_default;
    }
}
5981 | ||
/* Visit switch statement STMT.  If we can determine which edge
   will be taken out of STMT's basic block, record it in
   *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
{
  tree op, val;
  value_range_t *vr;
  size_t i = 0, j = 0, n;
  bool take_default;

  *taken_edge_p = NULL;
  op = gimple_switch_index (stmt);
  /* Only SSA name indices have recorded ranges to consult.  */
  if (TREE_CODE (op) != SSA_NAME)
    return SSA_PROP_VARYING;

  vr = get_value_range (op);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting switch expression with operand ");
      print_generic_expr (dump_file, op, 0);
      fprintf (dump_file, " with known range ");
      dump_value_range (dump_file, vr);
      fprintf (dump_file, "\n");
    }

  /* Anti-ranges and symbolic bounds cannot be matched against the
     constant case labels.  */
  if (vr->type != VR_RANGE
      || symbolic_range_p (vr))
    return SSA_PROP_VARYING;

  /* Find the single edge that is taken from the switch expression.  */
  n = gimple_switch_num_labels (stmt);

  /* [I, J] becomes the span of case labels reachable for values in
     [vr->min, vr->max]; TAKE_DEFAULT says the default label is also
     reachable.  */
  take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);

  /* Check if the range spans no CASE_LABEL. If so, we only reach the default
     label */
  if (j < i)
    {
      gcc_assert (take_default);
      val = gimple_switch_default_label (stmt);
    }
  else
    {
      /* Check if labels with index i to j and maybe the default label
	 are all reaching the same label.  */

      val = gimple_switch_label (stmt, i);
      if (take_default
	  && CASE_LABEL (gimple_switch_default_label (stmt))
	  != CASE_LABEL (val))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "  not a single destination for this "
		     "range\n");
	  return SSA_PROP_VARYING;
	}
      for (++i; i <= j; ++i)
	{
	  if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "  not a single destination for this "
			 "range\n");
	      return SSA_PROP_VARYING;
	    }
	}
    }

  /* Every reachable case label jumps to the same block — record that
     single outgoing edge.  */
  *taken_edge_p = find_edge (gimple_bb (stmt),
			     label_to_block (CASE_LABEL (val)));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "  will take edge to ");
      print_generic_stmt (dump_file, CASE_LABEL (val), 0);
    }

  return SSA_PROP_INTERESTING;
}
6064 | ||
6065 | ||
0bca51f0 DN |
6066 | /* Evaluate statement STMT. If the statement produces a useful range, |
6067 | return SSA_PROP_INTERESTING and record the SSA name with the | |
6068 | interesting range into *OUTPUT_P. | |
6069 | ||
6070 | If STMT is a conditional branch and we can determine its truth | |
6071 | value, the taken edge is recorded in *TAKEN_EDGE_P. | |
6072 | ||
6073 | If STMT produces a varying value, return SSA_PROP_VARYING. */ | |
6074 | ||
6075 | static enum ssa_prop_result | |
726a989a | 6076 | vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p) |
0bca51f0 DN |
6077 | { |
6078 | tree def; | |
6079 | ssa_op_iter iter; | |
0bca51f0 DN |
6080 | |
6081 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
6082 | { | |
6083 | fprintf (dump_file, "\nVisiting statement:\n"); | |
726a989a | 6084 | print_gimple_stmt (dump_file, stmt, 0, dump_flags); |
0bca51f0 DN |
6085 | fprintf (dump_file, "\n"); |
6086 | } | |
6087 | ||
726a989a | 6088 | if (is_gimple_assign (stmt) || is_gimple_call (stmt)) |
2bbec6d9 | 6089 | { |
2bbec6d9 JL |
6090 | /* In general, assignments with virtual operands are not useful |
6091 | for deriving ranges, with the obvious exception of calls to | |
6092 | builtin functions. */ | |
726a989a RB |
6093 | |
6094 | if ((is_gimple_call (stmt) | |
6095 | && gimple_call_fndecl (stmt) != NULL_TREE | |
6096 | && DECL_IS_BUILTIN (gimple_call_fndecl (stmt))) | |
5006671f | 6097 | || !gimple_vuse (stmt)) |
726a989a | 6098 | return vrp_visit_assignment_or_call (stmt, output_p); |
2bbec6d9 | 6099 | } |
726a989a | 6100 | else if (gimple_code (stmt) == GIMPLE_COND) |
0bca51f0 | 6101 | return vrp_visit_cond_stmt (stmt, taken_edge_p); |
726a989a | 6102 | else if (gimple_code (stmt) == GIMPLE_SWITCH) |
8aea0bf0 | 6103 | return vrp_visit_switch_stmt (stmt, taken_edge_p); |
0bca51f0 DN |
6104 | |
6105 | /* All other statements produce nothing of interest for VRP, so mark | |
6106 | their outputs varying and prevent further simulation. */ | |
6107 | FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF) | |
b565d777 | 6108 | set_value_range_to_varying (get_value_range (def)); |
0bca51f0 DN |
6109 | |
6110 | return SSA_PROP_VARYING; | |
6111 | } | |
6112 | ||
6113 | ||
/* Meet operation for value ranges.  Given two value ranges VR0 and
   VR1, store in VR0 a range that contains both VR0 and VR1.  This
   may not be the smallest possible such range.  */

static void
vrp_meet (value_range_t *vr0, value_range_t *vr1)
{
  /* UNDEFINED is the identity of the meet: the other operand wins.  */
  if (vr0->type == VR_UNDEFINED)
    {
      copy_value_range (vr0, vr1);
      return;
    }

  if (vr1->type == VR_UNDEFINED)
    {
      /* Nothing to do.  VR0 already has the resulting range.  */
      return;
    }

  /* VARYING absorbs everything.  */
  if (vr0->type == VR_VARYING)
    {
      /* Nothing to do.  VR0 already has the resulting range.  */
      return;
    }

  if (vr1->type == VR_VARYING)
    {
      set_value_range_to_varying (vr0);
      return;
    }

  if (vr0->type == VR_RANGE && vr1->type == VR_RANGE)
    {
      int cmp;
      tree min, max;

      /* Compute the convex hull of the ranges.  The lower limit of
         the new range is the minimum of the two ranges.  If they
	 cannot be compared, then give up.  */
      cmp = compare_values (vr0->min, vr1->min);
      if (cmp == 0 || cmp == 1)
        min = vr1->min;
      else if (cmp == -1)
        min = vr0->min;
      else
	goto give_up;

      /* Similarly, the upper limit of the new range is the maximum
         of the two ranges.  If they cannot be compared, then
	 give up.  */
      cmp = compare_values (vr0->max, vr1->max);
      if (cmp == 0 || cmp == -1)
        max = vr1->max;
      else if (cmp == 1)
        max = vr0->max;
      else
	goto give_up;

      /* Check for useless ranges.  A hull spanning the whole type
	 (or its overflow infinities) carries no information.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (min))
	  && ((vrp_val_is_min (min) || is_overflow_infinity (min))
	      && (vrp_val_is_max (max) || is_overflow_infinity (max))))
	goto give_up;

      /* The resulting set of equivalences is the intersection of
	 the two sets.  */
      if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
        bitmap_and_into (vr0->equiv, vr1->equiv);
      else if (vr0->equiv && !vr1->equiv)
        bitmap_clear (vr0->equiv);

      set_value_range (vr0, vr0->type, min, max, vr0->equiv);
    }
  else if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
    {
      /* Two anti-ranges meet only if their complements intersect.
         Only handle the case of identical ranges.  */
      if (compare_values (vr0->min, vr1->min) == 0
	  && compare_values (vr0->max, vr1->max) == 0
	  && compare_values (vr0->min, vr0->max) == 0)
	{
	  /* The resulting set of equivalences is the intersection of
	     the two sets.  */
	  if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
	    bitmap_and_into (vr0->equiv, vr1->equiv);
	  else if (vr0->equiv && !vr1->equiv)
	    bitmap_clear (vr0->equiv);
	}
      else
	goto give_up;
    }
  else if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
    {
      /* For a numeric range [VAL1, VAL2] and an anti-range ~[VAL3, VAL4],
         only handle the case where the ranges have an empty intersection.
	 The result of the meet operation is the anti-range.  */
      if (!symbolic_range_p (vr0)
	  && !symbolic_range_p (vr1)
	  && !value_ranges_intersect_p (vr0, vr1))
	{
	  /* Copy most of VR1 into VR0.  Don't copy VR1's equivalence
	     set.  We need to compute the intersection of the two
	     equivalence sets.  */
	  if (vr1->type == VR_ANTI_RANGE)
	    set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr0->equiv);

	  /* The resulting set of equivalences is the intersection of
	     the two sets.  */
	  if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
	    bitmap_and_into (vr0->equiv, vr1->equiv);
	  else if (vr0->equiv && !vr1->equiv)
	    bitmap_clear (vr0->equiv);
	}
      else
	goto give_up;
    }
  else
    gcc_unreachable ();

  return;

give_up:
  /* Failed to find an efficient meet.  Before giving up and setting
     the result to VARYING, see if we can at least derive a useful
     anti-range.  FIXME, all this nonsense about distinguishing
     anti-ranges from ranges is necessary because of the odd
     semantics of range_includes_zero_p and friends.  */
  if (!symbolic_range_p (vr0)
      && ((vr0->type == VR_RANGE && !range_includes_zero_p (vr0))
	  || (vr0->type == VR_ANTI_RANGE && range_includes_zero_p (vr0)))
      && !symbolic_range_p (vr1)
      && ((vr1->type == VR_RANGE && !range_includes_zero_p (vr1))
	  || (vr1->type == VR_ANTI_RANGE && range_includes_zero_p (vr1))))
    {
      /* Both inputs exclude zero, so at least ~[0, 0] is sound.  */
      set_value_range_to_nonnull (vr0, TREE_TYPE (vr0->min));

      /* Since this meet operation did not result from the meeting of
	 two equivalent names, VR0 cannot have any equivalences.  */
      if (vr0->equiv)
	bitmap_clear (vr0->equiv);
    }
  else
    set_value_range_to_varying (vr0);
}
6258 | ||
227858d1 | 6259 | |
0bca51f0 DN |
6260 | /* Visit all arguments for PHI node PHI that flow through executable |
6261 | edges. If a valid value range can be derived from all the incoming | |
6262 | value ranges, set a new range for the LHS of PHI. */ | |
6263 | ||
6264 | static enum ssa_prop_result | |
726a989a | 6265 | vrp_visit_phi_node (gimple phi) |
0bca51f0 | 6266 | { |
726a989a | 6267 | size_t i; |
0bca51f0 | 6268 | tree lhs = PHI_RESULT (phi); |
227858d1 DN |
6269 | value_range_t *lhs_vr = get_value_range (lhs); |
6270 | value_range_t vr_result = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; | |
fc6827fe | 6271 | int edges, old_edges; |
227858d1 DN |
6272 | |
6273 | copy_value_range (&vr_result, lhs_vr); | |
0bca51f0 DN |
6274 | |
6275 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
6276 | { | |
6277 | fprintf (dump_file, "\nVisiting PHI node: "); | |
726a989a | 6278 | print_gimple_stmt (dump_file, phi, 0, dump_flags); |
0bca51f0 DN |
6279 | } |
6280 | ||
fc6827fe | 6281 | edges = 0; |
726a989a | 6282 | for (i = 0; i < gimple_phi_num_args (phi); i++) |
0bca51f0 | 6283 | { |
726a989a | 6284 | edge e = gimple_phi_arg_edge (phi, i); |
0bca51f0 DN |
6285 | |
6286 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
6287 | { | |
6288 | fprintf (dump_file, | |
6289 | "\n Argument #%d (%d -> %d %sexecutable)\n", | |
726a989a | 6290 | (int) i, e->src->index, e->dest->index, |
0bca51f0 DN |
6291 | (e->flags & EDGE_EXECUTABLE) ? "" : "not "); |
6292 | } | |
6293 | ||
6294 | if (e->flags & EDGE_EXECUTABLE) | |
6295 | { | |
6296 | tree arg = PHI_ARG_DEF (phi, i); | |
227858d1 | 6297 | value_range_t vr_arg; |
0bca51f0 | 6298 | |
fc6827fe ILT |
6299 | ++edges; |
6300 | ||
0bca51f0 | 6301 | if (TREE_CODE (arg) == SSA_NAME) |
31ab1cc9 RG |
6302 | { |
6303 | vr_arg = *(get_value_range (arg)); | |
31ab1cc9 | 6304 | } |
0bca51f0 DN |
6305 | else |
6306 | { | |
8cf781f0 ILT |
6307 | if (is_overflow_infinity (arg)) |
6308 | { | |
6309 | arg = copy_node (arg); | |
6310 | TREE_OVERFLOW (arg) = 0; | |
6311 | } | |
6312 | ||
0bca51f0 DN |
6313 | vr_arg.type = VR_RANGE; |
6314 | vr_arg.min = arg; | |
6315 | vr_arg.max = arg; | |
227858d1 | 6316 | vr_arg.equiv = NULL; |
0bca51f0 DN |
6317 | } |
6318 | ||
6319 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
6320 | { | |
6321 | fprintf (dump_file, "\t"); | |
6322 | print_generic_expr (dump_file, arg, dump_flags); | |
6323 | fprintf (dump_file, "\n\tValue: "); | |
6324 | dump_value_range (dump_file, &vr_arg); | |
6325 | fprintf (dump_file, "\n"); | |
6326 | } | |
6327 | ||
6328 | vrp_meet (&vr_result, &vr_arg); | |
6329 | ||
6330 | if (vr_result.type == VR_VARYING) | |
6331 | break; | |
6332 | } | |
6333 | } | |
6334 | ||
6335 | if (vr_result.type == VR_VARYING) | |
227858d1 | 6336 | goto varying; |
0bca51f0 | 6337 | |
fc6827fe ILT |
6338 | old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)]; |
6339 | vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges; | |
6340 | ||
0bca51f0 DN |
6341 | /* To prevent infinite iterations in the algorithm, derive ranges |
6342 | when the new value is slightly bigger or smaller than the | |
fc6827fe ILT |
6343 | previous one. We don't do this if we have seen a new executable |
6344 | edge; this helps us avoid an overflow infinity for conditionals | |
6345 | which are not in a loop. */ | |
31ab1cc9 | 6346 | if (lhs_vr->type == VR_RANGE && vr_result.type == VR_RANGE |
fc6827fe | 6347 | && edges <= old_edges) |
0bca51f0 DN |
6348 | { |
6349 | if (!POINTER_TYPE_P (TREE_TYPE (lhs))) | |
6350 | { | |
6351 | int cmp_min = compare_values (lhs_vr->min, vr_result.min); | |
6352 | int cmp_max = compare_values (lhs_vr->max, vr_result.max); | |
6353 | ||
6354 | /* If the new minimum is smaller or larger than the previous | |
6355 | one, go all the way to -INF. In the first case, to avoid | |
6356 | iterating millions of times to reach -INF, and in the | |
6357 | other case to avoid infinite bouncing between different | |
6358 | minimums. */ | |
6359 | if (cmp_min > 0 || cmp_min < 0) | |
12df8a7e | 6360 | { |
02511194 PB |
6361 | /* If we will end up with a (-INF, +INF) range, set it to |
6362 | VARYING. Same if the previous max value was invalid for | |
6363 | the type and we'd end up with vr_result.min > vr_result.max. */ | |
6364 | if (vrp_val_is_max (vr_result.max) | |
6365 | || compare_values (TYPE_MIN_VALUE (TREE_TYPE (vr_result.min)), | |
6366 | vr_result.max) > 0) | |
12df8a7e ILT |
6367 | goto varying; |
6368 | ||
9a46cc16 ILT |
6369 | if (!needs_overflow_infinity (TREE_TYPE (vr_result.min)) |
6370 | || !vrp_var_may_overflow (lhs, phi)) | |
12df8a7e ILT |
6371 | vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min)); |
6372 | else if (supports_overflow_infinity (TREE_TYPE (vr_result.min))) | |
6373 | vr_result.min = | |
6374 | negative_overflow_infinity (TREE_TYPE (vr_result.min)); | |
6375 | else | |
6376 | goto varying; | |
6377 | } | |
0bca51f0 DN |
6378 | |
6379 | /* Similarly, if the new maximum is smaller or larger than | |
6380 | the previous one, go all the way to +INF. */ | |
6381 | if (cmp_max < 0 || cmp_max > 0) | |
12df8a7e | 6382 | { |
02511194 PB |
6383 | /* If we will end up with a (-INF, +INF) range, set it to |
6384 | VARYING. Same if the previous min value was invalid for | |
6385 | the type and we'd end up with vr_result.max < vr_result.min. */ | |
6386 | if (vrp_val_is_min (vr_result.min) | |
6387 | || compare_values (TYPE_MAX_VALUE (TREE_TYPE (vr_result.max)), | |
6388 | vr_result.min) < 0) | |
12df8a7e ILT |
6389 | goto varying; |
6390 | ||
9a46cc16 ILT |
6391 | if (!needs_overflow_infinity (TREE_TYPE (vr_result.max)) |
6392 | || !vrp_var_may_overflow (lhs, phi)) | |
12df8a7e ILT |
6393 | vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max)); |
6394 | else if (supports_overflow_infinity (TREE_TYPE (vr_result.max))) | |
6395 | vr_result.max = | |
6396 | positive_overflow_infinity (TREE_TYPE (vr_result.max)); | |
6397 | else | |
6398 | goto varying; | |
6399 | } | |
0bca51f0 DN |
6400 | } |
6401 | } | |
6402 | ||
6403 | /* If the new range is different than the previous value, keep | |
6404 | iterating. */ | |
227858d1 | 6405 | if (update_value_range (lhs, &vr_result)) |
0bca51f0 DN |
6406 | return SSA_PROP_INTERESTING; |
6407 | ||
6408 | /* Nothing changed, don't add outgoing edges. */ | |
6409 | return SSA_PROP_NOT_INTERESTING; | |
227858d1 DN |
6410 | |
6411 | /* No match found. Set the LHS to VARYING. */ | |
6412 | varying: | |
6413 | set_value_range_to_varying (lhs_vr); | |
6414 | return SSA_PROP_VARYING; | |
0bca51f0 DN |
6415 | } |
6416 | ||
/* Simplify boolean operations if the source is known
   to be already a boolean.

   GSI points at STMT, a GIMPLE_ASSIGN whose rhs code is one of
   EQ_EXPR, NE_EXPR, TRUTH_NOT_EXPR, TRUTH_AND_EXPR, TRUTH_OR_EXPR or
   TRUTH_XOR_EXPR (see the dispatcher simplify_stmt_using_ranges).
   Returns true and rewrites *GSI in place if a simplification to a
   bitwise operation (or an identity/NOP copy) was performed.  */
static bool
simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
  tree val = NULL;
  tree op0, op1;
  value_range_t *vr;
  /* Set by compare_range_with_value when the answer relies on the
     assumption that signed overflow is undefined; triggers the
     -Wstrict-overflow warning below.  */
  bool sop = false;
  bool need_conversion;

  op0 = gimple_assign_rhs1 (stmt);
  if (TYPE_PRECISION (TREE_TYPE (op0)) != 1)
    {
      /* OP0 is not a single-bit type, so prove via its value range
	 that it only takes the values 0 and 1.  */
      if (TREE_CODE (op0) != SSA_NAME)
	return false;
      vr = get_value_range (op0);

      val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
      if (!val || !integer_onep (val))
	return false;

      val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop);
      if (!val || !integer_onep (val))
	return false;
    }

  if (rhs_code == TRUTH_NOT_EXPR)
    {
      /* Canonicalize !X as X != 1 so only binary codes remain.  */
      rhs_code = NE_EXPR;
      op1 = build_int_cst (TREE_TYPE (op0), 1);
    }
  else
    {
      op1 = gimple_assign_rhs2 (stmt);

      /* Reduce number of cases to handle.  */
      if (is_gimple_min_invariant (op1))
	{
	  /* Exclude anything that should have been already folded.  */
	  if (rhs_code != EQ_EXPR
	      && rhs_code != NE_EXPR
	      && rhs_code != TRUTH_XOR_EXPR)
	    return false;

	  if (!integer_zerop (op1)
	      && !integer_onep (op1)
	      && !integer_all_onesp (op1))
	    return false;

	  /* Limit the number of cases we have to consider:
	     fold X == C into X != !C.  */
	  if (rhs_code == EQ_EXPR)
	    {
	      rhs_code = NE_EXPR;
	      op1 = fold_unary (TRUTH_NOT_EXPR, TREE_TYPE (op1), op1);
	    }
	}
      else
	{
	  /* Punt on A == B as there is no BIT_XNOR_EXPR.  */
	  if (rhs_code == EQ_EXPR)
	    return false;

	  /* As for OP0 above, a non-single-bit OP1 must be proved
	     boolean-valued via its range.  */
	  if (TYPE_PRECISION (TREE_TYPE (op1)) != 1)
	    {
	      vr = get_value_range (op1);
	      val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
	      if (!val || !integer_onep (val))
		return false;

	      val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop);
	      if (!val || !integer_onep (val))
		return false;
	    }
	}
    }

  /* The simplification depended on assuming signed overflow is
     undefined; tell the user if requested.  */
  if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
    {
      location_t location;

      if (!gimple_has_location (stmt))
	location = input_location;
      else
	location = gimple_location (stmt);

      if (rhs_code == TRUTH_AND_EXPR || rhs_code == TRUTH_OR_EXPR)
        warning_at (location, OPT_Wstrict_overflow,
	            _("assuming signed overflow does not occur when "
		      "simplifying && or || to & or |"));
      else
        warning_at (location, OPT_Wstrict_overflow,
	            _("assuming signed overflow does not occur when "
		      "simplifying ==, != or ! to identity or ^"));
    }

  need_conversion =
    !useless_type_conversion_p (TREE_TYPE (gimple_assign_lhs (stmt)),
			        TREE_TYPE (op0));

  /* Make sure to not sign-extend -1 as a boolean value.  */
  if (need_conversion
      && !TYPE_UNSIGNED (TREE_TYPE (op0))
      && TYPE_PRECISION (TREE_TYPE (op0)) == 1)
    return false;

  switch (rhs_code)
    {
    case TRUTH_AND_EXPR:
      rhs_code = BIT_AND_EXPR;
      break;
    case TRUTH_OR_EXPR:
      rhs_code = BIT_IOR_EXPR;
      break;
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      /* X != 0 and X ^ 0 are just X (possibly converted).  */
      if (integer_zerop (op1))
	{
	  gimple_assign_set_rhs_with_ops (gsi,
					  need_conversion ? NOP_EXPR : SSA_NAME,
					  op0, NULL);
	  update_stmt (gsi_stmt (*gsi));
	  return true;
	}

      rhs_code = BIT_XOR_EXPR;
      break;
    default:
      gcc_unreachable ();
    }

  /* For the binary bitwise forms we have no place to insert a
     conversion, so give up if one would be required.  */
  if (need_conversion)
    return false;

  gimple_assign_set_rhs_with_ops (gsi, rhs_code, op0, op1);
  update_stmt (gsi_stmt (*gsi));
  return true;
}
6556 | ||
/* Simplify a division or modulo operator to a right shift or
   bitwise and if the first operand is unsigned or is greater
   than zero and the second operand is an exact power of two.

   STMT is a GIMPLE_ASSIGN with code TRUNC_DIV_EXPR or TRUNC_MOD_EXPR
   whose second operand the caller has already checked with
   integer_pow2p.  Returns true if STMT was rewritten in place.  */

static bool
simplify_div_or_mod_using_ranges (gimple stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
  /* VAL ends up as integer_one_node exactly when OP0 is known
     non-negative, which licenses the transformation.  */
  tree val = NULL;
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);
  value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));

  if (TYPE_UNSIGNED (TREE_TYPE (op0)))
    {
      /* Unsigned values are trivially >= 0.  */
      val = integer_one_node;
    }
  else
    {
      bool sop = false;

      val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);

      /* The range comparison relied on undefined signed overflow;
	 warn if the user asked for it.  */
      if (val
	  && sop
	  && integer_onep (val)
	  && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);
	  warning (OPT_Wstrict_overflow,
		   ("%Hassuming signed overflow does not occur when "
		    "simplifying / or %% to >> or &"),
		   &location);
	}
    }

  if (val && integer_onep (val))
    {
      tree t;

      if (rhs_code == TRUNC_DIV_EXPR)
	{
	  /* X / 2**N  ->  X >> N.  */
	  t = build_int_cst (NULL_TREE, tree_log2 (op1));
	  gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
	  gimple_assign_set_rhs1 (stmt, op0);
	  gimple_assign_set_rhs2 (stmt, t);
	}
      else
	{
	  /* X % 2**N  ->  X & (2**N - 1).  */
	  t = build_int_cst (TREE_TYPE (op1), 1);
	  t = int_const_binop (MINUS_EXPR, op1, t, 0);
	  t = fold_convert (TREE_TYPE (op0), t);

	  gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
	  gimple_assign_set_rhs1 (stmt, op0);
	  gimple_assign_set_rhs2 (stmt, t);
	}

      update_stmt (stmt);
      return true;
    }

  return false;
}
a513fe88 | 6626 | |
/* If the operand to an ABS_EXPR is >= 0, then eliminate the
   ABS_EXPR.  If the operand is <= 0, then simplify the
   ABS_EXPR into a NEGATE_EXPR.

   STMT is a GIMPLE_ASSIGN whose rhs is an ABS_EXPR of an SSA name of
   integral type (checked by the caller).  Returns true if STMT was
   rewritten in place.  */

static bool
simplify_abs_using_ranges (gimple stmt)
{
  /* VAL encodes the outcome: integer_one_node means OP <= 0 (negate),
     integer_zero_node means OP >= 0 (drop the ABS).  */
  tree val = NULL;
  tree op = gimple_assign_rhs1 (stmt);
  tree type = TREE_TYPE (op);
  value_range_t *vr = get_value_range (op);

  if (TYPE_UNSIGNED (type))
    {
      /* NOTE(review): VAL set here is never consumed -- the rewrite
	 below sits inside the `else if (vr)' arm, so unsigned ABS is
	 left untouched.  Possibly intentional (ABS of an unsigned
	 operand should not normally appear in gimple); confirm.  */
      val = integer_zero_node;
    }
  else if (vr)
    {
      bool sop = false;

      /* First try to prove OP <= 0.  */
      val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
      if (!val)
	{
	  /* Inconclusive; try OP >= 0 instead and invert the answer
	     so VAL keeps the same encoding as the LE_EXPR query.  */
	  sop = false;
	  val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
					  &sop);

	  if (val)
	    {
	      if (integer_zerop (val))
		val = integer_one_node;
	      else if (integer_onep (val))
		val = integer_zero_node;
	    }
	}

      if (val
	  && (integer_onep (val) || integer_zerop (val)))
	{
	  /* The sign was deduced assuming undefined signed overflow;
	     warn if requested.  */
	  if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
	    {
	      location_t location;

	      if (!gimple_has_location (stmt))
		location = input_location;
	      else
		location = gimple_location (stmt);
	      warning (OPT_Wstrict_overflow,
		       ("%Hassuming signed overflow does not occur when "
			"simplifying abs (X) to X or -X"),
		       &location);
	    }

	  gimple_assign_set_rhs1 (stmt, op);
	  if (integer_onep (val))
	    gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
	  else
	    gimple_assign_set_rhs_code (stmt, SSA_NAME);
	  update_stmt (stmt);
	  return true;
	}
    }

  return false;
}
6692 | ||
d579f20b JL |
6693 | /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has |
6694 | a known value range VR. | |
6695 | ||
6696 | If there is one and only one value which will satisfy the | |
6697 | conditional, then return that value. Else return NULL. */ | |
6698 | ||
6699 | static tree | |
6700 | test_for_singularity (enum tree_code cond_code, tree op0, | |
6701 | tree op1, value_range_t *vr) | |
6702 | { | |
6703 | tree min = NULL; | |
6704 | tree max = NULL; | |
6705 | ||
6706 | /* Extract minimum/maximum values which satisfy the | |
6707 | the conditional as it was written. */ | |
6708 | if (cond_code == LE_EXPR || cond_code == LT_EXPR) | |
6709 | { | |
12df8a7e ILT |
6710 | /* This should not be negative infinity; there is no overflow |
6711 | here. */ | |
d579f20b JL |
6712 | min = TYPE_MIN_VALUE (TREE_TYPE (op0)); |
6713 | ||
6714 | max = op1; | |
12df8a7e | 6715 | if (cond_code == LT_EXPR && !is_overflow_infinity (max)) |
d579f20b JL |
6716 | { |
6717 | tree one = build_int_cst (TREE_TYPE (op0), 1); | |
a5ad7269 | 6718 | max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one); |
3fe5bcaf ILT |
6719 | if (EXPR_P (max)) |
6720 | TREE_NO_WARNING (max) = 1; | |
d579f20b JL |
6721 | } |
6722 | } | |
6723 | else if (cond_code == GE_EXPR || cond_code == GT_EXPR) | |
6724 | { | |
12df8a7e ILT |
6725 | /* This should not be positive infinity; there is no overflow |
6726 | here. */ | |
d579f20b JL |
6727 | max = TYPE_MAX_VALUE (TREE_TYPE (op0)); |
6728 | ||
6729 | min = op1; | |
12df8a7e | 6730 | if (cond_code == GT_EXPR && !is_overflow_infinity (min)) |
d579f20b JL |
6731 | { |
6732 | tree one = build_int_cst (TREE_TYPE (op0), 1); | |
f9fe7aed | 6733 | min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one); |
3fe5bcaf ILT |
6734 | if (EXPR_P (min)) |
6735 | TREE_NO_WARNING (min) = 1; | |
d579f20b JL |
6736 | } |
6737 | } | |
6738 | ||
6739 | /* Now refine the minimum and maximum values using any | |
6740 | value range information we have for op0. */ | |
6741 | if (min && max) | |
6742 | { | |
6743 | if (compare_values (vr->min, min) == -1) | |
6744 | min = min; | |
6745 | else | |
6746 | min = vr->min; | |
6747 | if (compare_values (vr->max, max) == 1) | |
6748 | max = max; | |
6749 | else | |
6750 | max = vr->max; | |
6751 | ||
f9fe7aed JL |
6752 | /* If the new min/max values have converged to a single value, |
6753 | then there is only one value which can satisfy the condition, | |
6754 | return that value. */ | |
6755 | if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min)) | |
d579f20b JL |
6756 | return min; |
6757 | } | |
6758 | return NULL; | |
6759 | } | |
6760 | ||
1a557723 JL |
6761 | /* Simplify a conditional using a relational operator to an equality |
6762 | test if the range information indicates only one value can satisfy | |
6763 | the original conditional. */ | |
6764 | ||
30821654 | 6765 | static bool |
726a989a | 6766 | simplify_cond_using_ranges (gimple stmt) |
1a557723 | 6767 | { |
726a989a RB |
6768 | tree op0 = gimple_cond_lhs (stmt); |
6769 | tree op1 = gimple_cond_rhs (stmt); | |
6770 | enum tree_code cond_code = gimple_cond_code (stmt); | |
1a557723 JL |
6771 | |
6772 | if (cond_code != NE_EXPR | |
6773 | && cond_code != EQ_EXPR | |
6774 | && TREE_CODE (op0) == SSA_NAME | |
6775 | && INTEGRAL_TYPE_P (TREE_TYPE (op0)) | |
6776 | && is_gimple_min_invariant (op1)) | |
6777 | { | |
6778 | value_range_t *vr = get_value_range (op0); | |
6779 | ||
6780 | /* If we have range information for OP0, then we might be | |
6781 | able to simplify this conditional. */ | |
6782 | if (vr->type == VR_RANGE) | |
6783 | { | |
82d6e6fc | 6784 | tree new_tree = test_for_singularity (cond_code, op0, op1, vr); |
1a557723 | 6785 | |
82d6e6fc | 6786 | if (new_tree) |
1a557723 | 6787 | { |
d579f20b | 6788 | if (dump_file) |
1a557723 | 6789 | { |
d579f20b | 6790 | fprintf (dump_file, "Simplified relational "); |
726a989a | 6791 | print_gimple_stmt (dump_file, stmt, 0, 0); |
d579f20b | 6792 | fprintf (dump_file, " into "); |
a513fe88 JL |
6793 | } |
6794 | ||
726a989a RB |
6795 | gimple_cond_set_code (stmt, EQ_EXPR); |
6796 | gimple_cond_set_lhs (stmt, op0); | |
82d6e6fc | 6797 | gimple_cond_set_rhs (stmt, new_tree); |
726a989a | 6798 | |
d579f20b JL |
6799 | update_stmt (stmt); |
6800 | ||
6801 | if (dump_file) | |
a513fe88 | 6802 | { |
726a989a | 6803 | print_gimple_stmt (dump_file, stmt, 0, 0); |
d579f20b | 6804 | fprintf (dump_file, "\n"); |
a513fe88 | 6805 | } |
d579f20b | 6806 | |
30821654 | 6807 | return true; |
a513fe88 JL |
6808 | } |
6809 | ||
d579f20b JL |
6810 | /* Try again after inverting the condition. We only deal |
6811 | with integral types here, so no need to worry about | |
6812 | issues with inverting FP comparisons. */ | |
6813 | cond_code = invert_tree_comparison (cond_code, false); | |
82d6e6fc | 6814 | new_tree = test_for_singularity (cond_code, op0, op1, vr); |
d579f20b | 6815 | |
82d6e6fc | 6816 | if (new_tree) |
1a557723 | 6817 | { |
d579f20b | 6818 | if (dump_file) |
1a557723 | 6819 | { |
d579f20b | 6820 | fprintf (dump_file, "Simplified relational "); |
726a989a | 6821 | print_gimple_stmt (dump_file, stmt, 0, 0); |
d579f20b | 6822 | fprintf (dump_file, " into "); |
1a557723 | 6823 | } |
d579f20b | 6824 | |
726a989a RB |
6825 | gimple_cond_set_code (stmt, NE_EXPR); |
6826 | gimple_cond_set_lhs (stmt, op0); | |
82d6e6fc | 6827 | gimple_cond_set_rhs (stmt, new_tree); |
726a989a | 6828 | |
d579f20b JL |
6829 | update_stmt (stmt); |
6830 | ||
6831 | if (dump_file) | |
6832 | { | |
726a989a | 6833 | print_gimple_stmt (dump_file, stmt, 0, 0); |
d579f20b JL |
6834 | fprintf (dump_file, "\n"); |
6835 | } | |
d579f20b | 6836 | |
30821654 | 6837 | return true; |
1a557723 | 6838 | } |
a513fe88 JL |
6839 | } |
6840 | } | |
30821654 PB |
6841 | |
6842 | return false; | |
a513fe88 JL |
6843 | } |
6844 | ||
/* Simplify a switch statement using the value range of the switch
   argument.

   STMT is a GIMPLE_SWITCH.  Case labels that the index's value range
   proves unreachable are not removed here; instead the dead outgoing
   edges are pushed onto TO_REMOVE_EDGES and the new label vector onto
   TO_UPDATE_SWITCH_STMTS for the pass driver to apply later.  Always
   returns false since STMT itself is not changed yet.  */

static bool
simplify_switch_using_ranges (gimple stmt)
{
  tree op = gimple_switch_index (stmt);
  value_range_t *vr;
  bool take_default;
  edge e;
  edge_iterator ei;
  /* [i, j] will delimit the span of reachable case labels.  */
  size_t i = 0, j = 0, n, n2;
  tree vec2;
  switch_update su;

  if (TREE_CODE (op) == SSA_NAME)
    {
      vr = get_value_range (op);

      /* We can only handle integer ranges.  */
      if (vr->type != VR_RANGE
	  || symbolic_range_p (vr))
	return false;

      /* Find case label for min/max of the value range.  */
      take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
    }
  else if (TREE_CODE (op) == INTEGER_CST)
    {
      /* A constant index selects at most one case label.  */
      take_default = !find_case_label_index (stmt, 1, op, &i);
      if (take_default)
	{
	  /* Empty label span: i > j.  */
	  i = 1;
	  j = 0;
	}
      else
	{
	  j = i;
	}
    }
  else
    return false;

  n = gimple_switch_num_labels (stmt);

  /* Bail out if this is just all edges taken.  */
  if (i == 1
      && j == n - 1
      && take_default)
    return false;

  /* Build a new vector of taken case labels.  */
  vec2 = make_tree_vec (j - i + 1 + (int)take_default);
  n2 = 0;

  /* Add the default edge, if necessary.  */
  if (take_default)
    TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);

  for (; i <= j; ++i, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);

  /* Mark needed edges.  The aux field serves as a temporary "live"
     flag; -1 marks an edge reached by some surviving label.  */
  for (i = 0; i < n2; ++i)
    {
      e = find_edge (gimple_bb (stmt),
		     label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
      e->aux = (void *)-1;
    }

  /* Queue not needed edges for later removal.  */
  FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
    {
      if (e->aux == (void *)-1)
	{
	  /* Live edge: clear the temporary mark and keep it.  */
	  e->aux = NULL;
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "removing unreachable case label\n");
	}
      VEC_safe_push (edge, heap, to_remove_edges, e);
    }

  /* And queue an update for the stmt.  */
  su.stmt = stmt;
  su.vec = vec2;
  VEC_safe_push (switch_update, heap, to_update_switch_stmts, &su);
  return false;
}
6937 | ||
1a557723 JL |
6938 | /* Simplify STMT using ranges if possible. */ |
6939 | ||
30821654 PB |
6940 | bool |
6941 | simplify_stmt_using_ranges (gimple_stmt_iterator *gsi) | |
1a557723 | 6942 | { |
30821654 | 6943 | gimple stmt = gsi_stmt (*gsi); |
726a989a | 6944 | if (is_gimple_assign (stmt)) |
1a557723 | 6945 | { |
726a989a | 6946 | enum tree_code rhs_code = gimple_assign_rhs_code (stmt); |
1a557723 | 6947 | |
30821654 PB |
6948 | switch (rhs_code) |
6949 | { | |
6950 | case EQ_EXPR: | |
6951 | case NE_EXPR: | |
6952 | case TRUTH_NOT_EXPR: | |
6953 | case TRUTH_AND_EXPR: | |
6954 | case TRUTH_OR_EXPR: | |
6955 | case TRUTH_XOR_EXPR: | |
6956 | /* Transform EQ_EXPR, NE_EXPR, TRUTH_NOT_EXPR into BIT_XOR_EXPR | |
6957 | or identity if the RHS is zero or one, and the LHS are known | |
6958 | to be boolean values. Transform all TRUTH_*_EXPR into | |
6959 | BIT_*_EXPR if both arguments are known to be boolean values. */ | |
6960 | if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt)))) | |
6961 | return simplify_truth_ops_using_ranges (gsi, stmt); | |
6962 | break; | |
6963 | ||
1a557723 JL |
6964 | /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR |
6965 | and BIT_AND_EXPR respectively if the first operand is greater | |
6966 | than zero and the second operand is an exact power of two. */ | |
30821654 PB |
6967 | case TRUNC_DIV_EXPR: |
6968 | case TRUNC_MOD_EXPR: | |
6969 | if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))) | |
6970 | && integer_pow2p (gimple_assign_rhs2 (stmt))) | |
6971 | return simplify_div_or_mod_using_ranges (stmt); | |
6972 | break; | |
1a557723 JL |
6973 | |
6974 | /* Transform ABS (X) into X or -X as appropriate. */ | |
30821654 PB |
6975 | case ABS_EXPR: |
6976 | if (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME | |
6977 | && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt)))) | |
6978 | return simplify_abs_using_ranges (stmt); | |
6979 | break; | |
6980 | ||
6981 | default: | |
6982 | break; | |
6983 | } | |
1a557723 | 6984 | } |
726a989a | 6985 | else if (gimple_code (stmt) == GIMPLE_COND) |
30821654 | 6986 | return simplify_cond_using_ranges (stmt); |
726a989a | 6987 | else if (gimple_code (stmt) == GIMPLE_SWITCH) |
30821654 PB |
6988 | return simplify_switch_using_ranges (stmt); |
6989 | ||
6990 | return false; | |
1a557723 JL |
6991 | } |
6992 | ||
/* Stack of dest,src equivalency pairs that need to be restored after
   each attempt to thread a block's incoming edge to an outgoing edge.

   A NULL entry is used to mark the end of pairs which need to be
   restored.  Allocated in identify_jump_threads and released in
   finalize_jump_threads.  */
static VEC(tree,heap) *stack;
6999 | ||
0c948c27 ILT |
7000 | /* A trivial wrapper so that we can present the generic jump threading |
7001 | code with a simple API for simplifying statements. STMT is the | |
7002 | statement we want to simplify, WITHIN_STMT provides the location | |
7003 | for any overflow warnings. */ | |
7004 | ||
2090d6a0 | 7005 | static tree |
726a989a | 7006 | simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt) |
2090d6a0 JL |
7007 | { |
7008 | /* We only use VRP information to simplify conditionals. This is | |
7009 | overly conservative, but it's unclear if doing more would be | |
7010 | worth the compile time cost. */ | |
726a989a | 7011 | if (gimple_code (stmt) != GIMPLE_COND) |
2090d6a0 JL |
7012 | return NULL; |
7013 | ||
726a989a RB |
7014 | return vrp_evaluate_conditional (gimple_cond_code (stmt), |
7015 | gimple_cond_lhs (stmt), | |
7016 | gimple_cond_rhs (stmt), within_stmt); | |
2090d6a0 JL |
7017 | } |
7018 | ||
/* Blocks which have more than one predecessor and more than
   one successor present jump threading opportunities, i.e.,
   when the block is reached from a specific predecessor, we
   may be able to determine which of the outgoing edges will
   be traversed.  When this optimization applies, we are able
   to avoid conditionals at runtime and we may expose secondary
   optimization opportunities.

   This routine is effectively a driver for the generic jump
   threading code.  It basically just presents the generic code
   with edges that may be suitable for jump threading.

   Unlike DOM, we do not iterate VRP if jump threading was successful.
   While iterating may expose new opportunities for VRP, it is expected
   those opportunities would be very limited and the compile time cost
   to expose those opportunities would be significant.

   As jump threading opportunities are discovered, they are registered
   for later realization.  */

static void
identify_jump_threads (void)
{
  basic_block bb;
  gimple dummy;
  int i;
  edge e;

  /* Ugh.  When substituting values earlier in this pass we can
     wipe the dominance information.  So rebuild the dominator
     information as we need it within the jump threading code.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* We do not allow VRP information to be used for jump threading
     across a back edge in the CFG.  Otherwise it becomes too
     difficult to avoid eliminating loop exit tests.  Of course
     EDGE_DFS_BACK is not accurate at this time so we have to
     recompute it.  */
  mark_dfs_back_edges ();

  /* Do not thread across edges we are about to remove.  Just marking
     them as EDGE_DFS_BACK will do.  */
  for (i = 0; VEC_iterate (edge, to_remove_edges, i, e); ++i)
    e->flags |= EDGE_DFS_BACK;

  /* Allocate our unwinder stack to unwind any temporary equivalences
     that might be recorded.  Freed later in finalize_jump_threads.  */
  stack = VEC_alloc (tree, heap, 20);

  /* To avoid lots of silly node creation, we create a single
     conditional and just modify it in-place when attempting to
     thread jumps.  */
  dummy = gimple_build_cond (EQ_EXPR,
			     integer_zero_node, integer_zero_node,
			     NULL, NULL);

  /* Walk through all the blocks finding those which present a
     potential jump threading opportunity.  We could set this up
     as a dominator walker and record data during the walk, but
     I doubt it's worth the effort for the classes of jump
     threading opportunities we are trying to identify at this
     point in compilation.  */
  FOR_EACH_BB (bb)
    {
      gimple last;

      /* If the generic jump threading code does not find this block
	 interesting, then there is nothing to do.  */
      if (! potentially_threadable_block (bb))
	continue;

      /* We only care about blocks ending in a COND_EXPR.  While there
	 may be some value in handling SWITCH_EXPR here, I doubt it's
	 terribly important.  */
      last = gsi_stmt (gsi_last_bb (bb));
      if (gimple_code (last) != GIMPLE_COND)
	continue;

      /* We're basically looking for any kind of conditional with
	 integral type arguments.  */
      if (TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
	  && INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
	  && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
	      || is_gimple_min_invariant (gimple_cond_rhs (last)))
	  && INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_rhs (last))))
	{
	  edge_iterator ei;

	  /* We've got a block with multiple predecessors and multiple
	     successors which also ends in a suitable conditional.  For
	     each predecessor, see if we can thread it to a specific
	     successor.  */
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      /* Do not thread across back edges or abnormal edges
		 in the CFG.  */
	      if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
		continue;

	      thread_across_edge (dummy, e, true, &stack,
				  simplify_stmt_for_jump_threading);
	    }
	}
    }

  /* We do not actually update the CFG or SSA graphs at this point as
     ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
     handle ASSERT_EXPRs gracefully.  */
}
7128 | ||
/* We identified all the jump threading opportunities earlier, but could
   not transform the CFG at that time.  This routine transforms the
   CFG and arranges for the dominator tree to be rebuilt if necessary.

   Note the SSA graph update will occur during the normal TODO
   processing by the pass manager.  */
static void
finalize_jump_threads (void)
{
  /* FALSE: do not mark loops for fixup; VRP does not invalidate them.  */
  thread_through_all_blocks (false);
  /* Release the equivalence unwinder stack set up by
     identify_jump_threads.  */
  VEC_free (tree, heap, stack);
}
1a557723 | 7141 | |
0bca51f0 DN |
7142 | |
/* Traverse all the blocks folding conditionals with known ranges.
   Also performs single-value substitution, array-bound checking and
   jump-thread identification, then releases all VRP data.  */

static void
vrp_finalize (void)
{
  size_t i;
  /* Lattice handed to substitute_and_fold: entry I holds the sole
     value of SSA name I when its range is a singleton.  */
  prop_value_t *single_val_range;
  bool do_value_subst_p;

  if (dump_file)
    {
      fprintf (dump_file, "\nValue ranges after VRP:\n\n");
      dump_all_value_ranges (dump_file);
      fprintf (dump_file, "\n");
    }

  /* We may have ended with ranges that have exactly one value.  Those
     values can be substituted as any other copy/const propagated
     value using substitute_and_fold.  */
  single_val_range = XCNEWVEC (prop_value_t, num_ssa_names);

  do_value_subst_p = false;
  for (i = 0; i < num_ssa_names; i++)
    if (vr_value[i]
	&& vr_value[i]->type == VR_RANGE
	&& vr_value[i]->min == vr_value[i]->max)
      {
	single_val_range[i].value = vr_value[i]->min;
	do_value_subst_p = true;
      }

  if (!do_value_subst_p)
    {
      /* We found no single-valued ranges, don't waste time trying to
	 do single value substitution in substitute_and_fold.  */
      free (single_val_range);
      single_val_range = NULL;
    }

  substitute_and_fold (single_val_range, true);

  if (warn_array_bounds)
    check_all_array_refs ();

  /* We must identify jump threading opportunities before we release
     the datastructures built by VRP.  */
  identify_jump_threads ();

  /* Free allocated memory.  */
  for (i = 0; i < num_ssa_names; i++)
    if (vr_value[i])
      {
	BITMAP_FREE (vr_value[i]->equiv);
	free (vr_value[i]);
      }

  /* free (NULL) is a no-op, so this is safe even when no singleton
     ranges were found above.  */
  free (single_val_range);
  free (vr_value);
  free (vr_phi_edge_counts);

  /* So that we can distinguish between VRP data being available
     and not available.  */
  vr_value = NULL;
  vr_phi_edge_counts = NULL;
}
/* Main entry point to VRP (Value Range Propagation).  This pass is
   loosely based on J. R. C. Patterson, ``Accurate Static Branch
   Prediction by Value Range Propagation,'' in SIGPLAN Conference on
   Programming Language Design and Implementation, pp. 67-78, 1995.
   Also available at http://citeseer.ist.psu.edu/patterson95accurate.html

   This is essentially an SSA-CCP pass modified to deal with ranges
   instead of constants.

   While propagating ranges, we may find that two or more SSA name
   have equivalent, though distinct ranges.  For instance,

     1	x_9 = p_3->a;
     2	p_4 = ASSERT_EXPR <p_3, p_3 != 0>
     3	if (p_4 == q_2)
     4	  p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
     5	endif
     6	if (q_2)

   In the code above, pointer p_5 has range [q_2, q_2], but from the
   code we can also determine that p_5 cannot be NULL and, if q_2 had
   a non-varying range, p_5's range should also be compatible with it.

   These equivalences are created by two expressions: ASSERT_EXPR and
   copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
   result of another assertion, then we can use the fact that p_5 and
   p_4 are equivalent when evaluating p_5's range.

   Together with value ranges, we also propagate these equivalences
   between names so that we can take advantage of information from
   multiple ranges when doing final replacement.  Note that this
   equivalency relation is transitive but not symmetric.

   In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
   cannot assert that q_2 is equivalent to p_5 because q_2 may be used
   in contexts where that assertion does not hold (e.g., in line 6).

   TODO, the main difference between this pass and Patterson's is that
   we do not propagate edge probabilities.  We only compute whether
   edges can be taken or not.  That is, instead of having a spectrum
   of jump probabilities between 0 and 1, we only deal with 0, 1 and
   DON'T KNOW.  In the future, it may be worthwhile to propagate
   probabilities to aid branch prediction.  */

static unsigned int
execute_vrp (void)
{
  int i;
  edge e;
  switch_update *su;

  /* VRP uses scalar evolutions to derive ranges for loop variables, so
     loop structures and loop-closed SSA must be available first.  */
  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();

  /* Insert ASSERT_EXPRs that encode the predicates dominating each use;
     they are removed again after propagation (below).  */
  insert_range_assertions ();

  /* Side tables filled during propagation: edges of switches proven
     dead, and switch statements whose label vectors must shrink.  */
  to_remove_edges = VEC_alloc (edge, heap, 10);
  to_update_switch_stmts = VEC_alloc (switch_update, heap, 5);
  threadedge_initialize_values ();

  /* Run the generic SSA propagation engine with VRP's visit callbacks,
     then fold/thread/free in vrp_finalize.  */
  vrp_initialize ();
  ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
  vrp_finalize ();

  /* ASSERT_EXPRs must be removed before finalizing jump threads
     as finalizing jump threads calls the CFG cleanup code which
     does not properly handle ASSERT_EXPRs.  */
  remove_range_assertions ();

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  finalize_jump_threads ();

  /* Remove dead edges from SWITCH_EXPR optimization.  This leaves the
     CFG in a broken state and requires a cfg_cleanup run.  */
  for (i = 0; VEC_iterate (edge, to_remove_edges, i, e); ++i)
    remove_edge (e);
  /* Update SWITCH_EXPR case label vector.  */
  for (i = 0; VEC_iterate (switch_update, to_update_switch_stmts, i, su); ++i)
    {
      size_t j;
      size_t n = TREE_VEC_LENGTH (su->vec);
      tree label;
      gimple_switch_set_num_labels (su->stmt, n);
      for (j = 0; j < n; j++)
	gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
      /* As we may have replaced the default label with a regular one
	 make sure to make it a real default label again.  This ensures
	 optimal expansion.  */
      label = gimple_switch_default_label (su->stmt);
      CASE_LOW (label) = NULL_TREE;
      CASE_HIGH (label) = NULL_TREE;
    }

  /* Deleting edges invalidates dominator information; TODO_cleanup_cfg
     (in the pass's todo_flags_finish) will repair the CFG afterwards.  */
  if (VEC_length (edge, to_remove_edges) > 0)
    free_dominance_info (CDI_DOMINATORS);

  VEC_free (edge, heap, to_remove_edges);
  VEC_free (switch_update, heap, to_update_switch_stmts);
  threadedge_finalize_values ();

  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}
7321 | ||
7322 | static bool | |
7323 | gate_vrp (void) | |
7324 | { | |
7325 | return flag_tree_vrp != 0; | |
7326 | } | |
7327 | ||
8ddbbcae | 7328 | struct gimple_opt_pass pass_vrp = |
0bca51f0 | 7329 | { |
8ddbbcae JH |
7330 | { |
7331 | GIMPLE_PASS, | |
0bca51f0 DN |
7332 | "vrp", /* name */ |
7333 | gate_vrp, /* gate */ | |
7334 | execute_vrp, /* execute */ | |
7335 | NULL, /* sub */ | |
7336 | NULL, /* next */ | |
7337 | 0, /* static_pass_number */ | |
7338 | TV_TREE_VRP, /* tv_id */ | |
4effdf02 | 7339 | PROP_ssa, /* properties_required */ |
0bca51f0 | 7340 | 0, /* properties_provided */ |
ae07b463 | 7341 | 0, /* properties_destroyed */ |
0bca51f0 DN |
7342 | 0, /* todo_flags_start */ |
7343 | TODO_cleanup_cfg | |
7344 | | TODO_ggc_collect | |
7345 | | TODO_verify_ssa | |
7346 | | TODO_dump_func | |
8ddbbcae JH |
7347 | | TODO_update_ssa /* todo_flags_finish */ |
7348 | } | |
0bca51f0 | 7349 | }; |