]>
Commit | Line | Data |
---|---|---|
88dbf20f | 1 | /* Support routines for Value Range Propagation (VRP). |
3aea1f79 | 2 | Copyright (C) 2005-2014 Free Software Foundation, Inc. |
88dbf20f | 3 | Contributed by Diego Novillo <dnovillo@redhat.com>. |
4 | ||
5 | This file is part of GCC. | |
6 | ||
7 | GCC is free software; you can redistribute it and/or modify | |
8 | it under the terms of the GNU General Public License as published by | |
8c4c00c1 | 9 | the Free Software Foundation; either version 3, or (at your option) |
88dbf20f | 10 | any later version. |
11 | ||
12 | GCC is distributed in the hope that it will be useful, | |
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | GNU General Public License for more details. | |
16 | ||
17 | You should have received a copy of the GNU General Public License | |
8c4c00c1 | 18 | along with GCC; see the file COPYING3. If not see |
19 | <http://www.gnu.org/licenses/>. */ | |
88dbf20f | 20 | |
21 | #include "config.h" | |
22 | #include "system.h" | |
23 | #include "coretypes.h" | |
24 | #include "tm.h" | |
88dbf20f | 25 | #include "flags.h" |
26 | #include "tree.h" | |
9ed99284 | 27 | #include "stor-layout.h" |
28 | #include "calls.h" | |
94ea8568 | 29 | #include "predict.h" |
30 | #include "vec.h" | |
31 | #include "hashtab.h" | |
32 | #include "hash-set.h" | |
33 | #include "machmode.h" | |
34 | #include "hard-reg-set.h" | |
35 | #include "input.h" | |
36 | #include "function.h" | |
37 | #include "dominance.h" | |
38 | #include "cfg.h" | |
39 | #include "cfganal.h" | |
88dbf20f | 40 | #include "basic-block.h" |
bc61cadb | 41 | #include "tree-ssa-alias.h" |
42 | #include "internal-fn.h" | |
43 | #include "gimple-fold.h" | |
44 | #include "tree-eh.h" | |
45 | #include "gimple-expr.h" | |
46 | #include "is-a.h" | |
073c1fd5 | 47 | #include "gimple.h" |
dcf1a1ec | 48 | #include "gimple-iterator.h" |
49 | #include "gimple-walk.h" | |
073c1fd5 | 50 | #include "gimple-ssa.h" |
51 | #include "tree-cfg.h" | |
52 | #include "tree-phinodes.h" | |
53 | #include "ssa-iterators.h" | |
9ed99284 | 54 | #include "stringpool.h" |
073c1fd5 | 55 | #include "tree-ssanames.h" |
05d9c18a | 56 | #include "tree-ssa-loop-manip.h" |
57 | #include "tree-ssa-loop-niter.h" | |
073c1fd5 | 58 | #include "tree-ssa-loop.h" |
59 | #include "tree-into-ssa.h" | |
69ee5dbb | 60 | #include "tree-ssa.h" |
88dbf20f | 61 | #include "tree-pass.h" |
62 | #include "tree-dump.h" | |
ce084dfc | 63 | #include "gimple-pretty-print.h" |
0b205f4c | 64 | #include "diagnostic-core.h" |
a2a1fde2 | 65 | #include "intl.h" |
88dbf20f | 66 | #include "cfgloop.h" |
67 | #include "tree-scalar-evolution.h" | |
68 | #include "tree-ssa-propagate.h" | |
69 | #include "tree-chrec.h" | |
0c5b289a | 70 | #include "tree-ssa-threadupdate.h" |
f0938d2c | 71 | #include "expr.h" |
72 | #include "optabs.h" | |
424a4a92 | 73 | #include "tree-ssa-threadedge.h" |
6b409616 | 74 | #include "wide-int.h" |
88dbf20f | 75 | |
75a70cf9 | 76 | |
14f101cf | 77 | |
/* Range of values that can be associated with an SSA_NAME after VRP
   has executed.  */
struct value_range_d
{
  /* Lattice value represented by this range.  */
  enum value_range_type type;

  /* Minimum and maximum values represented by this range.  These
     values should be interpreted as follows:

	- If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
	  be NULL.

	- If TYPE == VR_RANGE then MIN holds the minimum value and
	  MAX holds the maximum value of the range [MIN, MAX].

	- If TYPE == ANTI_RANGE the variable is known to NOT
	  take any values in the range [MIN, MAX].  */
  tree min;
  tree max;

  /* Set of SSA names whose value ranges are equivalent to this one.
     This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE.  */
  bitmap equiv;
};

typedef struct value_range_d value_range_t;

/* Initializer for a value_range_t: the bottom (VR_UNDEFINED) lattice
   value with no bounds and no equivalence set.  */
#define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }
107 | ||
17ed8337 | 108 | /* Set of SSA names found live during the RPO traversal of the function |
109 | for still active basic-blocks. */ | |
110 | static sbitmap *live; | |
111 | ||
112 | /* Return true if the SSA name NAME is live on the edge E. */ | |
113 | ||
114 | static bool | |
115 | live_on_edge (edge e, tree name) | |
116 | { | |
117 | return (live[e->dest->index] | |
08b7917c | 118 | && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name))); |
17ed8337 | 119 | } |
88dbf20f | 120 | |
88dbf20f | 121 | /* Local functions. */ |
122 | static int compare_values (tree val1, tree val2); | |
c3783c3b | 123 | static int compare_values_warnv (tree val1, tree val2, bool *); |
ec0fa513 | 124 | static void vrp_meet (value_range_t *, value_range_t *); |
04dbf3c4 | 125 | static void vrp_intersect_ranges (value_range_t *, value_range_t *); |
93116081 | 126 | static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code, |
e0ad89bd | 127 | tree, tree, bool, bool *, |
128 | bool *); | |
88dbf20f | 129 | |
/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */
struct assert_locus_d
{
  /* Basic block where the assertion would be inserted.  */
  basic_block bb;

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
  edge e;

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */
  tree val;

  /* Expression to compare.  */
  tree expr;

  /* Next node in the linked list.  */
  struct assert_locus_d *next;
};

typedef struct assert_locus_d *assert_locus_t;
161 | ||
162 | /* If bit I is present, it means that SSA name N_i has a list of | |
163 | assertions that should be inserted in the IL. */ | |
164 | static bitmap need_assert_for; | |
165 | ||
166 | /* Array of locations lists where to insert assertions. ASSERTS_FOR[I] | |
167 | holds a list of ASSERT_LOCUS_T nodes that describe where | |
168 | ASSERT_EXPRs for SSA name N_I should be inserted. */ | |
169 | static assert_locus_t *asserts_for; | |
170 | ||
eea12c72 | 171 | /* Value range array. After propagation, VR_VALUE[I] holds the range |
172 | of values that SSA name N_I may take. */ | |
e0186710 | 173 | static unsigned num_vr_values; |
eea12c72 | 174 | static value_range_t **vr_value; |
e0186710 | 175 | static bool values_propagated; |
88dbf20f | 176 | |
5c7155ca | 177 | /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the |
178 | number of executable edges we saw the last time we visited the | |
179 | node. */ | |
180 | static int *vr_phi_edge_counts; | |
181 | ||
72c30859 | 182 | typedef struct { |
75a70cf9 | 183 | gimple stmt; |
72c30859 | 184 | tree vec; |
185 | } switch_update; | |
186 | ||
f1f41a6c | 187 | static vec<edge> to_remove_edges; |
188 | static vec<switch_update> to_update_switch_stmts; | |
72c30859 | 189 | |
88dbf20f | 190 | |
a9538d68 | 191 | /* Return the maximum value for TYPE. */ |
22cdb855 | 192 | |
193 | static inline tree | |
194 | vrp_val_max (const_tree type) | |
195 | { | |
196 | if (!INTEGRAL_TYPE_P (type)) | |
197 | return NULL_TREE; | |
198 | ||
22cdb855 | 199 | return TYPE_MAX_VALUE (type); |
200 | } | |
201 | ||
a9538d68 | 202 | /* Return the minimum value for TYPE. */ |
22cdb855 | 203 | |
204 | static inline tree | |
205 | vrp_val_min (const_tree type) | |
206 | { | |
207 | if (!INTEGRAL_TYPE_P (type)) | |
208 | return NULL_TREE; | |
209 | ||
22cdb855 | 210 | return TYPE_MIN_VALUE (type); |
211 | } | |
212 | ||
213 | /* Return whether VAL is equal to the maximum value of its type. This | |
214 | will be true for a positive overflow infinity. We can't do a | |
215 | simple equality comparison with TYPE_MAX_VALUE because C typedefs | |
216 | and Ada subtypes can produce types whose TYPE_MAX_VALUE is not == | |
217 | to the integer constant with the same value in the type. */ | |
218 | ||
219 | static inline bool | |
220 | vrp_val_is_max (const_tree val) | |
221 | { | |
222 | tree type_max = vrp_val_max (TREE_TYPE (val)); | |
223 | return (val == type_max | |
224 | || (type_max != NULL_TREE | |
225 | && operand_equal_p (val, type_max, 0))); | |
226 | } | |
227 | ||
228 | /* Return whether VAL is equal to the minimum value of its type. This | |
229 | will be true for a negative overflow infinity. */ | |
230 | ||
231 | static inline bool | |
232 | vrp_val_is_min (const_tree val) | |
233 | { | |
234 | tree type_min = vrp_val_min (TREE_TYPE (val)); | |
235 | return (val == type_min | |
236 | || (type_min != NULL_TREE | |
237 | && operand_equal_p (val, type_min, 0))); | |
238 | } | |
239 | ||
240 | ||
c3783c3b | 241 | /* Return whether TYPE should use an overflow infinity distinct from |
242 | TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to | |
243 | represent a signed overflow during VRP computations. An infinity | |
244 | is distinct from a half-range, which will go from some number to | |
245 | TYPE_{MIN,MAX}_VALUE. */ | |
246 | ||
247 | static inline bool | |
9f627b1a | 248 | needs_overflow_infinity (const_tree type) |
c3783c3b | 249 | { |
a9538d68 | 250 | return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type); |
c3783c3b | 251 | } |
252 | ||
253 | /* Return whether TYPE can support our overflow infinity | |
254 | representation: we use the TREE_OVERFLOW flag, which only exists | |
255 | for constants. If TYPE doesn't support this, we don't optimize | |
256 | cases which would require signed overflow--we drop them to | |
257 | VARYING. */ | |
258 | ||
259 | static inline bool | |
9f627b1a | 260 | supports_overflow_infinity (const_tree type) |
c3783c3b | 261 | { |
22cdb855 | 262 | tree min = vrp_val_min (type), max = vrp_val_max (type); |
c3783c3b | 263 | #ifdef ENABLE_CHECKING |
264 | gcc_assert (needs_overflow_infinity (type)); | |
265 | #endif | |
22cdb855 | 266 | return (min != NULL_TREE |
267 | && CONSTANT_CLASS_P (min) | |
268 | && max != NULL_TREE | |
269 | && CONSTANT_CLASS_P (max)); | |
c3783c3b | 270 | } |
271 | ||
272 | /* VAL is the maximum or minimum value of a type. Return a | |
273 | corresponding overflow infinity. */ | |
274 | ||
275 | static inline tree | |
276 | make_overflow_infinity (tree val) | |
277 | { | |
1b4345f7 | 278 | gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val)); |
c3783c3b | 279 | val = copy_node (val); |
280 | TREE_OVERFLOW (val) = 1; | |
281 | return val; | |
282 | } | |
283 | ||
284 | /* Return a negative overflow infinity for TYPE. */ | |
285 | ||
286 | static inline tree | |
287 | negative_overflow_infinity (tree type) | |
288 | { | |
1b4345f7 | 289 | gcc_checking_assert (supports_overflow_infinity (type)); |
22cdb855 | 290 | return make_overflow_infinity (vrp_val_min (type)); |
c3783c3b | 291 | } |
292 | ||
293 | /* Return a positive overflow infinity for TYPE. */ | |
294 | ||
295 | static inline tree | |
296 | positive_overflow_infinity (tree type) | |
297 | { | |
1b4345f7 | 298 | gcc_checking_assert (supports_overflow_infinity (type)); |
22cdb855 | 299 | return make_overflow_infinity (vrp_val_max (type)); |
c3783c3b | 300 | } |
301 | ||
302 | /* Return whether VAL is a negative overflow infinity. */ | |
303 | ||
304 | static inline bool | |
9f627b1a | 305 | is_negative_overflow_infinity (const_tree val) |
c3783c3b | 306 | { |
42081cd6 | 307 | return (TREE_OVERFLOW_P (val) |
308 | && needs_overflow_infinity (TREE_TYPE (val)) | |
22cdb855 | 309 | && vrp_val_is_min (val)); |
c3783c3b | 310 | } |
311 | ||
312 | /* Return whether VAL is a positive overflow infinity. */ | |
313 | ||
314 | static inline bool | |
9f627b1a | 315 | is_positive_overflow_infinity (const_tree val) |
c3783c3b | 316 | { |
42081cd6 | 317 | return (TREE_OVERFLOW_P (val) |
318 | && needs_overflow_infinity (TREE_TYPE (val)) | |
22cdb855 | 319 | && vrp_val_is_max (val)); |
c3783c3b | 320 | } |
321 | ||
322 | /* Return whether VAL is a positive or negative overflow infinity. */ | |
323 | ||
324 | static inline bool | |
9f627b1a | 325 | is_overflow_infinity (const_tree val) |
c3783c3b | 326 | { |
42081cd6 | 327 | return (TREE_OVERFLOW_P (val) |
328 | && needs_overflow_infinity (TREE_TYPE (val)) | |
22cdb855 | 329 | && (vrp_val_is_min (val) || vrp_val_is_max (val))); |
c3783c3b | 330 | } |
331 | ||
75a70cf9 | 332 | /* Return whether STMT has a constant rhs that is_overflow_infinity. */ |
333 | ||
334 | static inline bool | |
335 | stmt_overflow_infinity (gimple stmt) | |
336 | { | |
337 | if (is_gimple_assign (stmt) | |
338 | && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) == | |
339 | GIMPLE_SINGLE_RHS) | |
340 | return is_overflow_infinity (gimple_assign_rhs1 (stmt)); | |
341 | return false; | |
342 | } | |
343 | ||
afc1ad6d | 344 | /* If VAL is now an overflow infinity, return VAL. Otherwise, return |
345 | the same value with TREE_OVERFLOW clear. This can be used to avoid | |
346 | confusing a regular value with an overflow value. */ | |
347 | ||
348 | static inline tree | |
349 | avoid_overflow_infinity (tree val) | |
350 | { | |
351 | if (!is_overflow_infinity (val)) | |
352 | return val; | |
353 | ||
22cdb855 | 354 | if (vrp_val_is_max (val)) |
355 | return vrp_val_max (TREE_TYPE (val)); | |
afc1ad6d | 356 | else |
357 | { | |
1b4345f7 | 358 | gcc_checking_assert (vrp_val_is_min (val)); |
22cdb855 | 359 | return vrp_val_min (TREE_TYPE (val)); |
afc1ad6d | 360 | } |
361 | } | |
362 | ||
c3783c3b | 363 | |
/* Return true if ARG is marked with the nonnull attribute in the
   current function signature.  ARG must be a pointer-typed PARM_DECL
   of the current function.  */

static bool
nonnull_arg_p (const_tree arg)
{
  tree t, attrs, fntype;
  unsigned HOST_WIDE_INT arg_num;

  gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));

  /* The static chain decl is always non null.  */
  if (arg == cfun->static_chain_decl)
    return true;

  fntype = TREE_TYPE (current_function_decl);
  /* Walk all attribute entries; there may be more than one "nonnull"
     attribute, each with its own argument list.  */
  for (attrs = TYPE_ATTRIBUTES (fntype); attrs; attrs = TREE_CHAIN (attrs))
    {
      attrs = lookup_attribute ("nonnull", attrs);

      /* If "nonnull" wasn't specified, we know nothing about the argument.  */
      if (attrs == NULL_TREE)
	return false;

      /* If "nonnull" applies to all the arguments, then ARG is non-null.  */
      if (TREE_VALUE (attrs) == NULL_TREE)
	return true;

      /* Get the position number for ARG in the function signature.  */
      for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
	   t;
	   t = DECL_CHAIN (t), arg_num++)
	{
	  if (t == arg)
	    break;
	}

      gcc_assert (t == arg);

      /* Now see if ARG_NUM is mentioned in the nonnull list.  */
      for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
	{
	  if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
	    return true;
	}
    }

  return false;
}
413 | ||
414 | ||
bc8c1f83 | 415 | /* Set value range VR to VR_UNDEFINED. */ |
416 | ||
417 | static inline void | |
418 | set_value_range_to_undefined (value_range_t *vr) | |
419 | { | |
420 | vr->type = VR_UNDEFINED; | |
421 | vr->min = vr->max = NULL_TREE; | |
422 | if (vr->equiv) | |
423 | bitmap_clear (vr->equiv); | |
424 | } | |
425 | ||
426 | ||
bed8bec4 | 427 | /* Set value range VR to VR_VARYING. */ |
428 | ||
429 | static inline void | |
430 | set_value_range_to_varying (value_range_t *vr) | |
431 | { | |
432 | vr->type = VR_VARYING; | |
433 | vr->min = vr->max = NULL_TREE; | |
434 | if (vr->equiv) | |
435 | bitmap_clear (vr->equiv); | |
436 | } | |
437 | ||
438 | ||
/* Set value range VR to {T, MIN, MAX, EQUIV}.  The EQUIV bitmap is
   deep-copied into VR's own bitmap; the caller retains ownership of
   EQUIV itself.  */

static void
set_value_range (value_range_t *vr, enum value_range_type t, tree min,
		 tree max, bitmap equiv)
{
#if defined ENABLE_CHECKING
  /* Check the validity of the range.  */
  if (t == VR_RANGE || t == VR_ANTI_RANGE)
    {
      int cmp;

      gcc_assert (min && max);

      /* TREE_OVERFLOW is only allowed when it encodes an overflow
	 infinity, never on an ordinary constant bound.  */
      gcc_assert ((!TREE_OVERFLOW_P (min) || is_overflow_infinity (min))
		  && (!TREE_OVERFLOW_P (max) || is_overflow_infinity (max)));

      /* An anti-range covering the whole type would be empty.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
	gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));

      cmp = compare_values (min, max);
      gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);

      /* [-INF(OVF), +INF(OVF)] should have been dropped to VARYING
	 before reaching here.  */
      if (needs_overflow_infinity (TREE_TYPE (min)))
	gcc_assert (!is_overflow_infinity (min)
		    || !is_overflow_infinity (max));
    }

  if (t == VR_UNDEFINED || t == VR_VARYING)
    gcc_assert (min == NULL_TREE && max == NULL_TREE);

  if (t == VR_UNDEFINED || t == VR_VARYING)
    gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
#endif

  vr->type = t;
  vr->min = min;
  vr->max = max;

  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.  */
  if (vr->equiv == NULL
      && equiv != NULL)
    vr->equiv = BITMAP_ALLOC (NULL);

  /* EQUIV == VR->EQUIV happens when VR's own set is passed back in;
     copying would be a no-op (and clearing would be wrong).  */
  if (equiv != vr->equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
	bitmap_copy (vr->equiv, equiv);
      else
	bitmap_clear (vr->equiv);
    }
}
492 | ||
493 | ||
/* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
   This means adjusting T, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */

static void
set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
				  tree min, tree max, bitmap equiv)
{
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (t == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  else if (t == VR_VARYING)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      set_value_range (vr, t, min, max, equiv);
      return;
    }

  /* Wrong order for min and max, to swap them and the VR type we need
     to adjust them.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one, tmp;

      /* For one bit precision if max < min, then the swapped
	 range covers all values, so for VR_RANGE it is varying and
	 for VR_ANTI_RANGE empty range, so drop to varying as well.  */
      if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* Swap to [MAX+1, MIN-1] and flip the range kind below.  */
      one = build_int_cst (TREE_TYPE (min), 1);
      tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
	 that again.  But this represents an empty value range, so drop
	 to varying in this case.  */
      if (tree_int_cst_lt (max, min))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (t == VR_ANTI_RANGE)
    {
      bool is_min = vrp_val_is_min (min);
      bool is_max = vrp_val_is_max (max);

      if (is_min && is_max)
	{
	  /* We cannot deal with empty ranges, drop to varying.
	     ??? This could be VR_UNDEFINED instead.  */
	  set_value_range_to_varying (vr);
	  return;
	}
      else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
	       && (is_min || is_max))
	{
	  /* Non-empty boolean ranges can always be represented
	     as a singleton range.  */
	  if (is_min)
	    min = max = vrp_val_max (TREE_TYPE (min));
	  else
	    min = max = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
	}
      else if (is_min
	       /* As a special exception preserve non-null ranges.  */
	       && !(TYPE_UNSIGNED (TREE_TYPE (min))
		    && integer_zerop (max)))
	{
	  /* ~[TYPE_MIN, max] becomes [max+1, TYPE_MAX].  */
	  tree one = build_int_cst (TREE_TYPE (max), 1);
	  min = int_const_binop (PLUS_EXPR, max, one);
	  max = vrp_val_max (TREE_TYPE (max));
	  t = VR_RANGE;
	}
      else if (is_max)
	{
	  /* ~[min, TYPE_MAX] becomes [TYPE_MIN, min-1].  */
	  tree one = build_int_cst (TREE_TYPE (min), 1);
	  max = int_const_binop (MINUS_EXPR, min, one);
	  min = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
	}
    }

  /* Drop [-INF(OVF), +INF(OVF)] to varying.  */
  if (needs_overflow_infinity (TREE_TYPE (min))
      && is_overflow_infinity (min)
      && is_overflow_infinity (max))
    {
      set_value_range_to_varying (vr);
      return;
    }

  set_value_range (vr, t, min, max, equiv);
}
613 | ||
614 | /* Copy value range FROM into value range TO. */ | |
8dbf774a | 615 | |
616 | static inline void | |
bed8bec4 | 617 | copy_value_range (value_range_t *to, value_range_t *from) |
8dbf774a | 618 | { |
bed8bec4 | 619 | set_value_range (to, from->type, from->min, from->max, from->equiv); |
c3783c3b | 620 | } |
621 | ||
b700987e | 622 | /* Set value range VR to a single value. This function is only called |
623 | with values we get from statements, and exists to clear the | |
624 | TREE_OVERFLOW flag so that we don't think we have an overflow | |
625 | infinity when we shouldn't. */ | |
626 | ||
627 | static inline void | |
4baf1a77 | 628 | set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv) |
b700987e | 629 | { |
630 | gcc_assert (is_gimple_min_invariant (val)); | |
3dad27e9 | 631 | if (TREE_OVERFLOW_P (val)) |
632 | val = drop_tree_overflow (val); | |
4baf1a77 | 633 | set_value_range (vr, VR_RANGE, val, val, equiv); |
b700987e | 634 | } |
635 | ||
c3783c3b | 636 | /* Set value range VR to a non-negative range of type TYPE. |
80777cd8 | 637 | OVERFLOW_INFINITY indicates whether to use an overflow infinity |
c3783c3b | 638 | rather than TYPE_MAX_VALUE; this should be true if we determine |
639 | that the range is nonnegative based on the assumption that signed | |
640 | overflow does not occur. */ | |
641 | ||
642 | static inline void | |
643 | set_value_range_to_nonnegative (value_range_t *vr, tree type, | |
644 | bool overflow_infinity) | |
645 | { | |
646 | tree zero; | |
647 | ||
648 | if (overflow_infinity && !supports_overflow_infinity (type)) | |
649 | { | |
650 | set_value_range_to_varying (vr); | |
651 | return; | |
652 | } | |
653 | ||
654 | zero = build_int_cst (type, 0); | |
655 | set_value_range (vr, VR_RANGE, zero, | |
656 | (overflow_infinity | |
657 | ? positive_overflow_infinity (type) | |
658 | : TYPE_MAX_VALUE (type)), | |
659 | vr->equiv); | |
8dbf774a | 660 | } |
eea12c72 | 661 | |
662 | /* Set value range VR to a non-NULL range of type TYPE. */ | |
663 | ||
664 | static inline void | |
665 | set_value_range_to_nonnull (value_range_t *vr, tree type) | |
666 | { | |
667 | tree zero = build_int_cst (type, 0); | |
668 | set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv); | |
669 | } | |
670 | ||
671 | ||
672 | /* Set value range VR to a NULL range of type TYPE. */ | |
673 | ||
674 | static inline void | |
675 | set_value_range_to_null (value_range_t *vr, tree type) | |
676 | { | |
4baf1a77 | 677 | set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv); |
eea12c72 | 678 | } |
679 | ||
680 | ||
b9b64cb7 | 681 | /* Set value range VR to a range of a truthvalue of type TYPE. */ |
682 | ||
683 | static inline void | |
684 | set_value_range_to_truthvalue (value_range_t *vr, tree type) | |
685 | { | |
686 | if (TYPE_PRECISION (type) == 1) | |
687 | set_value_range_to_varying (vr); | |
688 | else | |
689 | set_value_range (vr, VR_RANGE, | |
690 | build_int_cst (type, 0), build_int_cst (type, 1), | |
691 | vr->equiv); | |
692 | } | |
693 | ||
694 | ||
/* If abs (min) < abs (max), set VR to [-max, max], if
   abs (min) >= abs (max), set VR to [-min, min].  Used for ranges of
   results whose magnitude is bounded by the larger-magnitude bound
   (e.g. modulo).  MIN and MAX must be INTEGER_CSTs of a signed
   integral type.  */

static void
abs_extent_range (value_range_t *vr, tree min, tree max)
{
  int cmp;

  gcc_assert (TREE_CODE (min) == INTEGER_CST);
  gcc_assert (TREE_CODE (max) == INTEGER_CST);
  gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
  gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
  min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
  max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
  /* ABS_EXPR of TYPE_MIN_VALUE overflows; we cannot represent the
     extent then, so give up.  */
  if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
    {
      set_value_range_to_varying (vr);
      return;
    }
  cmp = compare_values (min, max);
  if (cmp == -1)
    min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
  else if (cmp == 0 || cmp == 1)
    {
      max = min;
      min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
    }
  else
    {
      /* Values were not comparable; give up.  */
      set_value_range_to_varying (vr);
      return;
    }
  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
}
729 | ||
730 | ||
/* Return value range information for VAR.

   If we have no values ranges recorded (ie, VRP is not running), then
   return NULL.  Otherwise create an empty range if none existed for VAR.  */

static value_range_t *
get_value_range (const_tree var)
{
  /* Shared, never-modified VARYING range returned for SSA names we
     cannot (or must not) allocate a range for.  */
  static const struct value_range_d vr_const_varying
    = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
  value_range_t *vr;
  tree sym;
  unsigned ver = SSA_NAME_VERSION (var);

  /* If we have no recorded ranges, then return NULL.  */
  if (! vr_value)
    return NULL;

  /* If we query the range for a new SSA name return an unmodifiable VARYING.
     We should get here at most from the substitute-and-fold stage which
     will never try to change values.  */
  if (ver >= num_vr_values)
    return CONST_CAST (value_range_t *, &vr_const_varying);

  vr = vr_value[ver];
  if (vr)
    return vr;

  /* After propagation finished do not allocate new value-ranges.  */
  if (values_propagated)
    return CONST_CAST (value_range_t *, &vr_const_varying);

  /* Create a default value range (zero-initialized, i.e. VR_UNDEFINED).  */
  vr_value[ver] = vr = XCNEW (value_range_t);

  /* Defer allocating the equivalence set.  */
  vr->equiv = NULL;

  /* If VAR is a default definition of a parameter, the variable can
     take any value in VAR's type.  */
  if (SSA_NAME_IS_DEFAULT_DEF (var))
    {
      sym = SSA_NAME_VAR (var);
      if (TREE_CODE (sym) == PARM_DECL)
	{
	  /* Try to use the "nonnull" attribute to create ~[0, 0]
	     anti-ranges for pointers.  Note that this is only valid with
	     default definitions of PARM_DECLs.  */
	  if (POINTER_TYPE_P (TREE_TYPE (sym))
	      && nonnull_arg_p (sym))
	    set_value_range_to_nonnull (vr, TREE_TYPE (sym));
	  else
	    set_value_range_to_varying (vr);
	}
      else if (TREE_CODE (sym) == RESULT_DECL
	       && DECL_BY_REFERENCE (sym))
	/* A by-reference result is the address of the return slot,
	   which is never null.  */
	set_value_range_to_nonnull (vr, TREE_TYPE (sym));
    }

  return vr;
}
792 | ||
238ad80e | 793 | /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */ |
794 | ||
795 | static inline bool | |
9f627b1a | 796 | vrp_operand_equal_p (const_tree val1, const_tree val2) |
238ad80e | 797 | { |
c3783c3b | 798 | if (val1 == val2) |
799 | return true; | |
800 | if (!val1 || !val2 || !operand_equal_p (val1, val2, 0)) | |
801 | return false; | |
42081cd6 | 802 | return is_overflow_infinity (val1) == is_overflow_infinity (val2); |
238ad80e | 803 | } |
804 | ||
805 | /* Return true, if the bitmaps B1 and B2 are equal. */ | |
806 | ||
807 | static inline bool | |
1f1872fd | 808 | vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2) |
238ad80e | 809 | { |
810 | return (b1 == b2 | |
b1296be0 | 811 | || ((!b1 || bitmap_empty_p (b1)) |
812 | && (!b2 || bitmap_empty_p (b2))) | |
238ad80e | 813 | || (b1 && b2 |
814 | && bitmap_equal_p (b1, b2))); | |
815 | } | |
88dbf20f | 816 | |
/* Update the value range and equivalence set for variable VAR to
   NEW_VR.  Return true if NEW_VR is different from VAR's previous
   value.

   NOTE: This function assumes that NEW_VR is a temporary value range
   object created for the sole purpose of updating VAR's range.  The
   storage used by the equivalence set from NEW_VR will be freed by
   this function.  Do not call update_value_range when NEW_VR
   is the range object associated with another SSA name.  */

static inline bool
update_value_range (const_tree var, value_range_t *new_vr)
{
  value_range_t *old_vr;
  bool is_new;

  /* Update the value range, if necessary.  */
  old_vr = get_value_range (var);
  is_new = old_vr->type != new_vr->type
	   || !vrp_operand_equal_p (old_vr->min, new_vr->min)
	   || !vrp_operand_equal_p (old_vr->max, new_vr->max)
	   || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);

  if (is_new)
    {
      /* Do not allow transitions up the lattice.  The following
	 is slightly more awkward than just new_vr->type < old_vr->type
	 because VR_RANGE and VR_ANTI_RANGE need to be considered
	 the same.  We may not have is_new when transitioning to
	 UNDEFINED or from VARYING.  */
      if (new_vr->type == VR_UNDEFINED
	  || old_vr->type == VR_VARYING)
	set_value_range_to_varying (old_vr);
      else
	set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
			 new_vr->equiv);
    }

  /* NEW_VR is a temporary: release its equivalence set (see NOTE
     above).  */
  BITMAP_FREE (new_vr->equiv);

  return is_new;
}
88dbf20f | 859 | |
88dbf20f | 860 | |
fbcece5e | 861 | /* Add VAR and VAR's equivalence set to EQUIV. This is the central |
862 | point where equivalence processing can be turned on/off. */ | |
88dbf20f | 863 | |
eea12c72 | 864 | static void |
9f627b1a | 865 | add_equivalence (bitmap *equiv, const_tree var) |
eea12c72 | 866 | { |
867 | unsigned ver = SSA_NAME_VERSION (var); | |
868 | value_range_t *vr = vr_value[ver]; | |
88dbf20f | 869 | |
fbcece5e | 870 | if (*equiv == NULL) |
871 | *equiv = BITMAP_ALLOC (NULL); | |
872 | bitmap_set_bit (*equiv, ver); | |
eea12c72 | 873 | if (vr && vr->equiv) |
fbcece5e | 874 | bitmap_ior_into (*equiv, vr->equiv); |
88dbf20f | 875 | } |
876 | ||
877 | ||
878 | /* Return true if VR is ~[0, 0]. */ | |
879 | ||
880 | static inline bool | |
eea12c72 | 881 | range_is_nonnull (value_range_t *vr) |
88dbf20f | 882 | { |
883 | return vr->type == VR_ANTI_RANGE | |
884 | && integer_zerop (vr->min) | |
885 | && integer_zerop (vr->max); | |
886 | } | |
887 | ||
888 | ||
889 | /* Return true if VR is [0, 0]. */ | |
890 | ||
891 | static inline bool | |
eea12c72 | 892 | range_is_null (value_range_t *vr) |
88dbf20f | 893 | { |
894 | return vr->type == VR_RANGE | |
895 | && integer_zerop (vr->min) | |
896 | && integer_zerop (vr->max); | |
897 | } | |
898 | ||
bca0860e | 899 | /* Return true if max and min of VR are INTEGER_CST. It's not necessary |
900 | a singleton. */ | |
901 | ||
902 | static inline bool | |
903 | range_int_cst_p (value_range_t *vr) | |
904 | { | |
905 | return (vr->type == VR_RANGE | |
906 | && TREE_CODE (vr->max) == INTEGER_CST | |
ac4a8000 | 907 | && TREE_CODE (vr->min) == INTEGER_CST); |
bca0860e | 908 | } |
909 | ||
910 | /* Return true if VR is a INTEGER_CST singleton. */ | |
911 | ||
912 | static inline bool | |
913 | range_int_cst_singleton_p (value_range_t *vr) | |
914 | { | |
915 | return (range_int_cst_p (vr) | |
4a8f88ff | 916 | && !is_overflow_infinity (vr->min) |
917 | && !is_overflow_infinity (vr->max) | |
bca0860e | 918 | && tree_int_cst_equal (vr->min, vr->max)); |
919 | } | |
88dbf20f | 920 | |
eea12c72 | 921 | /* Return true if value range VR involves at least one symbol. */ |
88dbf20f | 922 | |
eea12c72 | 923 | static inline bool |
924 | symbolic_range_p (value_range_t *vr) | |
88dbf20f | 925 | { |
eea12c72 | 926 | return (!is_gimple_min_invariant (vr->min) |
927 | || !is_gimple_min_invariant (vr->max)); | |
88dbf20f | 928 | } |
929 | ||
/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
   otherwise.  We only handle additive operations and set NEG to true if the
   symbol is negated and INV to the invariant part, if any.  */

static tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
  bool neg_;
  tree inv_;

  /* Peel off an outer addition/subtraction with an invariant operand,
     recording the invariant part.  The symbol starts out negated when
     it is the second operand of a MINUS_EXPR (INV - SYM).  */
  if (TREE_CODE (t) == PLUS_EXPR
      || TREE_CODE (t) == POINTER_PLUS_EXPR
      || TREE_CODE (t) == MINUS_EXPR)
    {
      if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
	{
	  neg_ = (TREE_CODE (t) == MINUS_EXPR);
	  inv_ = TREE_OPERAND (t, 0);
	  t = TREE_OPERAND (t, 1);
	}
      else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	{
	  neg_ = false;
	  inv_ = TREE_OPERAND (t, 1);
	  t = TREE_OPERAND (t, 0);
	}
      else
	/* Neither operand is invariant: not a single-symbol form.  */
	return NULL_TREE;
    }
  else
    {
      neg_ = false;
      inv_ = NULL_TREE;
    }

  /* An explicit negation flips the sign once more.  */
  if (TREE_CODE (t) == NEGATE_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      neg_ = !neg_;
    }

  /* Whatever remains must be the symbol itself.  */
  if (TREE_CODE (t) != SSA_NAME)
    return NULL_TREE;

  /* Only commit the outputs on success.  */
  *neg = neg_;
  *inv = inv_;
  return t;
}
978 | ||
979 | /* The reverse operation: build a symbolic expression with TYPE | |
980 | from symbol SYM, negated according to NEG, and invariant INV. */ | |
981 | ||
982 | static tree | |
983 | build_symbolic_expr (tree type, tree sym, bool neg, tree inv) | |
984 | { | |
985 | const bool pointer_p = POINTER_TYPE_P (type); | |
986 | tree t = sym; | |
987 | ||
988 | if (neg) | |
989 | t = build1 (NEGATE_EXPR, type, t); | |
990 | ||
991 | if (integer_zerop (inv)) | |
992 | return t; | |
993 | ||
994 | return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv); | |
995 | } | |
996 | ||
997 | /* Return true if value range VR involves exactly one symbol SYM. */ | |
998 | ||
999 | static bool | |
1000 | symbolic_range_based_on_p (value_range_t *vr, const_tree sym) | |
1001 | { | |
1002 | bool neg, min_has_symbol, max_has_symbol; | |
1003 | tree inv; | |
1004 | ||
1005 | if (is_gimple_min_invariant (vr->min)) | |
1006 | min_has_symbol = false; | |
1007 | else if (get_single_symbol (vr->min, &neg, &inv) == sym) | |
1008 | min_has_symbol = true; | |
1009 | else | |
1010 | return false; | |
1011 | ||
1012 | if (is_gimple_min_invariant (vr->max)) | |
1013 | max_has_symbol = false; | |
1014 | else if (get_single_symbol (vr->max, &neg, &inv) == sym) | |
1015 | max_has_symbol = true; | |
1016 | else | |
1017 | return false; | |
1018 | ||
1019 | return (min_has_symbol || max_has_symbol); | |
1020 | } | |
1021 | ||
80777cd8 | 1022 | /* Return true if value range VR uses an overflow infinity. */ |
8dbf774a | 1023 | |
c3783c3b | 1024 | static inline bool |
1025 | overflow_infinity_range_p (value_range_t *vr) | |
8dbf774a | 1026 | { |
c3783c3b | 1027 | return (vr->type == VR_RANGE |
1028 | && (is_overflow_infinity (vr->min) | |
1029 | || is_overflow_infinity (vr->max))); | |
1030 | } | |
add6ee5e | 1031 | |
a2a1fde2 | 1032 | /* Return false if we can not make a valid comparison based on VR; |
1033 | this will be the case if it uses an overflow infinity and overflow | |
1034 | is not undefined (i.e., -fno-strict-overflow is in effect). | |
1035 | Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR | |
1036 | uses an overflow infinity. */ | |
1037 | ||
1038 | static bool | |
1039 | usable_range_p (value_range_t *vr, bool *strict_overflow_p) | |
1040 | { | |
1041 | gcc_assert (vr->type == VR_RANGE); | |
1042 | if (is_overflow_infinity (vr->min)) | |
1043 | { | |
1044 | *strict_overflow_p = true; | |
1045 | if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min))) | |
1046 | return false; | |
1047 | } | |
1048 | if (is_overflow_infinity (vr->max)) | |
1049 | { | |
1050 | *strict_overflow_p = true; | |
1051 | if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max))) | |
1052 | return false; | |
1053 | } | |
1054 | return true; | |
1055 | } | |
1056 | ||
1057 | ||
/* Return true if the result of assignment STMT is known to be non-negative.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  /* Dispatch on the shape of the RHS to the matching fold-const helper.  */
  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_UNARY_RHS:
      return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
					     gimple_expr_type (stmt),
					     gimple_assign_rhs1 (stmt),
					     strict_overflow_p);
    case GIMPLE_BINARY_RHS:
      return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
					      gimple_expr_type (stmt),
					      gimple_assign_rhs1 (stmt),
					      gimple_assign_rhs2 (stmt),
					      strict_overflow_p);
    case GIMPLE_TERNARY_RHS:
      /* Conservatively assume ternary RHSes may be negative.  */
      return false;
    case GIMPLE_SINGLE_RHS:
      return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
					      strict_overflow_p);
    case GIMPLE_INVALID_RHS:
      gcc_unreachable ();
    default:
      gcc_unreachable ();
    }
}
1091 | ||
/* Return true if return value of call STMT is known to be non-negative.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  /* Pass up to the first two arguments; the fold-const helper only
     inspects those for the builtins it recognizes.  */
  tree arg0 = gimple_call_num_args (stmt) > 0 ?
    gimple_call_arg (stmt, 0) : NULL_TREE;
  tree arg1 = gimple_call_num_args (stmt) > 1 ?
    gimple_call_arg (stmt, 1) : NULL_TREE;

  return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
					gimple_call_fndecl (stmt),
					arg0,
					arg1,
					strict_overflow_p);
}
1111 | ||
/* Return true if STMT is known to compute a non-negative value.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
    case GIMPLE_CALL:
      return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
    default:
      /* Callers only pass assignments and calls here.  */
      gcc_unreachable ();
    }
}
1130 | ||
/* Return true if the result of assignment STMT is known to be non-zero.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  /* Dispatch on the shape of the RHS to the matching fold-const helper.  */
  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_UNARY_RHS:
      return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
					 gimple_expr_type (stmt),
					 gimple_assign_rhs1 (stmt),
					 strict_overflow_p);
    case GIMPLE_BINARY_RHS:
      return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
					  gimple_expr_type (stmt),
					  gimple_assign_rhs1 (stmt),
					  gimple_assign_rhs2 (stmt),
					  strict_overflow_p);
    case GIMPLE_TERNARY_RHS:
      /* Conservatively assume ternary RHSes may be zero.  */
      return false;
    case GIMPLE_SINGLE_RHS:
      return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
					  strict_overflow_p);
    case GIMPLE_INVALID_RHS:
      gcc_unreachable ();
    default:
      gcc_unreachable ();
    }
}
1164 | ||
/* Return true if STMT is known to compute a non-zero value.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
    case GIMPLE_CALL:
      {
	tree fndecl = gimple_call_fndecl (stmt);
	if (!fndecl) return false;
	/* A throwing operator new reports allocation failure via an
	   exception rather than returning NULL, so its result is
	   non-null — unless -fcheck-new or NULL pointer checks are
	   disabled.  */
	if (flag_delete_null_pointer_checks && !flag_check_new
	    && DECL_IS_OPERATOR_NEW (fndecl)
	    && !TREE_NOTHROW (fndecl))
	  return true;
	/* Likewise for functions declared with the returns_nonnull
	   attribute.  */
	if (flag_delete_null_pointer_checks &&
	    lookup_attribute ("returns_nonnull",
			      TYPE_ATTRIBUTES (gimple_call_fntype (stmt))))
	  return true;
	/* alloca never returns a null pointer.  */
	return gimple_alloca_call_p (stmt);
      }
    default:
      /* Callers only pass assignments and calls here.  */
      gcc_unreachable ();
    }
}
1195 | ||
/* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
   obtained so far.  */

static bool
vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
{
  /* First try the range-independent analysis.  */
  if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
    return true;

  /* If we have an expression of the form &X->a, then the expression
     is nonnull if X is nonnull.  Use the VRP lattice for X.  */
  if (is_gimple_assign (stmt)
      && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
    {
      tree expr = gimple_assign_rhs1 (stmt);
      tree base = get_base_address (TREE_OPERAND (expr, 0));

      if (base != NULL_TREE
	  && TREE_CODE (base) == MEM_REF
	  && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
	{
	  value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
	  if (range_is_nonnull (vr))
	    return true;
	}
    }

  return false;
}
1225 | ||
7587869b | 1226 | /* Returns true if EXPR is a valid value (as expected by compare_values) -- |
1227 | a gimple invariant, or SSA_NAME +- CST. */ | |
1228 | ||
1229 | static bool | |
1230 | valid_value_p (tree expr) | |
1231 | { | |
1232 | if (TREE_CODE (expr) == SSA_NAME) | |
1233 | return true; | |
1234 | ||
1235 | if (TREE_CODE (expr) == PLUS_EXPR | |
1236 | || TREE_CODE (expr) == MINUS_EXPR) | |
1237 | return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME | |
1238 | && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST); | |
48e1416a | 1239 | |
7587869b | 1240 | return is_gimple_min_invariant (expr); |
1241 | } | |
e7d43f99 | 1242 | |
/* Return
   1 if VAL < VAL2
   0 if !(VAL < VAL2)
   -2 if those are incomparable.  */
static inline int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    return tree_int_cst_lt (val, val2);
  else
    {
      tree tcmp;

      /* Folding may emit overflow warnings we do not want here.  */
      fold_defer_overflow_warnings ();

      tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

      fold_undefer_and_ignore_overflow_warnings ();

      /* No constant result means we cannot compare the operands.  */
      if (!tcmp
	  || TREE_CODE (tcmp) != INTEGER_CST)
	return -2;

      if (!integer_zerop (tcmp))
	return 1;
    }

  /* val >= val2, not considering overflow infinity.  A negative
     overflow infinity is below everything but itself; a positive one
     is above everything but itself.  */
  if (is_negative_overflow_infinity (val))
    return is_negative_overflow_infinity (val2) ? 0 : 1;
  else if (is_positive_overflow_infinity (val2))
    return is_positive_overflow_infinity (val) ? 0 : 1;

  return 0;
}
1279 | ||
/* Compare two values VAL1 and VAL2.  Return

   	-2 if VAL1 and VAL2 cannot be compared at compile-time,
   	-1 if VAL1 < VAL2,
   	 0 if VAL1 == VAL2,
   	+1 if VAL1 > VAL2, and
   	+2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

static int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
	      == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  /* Symbolic comparison: both values are of the form
     '[-]NAME [+- CST]' — canonicalize each to a (code, name, constant)
     triple and compare based on those.  */
  if ((TREE_CODE (val1) == SSA_NAME
       || (TREE_CODE (val1) == NEGATE_EXPR
	   && TREE_CODE (TREE_OPERAND (val1, 0)) == SSA_NAME)
       || TREE_CODE (val1) == PLUS_EXPR
       || TREE_CODE (val1) == MINUS_EXPR)
      && (TREE_CODE (val2) == SSA_NAME
	  || (TREE_CODE (val2) == NEGATE_EXPR
	      && TREE_CODE (TREE_OPERAND (val2, 0)) == SSA_NAME)
	  || TREE_CODE (val2) == PLUS_EXPR
	  || TREE_CODE (val2) == MINUS_EXPR))
    {
      tree n1, c1, n2, c2;
      enum tree_code code1, code2;

      /* If VAL1 and VAL2 are of the form '[-]NAME [+-] CST' or 'NAME',
	 return -1 or +1 accordingly.  If VAL1 and VAL2 don't use the
	 same name, return -2.  */
      if (TREE_CODE (val1) == SSA_NAME || TREE_CODE (val1) == NEGATE_EXPR)
	{
	  code1 = SSA_NAME;
	  n1 = val1;
	  c1 = NULL_TREE;
	}
      else
	{
	  code1 = TREE_CODE (val1);
	  n1 = TREE_OPERAND (val1, 0);
	  c1 = TREE_OPERAND (val1, 1);
	  /* Normalize a negative constant: NAME + (-CST) becomes
	     NAME - CST and vice versa.  */
	  if (tree_int_cst_sgn (c1) == -1)
	    {
	      if (is_negative_overflow_infinity (c1))
		return -2;
	      c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
	      if (!c1)
		return -2;
	      code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
	    }
	}

      if (TREE_CODE (val2) == SSA_NAME || TREE_CODE (val2) == NEGATE_EXPR)
	{
	  code2 = SSA_NAME;
	  n2 = val2;
	  c2 = NULL_TREE;
	}
      else
	{
	  code2 = TREE_CODE (val2);
	  n2 = TREE_OPERAND (val2, 0);
	  c2 = TREE_OPERAND (val2, 1);
	  /* Normalize a negative constant as above.  */
	  if (tree_int_cst_sgn (c2) == -1)
	    {
	      if (is_negative_overflow_infinity (c2))
		return -2;
	      c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
	      if (!c2)
		return -2;
	      code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
	    }
	}

      /* Both values must use the same name.  */
      if (TREE_CODE (n1) == NEGATE_EXPR && TREE_CODE (n2) == NEGATE_EXPR)
	{
	  n1 = TREE_OPERAND (n1, 0);
	  n2 = TREE_OPERAND (n2, 0);
	}
      if (n1 != n2)
	return -2;

      if (code1 == SSA_NAME && code2 == SSA_NAME)
	/* NAME == NAME  */
	return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
	return -2;

      /* From here on the result relies on undefined signed overflow,
	 unless TREE_NO_WARNING says a warning was already given.  */
      if (strict_overflow_p != NULL
	  && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
	  && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      if (code1 == SSA_NAME)
	{
	  if (code2 == PLUS_EXPR)
	    /* NAME < NAME + CST  */
	    return -1;
	  else if (code2 == MINUS_EXPR)
	    /* NAME > NAME - CST  */
	    return 1;
	}
      else if (code1 == PLUS_EXPR)
	{
	  if (code2 == SSA_NAME)
	    /* NAME + CST > NAME  */
	    return 1;
	  else if (code2 == PLUS_EXPR)
	    /* NAME + CST1 > NAME + CST2, if CST1 > CST2  */
	    return compare_values_warnv (c1, c2, strict_overflow_p);
	  else if (code2 == MINUS_EXPR)
	    /* NAME + CST1 > NAME - CST2  */
	    return 1;
	}
      else if (code1 == MINUS_EXPR)
	{
	  if (code2 == SSA_NAME)
	    /* NAME - CST < NAME  */
	    return -1;
	  else if (code2 == PLUS_EXPR)
	    /* NAME - CST1 < NAME + CST2  */
	    return -1;
	  else if (code2 == MINUS_EXPR)
	    /* NAME - CST1 > NAME - CST2, if CST1 < CST2.  Notice that
	       C1 and C2 are swapped in the call to compare_values.  */
	    return compare_values_warnv (c2, c1, strict_overflow_p);
	}

      gcc_unreachable ();
    }

  /* We cannot compare non-constants.  */
  if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values, except for overflow
	 infinities.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	{
	  if (strict_overflow_p != NULL)
	    *strict_overflow_p = true;
	  if (is_negative_overflow_infinity (val1))
	    return is_negative_overflow_infinity (val2) ? 0 : -1;
	  else if (is_negative_overflow_infinity (val2))
	    return 1;
	  else if (is_positive_overflow_infinity (val1))
	    return is_positive_overflow_infinity (val2) ? 0 : 1;
	  else if (is_positive_overflow_infinity (val2))
	    return -1;
	  return -2;
	}

      return tree_int_cst_compare (val1, val2);
    }
  else
    {
      tree t;

      /* First see if VAL1 and VAL2 are not the same.  */
      if (val1 == val2 || operand_equal_p (val1, val2, 0))
	return 0;

      /* If VAL1 is a lower address than VAL2, return -1.  */
      if (operand_less_p (val1, val2) == 1)
	return -1;

      /* If VAL1 is a higher address than VAL2, return +1.  */
      if (operand_less_p (val2, val1) == 1)
	return 1;

      /* If VAL1 is different than VAL2, return +2.
	 For integer constants we either have already returned -1 or 1
	 or they are equivalent.  We still might succeed in proving
	 something about non-trivial operands.  */
      if (TREE_CODE (val1) != INTEGER_CST
	  || TREE_CODE (val2) != INTEGER_CST)
	{
	  t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
	  if (t && integer_onep (t))
	    return 2;
	}

      return -2;
    }
}
1488 | ||
a2a1fde2 | 1489 | /* Compare values like compare_values_warnv, but treat comparisons of |
1490 | nonconstants which rely on undefined overflow as incomparable. */ | |
c3783c3b | 1491 | |
1492 | static int | |
1493 | compare_values (tree val1, tree val2) | |
1494 | { | |
1495 | bool sop; | |
1496 | int ret; | |
1497 | ||
1498 | sop = false; | |
1499 | ret = compare_values_warnv (val1, val2, &sop); | |
a2a1fde2 | 1500 | if (sop |
1501 | && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))) | |
c3783c3b | 1502 | ret = -2; |
1503 | return ret; | |
1504 | } | |
1505 | ||
88dbf20f | 1506 | |
7d48cd66 | 1507 | /* Return 1 if VAL is inside value range MIN <= VAL <= MAX, |
1508 | 0 if VAL is not inside [MIN, MAX], | |
30a9e679 | 1509 | -2 if we cannot tell either way. |
1510 | ||
7e8bc5b6 | 1511 | Benchmark compile/20001226-1.c compilation time after changing this |
1512 | function. */ | |
88dbf20f | 1513 | |
1514 | static inline int | |
7d48cd66 | 1515 | value_inside_range (tree val, tree min, tree max) |
88dbf20f | 1516 | { |
7e8bc5b6 | 1517 | int cmp1, cmp2; |
88dbf20f | 1518 | |
7d48cd66 | 1519 | cmp1 = operand_less_p (val, min); |
7e8bc5b6 | 1520 | if (cmp1 == -2) |
88dbf20f | 1521 | return -2; |
7e8bc5b6 | 1522 | if (cmp1 == 1) |
1523 | return 0; | |
88dbf20f | 1524 | |
7d48cd66 | 1525 | cmp2 = operand_less_p (max, val); |
7e8bc5b6 | 1526 | if (cmp2 == -2) |
88dbf20f | 1527 | return -2; |
1528 | ||
7e8bc5b6 | 1529 | return !cmp2; |
88dbf20f | 1530 | } |
1531 | ||
1532 | ||
1533 | /* Return true if value ranges VR0 and VR1 have a non-empty | |
48e1416a | 1534 | intersection. |
1535 | ||
7e8bc5b6 | 1536 | Benchmark compile/20001226-1.c compilation time after changing this |
1537 | function. | |
1538 | */ | |
88dbf20f | 1539 | |
1540 | static inline bool | |
eea12c72 | 1541 | value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1) |
88dbf20f | 1542 | { |
14dc13e5 | 1543 | /* The value ranges do not intersect if the maximum of the first range is |
1544 | less than the minimum of the second range or vice versa. | |
1545 | When those relations are unknown, we can't do any better. */ | |
1546 | if (operand_less_p (vr0->max, vr1->min) != 0) | |
1547 | return false; | |
1548 | if (operand_less_p (vr1->max, vr0->min) != 0) | |
1549 | return false; | |
1550 | return true; | |
88dbf20f | 1551 | } |
1552 | ||
1553 | ||
7d48cd66 | 1554 | /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not |
1555 | include the value zero, -2 if we cannot tell. */ | |
eea12c72 | 1556 | |
7d48cd66 | 1557 | static inline int |
1558 | range_includes_zero_p (tree min, tree max) | |
eea12c72 | 1559 | { |
7d48cd66 | 1560 | tree zero = build_int_cst (TREE_TYPE (min), 0); |
1561 | return value_inside_range (zero, min, max); | |
eea12c72 | 1562 | } |
1563 | ||
c37659ce | 1564 | /* Return true if *VR is know to only contain nonnegative values. */ |
1565 | ||
1566 | static inline bool | |
1567 | value_range_nonnegative_p (value_range_t *vr) | |
1568 | { | |
713b2724 | 1569 | /* Testing for VR_ANTI_RANGE is not useful here as any anti-range |
1570 | which would return a useful value should be encoded as a | |
1571 | VR_RANGE. */ | |
c37659ce | 1572 | if (vr->type == VR_RANGE) |
1573 | { | |
1574 | int result = compare_values (vr->min, integer_zero_node); | |
1575 | return (result == 0 || result == 1); | |
1576 | } | |
c37659ce | 1577 | |
1578 | return false; | |
1579 | } | |
1580 | ||
/* If *VR has a value range that is a single constant value return that,
   otherwise return NULL_TREE.  */

static tree
value_range_constant_singleton (value_range_t *vr)
{
  /* [X, X] with invariant X is the singleton X.  */
  if (vr->type == VR_RANGE
      && operand_equal_p (vr->min, vr->max, 0)
      && is_gimple_min_invariant (vr->min))
    return vr->min;

  return NULL_TREE;
}
1594 | ||
43ffec67 | 1595 | /* If OP has a value range with a single constant value return that, |
1596 | otherwise return NULL_TREE. This returns OP itself if OP is a | |
1597 | constant. */ | |
1598 | ||
1599 | static tree | |
1600 | op_with_constant_singleton_value_range (tree op) | |
1601 | { | |
43ffec67 | 1602 | if (is_gimple_min_invariant (op)) |
1603 | return op; | |
1604 | ||
1605 | if (TREE_CODE (op) != SSA_NAME) | |
1606 | return NULL_TREE; | |
1607 | ||
c37659ce | 1608 | return value_range_constant_singleton (get_value_range (op)); |
43ffec67 | 1609 | } |
1610 | ||
d6f3306a | 1611 | /* Return true if op is in a boolean [0, 1] value-range. */ |
1612 | ||
1613 | static bool | |
1614 | op_with_boolean_value_range_p (tree op) | |
1615 | { | |
1616 | value_range_t *vr; | |
1617 | ||
1618 | if (TYPE_PRECISION (TREE_TYPE (op)) == 1) | |
1619 | return true; | |
1620 | ||
1621 | if (integer_zerop (op) | |
1622 | || integer_onep (op)) | |
1623 | return true; | |
1624 | ||
1625 | if (TREE_CODE (op) != SSA_NAME) | |
1626 | return false; | |
1627 | ||
1628 | vr = get_value_range (op); | |
1629 | return (vr->type == VR_RANGE | |
1630 | && integer_zerop (vr->min) | |
1631 | && integer_onep (vr->max)); | |
1632 | } | |
eea12c72 | 1633 | |
88dbf20f | 1634 | /* Extract value range information from an ASSERT_EXPR EXPR and store |
1635 | it in *VR_P. */ | |
1636 | ||
1637 | static void | |
extract_range_from_assert (value_range_t *vr_p, tree expr)
{
  tree var, cond, limit, min, max, type;
  value_range_t *limit_vr;
  enum tree_code cond_code;

  var = ASSERT_EXPR_VAR (expr);
  cond = ASSERT_EXPR_COND (expr);

  gcc_assert (COMPARISON_CLASS_P (cond));

  /* Find VAR in the ASSERT_EXPR conditional.  */
  if (var == TREE_OPERAND (cond, 0)
      || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
      || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
    {
      /* If the predicate is of the form VAR COMP LIMIT, then we just
         take LIMIT from the RHS and use the same comparison code.  */
      cond_code = TREE_CODE (cond);
      limit = TREE_OPERAND (cond, 1);
      cond = TREE_OPERAND (cond, 0);
    }
  else
    {
      /* If the predicate is of the form LIMIT COMP VAR, then we need
         to flip around the comparison code to create the proper range
         for VAR.  */
      cond_code = swap_tree_comparison (TREE_CODE (cond));
      limit = TREE_OPERAND (cond, 0);
      cond = TREE_OPERAND (cond, 1);
    }

  limit = avoid_overflow_infinity (limit);

  type = TREE_TYPE (var);
  gcc_assert (limit != var);

  /* For pointer arithmetic, we only keep track of pointer equality
     and inequality.  */
  if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
    {
      set_value_range_to_varying (vr_p);
      return;
    }

  /* If LIMIT is another SSA name and LIMIT has a range of its own,
     try to use LIMIT's range to avoid creating symbolic ranges
     unnecessarily.  */
  limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;

  /* LIMIT's range is only interesting if it has any useful information.  */
  if (limit_vr
      && (limit_vr->type == VR_UNDEFINED
          || limit_vr->type == VR_VARYING
          || symbolic_range_p (limit_vr)))
    limit_vr = NULL;

  /* Initially, the new range has the same set of equivalences of
     VAR's range.  This will be revised before returning the final
     value.  Since assertions may be chained via mutually exclusive
     predicates, we will need to trim the set of equivalences before
     we are done.  */
  gcc_assert (vr_p->equiv == NULL);
  add_equivalence (&vr_p->equiv, var);

  /* Extract a new range based on the asserted comparison for VAR and
     LIMIT's value range.  Notice that if LIMIT has an anti-range, we
     will only use it for equality comparisons (EQ_EXPR).  For any
     other kind of assertion, we cannot derive a range from LIMIT's
     anti-range that can be used to describe the new range.  For
     instance, ASSERT_EXPR <x_2, x_2 <= b_4>.  If b_4 is ~[2, 10],
     then b_4 takes on the ranges [-INF, 1] and [11, +INF].  There is
     no single range for x_2 that could describe LE_EXPR, so we might
     as well build the range [b_4, +INF] for it.
     One special case we handle is extracting a range from a
     range test encoded as (unsigned)var + CST <= limit.  */
  if (TREE_CODE (cond) == NOP_EXPR
      || TREE_CODE (cond) == PLUS_EXPR)
    {
      if (TREE_CODE (cond) == PLUS_EXPR)
        {
          /* (var + CST) <= limit asserts var in [-CST, limit - CST].  */
          min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
                             TREE_OPERAND (cond, 1));
          max = int_const_binop (PLUS_EXPR, limit, min);
          cond = TREE_OPERAND (cond, 0);
        }
      else
        {
          /* (unsigned)var <= limit asserts var in [0, limit].  */
          min = build_int_cst (TREE_TYPE (var), 0);
          max = limit;
        }

      /* Make sure to not set TREE_OVERFLOW on the final type
         conversion.  We are willingly interpreting large positive
         unsigned values as negative signed values here.  */
      min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false);
      max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false);

      /* We can transform a max, min range to an anti-range or
         vice-versa.  Use set_and_canonicalize_value_range which does
         this for us.  */
      if (cond_code == LE_EXPR)
        set_and_canonicalize_value_range (vr_p, VR_RANGE,
                                          min, max, vr_p->equiv);
      else if (cond_code == GT_EXPR)
        set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
                                          min, max, vr_p->equiv);
      else
        gcc_unreachable ();
    }
  else if (cond_code == EQ_EXPR)
    {
      enum value_range_type range_type;

      if (limit_vr)
        {
          range_type = limit_vr->type;
          min = limit_vr->min;
          max = limit_vr->max;
        }
      else
        {
          range_type = VR_RANGE;
          min = limit;
          max = limit;
        }

      set_value_range (vr_p, range_type, min, max, vr_p->equiv);

      /* When asserting the equality VAR == LIMIT and LIMIT is another
         SSA name, the new range will also inherit the equivalence set
         from LIMIT.  */
      if (TREE_CODE (limit) == SSA_NAME)
        add_equivalence (&vr_p->equiv, limit);
    }
  else if (cond_code == NE_EXPR)
    {
      /* As described above, when LIMIT's range is an anti-range and
         this assertion is an inequality (NE_EXPR), then we cannot
         derive anything from the anti-range.  For instance, if
         LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
         not imply that VAR's range is [0, 0].  So, in the case of
         anti-ranges, we just assert the inequality using LIMIT and
         not its anti-range.

         If LIMIT_VR is a range, we can only use it to build a new
         anti-range if LIMIT_VR is a single-valued range.  For
         instance, if LIMIT_VR is [0, 1], the predicate
         VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
         Rather, it means that for value 0 VAR should be ~[0, 0]
         and for value 1, VAR should be ~[1, 1].  We cannot
         represent these ranges.

         The only situation in which we can build a valid
         anti-range is when LIMIT_VR is a single-valued range
         (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX).  In that case,
         build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX].  */
      if (limit_vr
          && limit_vr->type == VR_RANGE
          && compare_values (limit_vr->min, limit_vr->max) == 0)
        {
          min = limit_vr->min;
          max = limit_vr->max;
        }
      else
        {
          /* In any other case, we cannot use LIMIT's range to build a
             valid anti-range.  */
          min = max = limit;
        }

      /* If MIN and MAX cover the whole range for their type, then
         just use the original LIMIT.  */
      if (INTEGRAL_TYPE_P (type)
          && vrp_val_is_min (min)
          && vrp_val_is_max (max))
        min = max = limit;

      set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
                                        min, max, vr_p->equiv);
    }
  else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
    {
      min = TYPE_MIN_VALUE (type);

      if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
        max = limit;
      else
        {
          /* If LIMIT_VR is of the form [N1, N2], we need to build the
             range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
             LT_EXPR.  */
          max = limit_vr->max;
        }

      /* If the maximum value forces us to be out of bounds, simply punt.
         It would be pointless to try and do anything more since this
         all should be optimized away above us.  */
      if ((cond_code == LT_EXPR
           && compare_values (max, min) == 0)
          || is_overflow_infinity (max))
        set_value_range_to_varying (vr_p);
      else
        {
          /* For LT_EXPR, we create the range [MIN, MAX - 1].  */
          if (cond_code == LT_EXPR)
            {
              /* Signed 1-bit types have no value 1, so the decrement
                 is expressed as adding -1 there.  */
              if (TYPE_PRECISION (TREE_TYPE (max)) == 1
                  && !TYPE_UNSIGNED (TREE_TYPE (max)))
                max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
                                   build_int_cst (TREE_TYPE (max), -1));
              else
                max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
                                   build_int_cst (TREE_TYPE (max), 1));
              /* Suppress warnings on this internal arithmetic.  */
              if (EXPR_P (max))
                TREE_NO_WARNING (max) = 1;
            }

          set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
        }
    }
  else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
    {
      max = TYPE_MAX_VALUE (type);

      if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
        min = limit;
      else
        {
          /* If LIMIT_VR is of the form [N1, N2], we need to build the
             range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
             GT_EXPR.  */
          min = limit_vr->min;
        }

      /* If the minimum value forces us to be out of bounds, simply punt.
         It would be pointless to try and do anything more since this
         all should be optimized away above us.  */
      if ((cond_code == GT_EXPR
           && compare_values (min, max) == 0)
          || is_overflow_infinity (min))
        set_value_range_to_varying (vr_p);
      else
        {
          /* For GT_EXPR, we create the range [MIN + 1, MAX].  */
          if (cond_code == GT_EXPR)
            {
              /* Signed 1-bit types have no value 1, so the increment
                 is expressed as subtracting -1 there.  */
              if (TYPE_PRECISION (TREE_TYPE (min)) == 1
                  && !TYPE_UNSIGNED (TREE_TYPE (min)))
                min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
                                   build_int_cst (TREE_TYPE (min), -1));
              else
                min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
                                   build_int_cst (TREE_TYPE (min), 1));
              /* Suppress warnings on this internal arithmetic.  */
              if (EXPR_P (min))
                TREE_NO_WARNING (min) = 1;
            }

          set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
        }
    }
  else
    gcc_unreachable ();

  /* Finally intersect the new range with what we already know about var.  */
  vrp_intersect_ranges (vr_p, get_value_range (var));
}
1905 | ||
1906 | ||
1907 | /* Extract range information from SSA name VAR and store it in VR. If | |
1908 | VAR has an interesting range, use it. Otherwise, create the | |
1909 | range [VAR, VAR] and return it. This is useful in situations where | |
1910 | we may have conditionals testing values of VARYING names. For | |
1911 | instance, | |
1912 | ||
1913 | x_3 = y_5; | |
1914 | if (x_3 > y_5) | |
1915 | ... | |
1916 | ||
1917 | Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is | |
1918 | always false. */ | |
1919 | ||
1920 | static void | |
eea12c72 | 1921 | extract_range_from_ssa_name (value_range_t *vr, tree var) |
88dbf20f | 1922 | { |
eea12c72 | 1923 | value_range_t *var_vr = get_value_range (var); |
88dbf20f | 1924 | |
0d4c8cda | 1925 | if (var_vr->type != VR_VARYING) |
eea12c72 | 1926 | copy_value_range (vr, var_vr); |
88dbf20f | 1927 | else |
eea12c72 | 1928 | set_value_range (vr, VR_RANGE, var, var, NULL); |
1929 | ||
fbcece5e | 1930 | add_equivalence (&vr->equiv, var); |
88dbf20f | 1931 | } |
1932 | ||
1933 | ||
/* Wrapper around int_const_binop.  If the operation overflows and we
   are not using wrapping arithmetic, then adjust the result to be
   -INF or +INF depending on CODE, VAL1 and VAL2.  This can return
   NULL_TREE if we need to use an overflow infinity representation but
   the type does not support it.  */

static tree
vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
{
  tree res;

  res = int_const_binop (code, val1, val2);

  /* If we are using unsigned arithmetic, operate symbolically
     on -INF and +INF as int_const_binop only handles signed overflow.  */
  if (TYPE_UNSIGNED (TREE_TYPE (val1)))
    {
      int checkz = compare_values (res, val1);
      bool overflow = false;

      /* Ensure that res = val1 [+*] val2 >= val1
         or that res = val1 - val2 <= val1.  */
      if ((code == PLUS_EXPR
           && !(checkz == 1 || checkz == 0))
          || (code == MINUS_EXPR
              && !(checkz == 0 || checkz == -1)))
        {
          overflow = true;
        }
      /* Checking for multiplication overflow is done by dividing the
         output of the multiplication by the first input of the
         multiplication.  If the result of that division operation is
         not equal to the second input of the multiplication, then the
         multiplication overflowed.  */
      else if (code == MULT_EXPR && !integer_zerop (val1))
        {
          tree tmp = int_const_binop (TRUNC_DIV_EXPR,
                                      res,
                                      val1);
          int check = compare_values (tmp, val2);

          if (check != 0)
            overflow = true;
        }

      if (overflow)
        {
          /* Mark the overflow on a fresh node; RES may be shared.  */
          res = copy_node (res);
          TREE_OVERFLOW (res) = 1;
        }

    }
  else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
    /* If the signed operation wraps then int_const_binop has done
       everything we want.  */
    ;
  /* Signed division of -1/0 overflows and by the time it gets here
     returns NULL_TREE.  */
  else if (!res)
    return NULL_TREE;
  else if ((TREE_OVERFLOW (res)
            && !TREE_OVERFLOW (val1)
            && !TREE_OVERFLOW (val2))
           || is_overflow_infinity (val1)
           || is_overflow_infinity (val2))
    {
      /* If the operation overflowed but neither VAL1 nor VAL2 are
         overflown, return -INF or +INF depending on the operation
         and the combination of signs of the operands.  */
      int sgn1 = tree_int_cst_sgn (val1);
      int sgn2 = tree_int_cst_sgn (val2);

      if (needs_overflow_infinity (TREE_TYPE (res))
          && !supports_overflow_infinity (TREE_TYPE (res)))
        return NULL_TREE;

      /* We have to punt on adding infinities of different signs,
         since we can't tell what the sign of the result should be.
         Likewise for subtracting infinities of the same sign.  */
      if (((code == PLUS_EXPR && sgn1 != sgn2)
           || (code == MINUS_EXPR && sgn1 == sgn2))
          && is_overflow_infinity (val1)
          && is_overflow_infinity (val2))
        return NULL_TREE;

      /* Don't try to handle division or shifting of infinities.  */
      if ((code == TRUNC_DIV_EXPR
           || code == FLOOR_DIV_EXPR
           || code == CEIL_DIV_EXPR
           || code == EXACT_DIV_EXPR
           || code == ROUND_DIV_EXPR
           || code == RSHIFT_EXPR)
          && (is_overflow_infinity (val1)
              || is_overflow_infinity (val2)))
        return NULL_TREE;

      /* Notice that we only need to handle the restricted set of
         operations handled by extract_range_from_binary_expr.
         Among them, only multiplication, addition and subtraction
         can yield overflow without overflown operands because we
         are working with integral types only... except in the
         case VAL1 = -INF and VAL2 = -1 which overflows to +INF
         for division too.  */

      /* For multiplication, the sign of the overflow is given
         by the comparison of the signs of the operands.  */
      if ((code == MULT_EXPR && sgn1 == sgn2)
          /* For addition, the operands must be of the same sign
             to yield an overflow.  Its sign is therefore that
             of one of the operands, for example the first.  For
             infinite operands X + -INF is negative, not positive.  */
          || (code == PLUS_EXPR
              && (sgn1 >= 0
                  ? !is_negative_overflow_infinity (val2)
                  : is_positive_overflow_infinity (val2)))
          /* For subtraction, non-infinite operands must be of
             different signs to yield an overflow.  Its sign is
             therefore that of the first operand or the opposite of
             that of the second operand.  A first operand of 0 counts
             as positive here, for the corner case 0 - (-INF), which
             overflows, but must yield +INF.  For infinite operands 0
             - INF is negative, not positive.  */
          || (code == MINUS_EXPR
              && (sgn1 >= 0
                  ? !is_positive_overflow_infinity (val2)
                  : is_negative_overflow_infinity (val2)))
          /* We only get in here with positive shift count, so the
             overflow direction is the same as the sign of val1.
             Actually rshift does not overflow at all, but we only
             handle the case of shifting overflowed -INF and +INF.  */
          || (code == RSHIFT_EXPR
              && sgn1 >= 0)
          /* For division, the only case is -INF / -1 = +INF.  */
          || code == TRUNC_DIV_EXPR
          || code == FLOOR_DIV_EXPR
          || code == CEIL_DIV_EXPR
          || code == EXACT_DIV_EXPR
          || code == ROUND_DIV_EXPR)
        return (needs_overflow_infinity (TREE_TYPE (res))
                ? positive_overflow_infinity (TREE_TYPE (res))
                : TYPE_MAX_VALUE (TREE_TYPE (res)));
      else
        return (needs_overflow_infinity (TREE_TYPE (res))
                ? negative_overflow_infinity (TREE_TYPE (res))
                : TYPE_MIN_VALUE (TREE_TYPE (res)));
    }

  return res;
}
2083 | ||
2084 | ||
/* For range VR compute two wide_int bitmasks.  In the *MAY_BE_NONZERO
   bitmask, if some bit is unset, it means for all numbers in the range
   the bit is 0; otherwise it might be 0 or 1.  In the *MUST_BE_NONZERO
   bitmask, if some bit is set, it means for all numbers in the range
   the bit is 1; otherwise it might be 0 or 1.  Returns true if the
   masks carry information, false if VR is not a usable constant range
   (the masks are then left fully conservative).  */

static bool
zero_nonzero_bits_from_vr (const tree expr_type,
                           value_range_t *vr,
                           wide_int *may_be_nonzero,
                           wide_int *must_be_nonzero)
{
  /* Conservative defaults: every bit may be set, none must be.  */
  *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
  *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
  if (!range_int_cst_p (vr)
      || is_overflow_infinity (vr->min)
      || is_overflow_infinity (vr->max))
    return false;

  if (range_int_cst_singleton_p (vr))
    {
      /* A singleton range fixes every bit exactly.  */
      *may_be_nonzero = vr->min;
      *must_be_nonzero = *may_be_nonzero;
    }
  else if (tree_int_cst_sgn (vr->min) >= 0
           || tree_int_cst_sgn (vr->max) < 0)
    {
      /* MIN and MAX have the same sign: bits above the highest bit
         in which they differ are common to every value in the range;
         everything at or below that bit is unknown.  */
      wide_int xor_mask = wi::bit_xor (vr->min, vr->max);
      *may_be_nonzero = wi::bit_or (vr->min, vr->max);
      *must_be_nonzero = wi::bit_and (vr->min, vr->max);
      if (xor_mask != 0)
        {
          wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
                                    may_be_nonzero->get_precision ());
          *may_be_nonzero = *may_be_nonzero | mask;
          *must_be_nonzero = must_be_nonzero->and_not (mask);
        }
    }

  return true;
}
2126 | ||
748eb1f9 | 2127 | /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR |
2128 | so that *VR0 U *VR1 == *AR. Returns true if that is possible, | |
2129 | false otherwise. If *AR can be represented with a single range | |
2130 | *VR1 will be VR_UNDEFINED. */ | |
2131 | ||
2132 | static bool | |
2133 | ranges_from_anti_range (value_range_t *ar, | |
2134 | value_range_t *vr0, value_range_t *vr1) | |
2135 | { | |
2136 | tree type = TREE_TYPE (ar->min); | |
2137 | ||
2138 | vr0->type = VR_UNDEFINED; | |
2139 | vr1->type = VR_UNDEFINED; | |
2140 | ||
2141 | if (ar->type != VR_ANTI_RANGE | |
2142 | || TREE_CODE (ar->min) != INTEGER_CST | |
2143 | || TREE_CODE (ar->max) != INTEGER_CST | |
2144 | || !vrp_val_min (type) | |
2145 | || !vrp_val_max (type)) | |
2146 | return false; | |
2147 | ||
2148 | if (!vrp_val_is_min (ar->min)) | |
2149 | { | |
2150 | vr0->type = VR_RANGE; | |
2151 | vr0->min = vrp_val_min (type); | |
6da74b21 | 2152 | vr0->max = wide_int_to_tree (type, wi::sub (ar->min, 1)); |
748eb1f9 | 2153 | } |
2154 | if (!vrp_val_is_max (ar->max)) | |
2155 | { | |
2156 | vr1->type = VR_RANGE; | |
6da74b21 | 2157 | vr1->min = wide_int_to_tree (type, wi::add (ar->max, 1)); |
748eb1f9 | 2158 | vr1->max = vrp_val_max (type); |
2159 | } | |
2160 | if (vr0->type == VR_UNDEFINED) | |
2161 | { | |
2162 | *vr0 = *vr1; | |
2163 | vr1->type = VR_UNDEFINED; | |
2164 | } | |
2165 | ||
2166 | return vr0->type != VR_UNDEFINED; | |
2167 | } | |
2168 | ||
/* Helper to extract a value-range *VR for a multiplicative operation
   *VR0 CODE *VR1.  */

static void
extract_range_from_multiplicative_op_1 (value_range_t *vr,
                                        enum tree_code code,
                                        value_range_t *vr0, value_range_t *vr1)
{
  enum value_range_type type;
  tree val[4];
  size_t i;
  tree min, max;
  bool sop;
  int cmp;

  /* Multiplications, divisions and shifts are a bit tricky to handle,
     depending on the mix of signs we have in the two ranges, we
     need to operate on different values to get the minimum and
     maximum values for the new range.  One approach is to figure
     out all the variations of range combinations and do the
     operations.

     However, this involves several calls to compare_values and it
     is pretty convoluted.  It's simpler to do the 4 operations
     (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1)
     and then figure the smallest and largest values to form the
     new range.  */
  gcc_assert (code == MULT_EXPR
              || code == TRUNC_DIV_EXPR
              || code == FLOOR_DIV_EXPR
              || code == CEIL_DIV_EXPR
              || code == EXACT_DIV_EXPR
              || code == ROUND_DIV_EXPR
              || code == RSHIFT_EXPR
              || code == LSHIFT_EXPR);
  gcc_assert ((vr0->type == VR_RANGE
               || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
              && vr0->type == vr1->type);

  type = vr0->type;

  /* Compute the 4 cross operations.  A NULL_TREE result from
     vrp_int_const_binop means the value is not representable.  */
  sop = false;
  val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
  if (val[0] == NULL_TREE)
    sop = true;

  if (vr1->max == vr1->min)
    val[1] = NULL_TREE;
  else
    {
      val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
      if (val[1] == NULL_TREE)
        sop = true;
    }

  if (vr0->max == vr0->min)
    val[2] = NULL_TREE;
  else
    {
      val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
      if (val[2] == NULL_TREE)
        sop = true;
    }

  if (vr0->min == vr0->max || vr1->min == vr1->max)
    val[3] = NULL_TREE;
  else
    {
      val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
      if (val[3] == NULL_TREE)
        sop = true;
    }

  if (sop)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Set MIN to the minimum of VAL[i] and MAX to the maximum
     of VAL[i].  */
  min = val[0];
  max = val[0];
  for (i = 1; i < 4; i++)
    {
      if (!is_gimple_min_invariant (min)
          || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
          || !is_gimple_min_invariant (max)
          || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
        break;

      if (val[i])
        {
          if (!is_gimple_min_invariant (val[i])
              || (TREE_OVERFLOW (val[i])
                  && !is_overflow_infinity (val[i])))
            {
              /* If we found an overflowed value, set MIN and MAX
                 to it so that we set the resulting range to
                 VARYING.  */
              min = max = val[i];
              break;
            }

          if (compare_values (val[i], min) == -1)
            min = val[i];

          if (compare_values (val[i], max) == 1)
            max = val[i];
        }
    }

  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  But we do accept an overflow infinity
     representation.  */
  if (min == NULL_TREE
      || !is_gimple_min_invariant (min)
      || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
      || max == NULL_TREE
      || !is_gimple_min_invariant (max)
      || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* We punt if:
     1) [-INF, +INF]
     2) [-INF, +-INF(OVF)]
     3) [+-INF(OVF), +INF]
     4) [+-INF(OVF), +-INF(OVF)]
     We learn nothing when we have INF and INF(OVF) on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF] without
     overflow.  */
  if ((vrp_val_is_min (min) || is_overflow_infinity (min))
      && (vrp_val_is_max (max) || is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
    {
      /* If the new range has its limits swapped around (MIN > MAX),
         then the operation caused one of them to wrap around, mark
         the new range VARYING.  */
      set_value_range_to_varying (vr);
    }
  else
    set_value_range (vr, type, min, max, NULL);
}
522b9a02 | 2322 | |
c37659ce | 2323 | /* Extract range information from a binary operation CODE based on |
1ec8aa41 | 2324 | the ranges of each of its operands *VR0 and *VR1 with resulting |
c37659ce | 2325 | type EXPR_TYPE. The resulting range is stored in *VR. */ |
88dbf20f | 2326 | |
2327 | static void | |
c37659ce | 2328 | extract_range_from_binary_expr_1 (value_range_t *vr, |
2329 | enum tree_code code, tree expr_type, | |
2330 | value_range_t *vr0_, value_range_t *vr1_) | |
88dbf20f | 2331 | { |
c37659ce | 2332 | value_range_t vr0 = *vr0_, vr1 = *vr1_; |
748eb1f9 | 2333 | value_range_t vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER; |
0ed3ba34 | 2334 | enum value_range_type type; |
5360e345 | 2335 | tree min = NULL_TREE, max = NULL_TREE; |
88dbf20f | 2336 | int cmp; |
2337 | ||
5360e345 | 2338 | if (!INTEGRAL_TYPE_P (expr_type) |
2339 | && !POINTER_TYPE_P (expr_type)) | |
2340 | { | |
2341 | set_value_range_to_varying (vr); | |
2342 | return; | |
2343 | } | |
2344 | ||
88dbf20f | 2345 | /* Not all binary expressions can be applied to ranges in a |
2346 | meaningful way. Handle only arithmetic operations. */ | |
2347 | if (code != PLUS_EXPR | |
2348 | && code != MINUS_EXPR | |
0de36bdb | 2349 | && code != POINTER_PLUS_EXPR |
88dbf20f | 2350 | && code != MULT_EXPR |
2351 | && code != TRUNC_DIV_EXPR | |
2352 | && code != FLOOR_DIV_EXPR | |
2353 | && code != CEIL_DIV_EXPR | |
2354 | && code != EXACT_DIV_EXPR | |
2355 | && code != ROUND_DIV_EXPR | |
ccab2921 | 2356 | && code != TRUNC_MOD_EXPR |
975070ea | 2357 | && code != RSHIFT_EXPR |
e7ea1c21 | 2358 | && code != LSHIFT_EXPR |
88dbf20f | 2359 | && code != MIN_EXPR |
eea12c72 | 2360 | && code != MAX_EXPR |
b3ded9f8 | 2361 | && code != BIT_AND_EXPR |
6c696748 | 2362 | && code != BIT_IOR_EXPR |
2363 | && code != BIT_XOR_EXPR) | |
88dbf20f | 2364 | { |
e7d43f99 | 2365 | set_value_range_to_varying (vr); |
88dbf20f | 2366 | return; |
2367 | } | |
2368 | ||
fb41023e | 2369 | /* If both ranges are UNDEFINED, so is the result. */ |
2370 | if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED) | |
88dbf20f | 2371 | { |
eea12c72 | 2372 | set_value_range_to_undefined (vr); |
88dbf20f | 2373 | return; |
2374 | } | |
fb41023e | 2375 | /* If one of the ranges is UNDEFINED drop it to VARYING for the following |
2376 | code. At some point we may want to special-case operations that | |
2377 | have UNDEFINED result for all or some value-ranges of the not UNDEFINED | |
2378 | operand. */ | |
2379 | else if (vr0.type == VR_UNDEFINED) | |
2380 | set_value_range_to_varying (&vr0); | |
2381 | else if (vr1.type == VR_UNDEFINED) | |
2382 | set_value_range_to_varying (&vr1); | |
88dbf20f | 2383 | |
748eb1f9 | 2384 | /* Now canonicalize anti-ranges to ranges when they are not symbolic |
2385 | and express ~[] op X as ([]' op X) U ([]'' op X). */ | |
2386 | if (vr0.type == VR_ANTI_RANGE | |
2387 | && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1)) | |
2388 | { | |
2389 | extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_); | |
2390 | if (vrtem1.type != VR_UNDEFINED) | |
2391 | { | |
2392 | value_range_t vrres = VR_INITIALIZER; | |
2393 | extract_range_from_binary_expr_1 (&vrres, code, expr_type, | |
2394 | &vrtem1, vr1_); | |
2395 | vrp_meet (vr, &vrres); | |
2396 | } | |
2397 | return; | |
2398 | } | |
2399 | /* Likewise for X op ~[]. */ | |
2400 | if (vr1.type == VR_ANTI_RANGE | |
2401 | && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1)) | |
2402 | { | |
2403 | extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0); | |
2404 | if (vrtem1.type != VR_UNDEFINED) | |
2405 | { | |
2406 | value_range_t vrres = VR_INITIALIZER; | |
2407 | extract_range_from_binary_expr_1 (&vrres, code, expr_type, | |
2408 | vr0_, &vrtem1); | |
2409 | vrp_meet (vr, &vrres); | |
2410 | } | |
2411 | return; | |
2412 | } | |
2413 | ||
0ed3ba34 | 2414 | /* The type of the resulting value range defaults to VR0.TYPE. */ |
2415 | type = vr0.type; | |
2416 | ||
eea12c72 | 2417 | /* Refuse to operate on VARYING ranges, ranges of different kinds |
1ec8aa41 | 2418 | and symbolic ranges. As an exception, we allow BIT_{AND,IOR} |
b3ded9f8 | 2419 | because we may be able to derive a useful range even if one of |
e52dd258 | 2420 | the operands is VR_VARYING or symbolic range. Similarly for |
1ec8aa41 | 2421 | divisions, MIN/MAX and PLUS/MINUS. |
2422 | ||
2423 | TODO, we may be able to derive anti-ranges in some cases. */ | |
b3ded9f8 | 2424 | if (code != BIT_AND_EXPR |
cfd7906e | 2425 | && code != BIT_IOR_EXPR |
e52dd258 | 2426 | && code != TRUNC_DIV_EXPR |
2427 | && code != FLOOR_DIV_EXPR | |
2428 | && code != CEIL_DIV_EXPR | |
2429 | && code != EXACT_DIV_EXPR | |
2430 | && code != ROUND_DIV_EXPR | |
ccab2921 | 2431 | && code != TRUNC_MOD_EXPR |
1e24c0c9 | 2432 | && code != MIN_EXPR |
2433 | && code != MAX_EXPR | |
1ec8aa41 | 2434 | && code != PLUS_EXPR |
2435 | && code != MINUS_EXPR | |
b3ded9f8 | 2436 | && (vr0.type == VR_VARYING |
2437 | || vr1.type == VR_VARYING | |
2438 | || vr0.type != vr1.type | |
2439 | || symbolic_range_p (&vr0) | |
2440 | || symbolic_range_p (&vr1))) | |
88dbf20f | 2441 | { |
e7d43f99 | 2442 | set_value_range_to_varying (vr); |
88dbf20f | 2443 | return; |
2444 | } | |
2445 | ||
2446 | /* Now evaluate the expression to determine the new range. */ | |
c37659ce | 2447 | if (POINTER_TYPE_P (expr_type)) |
88dbf20f | 2448 | { |
c37659ce | 2449 | if (code == MIN_EXPR || code == MAX_EXPR) |
5e3e3575 | 2450 | { |
0de36bdb | 2451 | /* For MIN/MAX expressions with pointers, we only care about |
2452 | nullness, if both are non null, then the result is nonnull. | |
2453 | If both are null, then the result is null. Otherwise they | |
2454 | are varying. */ | |
2455 | if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1)) | |
93116081 | 2456 | set_value_range_to_nonnull (vr, expr_type); |
5e3e3575 | 2457 | else if (range_is_null (&vr0) && range_is_null (&vr1)) |
93116081 | 2458 | set_value_range_to_null (vr, expr_type); |
5e3e3575 | 2459 | else |
2460 | set_value_range_to_varying (vr); | |
2461 | } | |
c37659ce | 2462 | else if (code == POINTER_PLUS_EXPR) |
b03fbfbd | 2463 | { |
2464 | /* For pointer types, we are really only interested in asserting | |
2465 | whether the expression evaluates to non-NULL. */ | |
2466 | if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1)) | |
2467 | set_value_range_to_nonnull (vr, expr_type); | |
2468 | else if (range_is_null (&vr0) && range_is_null (&vr1)) | |
2469 | set_value_range_to_null (vr, expr_type); | |
2470 | else | |
2471 | set_value_range_to_varying (vr); | |
2472 | } | |
2473 | else if (code == BIT_AND_EXPR) | |
2474 | { | |
2475 | /* For pointer types, we are really only interested in asserting | |
2476 | whether the expression evaluates to non-NULL. */ | |
2477 | if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1)) | |
2478 | set_value_range_to_nonnull (vr, expr_type); | |
2479 | else if (range_is_null (&vr0) || range_is_null (&vr1)) | |
2480 | set_value_range_to_null (vr, expr_type); | |
2481 | else | |
2482 | set_value_range_to_varying (vr); | |
2483 | } | |
88dbf20f | 2484 | else |
c37659ce | 2485 | set_value_range_to_varying (vr); |
88dbf20f | 2486 | |
2487 | return; | |
2488 | } | |
2489 | ||
2490 | /* For integer ranges, apply the operation to each end of the | |
2491 | range and see what we end up with. */ | |
c5faecd5 | 2492 | if (code == PLUS_EXPR || code == MINUS_EXPR) |
88dbf20f | 2493 | { |
1ec8aa41 | 2494 | const bool minus_p = (code == MINUS_EXPR); |
2495 | tree min_op0 = vr0.min; | |
2496 | tree min_op1 = minus_p ? vr1.max : vr1.min; | |
2497 | tree max_op0 = vr0.max; | |
2498 | tree max_op1 = minus_p ? vr1.min : vr1.max; | |
2499 | tree sym_min_op0 = NULL_TREE; | |
2500 | tree sym_min_op1 = NULL_TREE; | |
2501 | tree sym_max_op0 = NULL_TREE; | |
2502 | tree sym_max_op1 = NULL_TREE; | |
2503 | bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1; | |
2504 | ||
2505 | /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or | |
2506 | single-symbolic ranges, try to compute the precise resulting range, | |
2507 | but only if we know that this resulting range will also be constant | |
2508 | or single-symbolic. */ | |
2509 | if (vr0.type == VR_RANGE && vr1.type == VR_RANGE | |
2510 | && (TREE_CODE (min_op0) == INTEGER_CST | |
2511 | || (sym_min_op0 | |
2512 | = get_single_symbol (min_op0, &neg_min_op0, &min_op0))) | |
2513 | && (TREE_CODE (min_op1) == INTEGER_CST | |
2514 | || (sym_min_op1 | |
2515 | = get_single_symbol (min_op1, &neg_min_op1, &min_op1))) | |
2516 | && (!(sym_min_op0 && sym_min_op1) | |
2517 | || (sym_min_op0 == sym_min_op1 | |
2518 | && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1))) | |
2519 | && (TREE_CODE (max_op0) == INTEGER_CST | |
2520 | || (sym_max_op0 | |
2521 | = get_single_symbol (max_op0, &neg_max_op0, &max_op0))) | |
2522 | && (TREE_CODE (max_op1) == INTEGER_CST | |
2523 | || (sym_max_op1 | |
2524 | = get_single_symbol (max_op1, &neg_max_op1, &max_op1))) | |
2525 | && (!(sym_max_op0 && sym_max_op1) | |
2526 | || (sym_max_op0 == sym_max_op1 | |
2527 | && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1)))) | |
e913b5cd | 2528 | { |
1ec8aa41 | 2529 | const signop sgn = TYPE_SIGN (expr_type); |
2530 | const unsigned int prec = TYPE_PRECISION (expr_type); | |
2531 | wide_int type_min, type_max, wmin, wmax; | |
c5faecd5 | 2532 | int min_ovf = 0; |
2533 | int max_ovf = 0; | |
ac4a8000 | 2534 | |
1ec8aa41 | 2535 | /* Get the lower and upper bounds of the type. */ |
2536 | if (TYPE_OVERFLOW_WRAPS (expr_type)) | |
2537 | { | |
2538 | type_min = wi::min_value (prec, sgn); | |
2539 | type_max = wi::max_value (prec, sgn); | |
2540 | } | |
2541 | else | |
c5faecd5 | 2542 | { |
1ec8aa41 | 2543 | type_min = vrp_val_min (expr_type); |
2544 | type_max = vrp_val_max (expr_type); | |
c5faecd5 | 2545 | } |
1ec8aa41 | 2546 | |
2547 | /* Combine the lower bounds, if any. */ | |
2548 | if (min_op0 && min_op1) | |
c5faecd5 | 2549 | { |
1ec8aa41 | 2550 | if (minus_p) |
2551 | { | |
2552 | wmin = wi::sub (min_op0, min_op1); | |
cf8f0e63 | 2553 | |
1ec8aa41 | 2554 | /* Check for overflow. */ |
2555 | if (wi::cmp (0, min_op1, sgn) | |
2556 | != wi::cmp (wmin, min_op0, sgn)) | |
2557 | min_ovf = wi::cmp (min_op0, min_op1, sgn); | |
2558 | } | |
2559 | else | |
2560 | { | |
2561 | wmin = wi::add (min_op0, min_op1); | |
2562 | ||
2563 | /* Check for overflow. */ | |
2564 | if (wi::cmp (min_op1, 0, sgn) | |
2565 | != wi::cmp (wmin, min_op0, sgn)) | |
2566 | min_ovf = wi::cmp (min_op0, wmin, sgn); | |
2567 | } | |
c5faecd5 | 2568 | } |
1ec8aa41 | 2569 | else if (min_op0) |
2570 | wmin = min_op0; | |
2571 | else if (min_op1) | |
2572 | wmin = minus_p ? wi::neg (min_op1) : min_op1; | |
2573 | else | |
2574 | wmin = wi::shwi (0, prec); | |
c5faecd5 | 2575 | |
1ec8aa41 | 2576 | /* Combine the upper bounds, if any. */ |
2577 | if (max_op0 && max_op1) | |
c5faecd5 | 2578 | { |
1ec8aa41 | 2579 | if (minus_p) |
2580 | { | |
2581 | wmax = wi::sub (max_op0, max_op1); | |
2582 | ||
2583 | /* Check for overflow. */ | |
2584 | if (wi::cmp (0, max_op1, sgn) | |
2585 | != wi::cmp (wmax, max_op0, sgn)) | |
2586 | max_ovf = wi::cmp (max_op0, max_op1, sgn); | |
2587 | } | |
2588 | else | |
2589 | { | |
2590 | wmax = wi::add (max_op0, max_op1); | |
2591 | ||
2592 | if (wi::cmp (max_op1, 0, sgn) | |
2593 | != wi::cmp (wmax, max_op0, sgn)) | |
2594 | max_ovf = wi::cmp (max_op0, wmax, sgn); | |
2595 | } | |
c5faecd5 | 2596 | } |
1ec8aa41 | 2597 | else if (max_op0) |
2598 | wmax = max_op0; | |
2599 | else if (max_op1) | |
2600 | wmax = minus_p ? wi::neg (max_op1) : max_op1; | |
2601 | else | |
2602 | wmax = wi::shwi (0, prec); | |
c5faecd5 | 2603 | |
2604 | /* Check for type overflow. */ | |
2605 | if (min_ovf == 0) | |
2606 | { | |
796b6678 | 2607 | if (wi::cmp (wmin, type_min, sgn) == -1) |
c5faecd5 | 2608 | min_ovf = -1; |
796b6678 | 2609 | else if (wi::cmp (wmin, type_max, sgn) == 1) |
c5faecd5 | 2610 | min_ovf = 1; |
2611 | } | |
2612 | if (max_ovf == 0) | |
2613 | { | |
796b6678 | 2614 | if (wi::cmp (wmax, type_min, sgn) == -1) |
c5faecd5 | 2615 | max_ovf = -1; |
796b6678 | 2616 | else if (wi::cmp (wmax, type_max, sgn) == 1) |
c5faecd5 | 2617 | max_ovf = 1; |
2618 | } | |
ac4a8000 | 2619 | |
1ec8aa41 | 2620 | /* If we have overflow for the constant part and the resulting |
2621 | range will be symbolic, drop to VR_VARYING. */ | |
2622 | if ((min_ovf && sym_min_op0 != sym_min_op1) | |
2623 | || (max_ovf && sym_max_op0 != sym_max_op1)) | |
2624 | { | |
2625 | set_value_range_to_varying (vr); | |
2626 | return; | |
2627 | } | |
2628 | ||
ac4a8000 | 2629 | if (TYPE_OVERFLOW_WRAPS (expr_type)) |
2630 | { | |
2631 | /* If overflow wraps, truncate the values and adjust the | |
2632 | range kind and bounds appropriately. */ | |
796b6678 | 2633 | wide_int tmin = wide_int::from (wmin, prec, sgn); |
2634 | wide_int tmax = wide_int::from (wmax, prec, sgn); | |
c5faecd5 | 2635 | if (min_ovf == max_ovf) |
ac4a8000 | 2636 | { |
2637 | /* No overflow or both overflow or underflow. The | |
2638 | range kind stays VR_RANGE. */ | |
e913b5cd | 2639 | min = wide_int_to_tree (expr_type, tmin); |
2640 | max = wide_int_to_tree (expr_type, tmax); | |
ac4a8000 | 2641 | } |
1ec8aa41 | 2642 | else if (min_ovf == -1 && max_ovf == 1) |
ac4a8000 | 2643 | { |
2644 | /* Underflow and overflow, drop to VR_VARYING. */ | |
2645 | set_value_range_to_varying (vr); | |
2646 | return; | |
2647 | } | |
2648 | else | |
2649 | { | |
2650 | /* Min underflow or max overflow. The range kind | |
2651 | changes to VR_ANTI_RANGE. */ | |
b7c599a6 | 2652 | bool covers = false; |
e913b5cd | 2653 | wide_int tem = tmin; |
c5faecd5 | 2654 | gcc_assert ((min_ovf == -1 && max_ovf == 0) |
2655 | || (max_ovf == 1 && min_ovf == 0)); | |
ac4a8000 | 2656 | type = VR_ANTI_RANGE; |
e913b5cd | 2657 | tmin = tmax + 1; |
796b6678 | 2658 | if (wi::cmp (tmin, tmax, sgn) < 0) |
b7c599a6 | 2659 | covers = true; |
e913b5cd | 2660 | tmax = tem - 1; |
796b6678 | 2661 | if (wi::cmp (tmax, tem, sgn) > 0) |
b7c599a6 | 2662 | covers = true; |
ac4a8000 | 2663 | /* If the anti-range would cover nothing, drop to varying. |
 2664 | Likewise if the anti-range bounds are outside of the
 2665 | type's values. */
796b6678 | 2666 | if (covers || wi::cmp (tmin, tmax, sgn) > 0) |
ac4a8000 | 2667 | { |
2668 | set_value_range_to_varying (vr); | |
2669 | return; | |
2670 | } | |
e913b5cd | 2671 | min = wide_int_to_tree (expr_type, tmin); |
2672 | max = wide_int_to_tree (expr_type, tmax); | |
ac4a8000 | 2673 | } |
2674 | } | |
2675 | else | |
2676 | { | |
ac4a8000 | 2677 | /* If overflow does not wrap, saturate to the types min/max |
2678 | value. */ | |
c5faecd5 | 2679 | if (min_ovf == -1) |
ac4a8000 | 2680 | { |
2681 | if (needs_overflow_infinity (expr_type) | |
2682 | && supports_overflow_infinity (expr_type)) | |
2683 | min = negative_overflow_infinity (expr_type); | |
2684 | else | |
e913b5cd | 2685 | min = wide_int_to_tree (expr_type, type_min); |
ac4a8000 | 2686 | } |
c5faecd5 | 2687 | else if (min_ovf == 1) |
ac4a8000 | 2688 | { |
2689 | if (needs_overflow_infinity (expr_type) | |
2690 | && supports_overflow_infinity (expr_type)) | |
2691 | min = positive_overflow_infinity (expr_type); | |
2692 | else | |
e913b5cd | 2693 | min = wide_int_to_tree (expr_type, type_max); |
ac4a8000 | 2694 | } |
2695 | else | |
e913b5cd | 2696 | min = wide_int_to_tree (expr_type, wmin); |
ac4a8000 | 2697 | |
c5faecd5 | 2698 | if (max_ovf == -1) |
ac4a8000 | 2699 | { |
2700 | if (needs_overflow_infinity (expr_type) | |
2701 | && supports_overflow_infinity (expr_type)) | |
2702 | max = negative_overflow_infinity (expr_type); | |
2703 | else | |
e913b5cd | 2704 | max = wide_int_to_tree (expr_type, type_min); |
ac4a8000 | 2705 | } |
c5faecd5 | 2706 | else if (max_ovf == 1) |
ac4a8000 | 2707 | { |
2708 | if (needs_overflow_infinity (expr_type) | |
2709 | && supports_overflow_infinity (expr_type)) | |
2710 | max = positive_overflow_infinity (expr_type); | |
2711 | else | |
e913b5cd | 2712 | max = wide_int_to_tree (expr_type, type_max); |
ac4a8000 | 2713 | } |
2714 | else | |
e913b5cd | 2715 | max = wide_int_to_tree (expr_type, wmax); |
ac4a8000 | 2716 | } |
1ec8aa41 | 2717 | |
ac4a8000 | 2718 | if (needs_overflow_infinity (expr_type) |
2719 | && supports_overflow_infinity (expr_type)) | |
2720 | { | |
1ec8aa41 | 2721 | if ((min_op0 && is_negative_overflow_infinity (min_op0)) |
2722 | || (min_op1 | |
2723 | && (minus_p | |
2724 | ? is_positive_overflow_infinity (min_op1) | |
2725 | : is_negative_overflow_infinity (min_op1)))) | |
ac4a8000 | 2726 | min = negative_overflow_infinity (expr_type); |
1ec8aa41 | 2727 | if ((max_op0 && is_positive_overflow_infinity (max_op0)) |
2728 | || (max_op1 | |
2729 | && (minus_p | |
2730 | ? is_negative_overflow_infinity (max_op1) | |
2731 | : is_positive_overflow_infinity (max_op1)))) | |
ac4a8000 | 2732 | max = positive_overflow_infinity (expr_type); |
2733 | } | |
1ec8aa41 | 2734 | |
2735 | /* If the result lower bound is constant, we're done; | |
2736 | otherwise, build the symbolic lower bound. */ | |
2737 | if (sym_min_op0 == sym_min_op1) | |
2738 | ; | |
2739 | else if (sym_min_op0) | |
2740 | min = build_symbolic_expr (expr_type, sym_min_op0, | |
2741 | neg_min_op0, min); | |
2742 | else if (sym_min_op1) | |
2743 | min = build_symbolic_expr (expr_type, sym_min_op1, | |
2744 | neg_min_op1 ^ minus_p, min); | |
2745 | ||
2746 | /* Likewise for the upper bound. */ | |
2747 | if (sym_max_op0 == sym_max_op1) | |
2748 | ; | |
2749 | else if (sym_max_op0) | |
2750 | max = build_symbolic_expr (expr_type, sym_max_op0, | |
2751 | neg_max_op0, max); | |
2752 | else if (sym_max_op1) | |
2753 | max = build_symbolic_expr (expr_type, sym_max_op1, | |
2754 | neg_max_op1 ^ minus_p, max); | |
ac4a8000 | 2755 | } |
2756 | else | |
6285cf63 | 2757 | { |
ac4a8000 | 2758 | /* For other cases, for example if we have a PLUS_EXPR with two |
2759 | VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort | |
2760 | to compute a precise range for such a case. | |
2761 | ??? General even mixed range kind operations can be expressed | |
2762 | by for example transforming ~[3, 5] + [1, 2] to range-only | |
2763 | operations and a union primitive: | |
2764 | [-INF, 2] + [1, 2] U [5, +INF] + [1, 2] | |
2765 | [-INF+1, 4] U [6, +INF(OVF)] | |
2766 | though usually the union is not exactly representable with | |
2767 | a single range or anti-range as the above is | |
2768 | [-INF+1, +INF(OVF)] intersected with ~[5, 5] | |
2769 | but one could use a scheme similar to equivalences for this. */ | |
5360e345 | 2770 | set_value_range_to_varying (vr); |
2771 | return; | |
6285cf63 | 2772 | } |
88dbf20f | 2773 | } |
5360e345 | 2774 | else if (code == MIN_EXPR |
2775 | || code == MAX_EXPR) | |
2776 | { | |
1e24c0c9 | 2777 | if (vr0.type == VR_RANGE |
2778 | && !symbolic_range_p (&vr0)) | |
2779 | { | |
2780 | type = VR_RANGE; | |
2781 | if (vr1.type == VR_RANGE | |
2782 | && !symbolic_range_p (&vr1)) | |
2783 | { | |
2784 | /* For operations that make the resulting range directly | |
2785 | proportional to the original ranges, apply the operation to | |
2786 | the same end of each range. */ | |
2787 | min = vrp_int_const_binop (code, vr0.min, vr1.min); | |
2788 | max = vrp_int_const_binop (code, vr0.max, vr1.max); | |
2789 | } | |
2790 | else if (code == MIN_EXPR) | |
2791 | { | |
2792 | min = vrp_val_min (expr_type); | |
2793 | max = vr0.max; | |
2794 | } | |
2795 | else if (code == MAX_EXPR) | |
2796 | { | |
2797 | min = vr0.min; | |
2798 | max = vrp_val_max (expr_type); | |
2799 | } | |
2800 | } | |
2801 | else if (vr1.type == VR_RANGE | |
2802 | && !symbolic_range_p (&vr1)) | |
5360e345 | 2803 | { |
1e24c0c9 | 2804 | type = VR_RANGE; |
2805 | if (code == MIN_EXPR) | |
2806 | { | |
2807 | min = vrp_val_min (expr_type); | |
2808 | max = vr1.max; | |
2809 | } | |
2810 | else if (code == MAX_EXPR) | |
2811 | { | |
2812 | min = vr1.min; | |
2813 | max = vrp_val_max (expr_type); | |
2814 | } | |
5360e345 | 2815 | } |
2816 | else | |
2817 | { | |
1e24c0c9 | 2818 | set_value_range_to_varying (vr); |
2819 | return; | |
5360e345 | 2820 | } |
2821 | } | |
2822 | else if (code == MULT_EXPR) | |
88dbf20f | 2823 | { |
4f5712bd | 2824 | /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not |
e913b5cd | 2825 | drop to varying. This test requires 2*prec bits if both |
2826 | operands are signed and 2*prec + 2 bits if either is not. */ | |
2827 | ||
2828 | signop sign = TYPE_SIGN (expr_type); | |
2829 | unsigned int prec = TYPE_PRECISION (expr_type); | |
e913b5cd | 2830 | |
4f5712bd | 2831 | if (range_int_cst_p (&vr0) |
2832 | && range_int_cst_p (&vr1) | |
2833 | && TYPE_OVERFLOW_WRAPS (expr_type)) | |
2834 | { | |
84014c53 | 2835 | typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int; |
2836 | typedef generic_wide_int | |
2837 | <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst; | |
2838 | vrp_int sizem1 = wi::mask <vrp_int> (prec, false); | |
2839 | vrp_int size = sizem1 + 1; | |
4f5712bd | 2840 | |
e913b5cd | 2841 | /* Extend the values using the sign of the result to PREC2. |
 2842 | From here on out, everything is just signed math no matter
ddb1be65 | 2843 | what the input types were. */ |
84014c53 | 2844 | vrp_int min0 = vrp_int_cst (vr0.min); |
2845 | vrp_int max0 = vrp_int_cst (vr0.max); | |
2846 | vrp_int min1 = vrp_int_cst (vr1.min); | |
2847 | vrp_int max1 = vrp_int_cst (vr1.max); | |
4f5712bd | 2848 | /* Canonicalize the intervals. */ |
e913b5cd | 2849 | if (sign == UNSIGNED) |
4f5712bd | 2850 | { |
796b6678 | 2851 | if (wi::ltu_p (size, min0 + max0)) |
4f5712bd | 2852 | { |
e913b5cd | 2853 | min0 -= size; |
cf8f0e63 | 2854 | max0 -= size; |
4f5712bd | 2855 | } |
2856 | ||
796b6678 | 2857 | if (wi::ltu_p (size, min1 + max1)) |
4f5712bd | 2858 | { |
e913b5cd | 2859 | min1 -= size; |
cf8f0e63 | 2860 | max1 -= size; |
4f5712bd | 2861 | } |
2862 | } | |
4f5712bd | 2863 | |
84014c53 | 2864 | vrp_int prod0 = min0 * min1; |
2865 | vrp_int prod1 = min0 * max1; | |
2866 | vrp_int prod2 = max0 * min1; | |
2867 | vrp_int prod3 = max0 * max1; | |
e913b5cd | 2868 | |
2869 | /* Sort the 4 products so that min is in prod0 and max is in | |
2870 | prod3. */ | |
2871 | /* min0min1 > max0max1 */ | |
796b6678 | 2872 | if (wi::gts_p (prod0, prod3)) |
4f5712bd | 2873 | { |
84014c53 | 2874 | vrp_int tmp = prod3; |
e913b5cd | 2875 | prod3 = prod0; |
2876 | prod0 = tmp; | |
4f5712bd | 2877 | } |
e913b5cd | 2878 | |
2879 | /* min0max1 > max0min1 */ | |
796b6678 | 2880 | if (wi::gts_p (prod1, prod2)) |
e913b5cd | 2881 | { |
84014c53 | 2882 | vrp_int tmp = prod2; |
e913b5cd | 2883 | prod2 = prod1; |
2884 | prod1 = tmp; | |
2885 | } | |
2886 | ||
796b6678 | 2887 | if (wi::gts_p (prod0, prod1)) |
4f5712bd | 2888 | { |
84014c53 | 2889 | vrp_int tmp = prod1; |
e913b5cd | 2890 | prod1 = prod0; |
2891 | prod0 = tmp; | |
4f5712bd | 2892 | } |
4f5712bd | 2893 | |
796b6678 | 2894 | if (wi::gts_p (prod2, prod3)) |
e913b5cd | 2895 | { |
84014c53 | 2896 | vrp_int tmp = prod3; |
e913b5cd | 2897 | prod3 = prod2; |
2898 | prod2 = tmp; | |
2899 | } | |
2900 | ||
2901 | /* diff = max - min. */ | |
2902 | prod2 = prod3 - prod0; | |
796b6678 | 2903 | if (wi::geu_p (prod2, sizem1)) |
4f5712bd | 2904 | { |
2905 | /* the range covers all values. */ | |
2906 | set_value_range_to_varying (vr); | |
2907 | return; | |
2908 | } | |
2909 | ||
2910 | /* The following should handle the wrapping and selecting | |
2911 | VR_ANTI_RANGE for us. */ | |
e913b5cd | 2912 | min = wide_int_to_tree (expr_type, prod0); |
2913 | max = wide_int_to_tree (expr_type, prod3); | |
4f5712bd | 2914 | set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL); |
2915 | return; | |
2916 | } | |
2917 | ||
6285cf63 | 2918 | /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs, |
2919 | drop to VR_VARYING. It would take more effort to compute a | |
2920 | precise range for such a case. For example, if we have | |
2921 | op0 == 65536 and op1 == 65536 with their ranges both being | |
2922 | ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so | |
2923 | we cannot claim that the product is in ~[0,0]. Note that we | |
2924 | are guaranteed to have vr0.type == vr1.type at this | |
2925 | point. */ | |
5360e345 | 2926 | if (vr0.type == VR_ANTI_RANGE |
c37659ce | 2927 | && !TYPE_OVERFLOW_UNDEFINED (expr_type)) |
6285cf63 | 2928 | { |
2929 | set_value_range_to_varying (vr); | |
2930 | return; | |
2931 | } | |
2932 | ||
5360e345 | 2933 | extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); |
2934 | return; | |
2935 | } | |
e208bd44 | 2936 | else if (code == RSHIFT_EXPR |
2937 | || code == LSHIFT_EXPR) | |
5360e345 | 2938 | { |
6291249b | 2939 | /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1], |
2940 | then drop to VR_VARYING. Outside of this range we get undefined | |
efec32e0 | 2941 | behavior from the shift operation. We cannot even trust |
6291249b | 2942 | SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl |
2943 | shifts, and the operation at the tree level may be widened. */ | |
e208bd44 | 2944 | if (range_int_cst_p (&vr1) |
2945 | && compare_tree_int (vr1.min, 0) >= 0 | |
2946 | && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1) | |
62065c0b | 2947 | { |
e208bd44 | 2948 | if (code == RSHIFT_EXPR) |
2949 | { | |
2950 | extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); | |
2951 | return; | |
2952 | } | |
2953 | /* We can map lshifts by constants to MULT_EXPR handling. */ | |
2954 | else if (code == LSHIFT_EXPR | |
2955 | && range_int_cst_singleton_p (&vr1)) | |
2956 | { | |
2957 | bool saved_flag_wrapv; | |
2958 | value_range_t vr1p = VR_INITIALIZER; | |
2959 | vr1p.type = VR_RANGE; | |
796b6678 | 2960 | vr1p.min = (wide_int_to_tree |
2961 | (expr_type, | |
2962 | wi::set_bit_in_zero (tree_to_shwi (vr1.min), | |
2963 | TYPE_PRECISION (expr_type)))); | |
e208bd44 | 2964 | vr1p.max = vr1p.min; |
2965 | /* We have to use a wrapping multiply though as signed overflow | |
2966 | on lshifts is implementation defined in C89. */ | |
2967 | saved_flag_wrapv = flag_wrapv; | |
2968 | flag_wrapv = 1; | |
2969 | extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type, | |
2970 | &vr0, &vr1p); | |
2971 | flag_wrapv = saved_flag_wrapv; | |
2972 | return; | |
2973 | } | |
b0b91aff | 2974 | else if (code == LSHIFT_EXPR |
2975 | && range_int_cst_p (&vr0)) | |
2976 | { | |
b3133d23 | 2977 | int prec = TYPE_PRECISION (expr_type); |
2978 | int overflow_pos = prec; | |
b0b91aff | 2979 | int bound_shift; |
ab2c1de8 | 2980 | wide_int low_bound, high_bound; |
b3133d23 | 2981 | bool uns = TYPE_UNSIGNED (expr_type); |
2982 | bool in_bounds = false; | |
b0b91aff | 2983 | |
b3133d23 | 2984 | if (!uns) |
b0b91aff | 2985 | overflow_pos -= 1; |
2986 | ||
e913b5cd | 2987 | bound_shift = overflow_pos - tree_to_shwi (vr1.max); |
2988 | /* If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can | |
b3133d23 | 2989 | overflow. However, for that to happen, vr1.max needs to be |
2990 | zero, which means vr1 is a singleton range of zero, which | |
2991 | means it should be handled by the previous LSHIFT_EXPR | |
2992 | if-clause. */ | |
ab2c1de8 | 2993 | wide_int bound = wi::set_bit_in_zero (bound_shift, prec); |
2994 | wide_int complement = ~(bound - 1); | |
b3133d23 | 2995 | |
2996 | if (uns) | |
2997 | { | |
e913b5cd | 2998 | low_bound = bound; |
2999 | high_bound = complement; | |
796b6678 | 3000 | if (wi::ltu_p (vr0.max, low_bound)) |
b3133d23 | 3001 | { |
3002 | /* [5, 6] << [1, 2] == [10, 24]. */ | |
3003 | /* We're shifting out only zeroes, the value increases | |
3004 | monotonically. */ | |
3005 | in_bounds = true; | |
3006 | } | |
796b6678 | 3007 | else if (wi::ltu_p (high_bound, vr0.min)) |
b3133d23 | 3008 | { |
3009 | /* [0xffffff00, 0xffffffff] << [1, 2] | |
3010 | == [0xfffffc00, 0xfffffffe]. */ | |
3011 | /* We're shifting out only ones, the value decreases | |
3012 | monotonically. */ | |
3013 | in_bounds = true; | |
3014 | } | |
3015 | } | |
3016 | else | |
3017 | { | |
3018 | /* [-1, 1] << [1, 2] == [-4, 4]. */ | |
e913b5cd | 3019 | low_bound = complement; |
b3133d23 | 3020 | high_bound = bound; |
796b6678 | 3021 | if (wi::lts_p (vr0.max, high_bound) |
3022 | && wi::lts_p (low_bound, vr0.min)) | |
b3133d23 | 3023 | { |
3024 | /* For non-negative numbers, we're shifting out only | |
3025 | zeroes, the value increases monotonically. | |
3026 | For negative numbers, we're shifting out only ones, the | |
 3027 | value decreases monotonically. */
3028 | in_bounds = true; | |
3029 | } | |
3030 | } | |
3031 | ||
3032 | if (in_bounds) | |
b0b91aff | 3033 | { |
b0b91aff | 3034 | extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); |
3035 | return; | |
3036 | } | |
3037 | } | |
e7ea1c21 | 3038 | } |
e7ea1c21 | 3039 | set_value_range_to_varying (vr); |
3040 | return; | |
3041 | } | |
5360e345 | 3042 | else if (code == TRUNC_DIV_EXPR |
3043 | || code == FLOOR_DIV_EXPR | |
3044 | || code == CEIL_DIV_EXPR | |
3045 | || code == EXACT_DIV_EXPR | |
3046 | || code == ROUND_DIV_EXPR) | |
3047 | { | |
3048 | if (vr0.type != VR_RANGE || symbolic_range_p (&vr0)) | |
e52dd258 | 3049 | { |
3050 | /* For division, if op1 has VR_RANGE but op0 does not, something | |
3051 | can be deduced just from that range. Say [min, max] / [4, max] | |
3052 | gives [min / 4, max / 4] range. */ | |
3053 | if (vr1.type == VR_RANGE | |
3054 | && !symbolic_range_p (&vr1) | |
7d48cd66 | 3055 | && range_includes_zero_p (vr1.min, vr1.max) == 0) |
e52dd258 | 3056 | { |
3057 | vr0.type = type = VR_RANGE; | |
c37659ce | 3058 | vr0.min = vrp_val_min (expr_type); |
3059 | vr0.max = vrp_val_max (expr_type); | |
e52dd258 | 3060 | } |
3061 | else | |
3062 | { | |
3063 | set_value_range_to_varying (vr); | |
3064 | return; | |
3065 | } | |
3066 | } | |
3067 | ||
47d397e1 | 3068 | /* For divisions, if flag_non_call_exceptions is true, we must |
3069 | not eliminate a division by zero. */ | |
5360e345 | 3070 | if (cfun->can_throw_non_call_exceptions |
47d397e1 | 3071 | && (vr1.type != VR_RANGE |
7d48cd66 | 3072 | || range_includes_zero_p (vr1.min, vr1.max) != 0)) |
47d397e1 | 3073 | { |
3074 | set_value_range_to_varying (vr); | |
3075 | return; | |
3076 | } | |
3077 | ||
e52dd258 | 3078 | /* For divisions, if op0 is VR_RANGE, we can deduce a range |
3079 | even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can | |
3080 | include 0. */ | |
5360e345 | 3081 | if (vr0.type == VR_RANGE |
e52dd258 | 3082 | && (vr1.type != VR_RANGE |
7d48cd66 | 3083 | || range_includes_zero_p (vr1.min, vr1.max) != 0)) |
e52dd258 | 3084 | { |
3085 | tree zero = build_int_cst (TREE_TYPE (vr0.min), 0); | |
3086 | int cmp; | |
3087 | ||
e52dd258 | 3088 | min = NULL_TREE; |
3089 | max = NULL_TREE; | |
c37659ce | 3090 | if (TYPE_UNSIGNED (expr_type) |
3091 | || value_range_nonnegative_p (&vr1)) | |
e52dd258 | 3092 | { |
3093 | /* For unsigned division or when divisor is known | |
3094 | to be non-negative, the range has to cover | |
3095 | all numbers from 0 to max for positive max | |
3096 | and all numbers from min to 0 for negative min. */ | |
3097 | cmp = compare_values (vr0.max, zero); | |
3098 | if (cmp == -1) | |
3099 | max = zero; | |
3100 | else if (cmp == 0 || cmp == 1) | |
3101 | max = vr0.max; | |
3102 | else | |
3103 | type = VR_VARYING; | |
3104 | cmp = compare_values (vr0.min, zero); | |
3105 | if (cmp == 1) | |
3106 | min = zero; | |
3107 | else if (cmp == 0 || cmp == -1) | |
3108 | min = vr0.min; | |
3109 | else | |
3110 | type = VR_VARYING; | |
3111 | } | |
3112 | else | |
3113 | { | |
3114 | /* Otherwise the range is -max .. max or min .. -min | |
3115 | depending on which bound is bigger in absolute value, | |
3116 | as the division can change the sign. */ | |
3117 | abs_extent_range (vr, vr0.min, vr0.max); | |
3118 | return; | |
3119 | } | |
3120 | if (type == VR_VARYING) | |
3121 | { | |
3122 | set_value_range_to_varying (vr); | |
3123 | return; | |
3124 | } | |
3125 | } | |
c3783c3b | 3126 | else |
3127 | { | |
5360e345 | 3128 | extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); |
3129 | return; | |
eea12c72 | 3130 | } |
3131 | } | |
ebc6c513 | 3132 | else if (code == TRUNC_MOD_EXPR) |
ccab2921 | 3133 | { |
ebc6c513 | 3134 | if (vr1.type != VR_RANGE |
7d48cd66 | 3135 | || range_includes_zero_p (vr1.min, vr1.max) != 0 |
ebc6c513 | 3136 | || vrp_val_is_min (vr1.min)) |
ccab2921 | 3137 | { |
3138 | set_value_range_to_varying (vr); | |
3139 | return; | |
3140 | } | |
3141 | type = VR_RANGE; | |
ebc6c513 | 3142 | /* Compute MAX <|vr1.min|, |vr1.max|> - 1. */ |
c37659ce | 3143 | max = fold_unary_to_constant (ABS_EXPR, expr_type, vr1.min); |
ebc6c513 | 3144 | if (tree_int_cst_lt (max, vr1.max)) |
3145 | max = vr1.max; | |
e913b5cd | 3146 | max = int_const_binop (MINUS_EXPR, max, build_int_cst (TREE_TYPE (max), 1)); |
ebc6c513 | 3147 | /* If the dividend is non-negative the modulus will be |
3148 | non-negative as well. */ | |
c37659ce | 3149 | if (TYPE_UNSIGNED (expr_type) |
3150 | || value_range_nonnegative_p (&vr0)) | |
ebc6c513 | 3151 | min = build_int_cst (TREE_TYPE (max), 0); |
ccab2921 | 3152 | else |
c37659ce | 3153 | min = fold_unary_to_constant (NEGATE_EXPR, expr_type, max); |
ccab2921 | 3154 | } |
6c696748 | 3155 | else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR) |
b3ded9f8 | 3156 | { |
522b9a02 | 3157 | bool int_cst_range0, int_cst_range1; |
e913b5cd | 3158 | wide_int may_be_nonzero0, may_be_nonzero1; |
3159 | wide_int must_be_nonzero0, must_be_nonzero1; | |
bca0860e | 3160 | |
ab2c1de8 | 3161 | int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0, |
3162 | &may_be_nonzero0, | |
522b9a02 | 3163 | &must_be_nonzero0); |
ab2c1de8 | 3164 | int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1, |
3165 | &may_be_nonzero1, | |
522b9a02 | 3166 | &must_be_nonzero1); |
bca0860e | 3167 | |
522b9a02 | 3168 | type = VR_RANGE; |
f26adbc1 | 3169 | if (code == BIT_AND_EXPR) |
0c0b52bd | 3170 | { |
e913b5cd | 3171 | min = wide_int_to_tree (expr_type, |
3172 | must_be_nonzero0 & must_be_nonzero1); | |
ab2c1de8 | 3173 | wide_int wmax = may_be_nonzero0 & may_be_nonzero1; |
63bb6dcf | 3174 | /* If both input ranges contain only negative values we can |
3175 | truncate the result range maximum to the minimum of the | |
3176 | input range maxima. */ | |
3177 | if (int_cst_range0 && int_cst_range1 | |
3178 | && tree_int_cst_sgn (vr0.max) < 0 | |
3179 | && tree_int_cst_sgn (vr1.max) < 0) | |
0c0b52bd | 3180 | { |
796b6678 | 3181 | wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type)); |
3182 | wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type)); | |
0c0b52bd | 3183 | } |
63bb6dcf | 3184 | /* If either input range contains only non-negative values |
3185 | we can truncate the result range maximum to the respective | |
3186 | maximum of the input range. */ | |
3187 | if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0) | |
796b6678 | 3188 | wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type)); |
522b9a02 | 3189 | if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0) |
796b6678 | 3190 | wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type)); |
e913b5cd | 3191 | max = wide_int_to_tree (expr_type, wmax); |
b3ded9f8 | 3192 | } |
f26adbc1 | 3193 | else if (code == BIT_IOR_EXPR) |
e31161b3 | 3194 | { |
e913b5cd | 3195 | max = wide_int_to_tree (expr_type, |
3196 | may_be_nonzero0 | may_be_nonzero1); | |
ab2c1de8 | 3197 | wide_int wmin = must_be_nonzero0 | must_be_nonzero1; |
63bb6dcf | 3198 | /* If the input ranges contain only positive values we can |
3199 | truncate the minimum of the result range to the maximum | |
3200 | of the input range minima. */ | |
3201 | if (int_cst_range0 && int_cst_range1 | |
3202 | && tree_int_cst_sgn (vr0.min) >= 0 | |
3203 | && tree_int_cst_sgn (vr1.min) >= 0) | |
f26adbc1 | 3204 | { |
796b6678 | 3205 | wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type)); |
3206 | wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type)); | |
f26adbc1 | 3207 | } |
63bb6dcf | 3208 | /* If either input range contains only negative values |
3209 | we can truncate the minimum of the result range to the | |
3210 | respective minimum range. */ | |
3211 | if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0) | |
796b6678 | 3212 | wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type)); |
63bb6dcf | 3213 | if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0) |
796b6678 | 3214 | wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type)); |
e913b5cd | 3215 | min = wide_int_to_tree (expr_type, wmin); |
f26adbc1 | 3216 | } |
6c696748 | 3217 | else if (code == BIT_XOR_EXPR) |
3218 | { | |
ab2c1de8 | 3219 | wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1) |
3220 | | ~(may_be_nonzero0 | may_be_nonzero1)); | |
3221 | wide_int result_one_bits | |
3222 | = (must_be_nonzero0.and_not (may_be_nonzero1) | |
3223 | | must_be_nonzero1.and_not (may_be_nonzero0)); | |
e913b5cd | 3224 | max = wide_int_to_tree (expr_type, ~result_zero_bits); |
3225 | min = wide_int_to_tree (expr_type, result_one_bits); | |
63bb6dcf | 3226 | /* If the range has all positive or all negative values the |
3227 | result is better than VARYING. */ | |
3228 | if (tree_int_cst_sgn (min) < 0 | |
3229 | || tree_int_cst_sgn (max) >= 0) | |
3230 | ; | |
6c696748 | 3231 | else |
6c696748 | 3232 | max = min = NULL_TREE; |
3233 | } | |
e31161b3 | 3234 | } |
eea12c72 | 3235 | else |
3236 | gcc_unreachable (); | |
ed19cf09 | 3237 | |
c25c642e | 3238 | /* If either MIN or MAX overflowed, then set the resulting range to |
1ec8aa41 | 3239 | VARYING. But we do accept an overflow infinity representation. */ |
c3783c3b | 3240 | if (min == NULL_TREE |
1ec8aa41 | 3241 | || (TREE_OVERFLOW_P (min) && !is_overflow_infinity (min)) |
c3783c3b | 3242 | || max == NULL_TREE |
1ec8aa41 | 3243 | || (TREE_OVERFLOW_P (max) && !is_overflow_infinity (max))) |
c3783c3b | 3244 | { |
3245 | set_value_range_to_varying (vr); | |
3246 | return; | |
3247 | } | |
3248 | ||
c68b42d2 | 3249 | /* We punt if: |
3250 | 1) [-INF, +INF] | |
3251 | 2) [-INF, +-INF(OVF)] | |
3252 | 3) [+-INF(OVF), +INF] | |
3253 | 4) [+-INF(OVF), +-INF(OVF)] | |
3254 | We learn nothing when we have INF and INF(OVF) on both sides. | |
3255 | Note that we do accept [-INF, -INF] and [+INF, +INF] without | |
3256 | overflow. */ | |
b876a744 | 3257 | if ((vrp_val_is_min (min) || is_overflow_infinity (min)) |
3258 | && (vrp_val_is_max (max) || is_overflow_infinity (max))) | |
eea12c72 | 3259 | { |
c25c642e | 3260 | set_value_range_to_varying (vr); |
3261 | return; | |
ed19cf09 | 3262 | } |
3263 | ||
eea12c72 | 3264 | cmp = compare_values (min, max); |
3265 | if (cmp == -2 || cmp == 1) | |
3266 | { | |
3267 | /* If the new range has its limits swapped around (MIN > MAX), | |
3268 | then the operation caused one of them to wrap around, mark | |
3269 | the new range VARYING. */ | |
3270 | set_value_range_to_varying (vr); | |
3271 | } | |
3272 | else | |
0ed3ba34 | 3273 | set_value_range (vr, type, min, max, NULL); |
ed19cf09 | 3274 | } |
3275 | ||
/* Extract range information from a binary expression OP0 CODE OP1 based on
   the ranges of each of its operands with resulting type EXPR_TYPE.
   The resulting range is stored in *VR.  */

static void
extract_range_from_binary_expr (value_range_t *vr,
				enum tree_code code,
				tree expr_type, tree op0, tree op1)
{
  value_range_t vr0 = VR_INITIALIZER;
  value_range_t vr1 = VR_INITIALIZER;

  /* Get value ranges for each operand.  For constant operands, create
     a new value range with the operand to simplify processing.
     Anything that is neither an SSA name nor a GIMPLE invariant gets
     the conservative VARYING range.  */
  if (TREE_CODE (op0) == SSA_NAME)
    vr0 = *(get_value_range (op0));
  else if (is_gimple_min_invariant (op0))
    set_value_range_to_value (&vr0, op0, NULL);
  else
    set_value_range_to_varying (&vr0);

  if (TREE_CODE (op1) == SSA_NAME)
    vr1 = *(get_value_range (op1));
  else if (is_gimple_min_invariant (op1))
    set_value_range_to_value (&vr1, op1, NULL);
  else
    set_value_range_to_varying (&vr1);

  /* First attempt: combine the two operand ranges directly.  */
  extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);

  /* Try harder for PLUS and MINUS if the range of one operand is symbolic
     and based on the other operand, for example if it was deduced from a
     symbolic comparison.  When a bound of the range of the first operand
     is invariant, we set the corresponding bound of the new range to INF
     in order to avoid recursing on the range of the second operand.  */
  if (vr->type == VR_VARYING
      && (code == PLUS_EXPR || code == MINUS_EXPR)
      && TREE_CODE (op1) == SSA_NAME
      && vr0.type == VR_RANGE
      && symbolic_range_based_on_p (&vr0, op1))
    {
      const bool minus_p = (code == MINUS_EXPR);
      value_range_t n_vr1 = VR_INITIALIZER;

      /* Substitute a range for OP1 built from OP1 itself; which bound of
	 VR0 to test depends on whether OP1 is added or subtracted.  */

      /* Try with VR0 and [-INF, OP1].  */
      if (is_gimple_min_invariant (minus_p ? vr0.max : vr0.min))
	set_value_range (&n_vr1, VR_RANGE, vrp_val_min (expr_type), op1, NULL);

      /* Try with VR0 and [OP1, +INF].  */
      else if (is_gimple_min_invariant (minus_p ? vr0.min : vr0.max))
	set_value_range (&n_vr1, VR_RANGE, op1, vrp_val_max (expr_type), NULL);

      /* Try with VR0 and [OP1, OP1].  */
      else
	set_value_range (&n_vr1, VR_RANGE, op1, op1, NULL);

      extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &n_vr1);
    }

  /* Mirror image of the block above: VR1 symbolic and based on OP0.  */
  if (vr->type == VR_VARYING
      && (code == PLUS_EXPR || code == MINUS_EXPR)
      && TREE_CODE (op0) == SSA_NAME
      && vr1.type == VR_RANGE
      && symbolic_range_based_on_p (&vr1, op0))
    {
      const bool minus_p = (code == MINUS_EXPR);
      value_range_t n_vr0 = VR_INITIALIZER;

      /* Try with [-INF, OP0] and VR1.  */
      if (is_gimple_min_invariant (minus_p ? vr1.max : vr1.min))
	set_value_range (&n_vr0, VR_RANGE, vrp_val_min (expr_type), op0, NULL);

      /* Try with [OP0, +INF] and VR1.  */
      else if (is_gimple_min_invariant (minus_p ? vr1.min : vr1.max))
	set_value_range (&n_vr0, VR_RANGE, op0, vrp_val_max (expr_type), NULL);

      /* Try with [OP0, OP0] and VR1.  */
      else
	set_value_range (&n_vr0, VR_RANGE, op0, op0, NULL);

      extract_range_from_binary_expr_1 (vr, code, expr_type, &n_vr0, &vr1);
    }
}
ed19cf09 | 3359 | |
/* Extract range information from a unary operation CODE based on
   the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
   The resulting range is stored in *VR.  */

static void
extract_range_from_unary_expr_1 (value_range_t *vr,
				 enum tree_code code, tree type,
				 value_range_t *vr0_, tree op0_type)
{
  value_range_t vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;

  /* VRP only operates on integral and pointer types.  */
  if (!(INTEGRAL_TYPE_P (op0_type)
	|| POINTER_TYPE_P (op0_type))
      || !(INTEGRAL_TYPE_P (type)
	   || POINTER_TYPE_P (type)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* If VR0 is UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }

  /* Handle operations that we express in terms of others.  */
  if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
    {
      /* PAREN_EXPR and OBJ_TYPE_REF are simple copies.  */
      copy_value_range (vr, &vr0);
      return;
    }
  else if (code == NEGATE_EXPR)
    {
      /* -X is simply 0 - X, so re-use existing code that also handles
	 anti-ranges fine.  */
      value_range_t zero = VR_INITIALIZER;
      set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
      extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
      return;
    }
  else if (code == BIT_NOT_EXPR)
    {
      /* ~X is simply -1 - X, so re-use existing code that also handles
	 anti-ranges fine.  */
      value_range_t minusone = VR_INITIALIZER;
      set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
      extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
					type, &minusone, &vr0);
      return;
    }

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express op ~[]  as (op []') U (op []'').  The two pieces are
     processed recursively and joined with vrp_meet.  */
  if (vr0.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_unary_expr_1 (vr, code, type, &vrtem0, op0_type);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range_t vrres = VR_INITIALIZER;
	  extract_range_from_unary_expr_1 (&vrres, code, type,
					   &vrtem1, op0_type);
	  vrp_meet (vr, &vrres);
	}
      return;
    }

  if (CONVERT_EXPR_CODE_P (code))
    {
      tree inner_type = op0_type;
      tree outer_type = type;

      /* If the expression evaluates to a pointer, we are only interested in
	 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]).  */
      if (POINTER_TYPE_P (type))
	{
	  if (range_is_nonnull (&vr0))
	    set_value_range_to_nonnull (vr, type);
	  else if (range_is_null (&vr0))
	    set_value_range_to_null (vr, type);
	  else
	    set_value_range_to_varying (vr);
	  return;
	}

      /* If VR0 is varying and we increase the type precision, assume
	 a full range for the following transformation.  */
      if (vr0.type == VR_VARYING
	  && INTEGRAL_TYPE_P (inner_type)
	  && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
	{
	  vr0.type = VR_RANGE;
	  vr0.min = TYPE_MIN_VALUE (inner_type);
	  vr0.max = TYPE_MAX_VALUE (inner_type);
	}

      /* If VR0 is a constant range or anti-range and the conversion is
	 not truncating we can convert the min and max values and
	 canonicalize the resulting range.  Otherwise we can do the
	 conversion if the size of the range is less than what the
	 precision of the target type can represent and the range is
	 not an anti-range.  Overflow infinities are acceptable on a
	 bound only when the widening conversion can represent them.  */
      if ((vr0.type == VR_RANGE
	   || vr0.type == VR_ANTI_RANGE)
	  && TREE_CODE (vr0.min) == INTEGER_CST
	  && TREE_CODE (vr0.max) == INTEGER_CST
	  && (!is_overflow_infinity (vr0.min)
	      || (vr0.type == VR_RANGE
		  && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
		  && needs_overflow_infinity (outer_type)
		  && supports_overflow_infinity (outer_type)))
	  && (!is_overflow_infinity (vr0.max)
	      || (vr0.type == VR_RANGE
		  && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
		  && needs_overflow_infinity (outer_type)
		  && supports_overflow_infinity (outer_type)))
	  && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
	      || (vr0.type == VR_RANGE
		  && integer_zerop (int_const_binop (RSHIFT_EXPR,
		       int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
			 size_int (TYPE_PRECISION (outer_type)))))))
	{
	  tree new_min, new_max;
	  if (is_overflow_infinity (vr0.min))
	    new_min = negative_overflow_infinity (outer_type);
	  else
	    new_min = force_fit_type (outer_type, wi::to_widest (vr0.min),
				      0, false);
	  if (is_overflow_infinity (vr0.max))
	    new_max = positive_overflow_infinity (outer_type);
	  else
	    new_max = force_fit_type (outer_type, wi::to_widest (vr0.max),
				      0, false);
	  set_and_canonicalize_value_range (vr, vr0.type,
					    new_min, new_max, NULL);
	  return;
	}

      set_value_range_to_varying (vr);
      return;
    }
  else if (code == ABS_EXPR)
    {
      tree min, max;
      int cmp;

      /* Pass through vr0 in the easy cases.  */
      if (TYPE_UNSIGNED (type)
	  || value_range_nonnegative_p (&vr0))
	{
	  copy_value_range (vr, &vr0);
	  return;
	}

      /* For the remaining varying or symbolic ranges we can't do anything
	 useful.  */
      if (vr0.type == VR_VARYING
	  || symbolic_range_p (&vr0))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
         useful range.  */
      if (!TYPE_OVERFLOW_UNDEFINED (type)
	  && ((vr0.type == VR_RANGE
	       && vrp_val_is_min (vr0.min))
	      || (vr0.type == VR_ANTI_RANGE
		  && !vrp_val_is_min (vr0.min))))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* ABS_EXPR may flip the range around, if the original range
	 included negative values.  Compute a candidate for each bound;
	 any bound that would need an unrepresentable overflow infinity
	 forces the whole result to VARYING.  */
      if (is_overflow_infinity (vr0.min))
	min = positive_overflow_infinity (type);
      else if (!vrp_val_is_min (vr0.min))
	min = fold_unary_to_constant (code, type, vr0.min);
      else if (!needs_overflow_infinity (type))
	min = TYPE_MAX_VALUE (type);
      else if (supports_overflow_infinity (type))
	min = positive_overflow_infinity (type);
      else
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* NOTE: the vrp_val_is_min test on vr0.MAX below is intentional:
	 only abs(TYPE_MIN_VALUE) overflows, so that is the value being
	 screened for regardless of which bound it sits on.  */
      if (is_overflow_infinity (vr0.max))
	max = positive_overflow_infinity (type);
      else if (!vrp_val_is_min (vr0.max))
	max = fold_unary_to_constant (code, type, vr0.max);
      else if (!needs_overflow_infinity (type))
	max = TYPE_MAX_VALUE (type);
      else if (supports_overflow_infinity (type)
	       /* We shouldn't generate [+INF, +INF] as set_value_range
		  doesn't like this and ICEs.  */
	       && !is_positive_overflow_infinity (min))
	max = positive_overflow_infinity (type);
      else
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      cmp = compare_values (min, max);

      /* If a VR_ANTI_RANGEs contains zero, then we have
	 ~[-INF, min(MIN, MAX)].  */
      if (vr0.type == VR_ANTI_RANGE)
	{
	  if (range_includes_zero_p (vr0.min, vr0.max) == 1)
	    {
	      /* Take the lower of the two values.  */
	      if (cmp != 1)
		max = min;

	      /* Create ~[-INF, min (abs(MIN), abs(MAX))]
	         or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
		 flag_wrapv is set and the original anti-range doesn't include
	         TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE.  */
	      if (TYPE_OVERFLOW_WRAPS (type))
		{
		  tree type_min_value = TYPE_MIN_VALUE (type);

		  min = (vr0.min != type_min_value
			 ? int_const_binop (PLUS_EXPR, type_min_value,
					    build_int_cst (TREE_TYPE (type_min_value), 1))
			 : type_min_value);
		}
	      else
		{
		  if (overflow_infinity_range_p (&vr0))
		    min = negative_overflow_infinity (type);
		  else
		    min = TYPE_MIN_VALUE (type);
		}
	    }
	  else
	    {
	      /* All else has failed, so create the range [0, INF], even for
	         flag_wrapv since TYPE_MIN_VALUE is in the original
	         anti-range.  */
	      vr0.type = VR_RANGE;
	      min = build_int_cst (type, 0);
	      if (needs_overflow_infinity (type))
		{
		  if (supports_overflow_infinity (type))
		    max = positive_overflow_infinity (type);
		  else
		    {
		      set_value_range_to_varying (vr);
		      return;
		    }
		}
	      else
		max = TYPE_MAX_VALUE (type);
	    }
	}

      /* If the range contains zero then we know that the minimum value in the
         range will be zero.  */
      else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
	{
	  if (cmp == 1)
	    max = min;
	  min = build_int_cst (type, 0);
	}
      else
	{
	  /* If the range was reversed, swap MIN and MAX.  */
	  if (cmp == 1)
	    {
	      tree t = min;
	      min = max;
	      max = t;
	    }
	}

      cmp = compare_values (min, max);
      if (cmp == -2 || cmp == 1)
	{
	  /* If the new range has its limits swapped around (MIN > MAX),
	     then the operation caused one of them to wrap around, mark
	     the new range VARYING.  */
	  set_value_range_to_varying (vr);
	}
      else
	set_value_range (vr, vr0.type, min, max, NULL);
      return;
    }

  /* For unhandled operations fall back to varying.  */
  set_value_range_to_varying (vr);
  return;
}
3663 | ||
3664 | ||
113fbe09 | 3665 | /* Extract range information from a unary expression CODE OP0 based on |
3666 | the range of its operand with resulting type TYPE. | |
3667 | The resulting range is stored in *VR. */ | |
3668 | ||
3669 | static void | |
3670 | extract_range_from_unary_expr (value_range_t *vr, enum tree_code code, | |
3671 | tree type, tree op0) | |
3672 | { | |
748eb1f9 | 3673 | value_range_t vr0 = VR_INITIALIZER; |
113fbe09 | 3674 | |
3675 | /* Get value ranges for the operand. For constant operands, create | |
3676 | a new value range with the operand to simplify processing. */ | |
3677 | if (TREE_CODE (op0) == SSA_NAME) | |
3678 | vr0 = *(get_value_range (op0)); | |
3679 | else if (is_gimple_min_invariant (op0)) | |
3680 | set_value_range_to_value (&vr0, op0, NULL); | |
3681 | else | |
3682 | set_value_range_to_varying (&vr0); | |
3683 | ||
3684 | extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0)); | |
3685 | } | |
3686 | ||
3687 | ||
8a2caf10 | 3688 | /* Extract range information from a conditional expression STMT based on |
ec0fa513 | 3689 | the ranges of each of its operands and the expression code. */ |
3690 | ||
3691 | static void | |
8a2caf10 | 3692 | extract_range_from_cond_expr (value_range_t *vr, gimple stmt) |
ec0fa513 | 3693 | { |
3694 | tree op0, op1; | |
748eb1f9 | 3695 | value_range_t vr0 = VR_INITIALIZER; |
3696 | value_range_t vr1 = VR_INITIALIZER; | |
ec0fa513 | 3697 | |
3698 | /* Get value ranges for each operand. For constant operands, create | |
3699 | a new value range with the operand to simplify processing. */ | |
8a2caf10 | 3700 | op0 = gimple_assign_rhs2 (stmt); |
ec0fa513 | 3701 | if (TREE_CODE (op0) == SSA_NAME) |
3702 | vr0 = *(get_value_range (op0)); | |
3703 | else if (is_gimple_min_invariant (op0)) | |
4baf1a77 | 3704 | set_value_range_to_value (&vr0, op0, NULL); |
ec0fa513 | 3705 | else |
3706 | set_value_range_to_varying (&vr0); | |
3707 | ||
8a2caf10 | 3708 | op1 = gimple_assign_rhs3 (stmt); |
ec0fa513 | 3709 | if (TREE_CODE (op1) == SSA_NAME) |
3710 | vr1 = *(get_value_range (op1)); | |
3711 | else if (is_gimple_min_invariant (op1)) | |
4baf1a77 | 3712 | set_value_range_to_value (&vr1, op1, NULL); |
ec0fa513 | 3713 | else |
3714 | set_value_range_to_varying (&vr1); | |
3715 | ||
3716 | /* The resulting value range is the union of the operand ranges */ | |
ec0fa513 | 3717 | copy_value_range (vr, &vr0); |
a2623f6b | 3718 | vrp_meet (vr, &vr1); |
ec0fa513 | 3719 | } |
3720 | ||
3721 | ||
eea12c72 | 3722 | /* Extract range information from a comparison expression EXPR based |
3723 | on the range of its operand and the expression code. */ | |
3724 | ||
3725 | static void | |
93116081 | 3726 | extract_range_from_comparison (value_range_t *vr, enum tree_code code, |
3727 | tree type, tree op0, tree op1) | |
eea12c72 | 3728 | { |
c3783c3b | 3729 | bool sop = false; |
75a70cf9 | 3730 | tree val; |
48e1416a | 3731 | |
e0ad89bd | 3732 | val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop, |
3733 | NULL); | |
c3783c3b | 3734 | |
3735 | /* A disadvantage of using a special infinity as an overflow | |
3736 | representation is that we lose the ability to record overflow | |
3737 | when we don't have an infinity. So we have to ignore a result | |
3738 | which relies on overflow. */ | |
3739 | ||
3740 | if (val && !is_overflow_infinity (val) && !sop) | |
eea12c72 | 3741 | { |
3742 | /* Since this expression was found on the RHS of an assignment, | |
3743 | its type may be different from _Bool. Convert VAL to EXPR's | |
3744 | type. */ | |
93116081 | 3745 | val = fold_convert (type, val); |
4baf1a77 | 3746 | if (is_gimple_min_invariant (val)) |
3747 | set_value_range_to_value (vr, val, vr->equiv); | |
3748 | else | |
3749 | set_value_range (vr, VR_RANGE, val, val, vr->equiv); | |
eea12c72 | 3750 | } |
3751 | else | |
b9b64cb7 | 3752 | /* The result of a comparison is always true or false. */ |
93116081 | 3753 | set_value_range_to_truthvalue (vr, type); |
88dbf20f | 3754 | } |
3755 | ||
/* Try to derive a nonnegative or nonzero range out of STMT relying
   primarily on generic routines in fold in conjunction with range data.
   Special-cases a number of normal builtins (ffs, popcount, parity,
   clz, ctz, clrsb) and the UBSAN_CHECK_* internal functions.
   Store the result in *VR.  */

static void
extract_range_basic (value_range_t *vr, gimple stmt)
{
  bool sop = false;
  tree type = gimple_expr_type (stmt);

  if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
    {
      tree fndecl = gimple_call_fndecl (stmt), arg;
      /* MINI/MAXI are the result bounds; MINI == -2 is a sentinel
	 meaning "give up unless the argument is proven non-zero".  */
      int mini, maxi, zerov = 0, prec;

      switch (DECL_FUNCTION_CODE (fndecl))
	{
	case BUILT_IN_CONSTANT_P:
	  /* If the call is __builtin_constant_p and the argument is a
	     function parameter resolve it to false.  This avoids bogus
	     array bound warnings.
	     ??? We could do this as early as inlining is finished.  */
	  arg = gimple_call_arg (stmt, 0);
	  if (TREE_CODE (arg) == SSA_NAME
	      && SSA_NAME_IS_DEFAULT_DEF (arg)
	      && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL)
	    {
	      set_value_range_to_null (vr, type);
	      return;
	    }
	  break;
	  /* Both __builtin_ffs* and __builtin_popcount return
	     [0, prec].  */
	CASE_INT_FN (BUILT_IN_FFS):
	CASE_INT_FN (BUILT_IN_POPCOUNT):
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec;
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      value_range_t *vr0 = get_value_range (arg);
	      /* If arg is non-zero, then ffs or popcount
		 are non-zero.  */
	      if (((vr0->type == VR_RANGE
		    && range_includes_zero_p (vr0->min, vr0->max) == 0)
		   || (vr0->type == VR_ANTI_RANGE
		       && range_includes_zero_p (vr0->min, vr0->max) == 1))
		  && !is_overflow_infinity (vr0->min)
		  && !is_overflow_infinity (vr0->max))
		mini = 1;
	      /* If some high bits are known to be zero,
		 we can decrease the maximum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->max) == INTEGER_CST
		  && !operand_less_p (vr0->min,
				      build_zero_cst (TREE_TYPE (vr0->min)))
		  && !is_overflow_infinity (vr0->max))
		maxi = tree_floor_log2 (vr0->max) + 1;
	    }
	  goto bitop_builtin;
	  /* __builtin_parity* returns [0, 1].  */
	CASE_INT_FN (BUILT_IN_PARITY):
	  mini = 0;
	  maxi = 1;
	  goto bitop_builtin;
	  /* __builtin_c[lt]z* return [0, prec-1], except for
	     when the argument is 0, but that is undefined behavior.
	     On many targets where the CLZ RTL or optab value is defined
	     for 0 the value is prec, so include that in the range
	     by default.  */
	CASE_INT_FN (BUILT_IN_CLZ):
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec;
	  if (optab_handler (clz_optab, TYPE_MODE (TREE_TYPE (arg)))
	      != CODE_FOR_nothing
	      && CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
					    zerov)
	      /* Handle only the single common value.  */
	      && zerov != prec)
	    /* Magic value to give up, unless vr0 proves
	       arg is non-zero.  */
	    mini = -2;
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      value_range_t *vr0 = get_value_range (arg);
	      /* From clz of VR_RANGE minimum we can compute
		 result maximum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->min) == INTEGER_CST
		  && !is_overflow_infinity (vr0->min))
		{
		  maxi = prec - 1 - tree_floor_log2 (vr0->min);
		  if (maxi != prec)
		    mini = 0;
		}
	      else if (vr0->type == VR_ANTI_RANGE
		       && integer_zerop (vr0->min)
		       && !is_overflow_infinity (vr0->min))
		{
		  maxi = prec - 1;
		  mini = 0;
		}
	      if (mini == -2)
		break;
	      /* From clz of VR_RANGE maximum we can compute
		 result minimum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->max) == INTEGER_CST
		  && !is_overflow_infinity (vr0->max))
		{
		  mini = prec - 1 - tree_floor_log2 (vr0->max);
		  if (mini == prec)
		    break;
		}
	    }
	  if (mini == -2)
	    break;
	  goto bitop_builtin;
	  /* __builtin_ctz* return [0, prec-1], except for
	     when the argument is 0, but that is undefined behavior.
	     If there is a ctz optab for this mode and
	     CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
	     otherwise just assume 0 won't be seen.  */
	CASE_INT_FN (BUILT_IN_CTZ):
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec - 1;
	  if (optab_handler (ctz_optab, TYPE_MODE (TREE_TYPE (arg)))
	      != CODE_FOR_nothing
	      && CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
					    zerov))
	    {
	      /* Handle only the two common values.  */
	      if (zerov == -1)
		mini = -1;
	      else if (zerov == prec)
		maxi = prec;
	      else
		/* Magic value to give up, unless vr0 proves
		   arg is non-zero.  */
		mini = -2;
	    }
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      value_range_t *vr0 = get_value_range (arg);
	      /* If arg is non-zero, then use [0, prec - 1].  */
	      if (((vr0->type == VR_RANGE
		    && integer_nonzerop (vr0->min))
		   || (vr0->type == VR_ANTI_RANGE
		       && integer_zerop (vr0->min)))
		  && !is_overflow_infinity (vr0->min))
		{
		  mini = 0;
		  maxi = prec - 1;
		}
	      /* If some high bits are known to be zero,
		 we can decrease the result maximum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->max) == INTEGER_CST
		  && !is_overflow_infinity (vr0->max))
		{
		  maxi = tree_floor_log2 (vr0->max);
		  /* For vr0 [0, 0] give up.  */
		  if (maxi == -1)
		    break;
		}
	    }
	  if (mini == -2)
	    break;
	  goto bitop_builtin;
	  /* __builtin_clrsb* returns [0, prec-1].  */
	CASE_INT_FN (BUILT_IN_CLRSB):
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec - 1;
	  goto bitop_builtin;
	bitop_builtin:
	  /* Common exit: materialize [MINI, MAXI] as the result range.  */
	  set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
			   build_int_cst (type, maxi), NULL);
	  return;
	default:
	  break;
	}
    }
  else if (is_gimple_call (stmt)
	   && gimple_call_internal_p (stmt))
    {
      /* UBSAN_CHECK_{ADD,SUB,MUL} compute like the corresponding
	 arithmetic operation, so derive the range from that.  */
      enum tree_code subcode = ERROR_MARK;
      switch (gimple_call_internal_fn (stmt))
	{
	case IFN_UBSAN_CHECK_ADD:
	  subcode = PLUS_EXPR;
	  break;
	case IFN_UBSAN_CHECK_SUB:
	  subcode = MINUS_EXPR;
	  break;
	case IFN_UBSAN_CHECK_MUL:
	  subcode = MULT_EXPR;
	  break;
	default:
	  break;
	}
      if (subcode != ERROR_MARK)
	{
	  bool saved_flag_wrapv = flag_wrapv;
	  /* Pretend the arithmetics is wrapping.  If there is
	     any overflow, we'll complain, but will actually do
	     wrapping operation.  */
	  flag_wrapv = 1;
	  extract_range_from_binary_expr (vr, subcode, type,
					  gimple_call_arg (stmt, 0),
					  gimple_call_arg (stmt, 1));
	  flag_wrapv = saved_flag_wrapv;

	  /* If for both arguments vrp_valueize returned non-NULL,
	     this should have been already folded and if not, it
	     wasn't folded because of overflow.  Avoid removing the
	     UBSAN_CHECK_* calls in that case.  */
	  if (vr->type == VR_RANGE
	      && (vr->min == vr->max
		  || operand_equal_p (vr->min, vr->max, 0)))
	    set_value_range_to_varying (vr);
	  return;
	}
    }
  /* Generic fallback: ask fold whether the statement is known
     nonnegative or nonzero.  */
  if (INTEGRAL_TYPE_P (type)
      && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
    set_value_range_to_nonnegative (vr, type,
				    sop || stmt_overflow_infinity (stmt));
  else if (vrp_stmt_computes_nonzero (stmt, &sop)
	   && !sop)
    set_value_range_to_nonnull (vr, type);
  else
    set_value_range_to_varying (vr);
}
3996 | ||
3997 | ||
3998 | /* Try to compute a useful range out of assignment STMT and store it | |
eea12c72 | 3999 | in *VR. */ |
88dbf20f | 4000 | |
4001 | static void | |
75a70cf9 | 4002 | extract_range_from_assignment (value_range_t *vr, gimple stmt) |
88dbf20f | 4003 | { |
75a70cf9 | 4004 | enum tree_code code = gimple_assign_rhs_code (stmt); |
88dbf20f | 4005 | |
4006 | if (code == ASSERT_EXPR) | |
75a70cf9 | 4007 | extract_range_from_assert (vr, gimple_assign_rhs1 (stmt)); |
88dbf20f | 4008 | else if (code == SSA_NAME) |
75a70cf9 | 4009 | extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt)); |
cfd7906e | 4010 | else if (TREE_CODE_CLASS (code) == tcc_binary) |
75a70cf9 | 4011 | extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt), |
4012 | gimple_expr_type (stmt), | |
4013 | gimple_assign_rhs1 (stmt), | |
4014 | gimple_assign_rhs2 (stmt)); | |
88dbf20f | 4015 | else if (TREE_CODE_CLASS (code) == tcc_unary) |
75a70cf9 | 4016 | extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt), |
4017 | gimple_expr_type (stmt), | |
4018 | gimple_assign_rhs1 (stmt)); | |
ec0fa513 | 4019 | else if (code == COND_EXPR) |
8a2caf10 | 4020 | extract_range_from_cond_expr (vr, stmt); |
eea12c72 | 4021 | else if (TREE_CODE_CLASS (code) == tcc_comparison) |
75a70cf9 | 4022 | extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt), |
4023 | gimple_expr_type (stmt), | |
4024 | gimple_assign_rhs1 (stmt), | |
4025 | gimple_assign_rhs2 (stmt)); | |
4026 | else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS | |
4027 | && is_gimple_min_invariant (gimple_assign_rhs1 (stmt))) | |
4028 | set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL); | |
88dbf20f | 4029 | else |
e7d43f99 | 4030 | set_value_range_to_varying (vr); |
8dbf774a | 4031 | |
8dbf774a | 4032 | if (vr->type == VR_VARYING) |
75a70cf9 | 4033 | extract_range_basic (vr, stmt); |
88dbf20f | 4034 | } |
4035 | ||
/* Given a range VR, a LOOP and a variable VAR, determine whether it
   would be profitable to adjust VR using scalar evolution information
   for VAR.  If so, update VR with the new limits.  (STMT is only used
   when checking whether the induction variable may wrap.)  */

static void
adjust_range_with_scev (value_range_t *vr, struct loop *loop,
			gimple stmt, tree var)
{
  tree init, step, chrec, tmin, tmax, min, max, type, tem;
  enum ev_direction dir;

  /* TODO.  Don't adjust anti-ranges.  An anti-range may provide
     better opportunities than a regular range, but I'm not sure.  */
  if (vr->type == VR_ANTI_RANGE)
    return;

  chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));

  /* Like in PR19590, scev can return a constant function.  */
  if (is_gimple_min_invariant (chrec))
    {
      set_value_range_to_value (vr, chrec, vr->equiv);
      return;
    }

  if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
    return;

  /* Narrow INIT and STEP to singleton constants from their value
     ranges when possible; that lets the comparisons below succeed
     more often.  */
  init = initial_condition_in_loop_num (chrec, loop->num);
  tem = op_with_constant_singleton_value_range (init);
  if (tem)
    init = tem;
  step = evolution_part_in_loop_num (chrec, loop->num);
  tem = op_with_constant_singleton_value_range (step);
  if (tem)
    step = tem;

  /* If STEP is symbolic, we can't know whether INIT will be the
     minimum or maximum value in the range.  Also, unless INIT is
     a simple expression, compare_values and possibly other functions
     in tree-vrp won't be able to handle it.  */
  if (step == NULL_TREE
      || !is_gimple_min_invariant (step)
      || !valid_value_p (init))
    return;

  dir = scev_direction (chrec);
  if (/* Do not adjust ranges if we do not know whether the iv increases
	 or decreases,  ... */
      dir == EV_DIR_UNKNOWN
      /* ... or if it may wrap.  */
      || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
				true))
    return;

  /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
     negative_overflow_infinity and positive_overflow_infinity,
     because we have concluded that the loop probably does not
     wrap.  */

  type = TREE_TYPE (var);
  if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
    tmin = lower_bound_in_type (type, type);
  else
    tmin = TYPE_MIN_VALUE (type);
  if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
    tmax = upper_bound_in_type (type, type);
  else
    tmax = TYPE_MAX_VALUE (type);

  /* Try to use estimated number of iterations for the loop to constrain the
     final value in the evolution.  */
  if (TREE_CODE (step) == INTEGER_CST
      && is_gimple_val (init)
      && (TREE_CODE (init) != SSA_NAME
	  || get_value_range (init)->type == VR_RANGE))
    {
      widest_int nit;

      /* We are only entering here for loop header PHI nodes, so using
	 the number of latch executions is the correct thing to use.  */
      if (max_loop_iterations (loop, &nit))
	{
	  value_range_t maxvr = VR_INITIALIZER;
	  signop sgn = TYPE_SIGN (TREE_TYPE (step));
	  bool overflow;

	  /* Total evolution over the loop: STEP * NIT, in wide-int
	     arithmetic so the overflow flag is reliable.  */
	  widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
				     &overflow);
	  /* If the multiplication overflowed we can't do a meaningful
	     adjustment.  Likewise if the result doesn't fit in the type
	     of the induction variable.  For a signed type we have to
	     check whether the result has the expected signedness which
	     is that of the step as number of iterations is unsigned.  */
	  if (!overflow
	      && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
	      && (sgn == UNSIGNED
		  || wi::gts_p (wtmp, 0) == wi::gts_p (step, 0)))
	    {
	      tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
	      extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
					      TREE_TYPE (init), init, tem);
	      /* Likewise if the addition did.  */
	      if (maxvr.type == VR_RANGE)
		{
		  tmin = maxvr.min;
		  tmax = maxvr.max;
		}
	    }
	}
    }

  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
    {
      min = tmin;
      max = tmax;

      /* For VARYING or UNDEFINED ranges, just about anything we get
	 from scalar evolutions should be better.  */

      if (dir == EV_DIR_DECREASES)
	max = init;
      else
	min = init;
    }
  else if (vr->type == VR_RANGE)
    {
      min = vr->min;
      max = vr->max;

      if (dir == EV_DIR_DECREASES)
	{
	  /* INIT is the maximum value.  If INIT is lower than VR->MAX
	     but no smaller than VR->MIN, set VR->MAX to INIT.  */
	  if (compare_values (init, max) == -1)
	    max = init;

	  /* According to the loop information, the variable does not
	     overflow.  If we think it does, probably because of an
	     overflow due to arithmetic on a different INF value,
	     reset now.  */
	  if (is_negative_overflow_infinity (min)
	      || compare_values (min, tmin) == -1)
	    min = tmin;

	}
      else
	{
	  /* If INIT is bigger than VR->MIN, set VR->MIN to INIT.  */
	  if (compare_values (init, min) == 1)
	    min = init;

	  /* Same overflow-infinity reset as above, for the upper
	     bound of an increasing IV.  */
	  if (is_positive_overflow_infinity (max)
	      || compare_values (tmax, max) == -1)
	    max = tmax;
	}
    }
  else
    return;

  /* If we just created an invalid range with the minimum
     greater than the maximum, we fail conservatively.
     This should happen only in unreachable
     parts of code, or for invalid programs.  */
  if (compare_values (min, max) == 1
      || (is_negative_overflow_infinity (min)
	  && is_positive_overflow_infinity (max)))
    return;

  set_value_range (vr, VR_RANGE, min, max, vr->equiv);
}
4207 | ||
4208 | ||
/* Given two numeric value ranges VR0, VR1 and a comparison code COMP:

   - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
     all the values in the ranges.

   - Return BOOLEAN_FALSE_NODE if the comparison always returns false.

   - Return NULL_TREE if it is not always possible to determine the
     value of the comparison.

   Also set *STRICT_OVERFLOW_P to indicate whether a range with an
   overflow infinity was used in the test.  */


static tree
compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
		bool *strict_overflow_p)
{
  /* VARYING or UNDEFINED ranges cannot be compared.  */
  if (vr0->type == VR_VARYING
      || vr0->type == VR_UNDEFINED
      || vr1->type == VR_VARYING
      || vr1->type == VR_UNDEFINED)
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
    {
      /* If both are anti-ranges, then we cannot compute any
	 comparison.  */
      if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
	return NULL_TREE;

      /* These comparisons are never statically computable.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* Equality can be computed only between a range and an
	 anti-range.  ~[VAL1, VAL2] == [VAL1, VAL2] is always false.  */
      if (vr0->type == VR_RANGE)
	{
	  /* To simplify processing, make VR0 the anti-range.  */
	  value_range_t *tmp = vr0;
	  vr0 = vr1;
	  vr1 = tmp;
	}

      gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);

      /* The comparison is decidable only when the anti-range and the
	 range have exactly the same endpoints.  */
      if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
	  && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  /* NOTE(review): usable_range_p presumably rejects ranges whose
     overflow infinities cannot be relied upon, recording the fact in
     *STRICT_OVERFLOW_P -- confirm against its definition.  */
  if (!usable_range_p (vr0, strict_overflow_p)
      || !usable_range_p (vr1, strict_overflow_p))
    return NULL_TREE;

  /* Simplify processing.  If COMP is GT_EXPR or GE_EXPR, switch the
     operands around and change the comparison code.  */
  if (comp == GT_EXPR || comp == GE_EXPR)
    {
      value_range_t *tmp;
      comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
      tmp = vr0;
      vr0 = vr1;
      vr1 = tmp;
    }

  if (comp == EQ_EXPR)
    {
      /* Equality may only be computed if both ranges represent
	 exactly one value.  */
      if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
	  && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
	{
	  int cmp_min = compare_values_warnv (vr0->min, vr1->min,
					      strict_overflow_p);
	  int cmp_max = compare_values_warnv (vr0->max, vr1->max,
					      strict_overflow_p);
	  if (cmp_min == 0 && cmp_max == 0)
	    return boolean_true_node;
	  /* -2 appears to mean the values could not be compared at
	     compile time (see the NE_EXPR comment below); any other
	     nonzero result means the singletons differ.  */
	  else if (cmp_min != -2 && cmp_max != -2)
	    return boolean_false_node;
	}
      /* If [V0_MIN, V1_MAX] < [V1_MIN, V1_MAX] then V0 != V1.  */
      else if (compare_values_warnv (vr0->min, vr1->max,
				     strict_overflow_p) == 1
	       || compare_values_warnv (vr1->min, vr0->max,
					strict_overflow_p) == 1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      int cmp1, cmp2;

      /* If VR0 is completely to the left or completely to the right
	 of VR1, they are always different.  Notice that we need to
	 make sure that both comparisons yield similar results to
	 avoid comparing values that cannot be compared at
	 compile-time.  */
      cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
      cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
      if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
	return boolean_true_node;

      /* If VR0 and VR1 represent a single value and are identical,
	 return false.  */
      else if (compare_values_warnv (vr0->min, vr0->max,
				     strict_overflow_p) == 0
	       && compare_values_warnv (vr1->min, vr1->max,
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->min, vr1->min,
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->max, vr1->max,
					strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      else
	return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR0 is to the left of VR1, return true.  */
      tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	{
	  if (overflow_infinity_range_p (vr0)
	      || overflow_infinity_range_p (vr1))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR0 is to the right of VR1, return false.  */
      tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	{
	  if (overflow_infinity_range_p (vr0)
	      || overflow_infinity_range_p (vr1))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  /* GT/GE were canonicalized to LT/LE above, so every valid COMP has
     been handled by now.  */
  gcc_unreachable ();
}
4370 | ||
4371 | ||
/* Given a value range VR, a value VAL and a comparison code COMP, return
   BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
   values in VR.  Return BOOLEAN_FALSE_NODE if the comparison
   always returns false.  Return NULL_TREE if it is not always
   possible to determine the value of the comparison.  Also set
   *STRICT_OVERFLOW_P to indicate whether a range with an overflow
   infinity was used in the test.  */

static tree
compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
			  bool *strict_overflow_p)
{
  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr->type == VR_ANTI_RANGE)
    {
      /* For anti-ranges, the only predicates that we can compute at
	 compile time are equality and inequality.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2.  */
      if (value_inside_range (val, vr->min, vr->max) == 1)
	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  /* NOTE(review): usable_range_p presumably rejects ranges whose
     overflow infinities cannot be relied upon, recording the fact in
     *STRICT_OVERFLOW_P -- confirm against its definition.  */
  if (!usable_range_p (vr, strict_overflow_p))
    return NULL_TREE;

  if (comp == EQ_EXPR)
    {
      /* EQ_EXPR may only be computed if VR represents exactly
	 one value.  */
      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
	{
	  int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
	  if (cmp == 0)
	    return boolean_true_node;
	  /* Any decided inequality (including cmp == 2, which appears
	     to mean "known unequal without an ordering") disproves
	     equality.  */
	  else if (cmp == -1 || cmp == 1 || cmp == 2)
	    return boolean_false_node;
	}
      /* VAL lies strictly below VR's minimum or strictly above its
	 maximum: equality is impossible.  */
      else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
	       || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      /* If VAL is not inside VR, then they are always different.  */
      if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
	  || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
	return boolean_true_node;

      /* If VR represents exactly one value equal to VAL, then return
	 false.  */
      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
	  && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR is to the left of VAL, return true.  */
      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR is to the right of VAL, return false.  */
      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }
  else if (comp == GT_EXPR || comp == GE_EXPR)
    {
      int tst;

      /* If VR is to the right of VAL, return true.  */
      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
      if ((comp == GT_EXPR && tst == 1)
	  || (comp == GE_EXPR && (tst == 0 || tst == 1)))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR is to the left of VAL, return false.  */
      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
      if ((comp == GT_EXPR && (tst == -1 || tst == 0))
	  || (comp == GE_EXPR && tst == -1))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  /* All tree comparison codes have been handled above.  */
  gcc_unreachable ();
}
4499 | ||
4500 | ||
/* Debugging dumps.  */

/* Forward declarations for the dump/debug helpers below.  They are
   non-static, presumably so they remain callable from a debugger
   session -- TODO confirm there is no header declaring them.  */
void dump_value_range (FILE *, value_range_t *);
void debug_value_range (value_range_t *);
void dump_all_value_ranges (FILE *);
void debug_all_value_ranges (void);
void dump_vr_equiv (FILE *, bitmap);
void debug_vr_equiv (bitmap);
4509 | ||
4510 | ||
4511 | /* Dump value range VR to FILE. */ | |
4512 | ||
88dbf20f | 4513 | void |
eea12c72 | 4514 | dump_value_range (FILE *file, value_range_t *vr) |
88dbf20f | 4515 | { |
4516 | if (vr == NULL) | |
4517 | fprintf (file, "[]"); | |
4518 | else if (vr->type == VR_UNDEFINED) | |
4519 | fprintf (file, "UNDEFINED"); | |
4520 | else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE) | |
4521 | { | |
eea12c72 | 4522 | tree type = TREE_TYPE (vr->min); |
4523 | ||
88dbf20f | 4524 | fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : ""); |
eea12c72 | 4525 | |
b876a744 | 4526 | if (is_negative_overflow_infinity (vr->min)) |
c3783c3b | 4527 | fprintf (file, "-INF(OVF)"); |
b876a744 | 4528 | else if (INTEGRAL_TYPE_P (type) |
4529 | && !TYPE_UNSIGNED (type) | |
4530 | && vrp_val_is_min (vr->min)) | |
4531 | fprintf (file, "-INF"); | |
eea12c72 | 4532 | else |
4533 | print_generic_expr (file, vr->min, 0); | |
4534 | ||
88dbf20f | 4535 | fprintf (file, ", "); |
eea12c72 | 4536 | |
b876a744 | 4537 | if (is_positive_overflow_infinity (vr->max)) |
c3783c3b | 4538 | fprintf (file, "+INF(OVF)"); |
b876a744 | 4539 | else if (INTEGRAL_TYPE_P (type) |
4540 | && vrp_val_is_max (vr->max)) | |
4541 | fprintf (file, "+INF"); | |
eea12c72 | 4542 | else |
4543 | print_generic_expr (file, vr->max, 0); | |
4544 | ||
88dbf20f | 4545 | fprintf (file, "]"); |
eea12c72 | 4546 | |
4547 | if (vr->equiv) | |
4548 | { | |
4549 | bitmap_iterator bi; | |
4550 | unsigned i, c = 0; | |
4551 | ||
4552 | fprintf (file, " EQUIVALENCES: { "); | |
4553 | ||
4554 | EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi) | |
4555 | { | |
4556 | print_generic_expr (file, ssa_name (i), 0); | |
4557 | fprintf (file, " "); | |
4558 | c++; | |
4559 | } | |
4560 | ||
4561 | fprintf (file, "} (%u elements)", c); | |
4562 | } | |
88dbf20f | 4563 | } |
4564 | else if (vr->type == VR_VARYING) | |
4565 | fprintf (file, "VARYING"); | |
4566 | else | |
4567 | fprintf (file, "INVALID RANGE"); | |
4568 | } | |
4569 | ||
4570 | ||
4571 | /* Dump value range VR to stderr. */ | |
4572 | ||
4b987fac | 4573 | DEBUG_FUNCTION void |
eea12c72 | 4574 | debug_value_range (value_range_t *vr) |
88dbf20f | 4575 | { |
4576 | dump_value_range (stderr, vr); | |
79f0a894 | 4577 | fprintf (stderr, "\n"); |
88dbf20f | 4578 | } |
4579 | ||
4580 | ||
4581 | /* Dump value ranges of all SSA_NAMEs to FILE. */ | |
4582 | ||
4583 | void | |
4584 | dump_all_value_ranges (FILE *file) | |
4585 | { | |
4586 | size_t i; | |
4587 | ||
e0186710 | 4588 | for (i = 0; i < num_vr_values; i++) |
88dbf20f | 4589 | { |
eea12c72 | 4590 | if (vr_value[i]) |
88dbf20f | 4591 | { |
eea12c72 | 4592 | print_generic_expr (file, ssa_name (i), 0); |
88dbf20f | 4593 | fprintf (file, ": "); |
eea12c72 | 4594 | dump_value_range (file, vr_value[i]); |
88dbf20f | 4595 | fprintf (file, "\n"); |
4596 | } | |
4597 | } | |
4598 | ||
4599 | fprintf (file, "\n"); | |
4600 | } | |
4601 | ||
4602 | ||
4603 | /* Dump all value ranges to stderr. */ | |
4604 | ||
4b987fac | 4605 | DEBUG_FUNCTION void |
88dbf20f | 4606 | debug_all_value_ranges (void) |
4607 | { | |
4608 | dump_all_value_ranges (stderr); | |
4609 | } | |
4610 | ||
4611 | ||
88dbf20f | 4612 | /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V, |
4613 | create a new SSA name N and return the assertion assignment | |
54751bcf | 4614 | 'N = ASSERT_EXPR <V, V OP W>'. */ |
88dbf20f | 4615 | |
75a70cf9 | 4616 | static gimple |
88dbf20f | 4617 | build_assert_expr_for (tree cond, tree v) |
4618 | { | |
0dc262e0 | 4619 | tree a; |
75a70cf9 | 4620 | gimple assertion; |
88dbf20f | 4621 | |
0dc262e0 | 4622 | gcc_assert (TREE_CODE (v) == SSA_NAME |
4623 | && COMPARISON_CLASS_P (cond)); | |
88dbf20f | 4624 | |
0dc262e0 | 4625 | a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond); |
4626 | assertion = gimple_build_assign (NULL_TREE, a); | |
88dbf20f | 4627 | |
4628 | /* The new ASSERT_EXPR, creates a new SSA name that replaces the | |
0dc262e0 | 4629 | operand of the ASSERT_EXPR. Create it so the new name and the old one |
4630 | are registered in the replacement table so that we can fix the SSA web | |
4631 | after adding all the ASSERT_EXPRs. */ | |
4632 | create_new_def_for (v, assertion, NULL); | |
88dbf20f | 4633 | |
4634 | return assertion; | |
4635 | } | |
4636 | ||
4637 | ||
4638 | /* Return false if EXPR is a predicate expression involving floating | |
4639 | point values. */ | |
4640 | ||
4641 | static inline bool | |
75a70cf9 | 4642 | fp_predicate (gimple stmt) |
88dbf20f | 4643 | { |
75a70cf9 | 4644 | GIMPLE_CHECK (stmt, GIMPLE_COND); |
4645 | ||
4646 | return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt))); | |
88dbf20f | 4647 | } |
4648 | ||
/* If the range of values taken by OP can be inferred after STMT executes,
   return the comparison code (COMP_CODE_P) and value (VAL_P) that
   describes the inferred range.  Return true if a range could be
   inferred.  */

static bool
infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
{
  /* Default to "nothing inferred".  */
  *val_p = NULL_TREE;
  *comp_code_p = ERROR_MARK;

  /* Do not attempt to infer anything in names that flow through
     abnormal edges.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
    return false;

  /* Similarly, don't infer anything from statements that may throw
     exceptions.  ??? Relax this requirement?  */
  if (stmt_could_throw_p (stmt))
    return false;

  /* If STMT is the last statement of a basic block with no normal
     successors, there is no point inferring anything about any of its
     operands.  We would not be able to find a proper insertion point
     for the assertion, anyway.  */
  if (stmt_ends_bb_p (stmt))
    {
      edge_iterator ei;
      edge e;

      FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
	if (!(e->flags & EDGE_ABNORMAL))
	  break;
      /* E is NULL here iff every successor edge was abnormal.  */
      if (e == NULL)
	return false;
    }

  /* If executing STMT implies OP is non-null, the inferred range is
     OP != 0.  */
  if (infer_nonnull_range (stmt, op, true, true))
    {
      *val_p = build_int_cst (TREE_TYPE (op), 0);
      *comp_code_p = NE_EXPR;
      return true;
    }

  return false;
}
4695 | ||
4696 | ||
/* Forward declarations for the assertion dump helpers below.  They are
   non-static, presumably so they remain callable from a debugger
   session -- TODO confirm there is no header declaring them.  */
void dump_asserts_for (FILE *, tree);
void debug_asserts_for (tree);
void dump_all_asserts (FILE *);
void debug_all_asserts (void);
4701 | ||
4702 | /* Dump all the registered assertions for NAME to FILE. */ | |
4703 | ||
4704 | void | |
4705 | dump_asserts_for (FILE *file, tree name) | |
4706 | { | |
4707 | assert_locus_t loc; | |
4708 | ||
4709 | fprintf (file, "Assertions to be inserted for "); | |
4710 | print_generic_expr (file, name, 0); | |
4711 | fprintf (file, "\n"); | |
4712 | ||
4713 | loc = asserts_for[SSA_NAME_VERSION (name)]; | |
4714 | while (loc) | |
4715 | { | |
4716 | fprintf (file, "\t"); | |
75a70cf9 | 4717 | print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0); |
eea12c72 | 4718 | fprintf (file, "\n\tBB #%d", loc->bb->index); |
4719 | if (loc->e) | |
4720 | { | |
4721 | fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index, | |
4722 | loc->e->dest->index); | |
5147ec07 | 4723 | dump_edge_info (file, loc->e, dump_flags, 0); |
eea12c72 | 4724 | } |
4725 | fprintf (file, "\n\tPREDICATE: "); | |
4726 | print_generic_expr (file, name, 0); | |
f3d35d4d | 4727 | fprintf (file, " %s ", get_tree_code_name (loc->comp_code)); |
eea12c72 | 4728 | print_generic_expr (file, loc->val, 0); |
4729 | fprintf (file, "\n\n"); | |
4730 | loc = loc->next; | |
4731 | } | |
4732 | ||
4733 | fprintf (file, "\n"); | |
4734 | } | |
4735 | ||
4736 | ||
4737 | /* Dump all the registered assertions for NAME to stderr. */ | |
4738 | ||
4b987fac | 4739 | DEBUG_FUNCTION void |
eea12c72 | 4740 | debug_asserts_for (tree name) |
4741 | { | |
4742 | dump_asserts_for (stderr, name); | |
4743 | } | |
4744 | ||
4745 | ||
4746 | /* Dump all the registered assertions for all the names to FILE. */ | |
4747 | ||
4748 | void | |
4749 | dump_all_asserts (FILE *file) | |
4750 | { | |
4751 | unsigned i; | |
4752 | bitmap_iterator bi; | |
4753 | ||
4754 | fprintf (file, "\nASSERT_EXPRs to be inserted\n\n"); | |
4755 | EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi) | |
4756 | dump_asserts_for (file, ssa_name (i)); | |
4757 | fprintf (file, "\n"); | |
4758 | } | |
4759 | ||
4760 | ||
4761 | /* Dump all the registered assertions for all the names to stderr. */ | |
4762 | ||
4b987fac | 4763 | DEBUG_FUNCTION void |
eea12c72 | 4764 | debug_all_asserts (void) |
4765 | { | |
4766 | dump_all_asserts (stderr); | |
4767 | } | |
4768 | ||
4769 | ||
4770 | /* If NAME doesn't have an ASSERT_EXPR registered for asserting | |
bed8bec4 | 4771 | 'EXPR COMP_CODE VAL' at a location that dominates block BB or |
eea12c72 | 4772 | E->DEST, then register this location as a possible insertion point |
bed8bec4 | 4773 | for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>. |
eea12c72 | 4774 | |
4775 | BB, E and SI provide the exact insertion point for the new | |
4776 | ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted | |
4777 | on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on | |
4778 | BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E | |
4779 | must not be NULL. */ | |
4780 | ||
4781 | static void | |
bed8bec4 | 4782 | register_new_assert_for (tree name, tree expr, |
eea12c72 | 4783 | enum tree_code comp_code, |
4784 | tree val, | |
4785 | basic_block bb, | |
4786 | edge e, | |
75a70cf9 | 4787 | gimple_stmt_iterator si) |
eea12c72 | 4788 | { |
4789 | assert_locus_t n, loc, last_loc; | |
eea12c72 | 4790 | basic_block dest_bb; |
4791 | ||
1b4345f7 | 4792 | gcc_checking_assert (bb == NULL || e == NULL); |
eea12c72 | 4793 | |
4794 | if (e == NULL) | |
1b4345f7 | 4795 | gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND |
4796 | && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH); | |
eea12c72 | 4797 | |
ca9e4658 | 4798 | /* Never build an assert comparing against an integer constant with |
4799 | TREE_OVERFLOW set. This confuses our undefined overflow warning | |
4800 | machinery. */ | |
4a8f88ff | 4801 | if (TREE_OVERFLOW_P (val)) |
4802 | val = drop_tree_overflow (val); | |
ca9e4658 | 4803 | |
eea12c72 | 4804 | /* The new assertion A will be inserted at BB or E. We need to |
4805 | determine if the new location is dominated by a previously | |
4806 | registered location for A. If we are doing an edge insertion, | |
4807 | assume that A will be inserted at E->DEST. Note that this is not | |
4808 | necessarily true. | |
48e1416a | 4809 | |
eea12c72 | 4810 | If E is a critical edge, it will be split. But even if E is |
4811 | split, the new block will dominate the same set of blocks that | |
4812 | E->DEST dominates. | |
48e1416a | 4813 | |
eea12c72 | 4814 | The reverse, however, is not true, blocks dominated by E->DEST |
4815 | will not be dominated by the new block created to split E. So, | |
4816 | if the insertion location is on a critical edge, we will not use | |
4817 | the new location to move another assertion previously registered | |
4818 | at a block dominated by E->DEST. */ | |
4819 | dest_bb = (bb) ? bb : e->dest; | |
4820 | ||
4821 | /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and | |
4822 | VAL at a block dominating DEST_BB, then we don't need to insert a new | |
4823 | one. Similarly, if the same assertion already exists at a block | |
4824 | dominated by DEST_BB and the new location is not on a critical | |
4825 | edge, then update the existing location for the assertion (i.e., | |
4826 | move the assertion up in the dominance tree). | |
4827 | ||
4828 | Note, this is implemented as a simple linked list because there | |
4829 | should not be more than a handful of assertions registered per | |
4830 | name. If this becomes a performance problem, a table hashed by | |
4831 | COMP_CODE and VAL could be implemented. */ | |
4832 | loc = asserts_for[SSA_NAME_VERSION (name)]; | |
4833 | last_loc = loc; | |
eea12c72 | 4834 | while (loc) |
4835 | { | |
4836 | if (loc->comp_code == comp_code | |
4837 | && (loc->val == val | |
bed8bec4 | 4838 | || operand_equal_p (loc->val, val, 0)) |
4839 | && (loc->expr == expr | |
4840 | || operand_equal_p (loc->expr, expr, 0))) | |
eea12c72 | 4841 | { |
d6f10e50 | 4842 | /* If E is not a critical edge and DEST_BB |
eea12c72 | 4843 | dominates the existing location for the assertion, move |
4844 | the assertion up in the dominance tree by updating its | |
4845 | location information. */ | |
4846 | if ((e == NULL || !EDGE_CRITICAL_P (e)) | |
4847 | && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb)) | |
4848 | { | |
4849 | loc->bb = dest_bb; | |
4850 | loc->e = e; | |
4851 | loc->si = si; | |
4852 | return; | |
4853 | } | |
4854 | } | |
4855 | ||
4856 | /* Update the last node of the list and move to the next one. */ | |
4857 | last_loc = loc; | |
4858 | loc = loc->next; | |
4859 | } | |
4860 | ||
4861 | /* If we didn't find an assertion already registered for | |
4862 | NAME COMP_CODE VAL, add a new one at the end of the list of | |
4863 | assertions associated with NAME. */ | |
4c36ffe6 | 4864 | n = XNEW (struct assert_locus_d); |
eea12c72 | 4865 | n->bb = dest_bb; |
4866 | n->e = e; | |
4867 | n->si = si; | |
4868 | n->comp_code = comp_code; | |
4869 | n->val = val; | |
bed8bec4 | 4870 | n->expr = expr; |
eea12c72 | 4871 | n->next = NULL; |
4872 | ||
4873 | if (last_loc) | |
4874 | last_loc->next = n; | |
4875 | else | |
4876 | asserts_for[SSA_NAME_VERSION (name)] = n; | |
4877 | ||
4878 | bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name)); | |
4879 | } | |
4880 | ||
a00913c7 | 4881 | /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME. |
4882 | Extract a suitable test code and value and store them into *CODE_P and | |
4883 | *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P. | |
4884 | ||
4885 | If no extraction was possible, return FALSE, otherwise return TRUE. | |
4886 | ||
4887 | If INVERT is true, then we invert the result stored into *CODE_P. */ | |
7433d1d6 | 4888 | |
4889 | static bool | |
4890 | extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code, | |
4891 | tree cond_op0, tree cond_op1, | |
4892 | bool invert, enum tree_code *code_p, | |
4893 | tree *val_p) | |
4894 | { | |
4895 | enum tree_code comp_code; | |
4896 | tree val; | |
4897 | ||
4898 | /* Otherwise, we have a comparison of the form NAME COMP VAL | |
4899 | or VAL COMP NAME. */ | |
4900 | if (name == cond_op1) | |
4901 | { | |
4902 | /* If the predicate is of the form VAL COMP NAME, flip | |
4903 | COMP around because we need to register NAME as the | |
4904 | first operand in the predicate. */ | |
4905 | comp_code = swap_tree_comparison (cond_code); | |
4906 | val = cond_op0; | |
4907 | } | |
4908 | else | |
4909 | { | |
4910 | /* The comparison is of the form NAME COMP VAL, so the | |
4911 | comparison code remains unchanged. */ | |
4912 | comp_code = cond_code; | |
4913 | val = cond_op1; | |
4914 | } | |
4915 | ||
4916 | /* Invert the comparison code as necessary. */ | |
4917 | if (invert) | |
4918 | comp_code = invert_tree_comparison (comp_code, 0); | |
4919 | ||
4920 | /* VRP does not handle float types. */ | |
4921 | if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val))) | |
4922 | return false; | |
4923 | ||
4924 | /* Do not register always-false predicates. | |
4925 | FIXME: this works around a limitation in fold() when dealing with | |
4926 | enumerations. Given 'enum { N1, N2 } x;', fold will not | |
4927 | fold 'if (x > N2)' to 'if (0)'. */ | |
4928 | if ((comp_code == GT_EXPR || comp_code == LT_EXPR) | |
4929 | && INTEGRAL_TYPE_P (TREE_TYPE (val))) | |
4930 | { | |
4931 | tree min = TYPE_MIN_VALUE (TREE_TYPE (val)); | |
4932 | tree max = TYPE_MAX_VALUE (TREE_TYPE (val)); | |
4933 | ||
4934 | if (comp_code == GT_EXPR | |
4935 | && (!max | |
4936 | || compare_values (val, max) == 0)) | |
4937 | return false; | |
4938 | ||
4939 | if (comp_code == LT_EXPR | |
4940 | && (!min | |
4941 | || compare_values (val, min) == 0)) | |
4942 | return false; | |
4943 | } | |
4944 | *code_p = comp_code; | |
4945 | *val_p = val; | |
4946 | return true; | |
4947 | } | |
fecf3b39 | 4948 | |
7139adf8 | 4949 | /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any |
4950 | (otherwise return VAL). VAL and MASK must be zero-extended for | |
4951 | precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT | |
4952 | (to transform signed values into unsigned) and at the end xor | |
4953 | SGNBIT back. */ | |
4954 | ||
e913b5cd | 4955 | static wide_int |
10c3fe8d | 4956 | masked_increment (const wide_int &val_in, const wide_int &mask, |
4957 | const wide_int &sgnbit, unsigned int prec) | |
7139adf8 | 4958 | { |
796b6678 | 4959 | wide_int bit = wi::one (prec), res; |
7139adf8 | 4960 | unsigned int i; |
4961 | ||
10c3fe8d | 4962 | wide_int val = val_in ^ sgnbit; |
cf8f0e63 | 4963 | for (i = 0; i < prec; i++, bit += bit) |
7139adf8 | 4964 | { |
4965 | res = mask; | |
796b6678 | 4966 | if ((res & bit) == 0) |
7139adf8 | 4967 | continue; |
e913b5cd | 4968 | res = bit - 1; |
cf8f0e63 | 4969 | res = (val + bit).and_not (res); |
4970 | res &= mask; | |
796b6678 | 4971 | if (wi::gtu_p (res, val)) |
cf8f0e63 | 4972 | return res ^ sgnbit; |
7139adf8 | 4973 | } |
cf8f0e63 | 4974 | return val ^ sgnbit; |
7139adf8 | 4975 | } |
4976 | ||
/* Try to register an edge assertion for SSA name NAME on edge E for
   the condition COND contributing to the conditional jump pointed to by BSI.
   Invert the condition COND if INVERT is true.
   Return true if an assertion for NAME could be registered.  */

static bool
register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
			    enum tree_code cond_code,
			    tree cond_op0, tree cond_op1, bool invert)
{
  tree val;
  enum tree_code comp_code;
  /* Becomes true as soon as at least one assertion is registered.  */
  bool retval = false;

  /* Normalize the condition to NAME COMP_CODE VAL; bail out if that
     is not possible (e.g. float types).  */
  if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
						cond_op0,
						cond_op1,
						invert, &comp_code, &val))
    return false;

  /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
     reachable from E.  */
  if (live_on_edge (e, name)
      && !has_single_use (name))
    {
      register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
      retval = true;
    }

  /* In the case of NAME <= CST and NAME being defined as
     NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
     and NAME2 <= CST - CST2.  We can do the same for NAME > CST.
     This catches range and anti-range tests.  */
  if ((comp_code == LE_EXPR
       || comp_code == GT_EXPR)
      && TREE_CODE (val) == INTEGER_CST
      && TYPE_UNSIGNED (TREE_TYPE (val)))
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (name);
      tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;

      /* Extract CST2 from the (optional) addition.  */
      if (is_gimple_assign (def_stmt)
	  && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  cst2 = gimple_assign_rhs2 (def_stmt);
	  if (TREE_CODE (name2) == SSA_NAME
	      && TREE_CODE (cst2) == INTEGER_CST)
	    def_stmt = SSA_NAME_DEF_STMT (name2);
	}

      /* Extract NAME2 from the (optional) sign-changing cast.
	 Only a same-precision signed -> unsigned conversion qualifies.  */
      if (gimple_assign_cast_p (def_stmt))
	{
	  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
	      && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
	      && (TYPE_PRECISION (gimple_expr_type (def_stmt))
		  == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
	    name3 = gimple_assign_rhs1 (def_stmt);
	}

      /* If name3 is used later, create an ASSERT_EXPR for it.  */
      if (name3 != NULL_TREE
	  && TREE_CODE (name3) == SSA_NAME
	  && (cst2 == NULL_TREE
	      || TREE_CODE (cst2) == INTEGER_CST)
	  && INTEGRAL_TYPE_P (TREE_TYPE (name3))
	  && live_on_edge (e, name3)
	  && !has_single_use (name3))
	{
	  tree tmp;

	  /* Build an expression for the range test:
	     (TYPE (name)) name3 [+ cst2].  */
	  tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
	  if (cst2 != NULL_TREE)
	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);

	  if (dump_file)
	    {
	      fprintf (dump_file, "Adding assert for ");
	      print_generic_expr (dump_file, name3, 0);
	      fprintf (dump_file, " from ");
	      print_generic_expr (dump_file, tmp, 0);
	      fprintf (dump_file, "\n");
	    }

	  register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);

	  retval = true;
	}

      /* If name2 is used later, create an ASSERT_EXPR for it.  */
      if (name2 != NULL_TREE
	  && TREE_CODE (name2) == SSA_NAME
	  && TREE_CODE (cst2) == INTEGER_CST
	  && INTEGRAL_TYPE_P (TREE_TYPE (name2))
	  && live_on_edge (e, name2)
	  && !has_single_use (name2))
	{
	  tree tmp;

	  /* Build an expression for the range test:
	     [(TYPE (name))] name2 [+ cst2].  */
	  tmp = name2;
	  if (TREE_TYPE (name) != TREE_TYPE (name2))
	    tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
	  if (cst2 != NULL_TREE)
	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);

	  if (dump_file)
	    {
	      fprintf (dump_file, "Adding assert for ");
	      print_generic_expr (dump_file, name2, 0);
	      fprintf (dump_file, " from ");
	      print_generic_expr (dump_file, tmp, 0);
	      fprintf (dump_file, "\n");
	    }

	  register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);

	  retval = true;
	}
    }

  /* In the case of post-in/decrement tests like if (i++) ... and uses
     of the in/decremented value on the edge the extra name we want to
     assert for is not on the def chain of the name compared.  Instead
     it is in the set of use stmts.  */
  if ((comp_code == NE_EXPR
       || comp_code == EQ_EXPR)
      && TREE_CODE (val) == INTEGER_CST)
    {
      imm_use_iterator ui;
      gimple use_stmt;
      FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
	{
	  /* Cut off to use-stmts that are in the predecessor.  */
	  if (gimple_bb (use_stmt) != e->src)
	    continue;

	  if (!is_gimple_assign (use_stmt))
	    continue;

	  enum tree_code code = gimple_assign_rhs_code (use_stmt);
	  if (code != PLUS_EXPR
	      && code != MINUS_EXPR)
	    continue;

	  tree cst = gimple_assign_rhs2 (use_stmt);
	  if (TREE_CODE (cst) != INTEGER_CST)
	    continue;

	  tree name2 = gimple_assign_lhs (use_stmt);
	  if (live_on_edge (e, name2))
	    {
	      /* NAME2 = NAME +/- CST and NAME ==/!= VAL implies
		 NAME2 ==/!= VAL +/- CST.  */
	      cst = int_const_binop (code, val, cst);
	      register_new_assert_for (name2, name2, comp_code, cst,
				       NULL, e, bsi);
	      retval = true;
	    }
	}
    }

  if (TREE_CODE_CLASS (comp_code) == tcc_comparison
      && TREE_CODE (val) == INTEGER_CST)
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (name);
      tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
      tree val2 = NULL_TREE;
      unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
      wide_int mask = wi::zero (prec);
      /* NPREC may be widened below when looking through a narrowing
	 conversion (the BIT_AND-like case).  */
      unsigned int nprec = prec;
      enum tree_code rhs_code = ERROR_MARK;

      if (is_gimple_assign (def_stmt))
	rhs_code = gimple_assign_rhs_code (def_stmt);

      /* Add asserts for NAME cmp CST and NAME being defined
	 as NAME = (int) NAME2.  */
      if (!TYPE_UNSIGNED (TREE_TYPE (val))
	  && (comp_code == LE_EXPR || comp_code == LT_EXPR
	      || comp_code == GT_EXPR || comp_code == GE_EXPR)
	  && gimple_assign_cast_p (def_stmt))
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  if (CONVERT_EXPR_CODE_P (rhs_code)
	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
	      && TYPE_UNSIGNED (TREE_TYPE (name2))
	      && prec == TYPE_PRECISION (TREE_TYPE (name2))
	      && (comp_code == LE_EXPR || comp_code == GT_EXPR
		  || !tree_int_cst_equal (val,
					  TYPE_MIN_VALUE (TREE_TYPE (val))))
	      && live_on_edge (e, name2)
	      && !has_single_use (name2))
	    {
	      tree tmp, cst;
	      enum tree_code new_comp_code = comp_code;

	      cst = fold_convert (TREE_TYPE (name2),
				  TYPE_MIN_VALUE (TREE_TYPE (val)));
	      /* Build an expression for the range test.  */
	      tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
	      cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
				 fold_convert (TREE_TYPE (name2), val));
	      if (comp_code == LT_EXPR || comp_code == GE_EXPR)
		{
		  /* Turn the strict bound into a non-strict one so the
		     test survives the unsigned reinterpretation.  */
		  new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
		  cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
				     build_int_cst (TREE_TYPE (name2), 1));
		}

	      if (dump_file)
		{
		  fprintf (dump_file, "Adding assert for ");
		  print_generic_expr (dump_file, name2, 0);
		  fprintf (dump_file, " from ");
		  print_generic_expr (dump_file, tmp, 0);
		  fprintf (dump_file, "\n");
		}

	      register_new_assert_for (name2, tmp, new_comp_code, cst, NULL,
				       e, bsi);

	      retval = true;
	    }
	}

      /* Add asserts for NAME cmp CST and NAME being defined as
	 NAME = NAME2 >> CST2.

	 Extract CST2 from the right shift.  */
      if (rhs_code == RSHIFT_EXPR)
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  cst2 = gimple_assign_rhs2 (def_stmt);
	  if (TREE_CODE (name2) == SSA_NAME
	      && tree_fits_uhwi_p (cst2)
	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
	      && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
	      && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val)))
	      && live_on_edge (e, name2)
	      && !has_single_use (name2))
	    {
	      /* MASK covers the CST2 low bits lost by the shift.  */
	      mask = wi::mask (tree_to_uhwi (cst2), false, prec);
	      val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
	    }
	}
      /* Only proceed if VAL << CST2 >> CST2 round-trips back to VAL,
	 i.e. no significant bits were shifted out.  */
      if (val2 != NULL_TREE
	  && TREE_CODE (val2) == INTEGER_CST
	  && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
					    TREE_TYPE (val),
					    val2, cst2), val))
	{
	  enum tree_code new_comp_code = comp_code;
	  tree tmp, new_val;

	  tmp = name2;
	  if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
	    {
	      if (!TYPE_UNSIGNED (TREE_TYPE (val)))
		{
		  /* Switch to an unsigned view so NAME2 - VAL2 can be
		     compared against MASK without overflow issues.  */
		  tree type = build_nonstandard_integer_type (prec, 1);
		  tmp = build1 (NOP_EXPR, type, name2);
		  val2 = fold_convert (type, val2);
		}
	      tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
	      new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
	      new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
	    }
	  else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
	    {
	      wide_int minval
		= wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
	      new_val = val2;
	      /* NAME2 < TYPE_MIN / NAME2 >= TYPE_MIN carries no
		 information; skip the assertion.  */
	      if (minval == new_val)
		new_val = NULL_TREE;
	    }
	  else
	    {
	      wide_int maxval
		= wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
	      mask |= val2;
	      /* Likewise for a bound that equals TYPE_MAX.  */
	      if (mask == maxval)
		new_val = NULL_TREE;
	      else
		new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
	    }

	  if (new_val)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Adding assert for ");
		  print_generic_expr (dump_file, name2, 0);
		  fprintf (dump_file, " from ");
		  print_generic_expr (dump_file, tmp, 0);
		  fprintf (dump_file, "\n");
		}

	      register_new_assert_for (name2, tmp, new_comp_code, new_val,
				       NULL, e, bsi);
	      retval = true;
	    }
	}

      /* Add asserts for NAME cmp CST and NAME being defined as
	 NAME = NAME2 & CST2.

	 Extract CST2 from the and.

	 Also handle
	 NAME = (unsigned) NAME2;
	 casts where NAME's type is unsigned and has smaller precision
	 than NAME2's type as if it was NAME = NAME2 & MASK.  */
      names[0] = NULL_TREE;
      names[1] = NULL_TREE;
      cst2 = NULL_TREE;
      if (rhs_code == BIT_AND_EXPR
	  || (CONVERT_EXPR_CODE_P (rhs_code)
	      && TREE_CODE (TREE_TYPE (val)) == INTEGER_TYPE
	      && TYPE_UNSIGNED (TREE_TYPE (val))
	      && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
		 > prec
	      && !retval))
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  if (rhs_code == BIT_AND_EXPR)
	    cst2 = gimple_assign_rhs2 (def_stmt);
	  else
	    {
	      /* Treat the narrowing cast as NAME2 & TYPE_MAX (val),
		 working in NAME2's wider precision from here on.  */
	      cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
	      nprec = TYPE_PRECISION (TREE_TYPE (name2));
	    }
	  if (TREE_CODE (name2) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
	      && TREE_CODE (cst2) == INTEGER_CST
	      && !integer_zerop (cst2)
	      && (nprec > 1
		  || TYPE_UNSIGNED (TREE_TYPE (val))))
	    {
	      gimple def_stmt2 = SSA_NAME_DEF_STMT (name2);
	      /* Also consider the pre-cast source of NAME2 (same
		 precision integral conversion) as a second candidate.  */
	      if (gimple_assign_cast_p (def_stmt2))
		{
		  names[1] = gimple_assign_rhs1 (def_stmt2);
		  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
		      || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
		      || (TYPE_PRECISION (TREE_TYPE (name2))
			  != TYPE_PRECISION (TREE_TYPE (names[1])))
		      || !live_on_edge (e, names[1])
		      || has_single_use (names[1]))
		    names[1] = NULL_TREE;
		}
	      if (live_on_edge (e, name2)
		  && !has_single_use (name2))
		names[0] = name2;
	    }
	}
      if (names[0] || names[1])
	{
	  wide_int minv, maxv, valv, cst2v;
	  wide_int tem, sgnbit;
	  bool valid_p = false, valn, cst2n;
	  enum tree_code ccode = comp_code;

	  /* Work in an NPREC-wide unsigned domain; SGNBIT biases
	     signed values so masked_increment can treat them as
	     unsigned.  */
	  valv = wide_int::from (val, nprec, UNSIGNED);
	  cst2v = wide_int::from (cst2, nprec, UNSIGNED);
	  valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
	  cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
	  /* If CST2 doesn't have most significant bit set,
	     but VAL is negative, we have comparison like
	     if ((x & 0x123) > -4) (always true).  Just give up.  */
	  if (!cst2n && valn)
	    ccode = ERROR_MARK;
	  if (cst2n)
	    sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
	  else
	    sgnbit = wi::zero (nprec);
	  minv = valv & cst2v;
	  switch (ccode)
	    {
	    case EQ_EXPR:
	      /* Minimum unsigned value for equality is VAL & CST2
		 (should be equal to VAL, otherwise we probably should
		 have folded the comparison into false) and
		 maximum unsigned value is VAL | ~CST2.  */
	      maxv = valv | ~cst2v;
	      valid_p = true;
	      break;

	    case NE_EXPR:
	      tem = valv | ~cst2v;
	      /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U.  */
	      if (valv == 0)
		{
		  cst2n = false;
		  sgnbit = wi::zero (nprec);
		  goto gt_expr;
		}
	      /* If (VAL | ~CST2) is all ones, handle it as
		 (X & CST2) < VAL.  */
	      if (tem == -1)
		{
		  cst2n = false;
		  valn = false;
		  sgnbit = wi::zero (nprec);
		  goto lt_expr;
		}
	      if (!cst2n && wi::neg_p (cst2v))
		sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
	      if (sgnbit != 0)
		{
		  if (valv == sgnbit)
		    {
		      cst2n = true;
		      valn = true;
		      goto gt_expr;
		    }
		  if (tem == wi::mask (nprec - 1, false, nprec))
		    {
		      cst2n = true;
		      goto lt_expr;
		    }
		  if (!cst2n)
		    sgnbit = wi::zero (nprec);
		}
	      break;

	    case GE_EXPR:
	      /* Minimum unsigned value for >= if (VAL & CST2) == VAL
		 is VAL and maximum unsigned value is ~0.  For signed
		 comparison, if CST2 doesn't have most significant bit
		 set, handle it similarly.  If CST2 has MSB set,
		 the minimum is the same, and maximum is ~0U/2.  */
	      if (minv != valv)
		{
		  /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
		     VAL.  */
		  minv = masked_increment (valv, cst2v, sgnbit, nprec);
		  if (minv == valv)
		    break;
		}
	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
	      valid_p = true;
	      break;

	    case GT_EXPR:
	    gt_expr:
	      /* Find out smallest MINV where MINV > VAL
		 && (MINV & CST2) == MINV, if any.  If VAL is signed and
		 CST2 has MSB set, compute it biased by 1 << (nprec - 1).  */
	      minv = masked_increment (valv, cst2v, sgnbit, nprec);
	      if (minv == valv)
		break;
	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
	      valid_p = true;
	      break;

	    case LE_EXPR:
	      /* Minimum unsigned value for <= is 0 and maximum
		 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
		 Otherwise, find smallest VAL2 where VAL2 > VAL
		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
		 as maximum.
		 For signed comparison, if CST2 doesn't have most
		 significant bit set, handle it similarly.  If CST2 has
		 MSB set, the maximum is the same and minimum is INT_MIN.  */
	      if (minv == valv)
		maxv = valv;
	      else
		{
		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
		  if (maxv == valv)
		    break;
		  maxv -= 1;
		}
	      maxv |= ~cst2v;
	      minv = sgnbit;
	      valid_p = true;
	      break;

	    case LT_EXPR:
	    lt_expr:
	      /* Minimum unsigned value for < is 0 and maximum
		 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
		 Otherwise, find smallest VAL2 where VAL2 > VAL
		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
		 as maximum.
		 For signed comparison, if CST2 doesn't have most
		 significant bit set, handle it similarly.  If CST2 has
		 MSB set, the maximum is the same and minimum is INT_MIN.  */
	      if (minv == valv)
		{
		  if (valv == sgnbit)
		    break;
		  maxv = valv;
		}
	      else
		{
		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
		  if (maxv == valv)
		    break;
		}
	      maxv -= 1;
	      maxv |= ~cst2v;
	      minv = sgnbit;
	      valid_p = true;
	      break;

	    default:
	      break;
	    }
	  /* MAXV - MINV == all-ones would mean the full range, which
	     carries no information.  */
	  if (valid_p
	      && (maxv - minv) != -1)
	    {
	      tree tmp, new_val, type;
	      int i;

	      for (i = 0; i < 2; i++)
		if (names[i])
		  {
		    wide_int maxv2 = maxv;
		    tmp = names[i];
		    type = TREE_TYPE (names[i]);
		    if (!TYPE_UNSIGNED (type))
		      {
			type = build_nonstandard_integer_type (nprec, 1);
			tmp = build1 (NOP_EXPR, type, names[i]);
		      }
		    /* Emit the range test as NAME - MINV <= MAXV - MINV
		       (unsigned), i.e. a single LE_EXPR assertion.  */
		    if (minv != 0)
		      {
			tmp = build2 (PLUS_EXPR, type, tmp,
				      wide_int_to_tree (type, -minv));
			maxv2 = maxv - minv;
		      }
		    new_val = wide_int_to_tree (type, maxv2);

		    if (dump_file)
		      {
			fprintf (dump_file, "Adding assert for ");
			print_generic_expr (dump_file, names[i], 0);
			fprintf (dump_file, " from ");
			print_generic_expr (dump_file, tmp, 0);
			fprintf (dump_file, "\n");
		      }

		    register_new_assert_for (names[i], tmp, LE_EXPR,
					     new_val, NULL, e, bsi);
		    retval = true;
		  }
	    }
	}
    }

  return retval;
}
5532 | ||
fecf3b39 | 5533 | /* OP is an operand of a truth value expression which is known to have |
5534 | a particular value. Register any asserts for OP and for any | |
48e1416a | 5535 | operands in OP's defining statement. |
fecf3b39 | 5536 | |
5537 | If CODE is EQ_EXPR, then we want to register OP is zero (false), | |
5538 | if CODE is NE_EXPR, then we want to register OP is nonzero (true). */ | |
5539 | ||
5540 | static bool | |
5541 | register_edge_assert_for_1 (tree op, enum tree_code code, | |
75a70cf9 | 5542 | edge e, gimple_stmt_iterator bsi) |
fecf3b39 | 5543 | { |
13f7fd91 | 5544 | bool retval = false; |
75a70cf9 | 5545 | gimple op_def; |
5546 | tree val; | |
a00913c7 | 5547 | enum tree_code rhs_code; |
eea12c72 | 5548 | |
fecf3b39 | 5549 | /* We only care about SSA_NAMEs. */ |
5550 | if (TREE_CODE (op) != SSA_NAME) | |
eea12c72 | 5551 | return false; |
5552 | ||
fecf3b39 | 5553 | /* We know that OP will have a zero or nonzero value. If OP is used |
5a0d3f9f | 5554 | more than once go ahead and register an assert for OP. */ |
5555 | if (live_on_edge (e, op) | |
5556 | && !has_single_use (op)) | |
fecf3b39 | 5557 | { |
5558 | val = build_int_cst (TREE_TYPE (op), 0); | |
bed8bec4 | 5559 | register_new_assert_for (op, op, code, val, NULL, e, bsi); |
fecf3b39 | 5560 | retval = true; |
5561 | } | |
5562 | ||
5563 | /* Now look at how OP is set. If it's set from a comparison, | |
5564 | a truth operation or some bit operations, then we may be able | |
5565 | to register information about the operands of that assignment. */ | |
5566 | op_def = SSA_NAME_DEF_STMT (op); | |
75a70cf9 | 5567 | if (gimple_code (op_def) != GIMPLE_ASSIGN) |
fecf3b39 | 5568 | return retval; |
5569 | ||
75a70cf9 | 5570 | rhs_code = gimple_assign_rhs_code (op_def); |
fecf3b39 | 5571 | |
75a70cf9 | 5572 | if (TREE_CODE_CLASS (rhs_code) == tcc_comparison) |
eea12c72 | 5573 | { |
13f7fd91 | 5574 | bool invert = (code == EQ_EXPR ? true : false); |
75a70cf9 | 5575 | tree op0 = gimple_assign_rhs1 (op_def); |
5576 | tree op1 = gimple_assign_rhs2 (op_def); | |
eea12c72 | 5577 | |
bed8bec4 | 5578 | if (TREE_CODE (op0) == SSA_NAME) |
a00913c7 | 5579 | retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1, |
5580 | invert); | |
bed8bec4 | 5581 | if (TREE_CODE (op1) == SSA_NAME) |
a00913c7 | 5582 | retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1, |
5583 | invert); | |
fecf3b39 | 5584 | } |
5585 | else if ((code == NE_EXPR | |
cfd7906e | 5586 | && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR) |
fecf3b39 | 5587 | || (code == EQ_EXPR |
cfd7906e | 5588 | && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR)) |
fecf3b39 | 5589 | { |
5590 | /* Recurse on each operand. */ | |
c2ff4a23 | 5591 | tree op0 = gimple_assign_rhs1 (op_def); |
5592 | tree op1 = gimple_assign_rhs2 (op_def); | |
5593 | if (TREE_CODE (op0) == SSA_NAME | |
5594 | && has_single_use (op0)) | |
5595 | retval |= register_edge_assert_for_1 (op0, code, e, bsi); | |
5596 | if (TREE_CODE (op1) == SSA_NAME | |
5597 | && has_single_use (op1)) | |
5598 | retval |= register_edge_assert_for_1 (op1, code, e, bsi); | |
fecf3b39 | 5599 | } |
eea7f7eb | 5600 | else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR |
5601 | && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1) | |
fecf3b39 | 5602 | { |
13f7fd91 | 5603 | /* Recurse, flipping CODE. */ |
5604 | code = invert_tree_comparison (code, false); | |
75a70cf9 | 5605 | retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), |
13f7fd91 | 5606 | code, e, bsi); |
fecf3b39 | 5607 | } |
75a70cf9 | 5608 | else if (gimple_assign_rhs_code (op_def) == SSA_NAME) |
fecf3b39 | 5609 | { |
13f7fd91 | 5610 | /* Recurse through the copy. */ |
75a70cf9 | 5611 | retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), |
5612 | code, e, bsi); | |
fecf3b39 | 5613 | } |
d9659041 | 5614 | else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def))) |
48e1416a | 5615 | { |
f1dc1ab1 | 5616 | /* Recurse through the type conversion, unless it is a narrowing |
5617 | conversion or conversion from non-integral type. */ | |
5618 | tree rhs = gimple_assign_rhs1 (op_def); | |
5619 | if (INTEGRAL_TYPE_P (TREE_TYPE (rhs)) | |
5620 | && (TYPE_PRECISION (TREE_TYPE (rhs)) | |
5621 | <= TYPE_PRECISION (TREE_TYPE (op)))) | |
5622 | retval |= register_edge_assert_for_1 (rhs, code, e, bsi); | |
fecf3b39 | 5623 | } |
eea12c72 | 5624 | |
fecf3b39 | 5625 | return retval; |
5626 | } | |
072e921b | 5627 | |
/* Try to register an edge assertion for SSA name NAME on edge E for
   the condition COND contributing to the conditional jump pointed to by SI.
   The condition is given decomposed as COND_CODE applied to COND_OP0 and
   COND_OP1.  Return true if an assertion for NAME could be registered.  */

static bool
register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
			  enum tree_code cond_code, tree cond_op0,
			  tree cond_op1)
{
  tree val;
  enum tree_code comp_code;
  bool retval = false;
  /* On the false edge the condition holds inverted; remember that so the
     extracted comparison code below reflects what is actually true on E.  */
  bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;

  /* Do not attempt to infer anything in names that flow through
     abnormal edges.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
    return false;

  /* Normalize the condition into NAME COMP_CODE VAL, inverting it when
     we are on the else edge.  Bail if NAME does not appear in it.  */
  if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
						cond_op0, cond_op1,
						is_else_edge,
						&comp_code, &val))
    return false;

  /* Register ASSERT_EXPRs for name.  */
  retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
					cond_op1, is_else_edge);


  /* If COND is effectively an equality test of an SSA_NAME against
     the value zero or one, then we may be able to assert values
     for SSA_NAMEs which flow into COND.  */

  /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
     statement of NAME we can assert both operands of the BIT_AND_EXPR
     have nonzero value.  */
  if (((comp_code == EQ_EXPR && integer_onep (val))
       || (comp_code == NE_EXPR && integer_zerop (val))))
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (name);

      if (is_gimple_assign (def_stmt)
	  && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
	{
	  tree op0 = gimple_assign_rhs1 (def_stmt);
	  tree op1 = gimple_assign_rhs2 (def_stmt);
	  retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
	  retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
	}
    }

  /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
     statement of NAME we can assert both operands of the BIT_IOR_EXPR
     have zero value.  */
  if (((comp_code == EQ_EXPR && integer_zerop (val))
       || (comp_code == NE_EXPR && integer_onep (val))))
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (name);

      /* For BIT_IOR_EXPR only if NAME == 0 both operands have
	 necessarily zero value, or if type-precision is one (so that
	 NAME != 1 also implies NAME == 0).  */
      if (is_gimple_assign (def_stmt)
	  && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
	      && (TYPE_PRECISION (TREE_TYPE (name)) == 1
		  || comp_code == EQ_EXPR)))
	{
	  tree op0 = gimple_assign_rhs1 (def_stmt);
	  tree op1 = gimple_assign_rhs2 (def_stmt);
	  retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
	  retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
	}
    }

  return retval;
}
5704 | ||
5705 | ||
eea12c72 | 5706 | /* Determine whether the outgoing edges of BB should receive an |
fecf3b39 | 5707 | ASSERT_EXPR for each of the operands of BB's LAST statement. |
f3d56fef | 5708 | The last statement of BB must be a COND_EXPR. |
eea12c72 | 5709 | |
5710 | If any of the sub-graphs rooted at BB have an interesting use of | |
5711 | the predicate operands, an assert location node is added to the | |
5712 | list of assertions for the corresponding operands. */ | |
5713 | ||
5714 | static bool | |
75a70cf9 | 5715 | find_conditional_asserts (basic_block bb, gimple last) |
eea12c72 | 5716 | { |
5717 | bool need_assert; | |
75a70cf9 | 5718 | gimple_stmt_iterator bsi; |
fecf3b39 | 5719 | tree op; |
eea12c72 | 5720 | edge_iterator ei; |
5721 | edge e; | |
5722 | ssa_op_iter iter; | |
5723 | ||
5724 | need_assert = false; | |
75a70cf9 | 5725 | bsi = gsi_for_stmt (last); |
eea12c72 | 5726 | |
5727 | /* Look for uses of the operands in each of the sub-graphs | |
5728 | rooted at BB. We need to check each of the outgoing edges | |
5729 | separately, so that we know what kind of ASSERT_EXPR to | |
5730 | insert. */ | |
5731 | FOR_EACH_EDGE (e, ei, bb->succs) | |
5732 | { | |
5733 | if (e->dest == bb) | |
5734 | continue; | |
5735 | ||
eea12c72 | 5736 | /* Register the necessary assertions for each operand in the |
5737 | conditional predicate. */ | |
5738 | FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE) | |
a00913c7 | 5739 | { |
75a70cf9 | 5740 | need_assert |= register_edge_assert_for (op, e, bsi, |
5741 | gimple_cond_code (last), | |
5742 | gimple_cond_lhs (last), | |
5743 | gimple_cond_rhs (last)); | |
a00913c7 | 5744 | } |
eea12c72 | 5745 | } |
5746 | ||
eea12c72 | 5747 | return need_assert; |
5748 | } | |
5749 | ||
/* Pairs a switch case label with the basic block it transfers
   control to, for sorting case labels by destination.  */
struct case_info
{
  tree expr;		/* The CASE_LABEL_EXPR for this case.  */
  basic_block bb;	/* Destination block of the case label.  */
};
5755 | ||
5756 | /* Compare two case labels sorting first by the destination bb index | |
f3d56fef | 5757 | and then by the case value. */ |
5758 | ||
5759 | static int | |
5760 | compare_case_labels (const void *p1, const void *p2) | |
5761 | { | |
8b318348 | 5762 | const struct case_info *ci1 = (const struct case_info *) p1; |
5763 | const struct case_info *ci2 = (const struct case_info *) p2; | |
5764 | int idx1 = ci1->bb->index; | |
5765 | int idx2 = ci2->bb->index; | |
f3d56fef | 5766 | |
8b318348 | 5767 | if (idx1 < idx2) |
f3d56fef | 5768 | return -1; |
8b318348 | 5769 | else if (idx1 == idx2) |
f3d56fef | 5770 | { |
5771 | /* Make sure the default label is first in a group. */ | |
8b318348 | 5772 | if (!CASE_LOW (ci1->expr)) |
f3d56fef | 5773 | return -1; |
8b318348 | 5774 | else if (!CASE_LOW (ci2->expr)) |
f3d56fef | 5775 | return 1; |
5776 | else | |
8b318348 | 5777 | return tree_int_cst_compare (CASE_LOW (ci1->expr), |
5778 | CASE_LOW (ci2->expr)); | |
f3d56fef | 5779 | } |
5780 | else | |
5781 | return 1; | |
5782 | } | |
5783 | ||
/* Determine whether the outgoing edges of BB should receive an
   ASSERT_EXPR for each of the operands of BB's LAST statement.
   The last statement of BB must be a SWITCH_EXPR.

   If any of the sub-graphs rooted at BB have an interesting use of
   the predicate operands, an assert location node is added to the
   list of assertions for the corresponding operands.  */

static bool
find_switch_asserts (basic_block bb, gimple last)
{
  bool need_assert;
  gimple_stmt_iterator bsi;
  tree op;
  edge e;
  struct case_info *ci;
  size_t n = gimple_switch_num_labels (last);
#if GCC_VERSION >= 4000
  unsigned int idx;
#else
  /* Work around GCC 3.4 bug (PR 37086).  */
  volatile unsigned int idx;
#endif

  need_assert = false;
  bsi = gsi_for_stmt (last);
  op = gimple_switch_index (last);
  /* We can only assert on an SSA_NAME switch index.  */
  if (TREE_CODE (op) != SSA_NAME)
    return false;

  /* Build a vector of case labels sorted by destination label, so
     that labels sharing a destination become adjacent.  */
  ci = XNEWVEC (struct case_info, n);
  for (idx = 0; idx < n; ++idx)
    {
      ci[idx].expr = gimple_switch_label (last, idx);
      ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
    }
  qsort (ci, n, sizeof (struct case_info), compare_case_labels);

  for (idx = 0; idx < n; ++idx)
    {
      tree min, max;
      tree cl = ci[idx].expr;
      basic_block cbb = ci[idx].bb;

      min = CASE_LOW (cl);
      max = CASE_HIGH (cl);

      /* If there are multiple case labels with the same destination
	 we need to combine them to a single value range for the edge.  */
      if (idx + 1 < n && cbb == ci[idx + 1].bb)
	{
	  /* Skip labels until the last of the group.  */
	  do {
	    ++idx;
	  } while (idx < n && cbb == ci[idx].bb);
	  --idx;

	  /* Pick up the maximum of the case label range.  The sort
	     guarantees the last label of the group has the largest
	     value.  */
	  if (CASE_HIGH (ci[idx].expr))
	    max = CASE_HIGH (ci[idx].expr);
	  else
	    max = CASE_LOW (ci[idx].expr);
	}

      /* Nothing to do if the range includes the default label until we
	 can register anti-ranges.  (MIN is NULL only for the default
	 label group.)  */
      if (min == NULL_TREE)
	continue;

      /* Find the edge to register the assert expr on.  */
      e = find_edge (bb, cbb);

      /* Register the necessary assertions for the operand in the
	 SWITCH_EXPR.  A range [MIN, MAX] becomes the pair OP >= MIN
	 and OP <= MAX; a single value becomes OP == MIN.  */
      need_assert |= register_edge_assert_for (op, e, bsi,
					       max ? GE_EXPR : EQ_EXPR,
					       op,
					       fold_convert (TREE_TYPE (op),
							     min));
      if (max)
	{
	  need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
						   op,
						   fold_convert (TREE_TYPE (op),
								 max));
	}
    }

  XDELETEVEC (ci);
  return need_assert;
}
5876 | ||
eea12c72 | 5877 | |
/* Traverse all the statements in block BB looking for statements that
   may generate useful assertions for the SSA names in their operand.
   If a statement produces a useful assertion A for name N_i, then the
   list of assertions already generated for N_i is scanned to
   determine if A is actually needed.

   If N_i already had the assertion A at a location dominating the
   current location, then nothing needs to be done.  Otherwise, the
   new location for A is recorded instead.

   1- For every statement S in BB, all the variables used by S are
      added to bitmap FOUND_IN_SUBGRAPH.

   2- If statement S uses an operand N in a way that exposes a known
      value range for N, then if N was not already generated by an
      ASSERT_EXPR, create a new assert location for N.  For instance,
      if N is a pointer and the statement dereferences it, we can
      assume that N is not NULL.

   3- COND_EXPRs are a special case of #2.  We can derive range
      information from the predicate but need to insert different
      ASSERT_EXPRs for each of the sub-graphs rooted at the
      conditional block.  If the last statement of BB is a conditional
      expression of the form 'X op Y', then

      a) Remove X and Y from the set FOUND_IN_SUBGRAPH.

      b) If the conditional is the only entry point to the sub-graph
	 corresponding to the THEN_CLAUSE, recurse into it.  On
	 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
	 an ASSERT_EXPR is added for the corresponding variable.

      c) Repeat step (b) on the ELSE_CLAUSE.

      d) Mark X and Y in FOUND_IN_SUBGRAPH.

      For instance,

	    if (a == 9)
	      b = a;
	    else
	      b = c + 1;

      In this case, an assertion on the THEN clause is useful to
      determine that 'a' is always 9 on that edge.  However, an assertion
      on the ELSE clause would be unnecessary.

   4- If BB does not end in a conditional expression, then we recurse
      into BB's dominator children.

   At the end of the recursive traversal, every SSA name will have a
   list of locations where ASSERT_EXPRs should be added.  When a new
   location for name N is found, it is registered by calling
   register_new_assert_for.  That function keeps track of all the
   registered assertions to prevent adding unnecessary assertions.
   For instance, if a pointer P_4 is dereferenced more than once in a
   dominator tree, only the location dominating all the dereference of
   P_4 will receive an ASSERT_EXPR.

   If this function returns true, then it means that there are names
   for which we need to generate ASSERT_EXPRs.  Those assertions are
   inserted by process_assert_insertions.

   LIVE is the set of SSA name versions live on exit from BB; it is
   updated in place to reflect liveness on entry to BB.  */

static bool
find_assert_locations_1 (basic_block bb, sbitmap live)
{
  gimple_stmt_iterator si;
  gimple last;
  bool need_assert;

  need_assert = false;
  last = last_stmt (bb);

  /* If BB's last statement is a conditional statement involving integer
     operands, determine if we need to add ASSERT_EXPRs.  Floating-point
     predicates are skipped: VRP does not reason about FP ranges.  */
  if (last
      && gimple_code (last) == GIMPLE_COND
      && !fp_predicate (last)
      && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
    need_assert |= find_conditional_asserts (bb, last);

  /* If BB's last statement is a switch statement involving integer
     operands, determine if we need to add ASSERT_EXPRs.  */
  if (last
      && gimple_code (last) == GIMPLE_SWITCH
      && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
    need_assert |= find_switch_asserts (bb, last);

  /* Traverse all the statements in BB marking used names and looking
     for statements that may infer assertions for their used operands.
     The walk is backwards so LIVE (live-on-exit) is transformed into
     live-on-entry as uses are added and defs are removed.  */
  for (si = gsi_last_bb (bb); !gsi_end_p (si); gsi_prev (&si))
    {
      gimple stmt;
      tree op;
      ssa_op_iter i;

      stmt = gsi_stmt (si);

      /* Debug statements must not influence code generation.  */
      if (is_gimple_debug (stmt))
	continue;

      /* See if we can derive an assertion for any of STMT's operands.  */
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
	{
	  tree value;
	  enum tree_code comp_code;

	  /* If op is not live beyond this stmt, do not bother to insert
	     asserts for it.  */
	  if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
	    continue;

	  /* If OP is used in such a way that we can infer a value
	     range for it, and we don't find a previous assertion for
	     it, create a new assertion location node for OP.  */
	  if (infer_value_range (stmt, op, &comp_code, &value))
	    {
	      /* If we are able to infer a nonzero value range for OP,
		 then walk backwards through the use-def chain to see if OP
		 was set via a typecast.

		 If so, then we can also infer a nonzero value range
		 for the operand of the NOP_EXPR.  */
	      if (comp_code == NE_EXPR && integer_zerop (value))
		{
		  tree t = op;
		  gimple def_stmt = SSA_NAME_DEF_STMT (t);

		  while (is_gimple_assign (def_stmt)
			 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR
			 && TREE_CODE
			     (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
			 && POINTER_TYPE_P
			     (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
		    {
		      t = gimple_assign_rhs1 (def_stmt);
		      def_stmt = SSA_NAME_DEF_STMT (t);

		      /* Note we want to register the assert for the
			 operand of the NOP_EXPR after SI, not after the
			 conversion.  */
		      if (! has_single_use (t))
			{
			  register_new_assert_for (t, t, comp_code, value,
						   bb, NULL, si);
			  need_assert = true;
			}
		    }
		}

	      register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
	      need_assert = true;
	    }
	}

      /* Update live: uses become live before this statement, defs die.  */
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
	bitmap_set_bit (live, SSA_NAME_VERSION (op));
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
	bitmap_clear_bit (live, SSA_NAME_VERSION (op));
    }

  /* Traverse all PHI nodes in BB, updating live.  */
  for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
    {
      use_operand_p arg_p;
      ssa_op_iter i;
      gimple phi = gsi_stmt (si);
      tree res = gimple_phi_result (phi);

      /* Virtual operands carry no range information.  */
      if (virtual_operand_p (res))
	continue;

      FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
	{
	  tree arg = USE_FROM_PTR (arg_p);
	  if (TREE_CODE (arg) == SSA_NAME)
	    bitmap_set_bit (live, SSA_NAME_VERSION (arg));
	}

      bitmap_clear_bit (live, SSA_NAME_VERSION (res));
    }

  return need_assert;
}
6063 | ||
/* Do an RPO walk over the function computing SSA name liveness
   on-the-fly and deciding on assert expressions to insert.
   Returns true if there are assert expressions to be inserted.

   Liveness bitmaps (in the file-level LIVE array, indexed by block
   number) are allocated lazily and freed as soon as no unvisited
   predecessor still needs them, bounding peak memory use.  */

static bool
find_assert_locations (void)
{
  int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
  int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
  int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
  int rpo_cnt, i;
  bool need_asserts;

  live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
  rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
  /* bb_rpo maps block index -> position in the RPO order.  */
  for (i = 0; i < rpo_cnt; ++i)
    bb_rpo[rpo[i]] = i;

  /* Pre-seed loop latch liveness from loop header PHI nodes.  Due to
     the order we compute liveness and insert asserts we otherwise
     fail to insert asserts into the loop latch.  */
  loop_p loop;
  FOR_EACH_LOOP (loop, 0)
    {
      i = loop->latch->index;
      unsigned int j = single_succ_edge (loop->latch)->dest_idx;
      for (gimple_stmt_iterator gsi = gsi_start_phis (loop->header);
	   !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple phi = gsi_stmt (gsi);
	  if (virtual_operand_p (gimple_phi_result (phi)))
	    continue;
	  tree arg = gimple_phi_arg_def (phi, j);
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      if (live[i] == NULL)
		{
		  live[i] = sbitmap_alloc (num_ssa_names);
		  bitmap_clear (live[i]);
		}
	      bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
	    }
	}
    }

  need_asserts = false;
  /* Walk blocks in reverse RPO (i.e. post-order), so liveness computed
     for a block can be merged into its predecessors before they are
     visited.  */
  for (i = rpo_cnt - 1; i >= 0; --i)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
      edge e;
      edge_iterator ei;

      if (!live[rpo[i]])
	{
	  live[rpo[i]] = sbitmap_alloc (num_ssa_names);
	  bitmap_clear (live[rpo[i]]);
	}

      /* Process BB and update the live information with uses in
	 this block.  */
      need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);

      /* Merge liveness into the predecessor blocks and free it.  */
      if (!bitmap_empty_p (live[rpo[i]]))
	{
	  int pred_rpo = i;
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      int pred = e->src->index;
	      /* Back edges and the entry block never receive merged
		 liveness.  */
	      if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
		continue;

	      if (!live[pred])
		{
		  live[pred] = sbitmap_alloc (num_ssa_names);
		  bitmap_clear (live[pred]);
		}
	      bitmap_ior (live[pred], live[pred], live[rpo[i]]);

	      if (bb_rpo[pred] < pred_rpo)
		pred_rpo = bb_rpo[pred];
	    }

	  /* Record the RPO number of the last visited block that needs
	     live information from this block.  */
	  last_rpo[rpo[i]] = pred_rpo;
	}
      else
	{
	  sbitmap_free (live[rpo[i]]);
	  live[rpo[i]] = NULL;
	}

      /* We can free all successors live bitmaps if all their
	 predecessors have been visited already.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (last_rpo[e->dest->index] == i
	    && live[e->dest->index])
	  {
	    sbitmap_free (live[e->dest->index]);
	    live[e->dest->index] = NULL;
	  }
    }

  XDELETEVEC (rpo);
  XDELETEVEC (bb_rpo);
  XDELETEVEC (last_rpo);
  /* Free any liveness bitmaps still outstanding (e.g. for unreachable
     blocks or latch pre-seeds never consumed).  */
  for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
    if (live[i])
      sbitmap_free (live[i]);
  XDELETEVEC (live);

  return need_asserts;
}
eea12c72 | 6178 | |
6179 | /* Create an ASSERT_EXPR for NAME and insert it in the location | |
6180 | indicated by LOC. Return true if we made any edge insertions. */ | |
6181 | ||
6182 | static bool | |
6183 | process_assert_insertions_for (tree name, assert_locus_t loc) | |
6184 | { | |
6185 | /* Build the comparison expression NAME_i COMP_CODE VAL. */ | |
75a70cf9 | 6186 | gimple stmt; |
6187 | tree cond; | |
6188 | gimple assert_stmt; | |
eea12c72 | 6189 | edge_iterator ei; |
6190 | edge e; | |
6191 | ||
4fac6da9 | 6192 | /* If we have X <=> X do not insert an assert expr for that. */ |
6193 | if (loc->expr == loc->val) | |
6194 | return false; | |
6195 | ||
bed8bec4 | 6196 | cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val); |
75a70cf9 | 6197 | assert_stmt = build_assert_expr_for (cond, name); |
eea12c72 | 6198 | if (loc->e) |
88dbf20f | 6199 | { |
eea12c72 | 6200 | /* We have been asked to insert the assertion on an edge. This |
6201 | is used only by COND_EXPR and SWITCH_EXPR assertions. */ | |
1b4345f7 | 6202 | gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND |
6203 | || (gimple_code (gsi_stmt (loc->si)) | |
6204 | == GIMPLE_SWITCH)); | |
88dbf20f | 6205 | |
75a70cf9 | 6206 | gsi_insert_on_edge (loc->e, assert_stmt); |
eea12c72 | 6207 | return true; |
6208 | } | |
2b6719e9 | 6209 | |
eea12c72 | 6210 | /* Otherwise, we can insert right after LOC->SI iff the |
6211 | statement must not be the last statement in the block. */ | |
75a70cf9 | 6212 | stmt = gsi_stmt (loc->si); |
eea12c72 | 6213 | if (!stmt_ends_bb_p (stmt)) |
6214 | { | |
75a70cf9 | 6215 | gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT); |
eea12c72 | 6216 | return false; |
6217 | } | |
2b6719e9 | 6218 | |
eea12c72 | 6219 | /* If STMT must be the last statement in BB, we can only insert new |
6220 | assertions on the non-abnormal edge out of BB. Note that since | |
6221 | STMT is not control flow, there may only be one non-abnormal edge | |
6222 | out of BB. */ | |
6223 | FOR_EACH_EDGE (e, ei, loc->bb->succs) | |
6224 | if (!(e->flags & EDGE_ABNORMAL)) | |
6225 | { | |
75a70cf9 | 6226 | gsi_insert_on_edge (e, assert_stmt); |
eea12c72 | 6227 | return true; |
6228 | } | |
88dbf20f | 6229 | |
eea12c72 | 6230 | gcc_unreachable (); |
6231 | } | |
88dbf20f | 6232 | |
88dbf20f | 6233 | |
eea12c72 | 6234 | /* Process all the insertions registered for every name N_i registered |
6235 | in NEED_ASSERT_FOR. The list of assertions to be inserted are | |
6236 | found in ASSERTS_FOR[i]. */ | |
88dbf20f | 6237 | |
eea12c72 | 6238 | static void |
6239 | process_assert_insertions (void) | |
6240 | { | |
6241 | unsigned i; | |
6242 | bitmap_iterator bi; | |
6243 | bool update_edges_p = false; | |
6244 | int num_asserts = 0; | |
88dbf20f | 6245 | |
eea12c72 | 6246 | if (dump_file && (dump_flags & TDF_DETAILS)) |
6247 | dump_all_asserts (dump_file); | |
4efa33ae | 6248 | |
eea12c72 | 6249 | EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi) |
6250 | { | |
6251 | assert_locus_t loc = asserts_for[i]; | |
6252 | gcc_assert (loc); | |
6253 | ||
6254 | while (loc) | |
4efa33ae | 6255 | { |
eea12c72 | 6256 | assert_locus_t next = loc->next; |
6257 | update_edges_p |= process_assert_insertions_for (ssa_name (i), loc); | |
6258 | free (loc); | |
6259 | loc = next; | |
6260 | num_asserts++; | |
4efa33ae | 6261 | } |
88dbf20f | 6262 | } |
88dbf20f | 6263 | |
eea12c72 | 6264 | if (update_edges_p) |
75a70cf9 | 6265 | gsi_commit_edge_inserts (); |
88dbf20f | 6266 | |
581f8050 | 6267 | statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted", |
6268 | num_asserts); | |
88dbf20f | 6269 | } |
6270 | ||
6271 | ||
/* Traverse the flowgraph looking for conditional jumps to insert range
   expressions.  These range expressions are meant to provide information
   to optimizations that need to reason in terms of value ranges.  They
   will not be expanded into RTL.  For instance, given:

   x = ...
   y = ...
   if (x < y)
     y = x - 2;
   else
     x = y + 3;

   this pass will transform the code into:

   x = ...
   y = ...
   if (x < y)
    {
      x = ASSERT_EXPR <x, x < y>
      y = x - 2
    }
   else
    {
      y = ASSERT_EXPR <y, x >= y>
      x = y + 3
    }

   The idea is that once copy and constant propagation have run, other
   optimizations will be able to determine what ranges of values can 'x'
   take in different paths of the code, simply by checking the reaching
   definition of 'x'.  */

static void
insert_range_assertions (void)
{
  /* Per-SSA-name bookkeeping consumed by find_assert_locations and
     process_assert_insertions.  */
  need_assert_for = BITMAP_ALLOC (NULL);
  asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);

  calculate_dominance_info (CDI_DOMINATORS);

  if (find_assert_locations ())
    {
      process_assert_insertions ();
      /* The inserted ASSERT_EXPRs define new SSA versions; bring the
	 SSA web back up to date.  */
      update_ssa (TODO_update_ssa_no_phi);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
      dump_function_to_file (current_function_decl, dump_file, dump_flags);
    }

  free (asserts_for);
  BITMAP_FREE (need_assert_for);
}
6327 | ||
/* Checks one ARRAY_REF in REF, located at LOCATION.  Ignores flexible
   arrays and "struct" hacks.  If VRP can determine that the
   array subscript is a constant, check if it is outside valid
   range.  If the array subscript is a RANGE, warn if it is
   non-overlapping with valid range.
   IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR
   (taking the address one past the end is valid there).  */

static void
check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
{
  value_range_t* vr = NULL;
  tree low_sub, up_sub;
  tree low_bound, up_bound, up_bound_p1;
  tree base;

  /* Each REF is diagnosed at most once.  */
  if (TREE_NO_WARNING (ref))
    return;

  low_sub = up_sub = TREE_OPERAND (ref, 1);
  up_bound = array_ref_up_bound (ref);

  /* Can not check flexible arrays.  */
  if (!up_bound
      || TREE_CODE (up_bound) != INTEGER_CST)
    return;

  /* Accesses to trailing arrays via pointers may access storage
     beyond the types array bounds.  */
  base = get_base_address (ref);
  if (base && TREE_CODE (base) == MEM_REF)
    {
      tree cref, next = NULL_TREE;

      if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
	return;

      /* Look for a FIELD_DECL following the accessed member; its
	 absence means the array is the trailing member.  */
      cref = TREE_OPERAND (ref, 0);
      if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
	for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
	     next && TREE_CODE (next) != FIELD_DECL;
	     next = DECL_CHAIN (next))
	  ;

      /* If this is the last field in a struct type or a field in a
	 union type do not warn.  */
      if (!next)
	return;
    }

  low_bound = array_ref_low_bound (ref);
  /* One past the upper bound, for the ADDR_EXPR off-by-one case.  */
  up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
				 build_int_cst (TREE_TYPE (up_bound), 1));

  if (TREE_CODE (low_sub) == SSA_NAME)
    {
      vr = get_value_range (low_sub);
      if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
	{
	  /* For a VR_RANGE compare the range extremes against the
	     opposite bounds; for a VR_ANTI_RANGE the roles swap.  */
	  low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
	  up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
	}
    }

  if (vr && vr->type == VR_ANTI_RANGE)
    {
      /* Warn only when the excluded interval covers the whole valid
	 index range, i.e. every possible value is out of bounds.  */
      if (TREE_CODE (up_sub) == INTEGER_CST
	  && tree_int_cst_lt (up_bound, up_sub)
	  && TREE_CODE (low_sub) == INTEGER_CST
	  && tree_int_cst_lt (low_sub, low_bound))
	{
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is outside array bounds");
	  TREE_NO_WARNING (ref) = 1;
	}
    }
  else if (TREE_CODE (up_sub) == INTEGER_CST
	   && (ignore_off_by_one
	       ? (tree_int_cst_lt (up_bound, up_sub)
		  && !tree_int_cst_equal (up_bound_p1, up_sub))
	       : (tree_int_cst_lt (up_bound, up_sub)
		  || tree_int_cst_equal (up_bound_p1, up_sub))))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Array bound warning for ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
	  fprintf (dump_file, "\n");
	}
      warning_at (location, OPT_Warray_bounds,
		  "array subscript is above array bounds");
      TREE_NO_WARNING (ref) = 1;
    }
  else if (TREE_CODE (low_sub) == INTEGER_CST
	   && tree_int_cst_lt (low_sub, low_bound))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Array bound warning for ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
	  fprintf (dump_file, "\n");
	}
      warning_at (location, OPT_Warray_bounds,
		  "array subscript is below array bounds");
      TREE_NO_WARNING (ref) = 1;
    }
}
6434 | ||
/* Searches if the expr T, located at LOCATION computes
   address of an ARRAY_REF, and call check_array_ref on it.  */

static void
search_for_addr_array (tree t, location_t location)
{
  /* Walk backwards through trivial SSA copies so we see the address
     expression T was ultimately copied from.  */
  while (TREE_CODE (t) == SSA_NAME)
    {
      gimple g = SSA_NAME_DEF_STMT (t);

      if (gimple_code (g) != GIMPLE_ASSIGN)
	return;

      /* Only follow plain copies (single-RHS assignments); anything
	 else computes a new value rather than forwarding one.  */
      if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
	  != GIMPLE_SINGLE_RHS)
	return;

      t = gimple_assign_rhs1 (g);
    }


  /* We are only interested in addresses of ARRAY_REF's.  */
  if (TREE_CODE (t) != ADDR_EXPR)
    return;

  /* Check each ARRAY_REFs in the reference chain.  Taking the address
     allows an off-by-one subscript (&a[n] is valid), hence
     ignore_off_by_one below.  */
  do
    {
      if (TREE_CODE (t) == ARRAY_REF)
	check_array_ref (location, t, true /*ignore_off_by_one*/);

      t = TREE_OPERAND (t, 0);
    }
  while (handled_component_p (t));

  /* Additionally diagnose MEM_REFs of the form MEM[&array + offset]
     whose constant offset indexes outside the array.  */
  if (TREE_CODE (t) == MEM_REF
      && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
      && !TREE_NO_WARNING (t))
    {
      tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
      tree low_bound, up_bound, el_sz;
      offset_int idx;
      /* Only handle one-dimensional arrays with a known domain.  */
      if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
	  || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
	  || !TYPE_DOMAIN (TREE_TYPE (tem)))
	return;

      low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
      up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
      el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
      /* All of bounds and element size must be compile-time constants
	 to compute the implied index.  */
      if (!low_bound
	  || TREE_CODE (low_bound) != INTEGER_CST
	  || !up_bound
	  || TREE_CODE (up_bound) != INTEGER_CST
	  || !el_sz
	  || TREE_CODE (el_sz) != INTEGER_CST)
	return;

      /* Translate the byte offset into an element index.  */
      idx = mem_ref_offset (t);
      idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
      if (wi::lts_p (idx, 0))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Array bound warning for ");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
	      fprintf (dump_file, "\n");
	    }
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is below array bounds");
	  TREE_NO_WARNING (t) = 1;
	}
      /* Index one past the last element is allowed (address-of idiom),
	 so only warn when IDX exceeds the length, not when equal.  */
      else if (wi::gts_p (idx, (wi::to_offset (up_bound)
				- wi::to_offset (low_bound) + 1)))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Array bound warning for ");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
	      fprintf (dump_file, "\n");
	    }
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is above array bounds");
	  TREE_NO_WARNING (t) = 1;
	}
    }
}
6522 | ||
5bc96398 | 6523 | /* walk_tree() callback that checks if *TP is |
6524 | an ARRAY_REF inside an ADDR_EXPR (in which an array | |
6525 | subscript one outside the valid range is allowed). Call | |
48e1416a | 6526 | check_array_ref for each ARRAY_REF found. The location is |
5bc96398 | 6527 | passed in DATA. */ |
6528 | ||
6529 | static tree | |
6530 | check_array_bounds (tree *tp, int *walk_subtree, void *data) | |
6531 | { | |
6532 | tree t = *tp; | |
75a70cf9 | 6533 | struct walk_stmt_info *wi = (struct walk_stmt_info *) data; |
e60a6f7b | 6534 | location_t location; |
6535 | ||
6536 | if (EXPR_HAS_LOCATION (t)) | |
6537 | location = EXPR_LOCATION (t); | |
6538 | else | |
6539 | { | |
6540 | location_t *locp = (location_t *) wi->info; | |
6541 | location = *locp; | |
6542 | } | |
5d19e882 | 6543 | |
5bc96398 | 6544 | *walk_subtree = TRUE; |
6545 | ||
6546 | if (TREE_CODE (t) == ARRAY_REF) | |
e60a6f7b | 6547 | check_array_ref (location, t, false /*ignore_off_by_one*/); |
533a9fbc | 6548 | |
182cf5a9 | 6549 | if (TREE_CODE (t) == MEM_REF |
7988a017 | 6550 | || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0))) |
e60a6f7b | 6551 | search_for_addr_array (TREE_OPERAND (t, 0), location); |
5bc96398 | 6552 | |
7988a017 | 6553 | if (TREE_CODE (t) == ADDR_EXPR) |
6554 | *walk_subtree = FALSE; | |
6555 | ||
5bc96398 | 6556 | return NULL_TREE; |
6557 | } | |
6558 | ||
6559 | /* Walk over all statements of all reachable BBs and call check_array_bounds | |
6560 | on them. */ | |
6561 | ||
6562 | static void | |
6563 | check_all_array_refs (void) | |
6564 | { | |
6565 | basic_block bb; | |
75a70cf9 | 6566 | gimple_stmt_iterator si; |
5bc96398 | 6567 | |
fc00614f | 6568 | FOR_EACH_BB_FN (bb, cfun) |
5bc96398 | 6569 | { |
8b938617 | 6570 | edge_iterator ei; |
6571 | edge e; | |
6572 | bool executable = false; | |
496ffe87 | 6573 | |
8b938617 | 6574 | /* Skip blocks that were found to be unreachable. */ |
6575 | FOR_EACH_EDGE (e, ei, bb->preds) | |
6576 | executable |= !!(e->flags & EDGE_EXECUTABLE); | |
6577 | if (!executable) | |
6578 | continue; | |
5bc96398 | 6579 | |
75a70cf9 | 6580 | for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) |
6581 | { | |
6582 | gimple stmt = gsi_stmt (si); | |
75a70cf9 | 6583 | struct walk_stmt_info wi; |
6584 | if (!gimple_has_location (stmt)) | |
6585 | continue; | |
6586 | ||
6587 | if (is_gimple_call (stmt)) | |
6588 | { | |
6589 | size_t i; | |
6590 | size_t n = gimple_call_num_args (stmt); | |
6591 | for (i = 0; i < n; i++) | |
6592 | { | |
6593 | tree arg = gimple_call_arg (stmt, i); | |
496ffe87 | 6594 | search_for_addr_array (arg, gimple_location (stmt)); |
75a70cf9 | 6595 | } |
6596 | } | |
6597 | else | |
6598 | { | |
6599 | memset (&wi, 0, sizeof (wi)); | |
496ffe87 | 6600 | wi.info = CONST_CAST (void *, (const void *) |
6601 | gimple_location_ptr (stmt)); | |
75a70cf9 | 6602 | |
6603 | walk_gimple_op (gsi_stmt (si), | |
6604 | check_array_bounds, | |
6605 | &wi); | |
6606 | } | |
6607 | } | |
5bc96398 | 6608 | } |
6609 | } | |
88dbf20f | 6610 | |
/* Return true if all imm uses of VAR are either in STMT, or
   feed (optionally through a chain of single imm uses) GIMPLE_COND
   in basic block COND_BB.  */

static bool
all_imm_uses_in_stmt_or_feed_cond (tree var, gimple stmt, basic_block cond_bb)
{
  use_operand_p use_p, use2_p;
  imm_use_iterator iter;

  FOR_EACH_IMM_USE_FAST (use_p, iter, var)
    if (USE_STMT (use_p) != stmt)
      {
	gimple use_stmt = USE_STMT (use_p), use_stmt2;
	/* Debug statements do not count as real uses.  */
	if (is_gimple_debug (use_stmt))
	  continue;
	/* Follow a chain of assignments forward, each of which must
	   define an SSA name with exactly one immediate use.  */
	while (is_gimple_assign (use_stmt)
	       && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
	       && single_imm_use (gimple_assign_lhs (use_stmt),
				  &use2_p, &use_stmt2))
	  use_stmt = use_stmt2;
	/* The chain must end in COND_BB's GIMPLE_COND; anything else
	   is a disqualifying use.  */
	if (gimple_code (use_stmt) != GIMPLE_COND
	    || gimple_bb (use_stmt) != cond_bb)
	  return false;
      }
  return true;
}
6638 | ||
/* Handle
   _4 = x_3 & 31;
   if (_4 != 0)
     goto <bb 6>;
   else
     goto <bb 7>;
   <bb 6>:
   __builtin_unreachable ();
   <bb 7>:
   x_5 = ASSERT_EXPR <x_3, ...>;
   If x_3 has no other immediate uses (checked by caller),
   var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
   from the non-zero bitmask.  */

static void
maybe_set_nonzero_bits (basic_block bb, tree var)
{
  edge e = single_pred_edge (bb);
  basic_block cond_bb = e->src;
  gimple stmt = last_stmt (cond_bb);
  tree cst;

  /* The predecessor must end in a condition comparing an SSA name
     against zero; the comparison code must route the zero case onto
     the edge into BB (EQ on a true edge, NE on a false edge).  */
  if (stmt == NULL
      || gimple_code (stmt) != GIMPLE_COND
      || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
				     ? EQ_EXPR : NE_EXPR)
      || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
      || !integer_zerop (gimple_cond_rhs (stmt)))
    return;

  /* The tested name must be defined as VAR & constant-mask.  */
  stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
  if (!is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
      || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
    return;
  if (gimple_assign_rhs1 (stmt) != var)
    {
      gimple stmt2;

      /* Also accept the mask being applied to a same-precision
	 conversion of VAR rather than VAR itself.  */
      if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
	return;
      stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
      if (!gimple_assign_cast_p (stmt2)
	  || gimple_assign_rhs1 (stmt2) != var
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
	      != TYPE_PRECISION (TREE_TYPE (var))))
	return;
    }
  /* (VAR & CST) == 0 on this path, so the bits in CST are known
     zero; drop them from VAR's non-zero-bits mask.  */
  cst = gimple_assign_rhs2 (stmt);
  set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var), cst));
}
6691 | ||
/* Convert range assertion expressions into the implied copies and
   copy propagate away the copies.  Doing the trivial copy propagation
   here avoids the need to run the full copy propagation pass after
   VRP.

   FIXME, this will eventually lead to copy propagation removing the
   names that had useful range information attached to them.  For
   instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
   then N_i will have the range [3, +INF].

   However, by converting the assertion into the implied copy
   operation N_i = N_j, we will then copy-propagate N_j into the uses
   of N_i and lose the range information.  We may want to hold on to
   ASSERT_EXPRs a little while longer as the ranges could be used in
   things like jump threading.

   The problem with keeping ASSERT_EXPRs around is that passes after
   VRP need to handle them appropriately.

   Another approach would be to make the range information a first
   class property of the SSA_NAME so that it can be queried from
   any pass.  This is made somewhat more complex by the need for
   multiple ranges to be associated with one SSA_NAME.  */

static void
remove_range_assertions (void)
{
  basic_block bb;
  gimple_stmt_iterator si;
  /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
     a basic block preceded by GIMPLE_COND branching to it and
     __builtin_trap, -1 if not yet checked, 0 otherwise.  */
  int is_unreachable;

  /* Note that the BSI iterator bump happens at the bottom of the
     loop and no bump is necessary if we're removing the statement
     referenced by the current BSI.  */
  FOR_EACH_BB_FN (bb, cfun)
    for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
      {
	gimple stmt = gsi_stmt (si);
	gimple use_stmt;

	if (is_gimple_assign (stmt)
	    && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
	  {
	    tree lhs = gimple_assign_lhs (stmt);
	    tree rhs = gimple_assign_rhs1 (stmt);
	    tree var;
	    tree cond = fold (ASSERT_EXPR_COND (rhs));
	    use_operand_p use_p;
	    imm_use_iterator iter;

	    /* An always-false assertion would mean VRP proved the
	       path dead; that must not survive to this point.  */
	    gcc_assert (cond != boolean_false_node);

	    var = ASSERT_EXPR_VAR (rhs);
	    gcc_assert (TREE_CODE (var) == SSA_NAME);

	    if (!POINTER_TYPE_P (TREE_TYPE (lhs))
		&& SSA_NAME_RANGE_INFO (lhs))
	      {
		/* Lazily determine (once per block) whether BB is
		   the fallthru of an unreachable-trap pattern.  */
		if (is_unreachable == -1)
		  {
		    is_unreachable = 0;
		    if (single_pred_p (bb)
			&& assert_unreachable_fallthru_edge_p
						      (single_pred_edge (bb)))
		      is_unreachable = 1;
		  }
		/* Handle
		   if (x_7 >= 10 && x_7 < 20)
		     __builtin_unreachable ();
		   x_8 = ASSERT_EXPR <x_7, ...>;
		   if the only uses of x_7 are in the ASSERT_EXPR and
		   in the condition.  In that case, we can copy the
		   range info from x_8 computed in this pass also
		   for x_7.  */
		if (is_unreachable
		    && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
							  single_pred (bb)))
		  {
		    set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
				    SSA_NAME_RANGE_INFO (lhs)->get_min (),
				    SSA_NAME_RANGE_INFO (lhs)->get_max ());
		    maybe_set_nonzero_bits (bb, var);
		  }
	      }

	    /* Propagate the RHS into every use of the LHS.  */
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, var);

	    /* And finally, remove the copy, it is not needed.  */
	    gsi_remove (&si, true);
	    release_defs (stmt);
	  }
	else
	  {
	    /* Any real (non-debug) statement means we are no longer
	       looking at the leading ASSERT_EXPRs of the block.  */
	    if (!is_gimple_debug (gsi_stmt (si)))
	      is_unreachable = 0;
	    gsi_next (&si);
	  }
      }
}
6797 | ||
6798 | ||
6799 | /* Return true if STMT is interesting for VRP. */ | |
6800 | ||
6801 | static bool | |
75a70cf9 | 6802 | stmt_interesting_for_vrp (gimple stmt) |
88dbf20f | 6803 | { |
7c782c9b | 6804 | if (gimple_code (stmt) == GIMPLE_PHI) |
6805 | { | |
6806 | tree res = gimple_phi_result (stmt); | |
6807 | return (!virtual_operand_p (res) | |
6808 | && (INTEGRAL_TYPE_P (TREE_TYPE (res)) | |
6809 | || POINTER_TYPE_P (TREE_TYPE (res)))); | |
6810 | } | |
75a70cf9 | 6811 | else if (is_gimple_assign (stmt) || is_gimple_call (stmt)) |
88dbf20f | 6812 | { |
75a70cf9 | 6813 | tree lhs = gimple_get_lhs (stmt); |
88dbf20f | 6814 | |
04dde933 | 6815 | /* In general, assignments with virtual operands are not useful |
6816 | for deriving ranges, with the obvious exception of calls to | |
6817 | builtin functions. */ | |
75a70cf9 | 6818 | if (lhs && TREE_CODE (lhs) == SSA_NAME |
88dbf20f | 6819 | && (INTEGRAL_TYPE_P (TREE_TYPE (lhs)) |
6820 | || POINTER_TYPE_P (TREE_TYPE (lhs))) | |
d7dcba40 | 6821 | && (is_gimple_call (stmt) |
dd277d48 | 6822 | || !gimple_vuse (stmt))) |
88dbf20f | 6823 | return true; |
6824 | } | |
75a70cf9 | 6825 | else if (gimple_code (stmt) == GIMPLE_COND |
6826 | || gimple_code (stmt) == GIMPLE_SWITCH) | |
88dbf20f | 6827 | return true; |
6828 | ||
6829 | return false; | |
6830 | } | |
6831 | ||
6832 | ||
/* Initialize local data structures for VRP: allocate the lattice
   arrays and mark every PHI and statement as worth (or not worth)
   simulating by the SSA propagator.  */

static void
vrp_initialize (void)
{
  basic_block bb;

  values_propagated = false;
  num_vr_values = num_ssa_names;
  vr_value = XCNEWVEC (value_range_t *, num_vr_values);
  vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator si;

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple phi = gsi_stmt (si);
	  if (!stmt_interesting_for_vrp (phi))
	    {
	      /* Uninteresting PHIs get a VARYING result and are
		 never re-simulated.  */
	      tree lhs = PHI_RESULT (phi);
	      set_value_range_to_varying (get_value_range (lhs));
	      prop_set_simulate_again (phi, false);
	    }
	  else
	    prop_set_simulate_again (phi, true);
	}

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple stmt = gsi_stmt (si);

 	  /* If the statement is a control insn, then we do not
 	     want to avoid simulating the statement once.  Failure
 	     to do so means that those edges will never get added.  */
	  if (stmt_ends_bb_p (stmt))
	    prop_set_simulate_again (stmt, true);
	  else if (!stmt_interesting_for_vrp (stmt))
	    {
	      /* All definitions of an uninteresting statement start
		 (and stay) VARYING.  */
	      ssa_op_iter i;
	      tree def;
	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
		set_value_range_to_varying (get_value_range (def));
	      prop_set_simulate_again (stmt, false);
	    }
	  else
	    prop_set_simulate_again (stmt, true);
	}
    }
}
6884 | ||
1d0b727d | 6885 | /* Return the singleton value-range for NAME or NAME. */ |
6886 | ||
6887 | static inline tree | |
6888 | vrp_valueize (tree name) | |
6889 | { | |
6890 | if (TREE_CODE (name) == SSA_NAME) | |
6891 | { | |
6892 | value_range_t *vr = get_value_range (name); | |
6893 | if (vr->type == VR_RANGE | |
6894 | && (vr->min == vr->max | |
6895 | || operand_equal_p (vr->min, vr->max, 0))) | |
6896 | return vr->min; | |
6897 | } | |
6898 | return name; | |
6899 | } | |
88dbf20f | 6900 | |
/* Visit assignment STMT.  If it produces an interesting range, record
   the SSA name in *OUTPUT_P.  Returns SSA_PROP_INTERESTING when a new
   non-varying range was recorded, SSA_PROP_VARYING when the result is
   (or became) varying, and SSA_PROP_NOT_INTERESTING when the lattice
   value did not change.  */

static enum ssa_prop_result
vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
{
  tree def, lhs;
  ssa_op_iter iter;
  enum gimple_code code = gimple_code (stmt);
  lhs = gimple_get_lhs (stmt);

  /* We only keep track of ranges in integral and pointer types.  */
  if (TREE_CODE (lhs) == SSA_NAME
      && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	   /* It is valid to have NULL MIN/MAX values on a type.  See
	      build_range_type.  */
	   && TYPE_MIN_VALUE (TREE_TYPE (lhs))
	   && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
	  || POINTER_TYPE_P (TREE_TYPE (lhs))))
    {
      value_range_t new_vr = VR_INITIALIZER;

      /* Try folding the statement to a constant first.  */
      tree tem = gimple_fold_stmt_to_constant (stmt, vrp_valueize);
      if (tem)
	set_value_range_to_value (&new_vr, tem, NULL);
      /* Then dispatch to value-range extracting functions.  */
      else if (code == GIMPLE_CALL)
	extract_range_basic (&new_vr, stmt);
      else
	extract_range_from_assignment (&new_vr, stmt);

      /* update_value_range returns true only when the lattice value
	 for LHS actually changed.  */
      if (update_value_range (lhs, &new_vr))
	{
	  *output_p = lhs;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Found new range for ");
	      print_generic_expr (dump_file, lhs, 0);
	      fprintf (dump_file, ": ");
	      dump_value_range (dump_file, &new_vr);
	      fprintf (dump_file, "\n");
	    }

	  if (new_vr.type == VR_VARYING)
	    return SSA_PROP_VARYING;

	  return SSA_PROP_INTERESTING;
	}

      return SSA_PROP_NOT_INTERESTING;
    }

  /* Every other statement produces no useful ranges.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
    set_value_range_to_varying (get_value_range (def));

  return SSA_PROP_VARYING;
}
6961 | ||
fbcece5e | 6962 | /* Helper that gets the value range of the SSA_NAME with version I |
310d2511 | 6963 | or a symbolic range containing the SSA_NAME only if the value range |
fbcece5e | 6964 | is varying or undefined. */ |
6965 | ||
6966 | static inline value_range_t | |
6967 | get_vr_for_comparison (int i) | |
6968 | { | |
e0186710 | 6969 | value_range_t vr = *get_value_range (ssa_name (i)); |
fbcece5e | 6970 | |
6971 | /* If name N_i does not have a valid range, use N_i as its own | |
6972 | range. This allows us to compare against names that may | |
6973 | have N_i in their ranges. */ | |
6974 | if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED) | |
6975 | { | |
6976 | vr.type = VR_RANGE; | |
6977 | vr.min = ssa_name (i); | |
6978 | vr.max = ssa_name (i); | |
6979 | } | |
6980 | ||
6981 | return vr; | |
6982 | } | |
88dbf20f | 6983 | |
/* Compare all the value ranges for names equivalent to VAR with VAL
   using comparison code COMP.  Return the same value returned by
   compare_range_with_value, including the setting of
   *STRICT_OVERFLOW_P.  */

static tree
compare_name_with_value (enum tree_code comp, tree var, tree val,
			 bool *strict_overflow_p)
{
  bitmap_iterator bi;
  unsigned i;
  bitmap e;
  tree retval, t;
  int used_strict_overflow;
  bool sop;
  value_range_t equiv_vr;

  /* Get the set of equivalences for VAR.  */
  e = get_value_range (var)->equiv;

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Compare vars' value range with val.  */
  equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
  sop = false;
  retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
  if (retval)
    used_strict_overflow = sop ? 1 : 0;

  /* If the equiv set is empty we have done all work we need to do.  */
  if (e == NULL)
    {
      if (retval
	  && used_strict_overflow > 0)
	*strict_overflow_p = true;
      return retval;
    }

  /* Otherwise also compare every member of VAR's equivalence set;
     all members that yield an answer must agree.  */
  EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
    {
      equiv_vr = get_vr_for_comparison (i);
      sop = false;
      t = compare_range_with_value (comp, &equiv_vr, val, &sop);
      if (t)
	{
	  /* If we get different answers from different members
	     of the equivalence set this check must be in a dead
	     code region.  Folding it to a trap representation
	     would be correct here.  For now just return don't-know.  */
	  if (retval != NULL
	      && t != retval)
	    {
	      retval = NULL_TREE;
	      break;
	    }
	  retval = t;

	  /* One overflow-free comparison is enough to clear the
	     flag; it only stays set if every answer relied on
	     undefined signed overflow.  */
	  if (!sop)
	    used_strict_overflow = 0;
	  else if (used_strict_overflow < 0)
	    used_strict_overflow = 1;
	}
    }

  if (retval
      && used_strict_overflow > 0)
    *strict_overflow_p = true;

  return retval;
}
7056 | ||
7057 | ||
/* Given a comparison code COMP and names N1 and N2, compare all the
   ranges equivalent to N1 against all the ranges equivalent to N2
   to determine the value of N1 COMP N2.  Return the same value
   returned by compare_ranges.  Set *STRICT_OVERFLOW_P to indicate
   whether we relied on an overflow infinity in the comparison.  */


static tree
compare_names (enum tree_code comp, tree n1, tree n2,
	       bool *strict_overflow_p)
{
  tree t, retval;
  bitmap e1, e2;
  bitmap_iterator bi1, bi2;
  unsigned i1, i2;
  int used_strict_overflow;
  /* Lazily-created scratch bitmaps, reused across calls to avoid
     repeated allocation.  */
  static bitmap_obstack *s_obstack = NULL;
  static bitmap s_e1 = NULL, s_e2 = NULL;

  /* Compare the ranges of every name equivalent to N1 against the
     ranges of every name equivalent to N2.  */
  e1 = get_value_range (n1)->equiv;
  e2 = get_value_range (n2)->equiv;

  /* Use the fake bitmaps if e1 or e2 are not available.  */
  if (s_obstack == NULL)
    {
      s_obstack = XNEW (bitmap_obstack);
      bitmap_obstack_initialize (s_obstack);
      s_e1 = BITMAP_ALLOC (s_obstack);
      s_e2 = BITMAP_ALLOC (s_obstack);
    }
  if (e1 == NULL)
    e1 = s_e1;
  if (e2 == NULL)
    e2 = s_e2;

  /* Add N1 and N2 to their own set of equivalences to avoid
     duplicating the body of the loop just to check N1 and N2
     ranges.  NOTE: these bits must be cleared again on every exit
     path below, since the bitmaps may belong to the lattice.  */
  bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_set_bit (e2, SSA_NAME_VERSION (n2));

  /* If the equivalence sets have a common intersection, then the two
     names can be compared without checking their ranges.  */
  if (bitmap_intersect_p (e1, e2))
    {
      bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
      bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));

      return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
	     ? boolean_true_node
	     : boolean_false_node;
    }

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Otherwise, compare all the equivalent ranges.  First, add N1 and
     N2 to their own set of equivalences to avoid duplicating the body
     of the loop just to check N1 and N2 ranges.  */
  EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
    {
      value_range_t vr1 = get_vr_for_comparison (i1);

      t = retval = NULL_TREE;
      EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
	{
	  bool sop = false;

	  value_range_t vr2 = get_vr_for_comparison (i2);

	  t = compare_ranges (comp, &vr1, &vr2, &sop);
	  if (t)
	    {
	      /* If we get different answers from different members
		 of the equivalence set this check must be in a dead
		 code region.  Folding it to a trap representation
		 would be correct here.  For now just return don't-know.  */
	      if (retval != NULL
		  && t != retval)
		{
		  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
		  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
		  return NULL_TREE;
		}
	      retval = t;

	      /* Track whether any answer was overflow-free; the flag
		 is reported only if every answer relied on overflow.  */
	      if (!sop)
		used_strict_overflow = 0;
	      else if (used_strict_overflow < 0)
		used_strict_overflow = 1;
	    }
	}

      if (retval)
	{
	  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
	  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
	  if (used_strict_overflow > 0)
	    *strict_overflow_p = true;
	  return retval;
	}
    }

  /* None of the equivalent ranges are useful in computing this
     comparison.  */
  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
  return NULL_TREE;
}
7170 | ||
e0ad89bd | 7171 | /* Helper function for vrp_evaluate_conditional_warnv. */ |
7172 | ||
7173 | static tree | |
7174 | vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code, | |
7175 | tree op0, tree op1, | |
7176 | bool * strict_overflow_p) | |
7177 | { | |
7178 | value_range_t *vr0, *vr1; | |
7179 | ||
7180 | vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL; | |
7181 | vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL; | |
7182 | ||
949d4c6f | 7183 | tree res = NULL_TREE; |
e0ad89bd | 7184 | if (vr0 && vr1) |
949d4c6f | 7185 | res = compare_ranges (code, vr0, vr1, strict_overflow_p); |
7186 | if (!res && vr0) | |
7187 | res = compare_range_with_value (code, vr0, op1, strict_overflow_p); | |
7188 | if (!res && vr1) | |
7189 | res = (compare_range_with_value | |
e0ad89bd | 7190 | (swap_tree_comparison (code), vr1, op0, strict_overflow_p)); |
949d4c6f | 7191 | return res; |
e0ad89bd | 7192 | } |
7193 | ||
93116081 | 7194 | /* Helper function for vrp_evaluate_conditional_warnv. */ |
7195 | ||
7196 | static tree | |
7197 | vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0, | |
7198 | tree op1, bool use_equiv_p, | |
e0ad89bd | 7199 | bool *strict_overflow_p, bool *only_ranges) |
93116081 | 7200 | { |
e0ad89bd | 7201 | tree ret; |
7202 | if (only_ranges) | |
7203 | *only_ranges = true; | |
7204 | ||
93116081 | 7205 | /* We only deal with integral and pointer types. */ |
7206 | if (!INTEGRAL_TYPE_P (TREE_TYPE (op0)) | |
7207 | && !POINTER_TYPE_P (TREE_TYPE (op0))) | |
7208 | return NULL_TREE; | |
7209 | ||
7210 | if (use_equiv_p) | |
7211 | { | |
e0ad89bd | 7212 | if (only_ranges |
7213 | && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges | |
7214 | (code, op0, op1, strict_overflow_p))) | |
7215 | return ret; | |
7216 | *only_ranges = false; | |
93116081 | 7217 | if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME) |
75a70cf9 | 7218 | return compare_names (code, op0, op1, strict_overflow_p); |
93116081 | 7219 | else if (TREE_CODE (op0) == SSA_NAME) |
75a70cf9 | 7220 | return compare_name_with_value (code, op0, op1, strict_overflow_p); |
93116081 | 7221 | else if (TREE_CODE (op1) == SSA_NAME) |
7222 | return (compare_name_with_value | |
75a70cf9 | 7223 | (swap_tree_comparison (code), op1, op0, strict_overflow_p)); |
93116081 | 7224 | } |
7225 | else | |
e0ad89bd | 7226 | return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1, |
7227 | strict_overflow_p); | |
93116081 | 7228 | return NULL_TREE; |
7229 | } | |
eea12c72 | 7230 | |
/* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
   information.  Return NULL if the conditional can not be evaluated.
   The ranges of all the names equivalent with the operands in COND
   will be used when trying to compute the value.  If the result is
   based on undefined signed overflow, issue a warning if
   appropriate.  */

static tree
vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
{
  bool sop;
  tree ret;
  bool only_ranges;

  /* Some passes and foldings leak constants with overflow flag set
     into the IL.  Avoid doing wrong things with these and bail out.  */
  if ((TREE_CODE (op0) == INTEGER_CST
       && TREE_OVERFLOW (op0))
      || (TREE_CODE (op1) == INTEGER_CST
          && TREE_OVERFLOW (op1)))
    return NULL_TREE;

  sop = false;
  /* Use equivalences (USE_EQUIV_P == true); ONLY_RANGES records whether
     the operands' own ranges sufficed, which gates -Wtype-limits below.  */
  ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
                                                 &only_ranges);

  if (ret && sop)
    {
      enum warn_strict_overflow_code wc;
      const char* warnmsg;

      /* A constant result means the whole conditional folded; otherwise
         only the comparison form was simplified.  Pick the matching
         -Wstrict-overflow sub-category and message.  */
      if (is_gimple_min_invariant (ret))
        {
          wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
          warnmsg = G_("assuming signed overflow does not occur when "
                       "simplifying conditional to constant");
        }
      else
        {
          wc = WARN_STRICT_OVERFLOW_COMPARISON;
          warnmsg = G_("assuming signed overflow does not occur when "
                       "simplifying conditional");
        }

      if (issue_strict_overflow_warning (wc))
        {
          location_t location;

          /* Fall back to the global input_location for artificial
             statements that carry no location.  */
          if (!gimple_has_location (stmt))
            location = input_location;
          else
            location = gimple_location (stmt);
          warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
        }
    }

  if (warn_type_limits
      && ret && only_ranges
      && TREE_CODE_CLASS (code) == tcc_comparison
      && TREE_CODE (op0) == SSA_NAME)
    {
      /* If the comparison is being folded and the operand on the LHS
         is being compared against a constant value that is outside of
         the natural range of OP0's type, then the predicate will
         always fold regardless of the value of OP0.  If -Wtype-limits
         was specified, emit a warning.  */
      tree type = TREE_TYPE (op0);
      value_range_t *vr0 = get_value_range (op0);

      /* Only warn when OP0's range covers its whole type (min..max),
         i.e. the fold did not depend on any real range knowledge.  */
      if (vr0->type != VR_VARYING
          && INTEGRAL_TYPE_P (type)
          && vrp_val_is_min (vr0->min)
          && vrp_val_is_max (vr0->max)
          && is_gimple_min_invariant (op1))
        {
          location_t location;

          if (!gimple_has_location (stmt))
            location = input_location;
          else
            location = gimple_location (stmt);

          warning_at (location, OPT_Wtype_limits,
                      integer_zerop (ret)
                      ? G_("comparison always false "
                           "due to limited range of data type")
                      : G_("comparison always true "
                           "due to limited range of data type"));
        }
    }

  return ret;
}
7324 | ||
88dbf20f | 7325 | |
/* Visit conditional statement STMT.  If we can determine which edge
   will be taken out of STMT's basic block, record it in
   *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
{
  tree val;
  bool sop;  /* Set when folding relied on undefined signed overflow.  */

  *taken_edge_p = NULL;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      tree use;
      ssa_op_iter i;

      fprintf (dump_file, "\nVisiting conditional with predicate: ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
      fprintf (dump_file, "\nWith known ranges\n");

      FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
        {
          fprintf (dump_file, "\t");
          print_generic_expr (dump_file, use, 0);
          fprintf (dump_file, ": ");
          dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
        }

      fprintf (dump_file, "\n");
    }

  /* Compute the value of the predicate COND by checking the known
     ranges of each of its operands.

     Note that we cannot evaluate all the equivalent ranges here
     because those ranges may not yet be final and with the current
     propagation strategy, we cannot determine when the value ranges
     of the names in the equivalence set have changed.

     For instance, given the following code fragment

        i_5 = PHI <8, i_13>
        ...
        i_14 = ASSERT_EXPR <i_5, i_5 != 0>
        if (i_14 == 1)
          ...

     Assume that on the first visit to i_14, i_5 has the temporary
     range [8, 8] because the second argument to the PHI function is
     not yet executable.  We derive the range ~[0, 0] for i_14 and the
     equivalence set { i_5 }.  So, when we visit 'if (i_14 == 1)' for
     the first time, since i_14 is equivalent to the range [8, 8], we
     determine that the predicate is always false.

     On the next round of propagation, i_13 is determined to be
     VARYING, which causes i_5 to drop down to VARYING.  So, another
     visit to i_14 is scheduled.  In this second visit, we compute the
     exact same range and equivalence set for i_14, namely ~[0, 0] and
     { i_5 }.  But we did not have the previous range for i_5
     registered, so vrp_visit_assignment thinks that the range for
     i_14 has not changed.  Therefore, the predicate 'if (i_14 == 1)'
     is not visited again, which stops propagation from visiting
     statements in the THEN clause of that if().

     To properly fix this we would need to keep the previous range
     value for the names in the equivalence set.  This way we would've
     discovered that from one visit to the other i_5 changed from
     range [8, 8] to VR_VARYING.

     However, fixing this apparent limitation may not be worth the
     additional checking.  Testing on several code bases (GCC, DLV,
     MICO, TRAMP3D and SPEC2000) showed that doing this results in
     4 more predicates folded in SPEC.  */
  sop = false;

  /* USE_EQUIV_P is false per the note above; ONLY_RANGES is not needed
     here so NULL is passed.  */
  val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
                                                 gimple_cond_lhs (stmt),
                                                 gimple_cond_rhs (stmt),
                                                 false, &sop, NULL);
  if (val)
    {
      if (!sop)
        *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
      else
        {
          /* Discard a folding that only holds if signed overflow is
             undefined; edge selection must be unconditionally safe.  */
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file,
                     "\nIgnoring predicate evaluation because "
                     "it assumes that signed overflow is undefined");
          val = NULL_TREE;
        }
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nPredicate evaluates to: ");
      if (val == NULL_TREE)
        fprintf (dump_file, "DON'T KNOW\n");
      else
        print_generic_stmt (dump_file, val, 0);
    }

  return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
}
7432 | ||
d31e54f1 | 7433 | /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL |
7434 | that includes the value VAL. The search is restricted to the range | |
75a70cf9 | 7435 | [START_IDX, n - 1] where n is the size of VEC. |
88dbf20f | 7436 | |
d31e54f1 | 7437 | If there is a CASE_LABEL for VAL, its index is placed in IDX and true is |
7438 | returned. | |
7439 | ||
496ffe87 | 7440 | If there is no CASE_LABEL for VAL and there is one that is larger than VAL, |
d31e54f1 | 7441 | it is placed in IDX and false is returned. |
7442 | ||
75a70cf9 | 7443 | If VAL is larger than any CASE_LABEL, n is placed on IDX and false is |
d31e54f1 | 7444 | returned. */ |
b6d7b6c5 | 7445 | |
7446 | static bool | |
75a70cf9 | 7447 | find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx) |
b6d7b6c5 | 7448 | { |
75a70cf9 | 7449 | size_t n = gimple_switch_num_labels (stmt); |
d31e54f1 | 7450 | size_t low, high; |
7451 | ||
7452 | /* Find case label for minimum of the value range or the next one. | |
7453 | At each iteration we are searching in [low, high - 1]. */ | |
b6d7b6c5 | 7454 | |
75a70cf9 | 7455 | for (low = start_idx, high = n; high != low; ) |
b6d7b6c5 | 7456 | { |
7457 | tree t; | |
7458 | int cmp; | |
75a70cf9 | 7459 | /* Note that i != high, so we never ask for n. */ |
d31e54f1 | 7460 | size_t i = (high + low) / 2; |
75a70cf9 | 7461 | t = gimple_switch_label (stmt, i); |
b6d7b6c5 | 7462 | |
7463 | /* Cache the result of comparing CASE_LOW and val. */ | |
7464 | cmp = tree_int_cst_compare (CASE_LOW (t), val); | |
7465 | ||
d31e54f1 | 7466 | if (cmp == 0) |
7467 | { | |
7468 | /* Ranges cannot be empty. */ | |
7469 | *idx = i; | |
7470 | return true; | |
7471 | } | |
7472 | else if (cmp > 0) | |
b6d7b6c5 | 7473 | high = i; |
7474 | else | |
d31e54f1 | 7475 | { |
7476 | low = i + 1; | |
7477 | if (CASE_HIGH (t) != NULL | |
7478 | && tree_int_cst_compare (CASE_HIGH (t), val) >= 0) | |
b6d7b6c5 | 7479 | { |
7480 | *idx = i; | |
7481 | return true; | |
7482 | } | |
7483 | } | |
7484 | } | |
7485 | ||
d31e54f1 | 7486 | *idx = high; |
b6d7b6c5 | 7487 | return false; |
7488 | } | |
7489 | ||
/* Searches the case label vector VEC for the range of CASE_LABELs that is used
   for values between MIN and MAX.  The first index is placed in MIN_IDX.  The
   last index is placed in MAX_IDX.  If the range of CASE_LABELs is empty
   then MAX_IDX < MIN_IDX.
   Returns true if the default label is not needed.  */

static bool
find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
                       size_t *max_idx)
{
  size_t i, j;
  /* A failed lookup means some values below MIN (resp. above MAX within
     [MIN, MAX]) fall through to the default label.  */
  bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
  bool max_take_default = !find_case_label_index (stmt, i, max, &j);

  if (i == j
      && min_take_default
      && max_take_default)
    {
      /* Only the default case label reached.
         Return an empty range.  */
      *min_idx = 1;
      *max_idx = 0;
      return false;
    }
  else
    {
      bool take_default = min_take_default || max_take_default;
      tree low, high;
      size_t k;

      /* J indexes the label just above MAX in this case; step back to the
         last label actually covered by [MIN, MAX].  */
      if (max_take_default)
        j--;

      /* If the case label range is continuous, we do not need
         the default case label.  Verify that.  */
      high = CASE_LOW (gimple_switch_label (stmt, i));
      if (CASE_HIGH (gimple_switch_label (stmt, i)))
        high = CASE_HIGH (gimple_switch_label (stmt, i));
      for (k = i + 1; k <= j; ++k)
        {
          low = CASE_LOW (gimple_switch_label (stmt, k));
          /* A gap of more than one between consecutive labels means some
             values in [MIN, MAX] hit the default.  */
          if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
            {
              take_default = true;
              break;
            }
          high = low;
          if (CASE_HIGH (gimple_switch_label (stmt, k)))
            high = CASE_HIGH (gimple_switch_label (stmt, k));
        }

      *min_idx = i;
      *max_idx = j;
      return !take_default;
    }
}
7546 | ||
/* Searches the case label vector VEC for the ranges of CASE_LABELs that are
   used in range VR.  The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and
   MAX_IDX2.  If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
   Returns true if the default label is not needed.  */

static bool
find_case_label_ranges (gimple stmt, value_range_t *vr, size_t *min_idx1,
                        size_t *max_idx1, size_t *min_idx2,
                        size_t *max_idx2)
{
  size_t i, j, k, l;
  unsigned int n = gimple_switch_num_labels (stmt);
  bool take_default;
  tree case_low, case_high;
  tree min = vr->min, max = vr->max;

  gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);

  take_default = !find_case_label_range (stmt, min, max, &i, &j);

  /* Set second range to empty.  */
  *min_idx2 = 1;
  *max_idx2 = 0;

  if (vr->type == VR_RANGE)
    {
      /* For a plain range a single label interval suffices.  */
      *min_idx1 = i;
      *max_idx1 = j;
      return !take_default;
    }

  /* VR is an anti-range from here on: the matching labels are those
     OUTSIDE [MIN, MAX].  Start from all labels and carve out the hole.  */

  /* Set first range to all case labels.  */
  *min_idx1 = 1;
  *max_idx1 = n - 1;

  /* No label intersects [MIN, MAX]; the anti-range excludes nothing.  */
  if (i > j)
    return false;

  /* Make sure all the values of case labels [i , j] are contained in
     range [MIN, MAX].  */
  case_low = CASE_LOW (gimple_switch_label (stmt, i));
  case_high = CASE_HIGH (gimple_switch_label (stmt, j));
  if (tree_int_cst_compare (case_low, min) < 0)
    i += 1;
  if (case_high != NULL_TREE
      && tree_int_cst_compare (max, case_high) < 0)
    j -= 1;

  if (i > j)
    return false;

  /* If the range spans case labels [i, j], the corresponding anti-range spans
     the labels [1, i - 1] and [j + 1, n - 1].  */
  k = j + 1;
  l = n - 1;
  if (k > l)
    {
      /* Right-hand interval is empty.  */
      k = 1;
      l = 0;
    }

  j = i - 1;
  i = 1;
  if (i > j)
    {
      /* Left-hand interval is empty: promote the right interval into the
         first slot and mark the second empty.  */
      i = k;
      j = l;
      k = 1;
      l = 0;
    }

  *min_idx1 = i;
  *max_idx1 = j;
  *min_idx2 = k;
  *max_idx2 = l;
  return false;
}
7624 | ||
/* Visit switch statement STMT.  If we can determine which edge
   will be taken out of STMT's basic block, record it in
   *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
{
  tree op, val;
  value_range_t *vr;
  size_t i = 0, j = 0, k, l;
  bool take_default;

  *taken_edge_p = NULL;
  op = gimple_switch_index (stmt);
  /* Only SSA names carry range information.  */
  if (TREE_CODE (op) != SSA_NAME)
    return SSA_PROP_VARYING;

  vr = get_value_range (op);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting switch expression with operand ");
      print_generic_expr (dump_file, op, 0);
      fprintf (dump_file, " with known range ");
      dump_value_range (dump_file, vr);
      fprintf (dump_file, "\n");
    }

  /* Need a usable numeric (anti-)range to narrow the destinations.  */
  if ((vr->type != VR_RANGE
       && vr->type != VR_ANTI_RANGE)
      || symbolic_range_p (vr))
    return SSA_PROP_VARYING;

  /* Find the single edge that is taken from the switch expression.  */
  take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);

  /* Check if the range spans no CASE_LABEL.  If so, we only reach the default
     label */
  if (j < i)
    {
      gcc_assert (take_default);
      val = gimple_switch_default_label (stmt);
    }
  else
    {
      /* Check if labels with index i to j and maybe the default label
         are all reaching the same label.  */

      val = gimple_switch_label (stmt, i);
      if (take_default
          && CASE_LABEL (gimple_switch_default_label (stmt))
          != CASE_LABEL (val))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  not a single destination for this "
                     "range\n");
          return SSA_PROP_VARYING;
        }
      /* First label interval [i, j].  */
      for (++i; i <= j; ++i)
        {
          if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "  not a single destination for this "
                         "range\n");
              return SSA_PROP_VARYING;
            }
        }
      /* Second label interval [k, l] (empty unless VR was an anti-range).  */
      for (; k <= l; ++k)
        {
          if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "  not a single destination for this "
                         "range\n");
              return SSA_PROP_VARYING;
            }
        }
    }

  *taken_edge_p = find_edge (gimple_bb (stmt),
                             label_to_block (CASE_LABEL (val)));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "  will take edge to ");
      print_generic_stmt (dump_file, CASE_LABEL (val), 0);
    }

  return SSA_PROP_INTERESTING;
}
7716 | ||
7717 | ||
88dbf20f | 7718 | /* Evaluate statement STMT. If the statement produces a useful range, |
7719 | return SSA_PROP_INTERESTING and record the SSA name with the | |
7720 | interesting range into *OUTPUT_P. | |
7721 | ||
7722 | If STMT is a conditional branch and we can determine its truth | |
7723 | value, the taken edge is recorded in *TAKEN_EDGE_P. | |
7724 | ||
7725 | If STMT produces a varying value, return SSA_PROP_VARYING. */ | |
7726 | ||
7727 | static enum ssa_prop_result | |
75a70cf9 | 7728 | vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p) |
88dbf20f | 7729 | { |
7730 | tree def; | |
7731 | ssa_op_iter iter; | |
88dbf20f | 7732 | |
7733 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
7734 | { | |
7735 | fprintf (dump_file, "\nVisiting statement:\n"); | |
75a70cf9 | 7736 | print_gimple_stmt (dump_file, stmt, 0, dump_flags); |
88dbf20f | 7737 | } |
7738 | ||
2193544e | 7739 | if (!stmt_interesting_for_vrp (stmt)) |
7740 | gcc_assert (stmt_ends_bb_p (stmt)); | |
7741 | else if (is_gimple_assign (stmt) || is_gimple_call (stmt)) | |
0b7282f1 | 7742 | return vrp_visit_assignment_or_call (stmt, output_p); |
75a70cf9 | 7743 | else if (gimple_code (stmt) == GIMPLE_COND) |
88dbf20f | 7744 | return vrp_visit_cond_stmt (stmt, taken_edge_p); |
75a70cf9 | 7745 | else if (gimple_code (stmt) == GIMPLE_SWITCH) |
b6d7b6c5 | 7746 | return vrp_visit_switch_stmt (stmt, taken_edge_p); |
88dbf20f | 7747 | |
7748 | /* All other statements produce nothing of interest for VRP, so mark | |
7749 | their outputs varying and prevent further simulation. */ | |
7750 | FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF) | |
e7d43f99 | 7751 | set_value_range_to_varying (get_value_range (def)); |
88dbf20f | 7752 | |
7753 | return SSA_PROP_VARYING; | |
7754 | } | |
7755 | ||
9c0a48ce | 7756 | /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and |
7757 | { VR1TYPE, VR0MIN, VR0MAX } and store the result | |
7758 | in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest | |
7759 | possible such range. The resulting range is not canonicalized. */ | |
7760 | ||
7761 | static void | |
7762 | union_ranges (enum value_range_type *vr0type, | |
7763 | tree *vr0min, tree *vr0max, | |
7764 | enum value_range_type vr1type, | |
7765 | tree vr1min, tree vr1max) | |
7766 | { | |
7767 | bool mineq = operand_equal_p (*vr0min, vr1min, 0); | |
7768 | bool maxeq = operand_equal_p (*vr0max, vr1max, 0); | |
7769 | ||
7770 | /* [] is vr0, () is vr1 in the following classification comments. */ | |
7771 | if (mineq && maxeq) | |
7772 | { | |
7773 | /* [( )] */ | |
7774 | if (*vr0type == vr1type) | |
7775 | /* Nothing to do for equal ranges. */ | |
7776 | ; | |
7777 | else if ((*vr0type == VR_RANGE | |
7778 | && vr1type == VR_ANTI_RANGE) | |
7779 | || (*vr0type == VR_ANTI_RANGE | |
7780 | && vr1type == VR_RANGE)) | |
7781 | { | |
7782 | /* For anti-range with range union the result is varying. */ | |
7783 | goto give_up; | |
7784 | } | |
7785 | else | |
7786 | gcc_unreachable (); | |
7787 | } | |
7788 | else if (operand_less_p (*vr0max, vr1min) == 1 | |
7789 | || operand_less_p (vr1max, *vr0min) == 1) | |
7790 | { | |
7791 | /* [ ] ( ) or ( ) [ ] | |
7792 | If the ranges have an empty intersection, result of the union | |
7793 | operation is the anti-range or if both are anti-ranges | |
7794 | it covers all. */ | |
7795 | if (*vr0type == VR_ANTI_RANGE | |
7796 | && vr1type == VR_ANTI_RANGE) | |
7797 | goto give_up; | |
7798 | else if (*vr0type == VR_ANTI_RANGE | |
7799 | && vr1type == VR_RANGE) | |
7800 | ; | |
7801 | else if (*vr0type == VR_RANGE | |
7802 | && vr1type == VR_ANTI_RANGE) | |
7803 | { | |
7804 | *vr0type = vr1type; | |
7805 | *vr0min = vr1min; | |
7806 | *vr0max = vr1max; | |
7807 | } | |
7808 | else if (*vr0type == VR_RANGE | |
7809 | && vr1type == VR_RANGE) | |
7810 | { | |
7811 | /* The result is the convex hull of both ranges. */ | |
7812 | if (operand_less_p (*vr0max, vr1min) == 1) | |
7813 | { | |
7814 | /* If the result can be an anti-range, create one. */ | |
7815 | if (TREE_CODE (*vr0max) == INTEGER_CST | |
7816 | && TREE_CODE (vr1min) == INTEGER_CST | |
7817 | && vrp_val_is_min (*vr0min) | |
7818 | && vrp_val_is_max (vr1max)) | |
7819 | { | |
7820 | tree min = int_const_binop (PLUS_EXPR, | |
ddb1be65 | 7821 | *vr0max, |
e913b5cd | 7822 | build_int_cst (TREE_TYPE (*vr0max), 1)); |
9c0a48ce | 7823 | tree max = int_const_binop (MINUS_EXPR, |
ddb1be65 | 7824 | vr1min, |
e913b5cd | 7825 | build_int_cst (TREE_TYPE (vr1min), 1)); |
9c0a48ce | 7826 | if (!operand_less_p (max, min)) |
7827 | { | |
7828 | *vr0type = VR_ANTI_RANGE; | |
7829 | *vr0min = min; | |
7830 | *vr0max = max; | |
7831 | } | |
7832 | else | |
7833 | *vr0max = vr1max; | |
7834 | } | |
7835 | else | |
7836 | *vr0max = vr1max; | |
7837 | } | |
7838 | else | |
7839 | { | |
7840 | /* If the result can be an anti-range, create one. */ | |
7841 | if (TREE_CODE (vr1max) == INTEGER_CST | |
7842 | && TREE_CODE (*vr0min) == INTEGER_CST | |
7843 | && vrp_val_is_min (vr1min) | |
7844 | && vrp_val_is_max (*vr0max)) | |
7845 | { | |
7846 | tree min = int_const_binop (PLUS_EXPR, | |
e913b5cd | 7847 | vr1max, |
7848 | build_int_cst (TREE_TYPE (vr1max), 1)); | |
9c0a48ce | 7849 | tree max = int_const_binop (MINUS_EXPR, |
e913b5cd | 7850 | *vr0min, |
7851 | build_int_cst (TREE_TYPE (*vr0min), 1)); | |
9c0a48ce | 7852 | if (!operand_less_p (max, min)) |
7853 | { | |
7854 | *vr0type = VR_ANTI_RANGE; | |
7855 | *vr0min = min; | |
7856 | *vr0max = max; | |
7857 | } | |
7858 | else | |
7859 | *vr0min = vr1min; | |
7860 | } | |
7861 | else | |
7862 | *vr0min = vr1min; | |
7863 | } | |
7864 | } | |
7865 | else | |
7866 | gcc_unreachable (); | |
7867 | } | |
7868 | else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1) | |
7869 | && (mineq || operand_less_p (*vr0min, vr1min) == 1)) | |
7870 | { | |
7871 | /* [ ( ) ] or [( ) ] or [ ( )] */ | |
7872 | if (*vr0type == VR_RANGE | |
7873 | && vr1type == VR_RANGE) | |
7874 | ; | |
7875 | else if (*vr0type == VR_ANTI_RANGE | |
7876 | && vr1type == VR_ANTI_RANGE) | |
7877 | { | |
7878 | *vr0type = vr1type; | |
7879 | *vr0min = vr1min; | |
7880 | *vr0max = vr1max; | |
7881 | } | |
7882 | else if (*vr0type == VR_ANTI_RANGE | |
7883 | && vr1type == VR_RANGE) | |
7884 | { | |
7885 | /* Arbitrarily choose the right or left gap. */ | |
7886 | if (!mineq && TREE_CODE (vr1min) == INTEGER_CST) | |
ddb1be65 | 7887 | *vr0max = int_const_binop (MINUS_EXPR, vr1min, |
e913b5cd | 7888 | build_int_cst (TREE_TYPE (vr1min), 1)); |
9c0a48ce | 7889 | else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST) |
e913b5cd | 7890 | *vr0min = int_const_binop (PLUS_EXPR, vr1max, |
7891 | build_int_cst (TREE_TYPE (vr1max), 1)); | |
9c0a48ce | 7892 | else |
7893 | goto give_up; | |
7894 | } | |
7895 | else if (*vr0type == VR_RANGE | |
7896 | && vr1type == VR_ANTI_RANGE) | |
7897 | /* The result covers everything. */ | |
7898 | goto give_up; | |
7899 | else | |
7900 | gcc_unreachable (); | |
7901 | } | |
7902 | else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1) | |
7903 | && (mineq || operand_less_p (vr1min, *vr0min) == 1)) | |
7904 | { | |
7905 | /* ( [ ] ) or ([ ] ) or ( [ ]) */ | |
7906 | if (*vr0type == VR_RANGE | |
7907 | && vr1type == VR_RANGE) | |
7908 | { | |
7909 | *vr0type = vr1type; | |
7910 | *vr0min = vr1min; | |
7911 | *vr0max = vr1max; | |
7912 | } | |
7913 | else if (*vr0type == VR_ANTI_RANGE | |
7914 | && vr1type == VR_ANTI_RANGE) | |
7915 | ; | |
7916 | else if (*vr0type == VR_RANGE | |
7917 | && vr1type == VR_ANTI_RANGE) | |
7918 | { | |
7919 | *vr0type = VR_ANTI_RANGE; | |
7920 | if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST) | |
7921 | { | |
e913b5cd | 7922 | *vr0max = int_const_binop (MINUS_EXPR, *vr0min, |
7923 | build_int_cst (TREE_TYPE (*vr0min), 1)); | |
9c0a48ce | 7924 | *vr0min = vr1min; |
7925 | } | |
7926 | else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST) | |
7927 | { | |
e913b5cd | 7928 | *vr0min = int_const_binop (PLUS_EXPR, *vr0max, |
7929 | build_int_cst (TREE_TYPE (*vr0max), 1)); | |
9c0a48ce | 7930 | *vr0max = vr1max; |
7931 | } | |
7932 | else | |
7933 | goto give_up; | |
7934 | } | |
7935 | else if (*vr0type == VR_ANTI_RANGE | |
7936 | && vr1type == VR_RANGE) | |
7937 | /* The result covers everything. */ | |
7938 | goto give_up; | |
7939 | else | |
7940 | gcc_unreachable (); | |
7941 | } | |
7942 | else if ((operand_less_p (vr1min, *vr0max) == 1 | |
7943 | || operand_equal_p (vr1min, *vr0max, 0)) | |
7fb9d91f | 7944 | && operand_less_p (*vr0min, vr1min) == 1 |
7945 | && operand_less_p (*vr0max, vr1max) == 1) | |
9c0a48ce | 7946 | { |
7947 | /* [ ( ] ) or [ ]( ) */ | |
7948 | if (*vr0type == VR_RANGE | |
7949 | && vr1type == VR_RANGE) | |
7950 | *vr0max = vr1max; | |
7951 | else if (*vr0type == VR_ANTI_RANGE | |
7952 | && vr1type == VR_ANTI_RANGE) | |
7953 | *vr0min = vr1min; | |
7954 | else if (*vr0type == VR_ANTI_RANGE | |
7955 | && vr1type == VR_RANGE) | |
7956 | { | |
7957 | if (TREE_CODE (vr1min) == INTEGER_CST) | |
ddb1be65 | 7958 | *vr0max = int_const_binop (MINUS_EXPR, vr1min, |
e913b5cd | 7959 | build_int_cst (TREE_TYPE (vr1min), 1)); |
9c0a48ce | 7960 | else |
7961 | goto give_up; | |
7962 | } | |
7963 | else if (*vr0type == VR_RANGE | |
7964 | && vr1type == VR_ANTI_RANGE) | |
7965 | { | |
7966 | if (TREE_CODE (*vr0max) == INTEGER_CST) | |
7967 | { | |
7968 | *vr0type = vr1type; | |
e913b5cd | 7969 | *vr0min = int_const_binop (PLUS_EXPR, *vr0max, |
7970 | build_int_cst (TREE_TYPE (*vr0max), 1)); | |
9c0a48ce | 7971 | *vr0max = vr1max; |
7972 | } | |
7973 | else | |
7974 | goto give_up; | |
7975 | } | |
7976 | else | |
7977 | gcc_unreachable (); | |
7978 | } | |
7979 | else if ((operand_less_p (*vr0min, vr1max) == 1 | |
7980 | || operand_equal_p (*vr0min, vr1max, 0)) | |
7fb9d91f | 7981 | && operand_less_p (vr1min, *vr0min) == 1 |
7982 | && operand_less_p (vr1max, *vr0max) == 1) | |
9c0a48ce | 7983 | { |
7984 | /* ( [ ) ] or ( )[ ] */ | |
7985 | if (*vr0type == VR_RANGE | |
7986 | && vr1type == VR_RANGE) | |
7987 | *vr0min = vr1min; | |
7988 | else if (*vr0type == VR_ANTI_RANGE | |
7989 | && vr1type == VR_ANTI_RANGE) | |
7990 | *vr0max = vr1max; | |
7991 | else if (*vr0type == VR_ANTI_RANGE | |
7992 | && vr1type == VR_RANGE) | |
7993 | { | |
7994 | if (TREE_CODE (vr1max) == INTEGER_CST) | |
e913b5cd | 7995 | *vr0min = int_const_binop (PLUS_EXPR, vr1max, |
7996 | build_int_cst (TREE_TYPE (vr1max), 1)); | |
9c0a48ce | 7997 | else |
7998 | goto give_up; | |
7999 | } | |
8000 | else if (*vr0type == VR_RANGE | |
8001 | && vr1type == VR_ANTI_RANGE) | |
8002 | { | |
8003 | if (TREE_CODE (*vr0min) == INTEGER_CST) | |
8004 | { | |
8005 | *vr0type = vr1type; | |
8006 | *vr0min = vr1min; | |
e913b5cd | 8007 | *vr0max = int_const_binop (MINUS_EXPR, *vr0min, |
8008 | build_int_cst (TREE_TYPE (*vr0min), 1)); | |
9c0a48ce | 8009 | } |
8010 | else | |
8011 | goto give_up; | |
8012 | } | |
8013 | else | |
8014 | gcc_unreachable (); | |
8015 | } | |
8016 | else | |
8017 | goto give_up; | |
8018 | ||
8019 | return; | |
8020 | ||
8021 | give_up: | |
8022 | *vr0type = VR_VARYING; | |
8023 | *vr0min = NULL_TREE; | |
8024 | *vr0max = NULL_TREE; | |
8025 | } | |
8026 | ||
/* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
   { VR1TYPE, VR1MIN, VR1MAX } and store the result
   in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
   possible such range.  The resulting range is not canonicalized.  */

static void
intersect_ranges (enum value_range_type *vr0type,
		  tree *vr0min, tree *vr0max,
		  enum value_range_type vr1type,
		  tree vr1min, tree vr1max)
{
  bool mineq = operand_equal_p (*vr0min, vr1min, 0);
  bool maxeq = operand_equal_p (*vr0max, vr1max, 0);

  /* [] is vr0, () is vr1 in the following classification comments.  */
  if (mineq && maxeq)
    {
      /* [(  )] */
      if (*vr0type == vr1type)
	/* Nothing to do for equal ranges.  */
	;
      else if ((*vr0type == VR_RANGE
		&& vr1type == VR_ANTI_RANGE)
	       || (*vr0type == VR_ANTI_RANGE
		   && vr1type == VR_RANGE))
	{
	  /* For anti-range with range intersection the result is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  /* operand_less_p is compared against 1 explicitly throughout because it
     is tri-state; NOTE(review): presumably a non-1 result means "not less
     or not comparable" -- confirm against its definition.  */
  else if (operand_less_p (*vr0max, vr1min) == 1
	   || operand_less_p (vr1max, *vr0min) == 1)
    {
      /* [ ] ( ) or ( ) [ ]
	 If the ranges have an empty intersection, the result of the
	 intersect operation is the range for intersecting an
	 anti-range with a range or empty when intersecting two ranges.  */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_ANTI_RANGE)
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If the anti-ranges are adjacent to each other merge them.
	     Adjacency is detected by the gap between the constant bounds
	     being exactly 1.  */
	  if (TREE_CODE (*vr0max) == INTEGER_CST
	      && TREE_CODE (vr1min) == INTEGER_CST
	      && operand_less_p (*vr0max, vr1min) == 1
	      && integer_onep (int_const_binop (MINUS_EXPR,
						vr1min, *vr0max)))
	    *vr0max = vr1max;
	  else if (TREE_CODE (vr1max) == INTEGER_CST
		   && TREE_CODE (*vr0min) == INTEGER_CST
		   && operand_less_p (vr1max, *vr0min) == 1
		   && integer_onep (int_const_binop (MINUS_EXPR,
						     *vr0min, vr1max)))
	    *vr0min = vr1min;
	  /* Else arbitrarily take VR0.  */
	}
    }
  else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
    {
      /* [ (  ) ] or [(  ) ] or [ (  )] -- vr1 is contained in vr0.  */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	{
	  /* If both are ranges the result is the inner one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* Choose the right gap if the left one is empty.  */
	  if (mineq)
	    {
	      if (TREE_CODE (vr1max) == INTEGER_CST)
		*vr0min = int_const_binop (PLUS_EXPR, vr1max,
					   build_int_cst (TREE_TYPE (vr1max), 1));
	      else
		*vr0min = vr1max;
	    }
	  /* Choose the left gap if the right one is empty.  */
	  else if (maxeq)
	    {
	      if (TREE_CODE (vr1min) == INTEGER_CST)
		*vr0max = int_const_binop (MINUS_EXPR, vr1min,
					   build_int_cst (TREE_TYPE (vr1min), 1));
	      else
		*vr0max = vr1min;
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (*vr0min)
		   && vrp_val_is_max (*vr0max))
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	  /* Else choose the range.  */
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	/* If both are anti-ranges the result is the outer one.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
    {
      /* ( [  ] ) or ([  ] ) or ( [  ]) -- vr0 is contained in vr1.  */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	/* Choose the inner range.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Choose the right gap if the left is empty.  */
	  if (mineq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0max) == INTEGER_CST)
		*vr0min = int_const_binop (PLUS_EXPR, *vr0max,
					   build_int_cst (TREE_TYPE (*vr0max), 1));
	      else
		*vr0min = *vr0max;
	      *vr0max = vr1max;
	    }
	  /* Choose the left gap if the right is empty.  */
	  else if (maxeq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0min) == INTEGER_CST)
		*vr0max = int_const_binop (MINUS_EXPR, *vr0min,
					   build_int_cst (TREE_TYPE (*vr0min), 1));
	      else
		*vr0max = *vr0min;
	      *vr0min = vr1min;
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (vr1min)
		   && vrp_val_is_max (vr1max))
	    ;
	  /* Else choose the range.  */
	  else
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If both are anti-ranges the result is the outer one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (vr1type == VR_ANTI_RANGE
	       && *vr0type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (vr1min, *vr0max) == 1
	    || operand_equal_p (vr1min, *vr0max, 0))
	   && operand_less_p (*vr0min, vr1min) == 1)
    {
      /* [  (  ]  ) or [  ](  ) -- vr1 overlaps vr0 on the right.  */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
				       build_int_cst (TREE_TYPE (vr1min), 1));
	  else
	    *vr0max = vr1min;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_RANGE;
	  if (TREE_CODE (*vr0max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
				       build_int_cst (TREE_TYPE (*vr0max), 1));
	  else
	    *vr0min = *vr0max;
	  *vr0max = vr1max;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (*vr0min, vr1max) == 1
	    || operand_equal_p (*vr0min, vr1max, 0))
	   && operand_less_p (vr1min, *vr0min) == 1)
    {
      /* (  [  )  ] or (  )[  ] -- vr1 overlaps vr0 on the left.  */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
				       build_int_cst (TREE_TYPE (vr1max), 1));
	  else
	    *vr0min = vr1max;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_RANGE;
	  if (TREE_CODE (*vr0min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
				       build_int_cst (TREE_TYPE (*vr0min), 1));
	  else
	    *vr0max = *vr0min;
	  *vr0min = vr1min;
	}
      else
	gcc_unreachable ();
    }

  /* If no case above matched, simply use { *VR0TYPE, *VR0MIN, *VR0MAX }
     as result for the intersection.  That's always a conservative
     correct estimate.  */

  return;
}
8301 | ||
8302 | ||
8303 | /* Intersect the two value-ranges *VR0 and *VR1 and store the result | |
8304 | in *VR0. This may not be the smallest possible such range. */ | |
8305 | ||
8306 | static void | |
a339107e | 8307 | vrp_intersect_ranges_1 (value_range_t *vr0, value_range_t *vr1) |
04dbf3c4 | 8308 | { |
8309 | value_range_t saved; | |
8310 | ||
8311 | /* If either range is VR_VARYING the other one wins. */ | |
8312 | if (vr1->type == VR_VARYING) | |
8313 | return; | |
8314 | if (vr0->type == VR_VARYING) | |
8315 | { | |
8316 | copy_value_range (vr0, vr1); | |
8317 | return; | |
8318 | } | |
8319 | ||
8320 | /* When either range is VR_UNDEFINED the resulting range is | |
8321 | VR_UNDEFINED, too. */ | |
8322 | if (vr0->type == VR_UNDEFINED) | |
8323 | return; | |
8324 | if (vr1->type == VR_UNDEFINED) | |
8325 | { | |
8326 | set_value_range_to_undefined (vr0); | |
8327 | return; | |
8328 | } | |
8329 | ||
8330 | /* Save the original vr0 so we can return it as conservative intersection | |
8331 | result when our worker turns things to varying. */ | |
8332 | saved = *vr0; | |
8333 | intersect_ranges (&vr0->type, &vr0->min, &vr0->max, | |
8334 | vr1->type, vr1->min, vr1->max); | |
8335 | /* Make sure to canonicalize the result though as the inversion of a | |
8336 | VR_RANGE can still be a VR_RANGE. */ | |
8337 | set_and_canonicalize_value_range (vr0, vr0->type, | |
8338 | vr0->min, vr0->max, vr0->equiv); | |
8339 | /* If that failed, use the saved original VR0. */ | |
8340 | if (vr0->type == VR_VARYING) | |
8341 | { | |
8342 | *vr0 = saved; | |
8343 | return; | |
8344 | } | |
8345 | /* If the result is VR_UNDEFINED there is no need to mess with | |
8346 | the equivalencies. */ | |
8347 | if (vr0->type == VR_UNDEFINED) | |
8348 | return; | |
8349 | ||
8350 | /* The resulting set of equivalences for range intersection is the union of | |
8351 | the two sets. */ | |
8352 | if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv) | |
8353 | bitmap_ior_into (vr0->equiv, vr1->equiv); | |
8354 | else if (vr1->equiv && !vr0->equiv) | |
8355 | bitmap_copy (vr0->equiv, vr1->equiv); | |
8356 | } | |
88dbf20f | 8357 | |
a339107e | 8358 | static void |
8359 | vrp_intersect_ranges (value_range_t *vr0, value_range_t *vr1) | |
8360 | { | |
8361 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
8362 | { | |
8363 | fprintf (dump_file, "Intersecting\n "); | |
8364 | dump_value_range (dump_file, vr0); | |
8365 | fprintf (dump_file, "\nand\n "); | |
8366 | dump_value_range (dump_file, vr1); | |
8367 | fprintf (dump_file, "\n"); | |
8368 | } | |
8369 | vrp_intersect_ranges_1 (vr0, vr1); | |
8370 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
8371 | { | |
8372 | fprintf (dump_file, "to\n "); | |
8373 | dump_value_range (dump_file, vr0); | |
8374 | fprintf (dump_file, "\n"); | |
8375 | } | |
8376 | } | |
8377 | ||
/* Meet operation for value ranges.  Given two value ranges VR0 and
   VR1, store in VR0 a range that contains both VR0 and VR1.  This
   may not be the smallest possible such range.  */

static void
vrp_meet_1 (value_range_t *vr0, value_range_t *vr1)
{
  value_range_t saved;

  /* UNDEFINED is the identity of the meet: the other operand wins.  */
  if (vr0->type == VR_UNDEFINED)
    {
      set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
      return;
    }

  if (vr1->type == VR_UNDEFINED)
    {
      /* VR0 already has the resulting range.  */
      return;
    }

  /* VARYING absorbs everything.  */
  if (vr0->type == VR_VARYING)
    {
      /* Nothing to do.  VR0 already has the resulting range.  */
      return;
    }

  if (vr1->type == VR_VARYING)
    {
      set_value_range_to_varying (vr0);
      return;
    }

  /* Keep a copy of VR0 so the fallback below can still inspect the
     pre-union range after union_ranges has overwritten it.  */
  saved = *vr0;
  union_ranges (&vr0->type, &vr0->min, &vr0->max,
		vr1->type, vr1->min, vr1->max);
  if (vr0->type == VR_VARYING)
    {
      /* Failed to find an efficient meet.  Before giving up and setting
	 the result to VARYING, see if we can at least derive a useful
	 anti-range.  FIXME, all this nonsense about distinguishing
	 anti-ranges from ranges is necessary because of the odd
	 semantics of range_includes_zero_p and friends.  */
      if (((saved.type == VR_RANGE
	    && range_includes_zero_p (saved.min, saved.max) == 0)
	   || (saved.type == VR_ANTI_RANGE
	       && range_includes_zero_p (saved.min, saved.max) == 1))
	  && ((vr1->type == VR_RANGE
	       && range_includes_zero_p (vr1->min, vr1->max) == 0)
	      || (vr1->type == VR_ANTI_RANGE
		  && range_includes_zero_p (vr1->min, vr1->max) == 1)))
	{
	  /* Both operands exclude zero, so the meet is at least ~[0, 0].  */
	  set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));

	  /* Since this meet operation did not result from the meeting of
	     two equivalent names, VR0 cannot have any equivalences.  */
	  if (vr0->equiv)
	    bitmap_clear (vr0->equiv);
	  return;
	}

      set_value_range_to_varying (vr0);
      return;
    }
  set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
				    vr0->equiv);
  if (vr0->type == VR_VARYING)
    return;

  /* The resulting set of equivalences is always the intersection of
     the two sets.  */
  if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
    bitmap_and_into (vr0->equiv, vr1->equiv);
  else if (vr0->equiv && !vr1->equiv)
    bitmap_clear (vr0->equiv);
}
72940ce4 | 8454 | |
9c0a48ce | 8455 | static void |
8456 | vrp_meet (value_range_t *vr0, value_range_t *vr1) | |
8457 | { | |
8458 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
8459 | { | |
8460 | fprintf (dump_file, "Meeting\n "); | |
8461 | dump_value_range (dump_file, vr0); | |
8462 | fprintf (dump_file, "\nand\n "); | |
8463 | dump_value_range (dump_file, vr1); | |
8464 | fprintf (dump_file, "\n"); | |
8465 | } | |
8466 | vrp_meet_1 (vr0, vr1); | |
8467 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
8468 | { | |
8469 | fprintf (dump_file, "to\n "); | |
8470 | dump_value_range (dump_file, vr0); | |
8471 | fprintf (dump_file, "\n"); | |
052f71a4 | 8472 | } |
88dbf20f | 8473 | } |
8474 | ||
eea12c72 | 8475 | |
/* Visit all arguments for PHI node PHI that flow through executable
   edges.  If a valid value range can be derived from all the incoming
   value ranges, set a new range for the LHS of PHI.  Returns an
   ssa_prop_result telling the propagator whether the range changed.  */

static enum ssa_prop_result
vrp_visit_phi_node (gimple phi)
{
  size_t i;
  tree lhs = PHI_RESULT (phi);
  value_range_t *lhs_vr = get_value_range (lhs);
  value_range_t vr_result = VR_INITIALIZER;
  bool first = true;
  /* Number of executable incoming edges now vs. on the previous visit;
     used below to detect whether a new edge became executable.  */
  int edges, old_edges;
  struct loop *l;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  edges = 0;
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      edge e = gimple_phi_arg_edge (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
	      "    Argument #%d (%d -> %d %sexecutable)\n",
	      (int) i, e->src->index, e->dest->index,
	      (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
	}

      if (e->flags & EDGE_EXECUTABLE)
	{
	  tree arg = PHI_ARG_DEF (phi, i);
	  value_range_t vr_arg;

	  ++edges;

	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      vr_arg = *(get_value_range (arg));
	      /* Do not allow equivalences or symbolic ranges to leak in from
		 backedges.  That creates invalid equivalencies.
		 See PR53465 and PR54767.  */
	      if (e->flags & EDGE_DFS_BACK)
		{
		  if (vr_arg.type == VR_RANGE
		      || vr_arg.type == VR_ANTI_RANGE)
		    {
		      vr_arg.equiv = NULL;
		      if (symbolic_range_p (&vr_arg))
			{
			  vr_arg.type = VR_VARYING;
			  vr_arg.min = NULL_TREE;
			  vr_arg.max = NULL_TREE;
			}
		    }
		}
	      else
		{
		  /* If the non-backedge arguments range is VR_VARYING then
		     we can still try recording a simple equivalence by
		     using the singleton range [arg, arg].  */
		  if (vr_arg.type == VR_VARYING)
		    {
		      vr_arg.type = VR_RANGE;
		      vr_arg.min = arg;
		      vr_arg.max = arg;
		      vr_arg.equiv = NULL;
		    }
		}
	    }
	  else
	    {
	      /* A constant argument yields the singleton range [arg, arg],
	         after stripping any TREE_OVERFLOW marker.  */
	      if (TREE_OVERFLOW_P (arg))
		arg = drop_tree_overflow (arg);

	      vr_arg.type = VR_RANGE;
	      vr_arg.min = arg;
	      vr_arg.max = arg;
	      vr_arg.equiv = NULL;
	    }

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t");
	      print_generic_expr (dump_file, arg, dump_flags);
	      fprintf (dump_file, ": ");
	      dump_value_range (dump_file, &vr_arg);
	      fprintf (dump_file, "\n");
	    }

	  /* Accumulate the meet of all executable argument ranges.  */
	  if (first)
	    copy_value_range (&vr_result, &vr_arg);
	  else
	    vrp_meet (&vr_result, &vr_arg);
	  first = false;

	  /* Once VARYING the meet cannot improve; stop early.  */
	  if (vr_result.type == VR_VARYING)
	    break;
	}
    }

  if (vr_result.type == VR_VARYING)
    goto varying;
  else if (vr_result.type == VR_UNDEFINED)
    goto update_range;

  old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
  vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;

  /* To prevent infinite iterations in the algorithm, derive ranges
     when the new value is slightly bigger or smaller than the
     previous one.  We don't do this if we have seen a new executable
     edge; this helps us avoid an overflow infinity for conditionals
     which are not in a loop.  If the old value-range was VR_UNDEFINED
     use the updated range and iterate one more time.  */
  if (edges > 0
      && gimple_phi_num_args (phi) > 1
      && edges == old_edges
      && lhs_vr->type != VR_UNDEFINED)
    {
      /* Compare old and new ranges, fall back to varying if the
	 values are not comparable.  */
      int cmp_min = compare_values (lhs_vr->min, vr_result.min);
      if (cmp_min == -2)
	goto varying;
      int cmp_max = compare_values (lhs_vr->max, vr_result.max);
      if (cmp_max == -2)
	goto varying;

      /* For non VR_RANGE or for pointers fall back to varying if
	 the range changed.  */
      if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
	   || POINTER_TYPE_P (TREE_TYPE (lhs)))
	  && (cmp_min != 0 || cmp_max != 0))
	goto varying;

      /* If the new minimum is larger than the previous one
	 retain the old value.  If the new minimum value is smaller
	 than the previous one and not -INF go all the way to -INF + 1.
	 In the first case, to avoid infinite bouncing between different
	 minimums, and in the other case to avoid iterating millions of
	 times to reach -INF.  Going to -INF + 1 also lets the following
	 iteration compute whether there will be any overflow, at the
	 expense of one additional iteration.  */
      if (cmp_min < 0)
	vr_result.min = lhs_vr->min;
      else if (cmp_min > 0
	       && !vrp_val_is_min (vr_result.min))
	vr_result.min
	  = int_const_binop (PLUS_EXPR,
			     vrp_val_min (TREE_TYPE (vr_result.min)),
			     build_int_cst (TREE_TYPE (vr_result.min), 1));

      /* Similarly for the maximum value.  */
      if (cmp_max > 0)
	vr_result.max = lhs_vr->max;
      else if (cmp_max < 0
	       && !vrp_val_is_max (vr_result.max))
	vr_result.max
	  = int_const_binop (MINUS_EXPR,
			     vrp_val_max (TREE_TYPE (vr_result.min)),
			     build_int_cst (TREE_TYPE (vr_result.min), 1));

      /* If we dropped either bound to +-INF then if this is a loop
	 PHI node SCEV may know more about its value-range.  */
      if ((cmp_min > 0 || cmp_min < 0
	   || cmp_max < 0 || cmp_max > 0)
	  && (l = loop_containing_stmt (phi))
	  && l->header == gimple_bb (phi))
	adjust_range_with_scev (&vr_result, l, phi, lhs);

      /* If we will end up with a (-INF, +INF) range, set it to
	 VARYING.  Same if the previous max value was invalid for
	 the type and we end up with vr_result.min > vr_result.max.  */
      if ((vrp_val_is_max (vr_result.max)
	   && vrp_val_is_min (vr_result.min))
	  || compare_values (vr_result.min,
			     vr_result.max) > 0)
	goto varying;
    }

  /* If the new range is different than the previous value, keep
     iterating.  */
update_range:
  if (update_value_range (lhs, &vr_result))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Found new range for ");
	  print_generic_expr (dump_file, lhs, 0);
	  fprintf (dump_file, ": ");
	  dump_value_range (dump_file, &vr_result);
	  fprintf (dump_file, "\n");
	}

      return SSA_PROP_INTERESTING;
    }

  /* Nothing changed, don't add outgoing edges.  */
  return SSA_PROP_NOT_INTERESTING;

  /* No match found.  Set the LHS to VARYING.  */
varying:
  set_value_range_to_varying (lhs_vr);
  return SSA_PROP_VARYING;
}
8686 | ||
/* Simplify boolean operations if the source is known
   to be already a boolean.  STMT is an EQ_EXPR/NE_EXPR assignment whose
   operands both have a [0, 1] value range; rewrite it in place (via GSI)
   to a copy, a conversion, or a BIT_XOR_EXPR.  Returns true if the
   statement was changed.  */
static bool
simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
  tree lhs, op0, op1;
  bool need_conversion;

  /* We handle only !=/== case here.  */
  gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);

  op0 = gimple_assign_rhs1 (stmt);
  if (!op_with_boolean_value_range_p (op0))
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!op_with_boolean_value_range_p (op1))
    return false;

  /* Reduce number of cases to handle to NE_EXPR.  As there is no
     BIT_XNOR_EXPR we cannot replace A == B with a single statement.
     A == C (C constant 0/1) becomes A != (C ^ 1).  */
  if (rhs_code == EQ_EXPR)
    {
      if (TREE_CODE (op1) == INTEGER_CST)
	op1 = int_const_binop (BIT_XOR_EXPR, op1,
			       build_int_cst (TREE_TYPE (op1), 1));
      else
	return false;
    }

  lhs = gimple_assign_lhs (stmt);
  need_conversion
    = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));

  /* Make sure to not sign-extend a 1-bit 1 when converting the result.  */
  if (need_conversion
      && !TYPE_UNSIGNED (TREE_TYPE (op0))
      && TYPE_PRECISION (TREE_TYPE (op0)) == 1
      && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
    return false;

  /* For A != 0 we can substitute A itself.  */
  if (integer_zerop (op1))
    gimple_assign_set_rhs_with_ops (gsi,
				    need_conversion
				    ? NOP_EXPR : TREE_CODE (op0),
				    op0, NULL_TREE);
  /* For A != B we substitute A ^ B.  Either with conversion
     (XOR into a fresh SSA name, then convert)...  */
  else if (need_conversion)
    {
      tree tem = make_ssa_name (TREE_TYPE (op0), NULL);
      gimple newop = gimple_build_assign_with_ops (BIT_XOR_EXPR, tem, op0, op1);
      gsi_insert_before (gsi, newop, GSI_SAME_STMT);
      gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem, NULL_TREE);
    }
  /* Or without.  */
  else
    gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
  update_stmt (gsi_stmt (*gsi));

  return true;
}
8750 | ||
/* Simplify a division or modulo operator to a right shift or
   bitwise and if the first operand is unsigned or is greater
   than zero and the second operand is an exact power of two.
   Returns true if STMT was rewritten in place.
   NOTE(review): op1 being a power of two appears to be established by
   the caller -- tree_log2 (op1) below assumes it; confirm at call site.  */

static bool
simplify_div_or_mod_using_ranges (gimple stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
  tree val = NULL;
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);
  value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));

  if (TYPE_UNSIGNED (TREE_TYPE (op0)))
    {
      /* Unsigned operands are trivially non-negative.  */
      val = integer_one_node;
    }
  else
    {
      bool sop = false;

      /* VAL is nonzero iff the range proves op0 >= 0; SOP records
	 whether that proof relied on signed overflow being undefined.  */
      val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);

      if (val
	  && sop
	  && integer_onep (val)
	  && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);
	  warning_at (location, OPT_Wstrict_overflow,
		      "assuming signed overflow does not occur when "
		      "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
	}
    }

  if (val && integer_onep (val))
    {
      tree t;

      if (rhs_code == TRUNC_DIV_EXPR)
	{
	  /* x / 2^k  ->  x >> k.  */
	  t = build_int_cst (integer_type_node, tree_log2 (op1));
	  gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
	  gimple_assign_set_rhs1 (stmt, op0);
	  gimple_assign_set_rhs2 (stmt, t);
	}
      else
	{
	  /* x % 2^k  ->  x & (2^k - 1).  */
	  t = build_int_cst (TREE_TYPE (op1), 1);
	  t = int_const_binop (MINUS_EXPR, op1, t);
	  t = fold_convert (TREE_TYPE (op0), t);

	  gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
	  gimple_assign_set_rhs1 (stmt, op0);
	  gimple_assign_set_rhs2 (stmt, t);
	}

      update_stmt (stmt);
      return true;
    }

  return false;
}
15ea1735 | 8819 | |
/* If the operand to an ABS_EXPR is >= 0, then eliminate the
   ABS_EXPR.  If the operand is <= 0, then simplify the
   ABS_EXPR into a NEGATE_EXPR.  Returns true if STMT was changed.  */

static bool
simplify_abs_using_ranges (gimple stmt)
{
  tree val = NULL;
  tree op = gimple_assign_rhs1 (stmt);
  tree type = TREE_TYPE (op);
  value_range_t *vr = get_value_range (op);

  if (TYPE_UNSIGNED (type))
    {
      /* abs of an unsigned value is the value itself; VAL == 0 below
	 selects the plain-copy (SSA_NAME) rewrite.  */
      val = integer_zero_node;
    }
  else if (vr)
    {
      bool sop = false;

      /* First try to prove op <= 0 (abs -> negate)...  */
      val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
      if (!val)
	{
	  /* ...otherwise try op >= 0 (abs -> copy), inverting the
	     result so VAL keeps the LE_EXPR convention.  */
	  sop = false;
	  val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
					  &sop);

	  if (val)
	    {
	      if (integer_zerop (val))
		val = integer_one_node;
	      else if (integer_onep (val))
		val = integer_zero_node;
	    }
	}

      if (val
	  && (integer_onep (val) || integer_zerop (val)))
	{
	  /* Warn if the proof relied on signed overflow being undefined.  */
	  if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
	    {
	      location_t location;

	      if (!gimple_has_location (stmt))
		location = input_location;
	      else
		location = gimple_location (stmt);
	      warning_at (location, OPT_Wstrict_overflow,
			  "assuming signed overflow does not occur when "
			  "simplifying %<abs (X)%> to %<X%> or %<-X%>");
	    }

	  gimple_assign_set_rhs1 (stmt, op);
	  if (integer_onep (val))
	    gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
	  else
	    gimple_assign_set_rhs_code (stmt, SSA_NAME);
	  update_stmt (stmt);
	  return true;
	}
    }

  return false;
}
8884 | ||
/* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
   If all the bits that are being cleared by & are already
   known to be zero from VR, or all the bits that are being
   set by | are already known to be one from VR, the bit
   operation is redundant.  Rewrites STMT (via GSI) to a plain copy of
   the surviving operand and returns true on success.  */

static bool
simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);
  tree op = NULL_TREE;
  value_range_t vr0 = VR_INITIALIZER;
  value_range_t vr1 = VR_INITIALIZER;
  wide_int may_be_nonzero0, may_be_nonzero1;
  wide_int must_be_nonzero0, must_be_nonzero1;
  wide_int mask;

  /* Derive a range for each operand: from VRP for SSA names, a
     singleton range for invariants; anything else defeats us.  */
  if (TREE_CODE (op0) == SSA_NAME)
    vr0 = *(get_value_range (op0));
  else if (is_gimple_min_invariant (op0))
    set_value_range_to_value (&vr0, op0, NULL);
  else
    return false;

  if (TREE_CODE (op1) == SSA_NAME)
    vr1 = *(get_value_range (op1));
  else if (is_gimple_min_invariant (op1))
    set_value_range_to_value (&vr1, op1, NULL);
  else
    return false;

  /* Turn each range into may-be-nonzero / must-be-nonzero bit masks.  */
  if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0,
				  &must_be_nonzero0))
    return false;
  if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1,
				  &must_be_nonzero1))
    return false;

  switch (gimple_assign_rhs_code (stmt))
    {
    case BIT_AND_EXPR:
      /* op0 & op1 == op0 when every bit op0 may have set is forced on
	 by op1 (and symmetrically for op1).  */
      mask = may_be_nonzero0.and_not (must_be_nonzero1);
      if (mask == 0)
	{
	  op = op0;
	  break;
	}
      mask = may_be_nonzero1.and_not (must_be_nonzero0);
      if (mask == 0)
	{
	  op = op1;
	  break;
	}
      break;
    case BIT_IOR_EXPR:
      /* op0 | op1 == op1 when every bit op0 may contribute is already
	 guaranteed set in op1 (and symmetrically).  */
      mask = may_be_nonzero0.and_not (must_be_nonzero1);
      if (mask == 0)
	{
	  op = op1;
	  break;
	}
      mask = may_be_nonzero1.and_not (must_be_nonzero0);
      if (mask == 0)
	{
	  op = op0;
	  break;
	}
      break;
    default:
      gcc_unreachable ();
    }

  if (op == NULL_TREE)
    return false;

  gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op, NULL);
  update_stmt (gsi_stmt (*gsi));
  return true;
}
8965 | ||
/* We are comparing trees OP0 and OP1 using COND_CODE.  OP0 has
   a known value range VR.

   If there is one and only one value which will satisfy the
   conditional, then return that value.  Else return NULL.  */

static tree
test_for_singularity (enum tree_code cond_code, tree op0,
		      tree op1, value_range_t *vr)
{
  tree min = NULL;
  tree max = NULL;

  /* Extract minimum/maximum values which satisfy the
     conditional as it was written.  */
  if (cond_code == LE_EXPR || cond_code == LT_EXPR)
    {
      /* This should not be negative infinity; there is no overflow
	 here.  */
      min = TYPE_MIN_VALUE (TREE_TYPE (op0));

      max = op1;
      /* For a strict comparison the largest satisfying value is
	 OP1 - 1, unless OP1 is an overflow infinity which must not
	 be decremented.  */
      if (cond_code == LT_EXPR && !is_overflow_infinity (max))
	{
	  tree one = build_int_cst (TREE_TYPE (op0), 1);
	  max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
	  /* Signal that MAX is a folded artifact, not user-written
	     code, so later warnings do not fire on it.  */
	  if (EXPR_P (max))
	    TREE_NO_WARNING (max) = 1;
	}
    }
  else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
    {
      /* This should not be positive infinity; there is no overflow
	 here.  */
      max = TYPE_MAX_VALUE (TREE_TYPE (op0));

      min = op1;
      /* Mirror of the LT_EXPR case: for GT the smallest satisfying
	 value is OP1 + 1.  */
      if (cond_code == GT_EXPR && !is_overflow_infinity (min))
	{
	  tree one = build_int_cst (TREE_TYPE (op0), 1);
	  min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
	  if (EXPR_P (min))
	    TREE_NO_WARNING (min) = 1;
	}
    }

  /* Now refine the minimum and maximum values using any
     value range information we have for op0.  */
  if (min && max)
    {
      if (compare_values (vr->min, min) == 1)
	min = vr->min;
      if (compare_values (vr->max, max) == -1)
	max = vr->max;

      /* If the new min/max values have converged to a single value,
	 then there is only one value which can satisfy the condition,
	 return that value.  */
      if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
	return min;
    }
  return NULL;
}
9029 | ||
/* Return whether the value range *VR fits in an integer type specified
   by DEST_PRECISION and signedness DEST_SGN.  */

static bool
range_fits_type_p (value_range_t *vr, unsigned dest_precision, signop dest_sgn)
{
  tree src_type;
  unsigned src_precision;
  widest_int tem;
  signop src_sgn;

  /* We can only handle integral and pointer types.  */
  src_type = TREE_TYPE (vr->min);
  if (!INTEGRAL_TYPE_P (src_type)
      && !POINTER_TYPE_P (src_type))
    return false;

  /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
     and so is an identity transform.  */
  src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
  src_sgn = TYPE_SIGN (src_type);
  if ((src_precision < dest_precision
       && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
      || (src_precision == dest_precision && src_sgn == dest_sgn))
    return true;

  /* Now we can only handle ranges with constant bounds.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* For sign changes, the MSB of the wide_int has to be clear.
     An unsigned value with its MSB set cannot be represented by
     a signed wide_int, while a negative value cannot be represented
     by an unsigned wide_int.  */
  if (src_sgn != dest_sgn
      && (wi::lts_p (vr->min, 0) || wi::lts_p (vr->max, 0)))
    return false;

  /* Then we can perform the conversion on both ends and compare
     the result for equality.  */
  tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn);
  if (tem != wi::to_widest (vr->min))
    return false;
  tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn);
  if (tem != wi::to_widest (vr->max))
    return false;

  return true;
}
9081 | ||
/* Simplify a conditional using a relational operator to an equality
   test if the range information indicates only one value can satisfy
   the original conditional.  */

static bool
simplify_cond_using_ranges (gimple stmt)
{
  tree op0 = gimple_cond_lhs (stmt);
  tree op1 = gimple_cond_rhs (stmt);
  enum tree_code cond_code = gimple_cond_code (stmt);

  /* Part 1: a relational comparison of an SSA name against an
     invariant may collapse to an equality/inequality test when only
     one value of OP0 can (or cannot) satisfy it.  */
  if (cond_code != NE_EXPR
      && cond_code != EQ_EXPR
      && TREE_CODE (op0) == SSA_NAME
      && INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && is_gimple_min_invariant (op1))
    {
      value_range_t *vr = get_value_range (op0);

      /* If we have range information for OP0, then we might be
	 able to simplify this conditional. */
      if (vr->type == VR_RANGE)
	{
	  tree new_tree = test_for_singularity (cond_code, op0, op1, vr);

	  if (new_tree)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Simplified relational ");
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, " into ");
		}

	      /* Exactly one value satisfies the condition: rewrite it
		 as OP0 == NEW_TREE.  */
	      gimple_cond_set_code (stmt, EQ_EXPR);
	      gimple_cond_set_lhs (stmt, op0);
	      gimple_cond_set_rhs (stmt, new_tree);

	      update_stmt (stmt);

	      if (dump_file)
		{
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, "\n");
		}

	      return true;
	    }

	  /* Try again after inverting the condition.  We only deal
	     with integral types here, so no need to worry about
	     issues with inverting FP comparisons.  */
	  cond_code = invert_tree_comparison (cond_code, false);
	  new_tree = test_for_singularity (cond_code, op0, op1, vr);

	  if (new_tree)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Simplified relational ");
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, " into ");
		}

	      /* Exactly one value fails the condition: rewrite it as
		 OP0 != NEW_TREE.  */
	      gimple_cond_set_code (stmt, NE_EXPR);
	      gimple_cond_set_lhs (stmt, op0);
	      gimple_cond_set_rhs (stmt, new_tree);

	      update_stmt (stmt);

	      if (dump_file)
		{
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, "\n");
		}

	      return true;
	    }
	}
    }
  /* NOTE: past this point COND_CODE may have been inverted by the
     attempt above; it is only consulted below to distinguish
     equality from relational codes, which inversion preserves.  */

  /* If we have a comparison of an SSA_NAME (OP0) against a constant,
     see if OP0 was set by a type conversion where the source of
     the conversion is another SSA_NAME with a range that fits
     into the range of OP0's type.

     If so, the conversion is redundant as the earlier SSA_NAME can be
     used for the comparison directly if we just massage the constant in the
     comparison.  */
  if (TREE_CODE (op0) == SSA_NAME
      && TREE_CODE (op1) == INTEGER_CST)
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (op0);
      tree innerop;

      if (!is_gimple_assign (def_stmt)
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
	return false;

      innerop = gimple_assign_rhs1 (def_stmt);

      if (TREE_CODE (innerop) == SSA_NAME
	  && !POINTER_TYPE_P (TREE_TYPE (innerop)))
	{
	  value_range_t *vr = get_value_range (innerop);

	  if (range_int_cst_p (vr)
	      && range_fits_type_p (vr,
				    TYPE_PRECISION (TREE_TYPE (op0)),
				    TYPE_SIGN (TREE_TYPE (op0)))
	      && int_fits_type_p (op1, TREE_TYPE (innerop))
	      /* The range must not have overflowed, or if it did overflow
		 we must not be wrapping/trapping overflow and optimizing
		 with strict overflow semantics.  */
	      && ((!is_negative_overflow_infinity (vr->min)
		   && !is_positive_overflow_infinity (vr->max))
		  || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (innerop))))
	    {
	      /* If the range overflowed and the user has asked for warnings
		 when strict overflow semantics were used to optimize code,
		 issue an appropriate warning.  */
	      if (cond_code != EQ_EXPR && cond_code != NE_EXPR
		  && (is_negative_overflow_infinity (vr->min)
		      || is_positive_overflow_infinity (vr->max))
		  && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_CONDITIONAL))
		{
		  location_t location;

		  if (!gimple_has_location (stmt))
		    location = input_location;
		  else
		    location = gimple_location (stmt);
		  warning_at (location, OPT_Wstrict_overflow,
			      "assuming signed overflow does not occur when "
			      "simplifying conditional");
		}

	      /* Compare the inner operand against the constant
		 re-expressed in the inner type.  */
	      tree newconst = fold_convert (TREE_TYPE (innerop), op1);
	      gimple_cond_set_lhs (stmt, innerop);
	      gimple_cond_set_rhs (stmt, newconst);
	      return true;
	    }
	}
    }

  return false;
}
9229 | ||
/* Simplify a switch statement using the value range of the switch
   argument.  */

static bool
simplify_switch_using_ranges (gimple stmt)
{
  tree op = gimple_switch_index (stmt);
  value_range_t *vr;
  bool take_default;
  edge e;
  edge_iterator ei;
  /* [i, j] and [k, l] are the (up to two) label index ranges that
     remain reachable; the defaults below encode two empty ranges.  */
  size_t i = 0, j = 0, n, n2;
  tree vec2;
  switch_update su;
  size_t k = 1, l = 0;

  if (TREE_CODE (op) == SSA_NAME)
    {
      vr = get_value_range (op);

      /* We can only handle integer ranges.  */
      if ((vr->type != VR_RANGE
	   && vr->type != VR_ANTI_RANGE)
	  || symbolic_range_p (vr))
	return false;

      /* Find case label for min/max of the value range.  */
      take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
    }
  else if (TREE_CODE (op) == INTEGER_CST)
    {
      /* A constant index selects at most one case label.  */
      take_default = !find_case_label_index (stmt, 1, op, &i);
      if (take_default)
	{
	  i = 1;
	  j = 0;
	}
      else
	{
	  j = i;
	}
    }
  else
    return false;

  n = gimple_switch_num_labels (stmt);

  /* Bail out if this is just all edges taken.  */
  if (i == 1
      && j == n - 1
      && take_default)
    return false;

  /* Build a new vector of taken case labels.  */
  vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
  n2 = 0;

  /* Add the default edge, if necessary.  */
  if (take_default)
    TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);

  for (; i <= j; ++i, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);

  for (; k <= l; ++k, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);

  /* Mark needed edges.  A -1 aux value tags successors that remain
     reachable through one of the kept labels.  */
  for (i = 0; i < n2; ++i)
    {
      e = find_edge (gimple_bb (stmt),
		     label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
      e->aux = (void *)-1;
    }

  /* Queue not needed edges for later removal.  */
  FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
    {
      if (e->aux == (void *)-1)
	{
	  /* Reset the mark so edge aux fields are left clean.  */
	  e->aux = NULL;
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "removing unreachable case label\n");
	}
      to_remove_edges.safe_push (e);
      e->flags &= ~EDGE_EXECUTABLE;
    }

  /* And queue an update for the stmt.  Note we return false: the
     statement itself is not modified here, only queued; the actual
     rewrite happens after propagation finishes.  */
  su.stmt = stmt;
  su.vec = vec2;
  to_update_switch_stmts.safe_push (su);
  return false;
}
9328 | ||
/* Simplify an integral conversion from an SSA name in STMT, i.e. a
   chain inner -> middle -> final where STMT is the outer conversion.
   If dropping the middle conversion cannot change the final value for
   any value in the inner operand's range, forward the inner operand
   directly to STMT.  */

static bool
simplify_conversion_using_ranges (gimple stmt)
{
  tree innerop, middleop, finaltype;
  gimple def_stmt;
  value_range_t *innervr;
  signop inner_sgn, middle_sgn, final_sgn;
  unsigned inner_prec, middle_prec, final_prec;
  widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;

  finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
  if (!INTEGRAL_TYPE_P (finaltype))
    return false;
  middleop = gimple_assign_rhs1 (stmt);
  def_stmt = SSA_NAME_DEF_STMT (middleop);
  /* The middle operand must itself be produced by a conversion.  */
  if (!is_gimple_assign (def_stmt)
      || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
    return false;
  innerop = gimple_assign_rhs1 (def_stmt);
  if (TREE_CODE (innerop) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
    return false;

  /* Get the value-range of the inner operand.  */
  innervr = get_value_range (innerop);
  if (innervr->type != VR_RANGE
      || TREE_CODE (innervr->min) != INTEGER_CST
      || TREE_CODE (innervr->max) != INTEGER_CST)
    return false;

  /* Simulate the conversion chain to check if the result is equal if
     the middle conversion is removed.  */
  innermin = wi::to_widest (innervr->min);
  innermax = wi::to_widest (innervr->max);

  inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
  middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
  final_prec = TYPE_PRECISION (finaltype);

  /* If the first conversion is not injective, the second must not
     be widening.  */
  if (wi::gtu_p (innermax - innermin,
		 wi::mask <widest_int> (middle_prec, false))
      && middle_prec < final_prec)
    return false;
  /* We also want a medium value so that we can track the effect that
     narrowing conversions with sign change have.  */
  inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
  if (inner_sgn == UNSIGNED)
    innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
  else
    innermed = 0;
  /* Use the medium value only if it lies strictly inside the range;
     otherwise fall back to the minimum so the check degenerates to
     two points.  */
  if (wi::cmp (innermin, innermed, inner_sgn) >= 0
      || wi::cmp (innermed, innermax, inner_sgn) >= 0)
    innermed = innermin;

  middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
  middlemin = wi::ext (innermin, middle_prec, middle_sgn);
  middlemed = wi::ext (innermed, middle_prec, middle_sgn);
  middlemax = wi::ext (innermax, middle_prec, middle_sgn);

  /* Require that the final conversion applied to both the original
     and the intermediate range produces the same result.  */
  final_sgn = TYPE_SIGN (finaltype);
  if (wi::ext (middlemin, final_prec, final_sgn)
      != wi::ext (innermin, final_prec, final_sgn)
      || wi::ext (middlemed, final_prec, final_sgn)
      != wi::ext (innermed, final_prec, final_sgn)
      || wi::ext (middlemax, final_prec, final_sgn)
      != wi::ext (innermax, final_prec, final_sgn))
    return false;

  /* The middle conversion is redundant; convert directly from the
     inner operand.  */
  gimple_assign_set_rhs1 (stmt, innerop);
  update_stmt (stmt);
  return true;
}
9407 | ||
/* Simplify a conversion from integral SSA name to float in STMT.
   When the value range of RHS1 fits into a (possibly narrower) signed
   integer mode for which the target has a direct int-to-float pattern,
   insert a preliminary integer conversion so the float conversion can
   use that pattern.  */

static bool
simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  value_range_t *vr = get_value_range (rhs1);
  machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
  machine_mode mode;
  tree tem;
  gimple conv;

  /* We can only handle constant ranges.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* First check if we can use a signed type in place of an unsigned.  */
  if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
      && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
	  != CODE_FOR_nothing)
      && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
    mode = TYPE_MODE (TREE_TYPE (rhs1));
  /* If we can do the conversion in the current input mode do nothing.  */
  else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
			TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
    return false;
  /* Otherwise search for a mode we can use, starting from the narrowest
     integer mode available.  */
  else
    {
      mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
      do
	{
	  /* If we cannot do a signed conversion to float from mode
	     or if the value-range does not fit in the signed type
	     try with a wider mode.  */
	  if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
	      && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
	    break;

	  mode = GET_MODE_WIDER_MODE (mode);
	  /* But do not widen the input.  Instead leave that to the
	     optabs expansion code.  */
	  if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
	    return false;
	}
      while (mode != VOIDmode);
      if (mode == VOIDmode)
	return false;
    }

  /* It works, insert a truncation or sign-change before the
     float conversion.  */
  tem = make_ssa_name (build_nonstandard_integer_type
			 (GET_MODE_PRECISION (mode), 0), NULL);
  conv = gimple_build_assign_with_ops (NOP_EXPR, tem, rhs1, NULL_TREE);
  gsi_insert_before (gsi, conv, GSI_SAME_STMT);
  gimple_assign_set_rhs1 (stmt, tem);
  update_stmt (stmt);

  return true;
}
9472 | ||
/* Simplify an internal fn call using ranges if possible.  When the
   ranges of both UBSAN check operands prove the operation cannot
   overflow, replace the checking call with the plain arithmetic
   statement.  */

static bool
simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  enum tree_code subcode;
  /* Map the UBSAN checking function to its underlying operation.  */
  switch (gimple_call_internal_fn (stmt))
    {
    case IFN_UBSAN_CHECK_ADD:
      subcode = PLUS_EXPR;
      break;
    case IFN_UBSAN_CHECK_SUB:
      subcode = MINUS_EXPR;
      break;
    case IFN_UBSAN_CHECK_MUL:
      subcode = MULT_EXPR;
      break;
    default:
      return false;
    }

  value_range_t vr0 = VR_INITIALIZER;
  value_range_t vr1 = VR_INITIALIZER;
  tree op0 = gimple_call_arg (stmt, 0);
  tree op1 = gimple_call_arg (stmt, 1);

  if (TREE_CODE (op0) == SSA_NAME)
    vr0 = *get_value_range (op0);
  else if (TREE_CODE (op0) == INTEGER_CST)
    set_value_range_to_value (&vr0, op0, NULL);
  else
    set_value_range_to_varying (&vr0);

  if (TREE_CODE (op1) == SSA_NAME)
    vr1 = *get_value_range (op1);
  else if (TREE_CODE (op1) == INTEGER_CST)
    set_value_range_to_value (&vr1, op1, NULL);
  else
    set_value_range_to_varying (&vr1);

  if (!range_int_cst_p (&vr0))
    {
      /* If one range is VR_ANTI_RANGE, VR_VARYING etc.,
	 optimize at least x = y + 0; x = y - 0; x = y * 0;
	 and x = y * 1; which never overflow.  OP1's range must
	 be [0,0] (for +/-) or within [0,1] (for *).  */
      if (!range_int_cst_p (&vr1))
	return false;
      if (tree_int_cst_sgn (vr1.min) == -1)
	return false;
      /* The bound is 0 for PLUS/MINUS and 1 for MULT.  */
      if (compare_tree_int (vr1.max, subcode == MULT_EXPR) == 1)
	return false;
    }
  else if (!range_int_cst_p (&vr1))
    {
      /* If one range is VR_ANTI_RANGE, VR_VARYING etc.,
	 optimize at least x = 0 + y; x = 0 * y; and x = 1 * y;
	 which never overflow.  x - y can still overflow for y
	 unknown, so MINUS is excluded.  */
      if (subcode == MINUS_EXPR)
	return false;
      /* NOTE(review): vr0 is known int_cst in this branch, so this
	 re-check looks redundant; kept as-is.  */
      if (!range_int_cst_p (&vr0))
	return false;
      if (tree_int_cst_sgn (vr0.min) == -1)
	return false;
      if (compare_tree_int (vr0.max, subcode == MULT_EXPR) == 1)
	return false;
    }
  else
    {
      /* Both ranges are constant: evaluate the operation on the
	 corner combinations and require none of them overflows.  */
      tree r1 = int_const_binop (subcode, vr0.min, vr1.min);
      tree r2 = int_const_binop (subcode, vr0.max, vr1.max);
      if (r1 == NULL_TREE || TREE_OVERFLOW (r1)
	  || r2 == NULL_TREE || TREE_OVERFLOW (r2))
	return false;
      if (subcode == MULT_EXPR)
	{
	  /* Multiplication of mixed-sign bounds needs the cross
	     corners as well.  */
	  tree r3 = int_const_binop (subcode, vr0.min, vr1.max);
	  tree r4 = int_const_binop (subcode, vr0.max, vr1.min);
	  if (r3 == NULL_TREE || TREE_OVERFLOW (r3)
	      || r4 == NULL_TREE || TREE_OVERFLOW (r4))
	    return false;
	}
    }

  /* No overflow possible: drop the check and emit the plain op.  */
  gimple g = gimple_build_assign_with_ops (subcode, gimple_call_lhs (stmt),
					   op0, op1);
  gsi_replace (gsi, g, false);
  return true;
}
9561 | ||
96c8d283 | 9562 | /* Simplify STMT using ranges if possible. */ |
9563 | ||
07aee51b | 9564 | static bool |
e31161b3 | 9565 | simplify_stmt_using_ranges (gimple_stmt_iterator *gsi) |
96c8d283 | 9566 | { |
e31161b3 | 9567 | gimple stmt = gsi_stmt (*gsi); |
75a70cf9 | 9568 | if (is_gimple_assign (stmt)) |
96c8d283 | 9569 | { |
75a70cf9 | 9570 | enum tree_code rhs_code = gimple_assign_rhs_code (stmt); |
7430df61 | 9571 | tree rhs1 = gimple_assign_rhs1 (stmt); |
96c8d283 | 9572 | |
e31161b3 | 9573 | switch (rhs_code) |
9574 | { | |
9575 | case EQ_EXPR: | |
9576 | case NE_EXPR: | |
eea7f7eb | 9577 | /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity |
9578 | if the RHS is zero or one, and the LHS are known to be boolean | |
9579 | values. */ | |
7430df61 | 9580 | if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) |
e31161b3 | 9581 | return simplify_truth_ops_using_ranges (gsi, stmt); |
9582 | break; | |
9583 | ||
96c8d283 | 9584 | /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR |
9585 | and BIT_AND_EXPR respectively if the first operand is greater | |
9586 | than zero and the second operand is an exact power of two. */ | |
e31161b3 | 9587 | case TRUNC_DIV_EXPR: |
9588 | case TRUNC_MOD_EXPR: | |
7430df61 | 9589 | if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)) |
e31161b3 | 9590 | && integer_pow2p (gimple_assign_rhs2 (stmt))) |
9591 | return simplify_div_or_mod_using_ranges (stmt); | |
9592 | break; | |
96c8d283 | 9593 | |
9594 | /* Transform ABS (X) into X or -X as appropriate. */ | |
e31161b3 | 9595 | case ABS_EXPR: |
7430df61 | 9596 | if (TREE_CODE (rhs1) == SSA_NAME |
9597 | && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) | |
e31161b3 | 9598 | return simplify_abs_using_ranges (stmt); |
9599 | break; | |
9600 | ||
273e780e | 9601 | case BIT_AND_EXPR: |
9602 | case BIT_IOR_EXPR: | |
9603 | /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR | |
9604 | if all the bits being cleared are already cleared or | |
9605 | all the bits being set are already set. */ | |
7430df61 | 9606 | if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) |
273e780e | 9607 | return simplify_bit_ops_using_ranges (gsi, stmt); |
9608 | break; | |
9609 | ||
7430df61 | 9610 | CASE_CONVERT: |
9611 | if (TREE_CODE (rhs1) == SSA_NAME | |
9612 | && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) | |
9613 | return simplify_conversion_using_ranges (stmt); | |
9614 | break; | |
9615 | ||
f0938d2c | 9616 | case FLOAT_EXPR: |
9617 | if (TREE_CODE (rhs1) == SSA_NAME | |
9618 | && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) | |
9619 | return simplify_float_conversion_using_ranges (gsi, stmt); | |
9620 | break; | |
9621 | ||
e31161b3 | 9622 | default: |
9623 | break; | |
9624 | } | |
96c8d283 | 9625 | } |
75a70cf9 | 9626 | else if (gimple_code (stmt) == GIMPLE_COND) |
e31161b3 | 9627 | return simplify_cond_using_ranges (stmt); |
75a70cf9 | 9628 | else if (gimple_code (stmt) == GIMPLE_SWITCH) |
e31161b3 | 9629 | return simplify_switch_using_ranges (stmt); |
509e8fea | 9630 | else if (is_gimple_call (stmt) |
9631 | && gimple_call_internal_p (stmt)) | |
9632 | return simplify_internal_call_using_ranges (gsi, stmt); | |
e31161b3 | 9633 | |
9634 | return false; | |
96c8d283 | 9635 | } |
9636 | ||
/* If the statement pointed by SI has a predicate whose value can be
   computed using the value range information computed by VRP, compute
   its value and return true.  Otherwise, return false.  */

static bool
fold_predicate_in (gimple_stmt_iterator *si)
{
  bool assignment_p = false;
  tree val;
  gimple stmt = gsi_stmt (*si);

  /* Predicates appear either as the RHS of a comparison assignment
     or as a GIMPLE_COND; evaluate either through VRP.  */
  if (is_gimple_assign (stmt)
      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
    {
      assignment_p = true;
      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
				      gimple_assign_rhs1 (stmt),
				      gimple_assign_rhs2 (stmt),
				      stmt);
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    val = vrp_evaluate_conditional (gimple_cond_code (stmt),
				    gimple_cond_lhs (stmt),
				    gimple_cond_rhs (stmt),
				    stmt);
  else
    return false;

  if (val)
    {
      /* For an assignment, the computed truth value must be expressed
	 in the type of the LHS.  */
      if (assignment_p)
        val = fold_convert (gimple_expr_type (stmt), val);

      if (dump_file)
	{
	  fprintf (dump_file, "Folding predicate ");
	  print_gimple_expr (dump_file, stmt, 0, 0);
	  fprintf (dump_file, " to ");
	  print_generic_expr (dump_file, val, 0);
	  fprintf (dump_file, "\n");
	}

      if (is_gimple_assign (stmt))
	gimple_assign_set_rhs_from_tree (si, val);
      else
	{
	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
	  /* Replace the condition with a constant-true or
	     constant-false one.  */
	  if (integer_zerop (val))
	    gimple_cond_make_false (stmt);
	  else if (integer_onep (val))
	    gimple_cond_make_true (stmt);
	  else
	    gcc_unreachable ();
	}

      return true;
    }

  return false;
}
9697 | ||
9698 | /* Callback for substitute_and_fold folding the stmt at *SI. */ | |
9699 | ||
9700 | static bool | |
9701 | vrp_fold_stmt (gimple_stmt_iterator *si) | |
9702 | { | |
9703 | if (fold_predicate_in (si)) | |
9704 | return true; | |
9705 | ||
9706 | return simplify_stmt_using_ranges (si); | |
9707 | } | |
9708 | ||
/* Stack of dest,src equivalency pairs that need to be restored after
   each attempt to thread a block's incoming edge to an outgoing edge.

   A NULL entry is used to mark the end of pairs which need to be
   restored for one threading attempt.  */
static vec<tree> equiv_stack;
62b180e1 | 9715 | |
a2a1fde2 | 9716 | /* A trivial wrapper so that we can present the generic jump threading |
9717 | code with a simple API for simplifying statements. STMT is the | |
9718 | statement we want to simplify, WITHIN_STMT provides the location | |
9719 | for any overflow warnings. */ | |
9720 | ||
62b180e1 | 9721 | static tree |
75a70cf9 | 9722 | simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt) |
62b180e1 | 9723 | { |
eff54963 | 9724 | if (gimple_code (stmt) == GIMPLE_COND) |
9725 | return vrp_evaluate_conditional (gimple_cond_code (stmt), | |
9726 | gimple_cond_lhs (stmt), | |
9727 | gimple_cond_rhs (stmt), within_stmt); | |
62b180e1 | 9728 | |
eff54963 | 9729 | if (gimple_code (stmt) == GIMPLE_ASSIGN) |
9730 | { | |
9731 | value_range_t new_vr = VR_INITIALIZER; | |
9732 | tree lhs = gimple_assign_lhs (stmt); | |
9733 | ||
9734 | if (TREE_CODE (lhs) == SSA_NAME | |
9735 | && (INTEGRAL_TYPE_P (TREE_TYPE (lhs)) | |
9736 | || POINTER_TYPE_P (TREE_TYPE (lhs)))) | |
9737 | { | |
9738 | extract_range_from_assignment (&new_vr, stmt); | |
9739 | if (range_int_cst_singleton_p (&new_vr)) | |
9740 | return new_vr.min; | |
9741 | } | |
9742 | } | |
62b180e1 | 9743 | |
eff54963 | 9744 | return NULL_TREE; |
62b180e1 | 9745 | } |
9746 | ||
/* Blocks which have more than one predecessor and more than
   one successor present jump threading opportunities, i.e.,
   when the block is reached from a specific predecessor, we
   may be able to determine which of the outgoing edges will
   be traversed.  When this optimization applies, we are able
   to avoid conditionals at runtime and we may expose secondary
   optimization opportunities.

   This routine is effectively a driver for the generic jump
   threading code.  It basically just presents the generic code
   with edges that may be suitable for jump threading.

   Unlike DOM, we do not iterate VRP if jump threading was successful.
   While iterating may expose new opportunities for VRP, it is expected
   those opportunities would be very limited and the compile time cost
   to expose those opportunities would be significant.

   As jump threading opportunities are discovered, they are registered
   for later realization.  */

static void
identify_jump_threads (void)
{
  basic_block bb;
  gimple dummy;
  int i;
  edge e;

  /* Ugh.  When substituting values earlier in this pass we can
     wipe the dominance information.  So rebuild the dominator
     information as we need it within the jump threading code.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* We do not allow VRP information to be used for jump threading
     across a back edge in the CFG.  Otherwise it becomes too
     difficult to avoid eliminating loop exit tests.  Of course
     EDGE_DFS_BACK is not accurate at this time so we have to
     recompute it.  */
  mark_dfs_back_edges ();

  /* Do not thread across edges we are about to remove.  Just marking
     them as EDGE_DFS_BACK will do.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    e->flags |= EDGE_DFS_BACK;

  /* Allocate our unwinder stack to unwind any temporary equivalences
     that might be recorded.  */
  equiv_stack.create (20);

  /* To avoid lots of silly node creation, we create a single
     conditional and just modify it in-place when attempting to
     thread jumps.  */
  dummy = gimple_build_cond (EQ_EXPR,
			     integer_zero_node, integer_zero_node,
			     NULL, NULL);

  /* Walk through all the blocks finding those which present a
     potential jump threading opportunity.  We could set this up
     as a dominator walker and record data during the walk, but
     I doubt it's worth the effort for the classes of jump
     threading opportunities we are trying to identify at this
     point in compilation.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple last;

      /* If the generic jump threading code does not find this block
	 interesting, then there is nothing to do.  */
      if (! potentially_threadable_block (bb))
	continue;

      /* We only care about blocks ending in a COND_EXPR.  While there
	 may be some value in handling SWITCH_EXPR here, I doubt it's
	 terribly important.  */
      last = gsi_stmt (gsi_last_bb (bb));

      /* We're basically looking for a switch or any kind of conditional with
	 integral or pointer type arguments.  Note the type of the second
	 argument will be the same as the first argument, so no need to
	 check it explicitly.  */
      if (gimple_code (last) == GIMPLE_SWITCH
	  || (gimple_code (last) == GIMPLE_COND
	      && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
	      && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
		  || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
	      && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
		  || is_gimple_min_invariant (gimple_cond_rhs (last)))))
	{
	  edge_iterator ei;

	  /* We've got a block with multiple predecessors and multiple
	     successors which also ends in a suitable conditional or
	     switch statement.  For each predecessor, see if we can thread
	     it to a specific successor.  */
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      /* Do not thread across back edges or abnormal edges
		 in the CFG.  */
	      if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
		continue;

	      /* NOTE(review): the TRUE argument presumably enables
		 handling of dominator/equivalence state inside the
		 generic threader — confirm against thread_across_edge
		 in tree-ssa-threadedge.c.  */
	      thread_across_edge (dummy, e, true, &equiv_stack,
				  simplify_stmt_for_jump_threading);
	    }
	}
    }

  /* We do not actually update the CFG or SSA graphs at this point as
     ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
     handle ASSERT_EXPRs gracefully.  */
}
9858 | ||
/* We identified all the jump threading opportunities earlier, but could
   not transform the CFG at that time.  This routine transforms the
   CFG and arranges for the dominator tree to be rebuilt if necessary.

   Note the SSA graph update will occur during the normal TODO
   processing by the pass manager.  */
static void
finalize_jump_threads (void)
{
  /* Realize the threads registered by identify_jump_threads.
     NOTE(review): the FALSE argument presumably disables loop-header
     copying — confirm against thread_through_all_blocks in
     tree-ssa-threadupdate.c.  */
  thread_through_all_blocks (false);

  /* Free the temporary-equivalence unwinder stack allocated in
     identify_jump_threads.  */
  equiv_stack.release ();
}
96c8d283 | 9871 | |
/* Traverse all the blocks folding conditionals with known ranges,
   record computed ranges on non-pointer SSA_NAMEs, and release all
   VRP data structures.  */

static void
vrp_finalize (void)
{
  size_t i;

  values_propagated = true;

  if (dump_file)
    {
      fprintf (dump_file, "\nValue ranges after VRP:\n\n");
      dump_all_value_ranges (dump_file);
      fprintf (dump_file, "\n");
    }

  /* Replace statements whose value is a constant singleton range and
     fold predicates using the range lattice (vrp_fold_stmt).  */
  substitute_and_fold (op_with_constant_singleton_value_range,
		       vrp_fold_stmt, false);

  if (warn_array_bounds)
    check_all_array_refs ();

  /* We must identify jump threading opportunities before we release
     the datastructures built by VRP.  */
  identify_jump_threads ();

  /* Set value range to non pointer SSA_NAMEs.  Only constant-bounded
     VR_RANGE/VR_ANTI_RANGE results are recorded; VARYING and
     UNDEFINED ranges carry no information.  */
  for (i = 0; i < num_vr_values; i++)
    if (vr_value[i])
      {
	tree name = ssa_name (i);

	if (!name
	    || POINTER_TYPE_P (TREE_TYPE (name))
	    || (vr_value[i]->type == VR_VARYING)
	    || (vr_value[i]->type == VR_UNDEFINED))
	  continue;

	if ((TREE_CODE (vr_value[i]->min) == INTEGER_CST)
	    && (TREE_CODE (vr_value[i]->max) == INTEGER_CST)
	    && (vr_value[i]->type == VR_RANGE
		|| vr_value[i]->type == VR_ANTI_RANGE))
	  set_range_info (name, vr_value[i]->type, vr_value[i]->min,
			  vr_value[i]->max);
      }

  /* Free allocated memory.  */
  for (i = 0; i < num_vr_values; i++)
    if (vr_value[i])
      {
	BITMAP_FREE (vr_value[i]->equiv);
	free (vr_value[i]);
      }

  free (vr_value);
  free (vr_phi_edge_counts);

  /* So that we can distinguish between VRP data being available
     and not available.  */
  vr_value = NULL;
  vr_phi_edge_counts = NULL;
}
9935 | ||
9936 | ||
/* Main entry point to VRP (Value Range Propagation).  This pass is
   loosely based on J. R. C. Patterson, ``Accurate Static Branch
   Prediction by Value Range Propagation,'' in SIGPLAN Conference on
   Programming Language Design and Implementation, pp. 67-78, 1995.
   Also available at http://citeseer.ist.psu.edu/patterson95accurate.html

   This is essentially an SSA-CCP pass modified to deal with ranges
   instead of constants.

   While propagating ranges, we may find that two or more SSA name
   have equivalent, though distinct ranges.  For instance,

     1	x_9 = p_3->a;
     2	p_4 = ASSERT_EXPR <p_3, p_3 != 0>
     3	if (p_4 == q_2)
     4	  p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
     5	endif
     6	if (q_2)

   In the code above, pointer p_5 has range [q_2, q_2], but from the
   code we can also determine that p_5 cannot be NULL and, if q_2 had
   a non-varying range, p_5's range should also be compatible with it.

   These equivalences are created by two expressions: ASSERT_EXPR and
   copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
   result of another assertion, then we can use the fact that p_5 and
   p_4 are equivalent when evaluating p_5's range.

   Together with value ranges, we also propagate these equivalences
   between names so that we can take advantage of information from
   multiple ranges when doing final replacement.  Note that this
   equivalency relation is transitive but not symmetric.

   In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
   cannot assert that q_2 is equivalent to p_5 because q_2 may be used
   in contexts where that assertion does not hold (e.g., in line 6).

   TODO, the main difference between this pass and Patterson's is that
   we do not propagate edge probabilities.  We only compute whether
   edges can be taken or not.  That is, instead of having a spectrum
   of jump probabilities between 0 and 1, we only deal with 0, 1 and
   DON'T KNOW.  In the future, it may be worthwhile to propagate
   probabilities to aid branch prediction.  */

static unsigned int
execute_vrp (void)
{
  int i;
  edge e;
  switch_update *su;

  /* VRP needs loop information (for SCEV) and loop-closed SSA form.  */
  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();

  /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
     Inserting assertions may split edges which will invalidate
     EDGE_DFS_BACK.  */
  insert_range_assertions ();

  /* Worklists filled during propagation (switch optimization) and
     consumed below after jump threading.  */
  to_remove_edges.create (10);
  to_update_switch_stmts.create (5);
  threadedge_initialize_values ();

  /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
  mark_dfs_back_edges ();

  /* The core propagation: SSA propagator driven by the VRP visit
     callbacks, then folding/cleanup in vrp_finalize.  */
  vrp_initialize ();
  ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
  vrp_finalize ();

  free_numbers_of_iterations_estimates ();

  /* ASSERT_EXPRs must be removed before finalizing jump threads
     as finalizing jump threads calls the CFG cleanup code which
     does not properly handle ASSERT_EXPRs.  */
  remove_range_assertions ();

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  finalize_jump_threads ();

  /* Remove dead edges from SWITCH_EXPR optimization.  This leaves the
     CFG in a broken state and requires a cfg_cleanup run.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    remove_edge (e);
  /* Update SWITCH_EXPR case label vector.  */
  FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
    {
      size_t j;
      size_t n = TREE_VEC_LENGTH (su->vec);
      tree label;
      gimple_switch_set_num_labels (su->stmt, n);
      for (j = 0; j < n; j++)
	gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
      /* As we may have replaced the default label with a regular one
	 make sure to make it a real default label again.  This ensures
	 optimal expansion.  */
      label = gimple_switch_label (su->stmt, 0);
      CASE_LOW (label) = NULL_TREE;
      CASE_HIGH (label) = NULL_TREE;
    }

  /* Removing edges invalidates dominators and may leave loops needing
     a fixup pass.  */
  if (to_remove_edges.length () > 0)
    {
      free_dominance_info (CDI_DOMINATORS);
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  to_remove_edges.release ();
  to_update_switch_stmts.release ();
  threadedge_finalize_values ();

  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}
10059 | ||
namespace {

/* Pass descriptor for VRP: a GIMPLE pass requiring SSA form; CFG
   cleanup and an SSA update run afterwards (needed because jump
   threading and switch-edge removal leave the CFG dirty, see
   execute_vrp).  */
const pass_data pass_data_vrp =
{
  GIMPLE_PASS, /* type */
  "vrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_vrp : public gimple_opt_pass
{
public:
  pass_vrp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_vrp, ctxt)
  {}

  /* opt_pass methods: */
  /* clone allows the pass manager to schedule this pass more than
     once in the pipeline.  */
  opt_pass * clone () { return new pass_vrp (m_ctxt); }
  /* Run only when -ftree-vrp (or an option implying it) is on.  */
  virtual bool gate (function *) { return flag_tree_vrp != 0; }
  virtual unsigned int execute (function *) { return execute_vrp (); }

}; // class pass_vrp

} // anon namespace

/* Factory used by the pass manager to instantiate the VRP pass.  */
gimple_opt_pass *
make_pass_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt);
}