/* Support routines for Value Range Propagation (VRP).
   Copyright (C) 2005-2016 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
0bca51f0 DN |
20 | |
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "cfganal.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "intl.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-propagate.h"
#include "tree-chrec.h"
#include "tree-ssa-threadupdate.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "omp-low.h"
#include "target.h"
#include "case-cfn-macros.h"
455e6d5b RG |
61 | |
62 | /* Range of values that can be associated with an SSA_NAME after VRP | |
63 | has executed. */ | |
526ceb68 | 64 | struct value_range |
455e6d5b RG |
65 | { |
66 | /* Lattice value represented by this range. */ | |
67 | enum value_range_type type; | |
68 | ||
69 | /* Minimum and maximum values represented by this range. These | |
70 | values should be interpreted as follows: | |
71 | ||
72 | - If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must | |
73 | be NULL. | |
74 | ||
75 | - If TYPE == VR_RANGE then MIN holds the minimum value and | |
76 | MAX holds the maximum value of the range [MIN, MAX]. | |
77 | ||
78 | - If TYPE == ANTI_RANGE the variable is known to NOT | |
79 | take any values in the range [MIN, MAX]. */ | |
80 | tree min; | |
81 | tree max; | |
82 | ||
83 | /* Set of SSA names whose value ranges are equivalent to this one. | |
84 | This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE. */ | |
85 | bitmap equiv; | |
86 | }; | |
87 | ||
3c9c79e8 RG |
88 | #define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL } |
89 | ||
c4ab2baa RG |
90 | /* Set of SSA names found live during the RPO traversal of the function |
91 | for still active basic-blocks. */ | |
92 | static sbitmap *live; | |
93 | ||
94 | /* Return true if the SSA name NAME is live on the edge E. */ | |
95 | ||
96 | static bool | |
97 | live_on_edge (edge e, tree name) | |
98 | { | |
99 | return (live[e->dest->index] | |
d7c028c0 | 100 | && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name))); |
c4ab2baa | 101 | } |
0bca51f0 | 102 | |
0bca51f0 DN |
103 | /* Local functions. */ |
104 | static int compare_values (tree val1, tree val2); | |
12df8a7e | 105 | static int compare_values_warnv (tree val1, tree val2, bool *); |
526ceb68 TS |
106 | static void vrp_meet (value_range *, value_range *); |
107 | static void vrp_intersect_ranges (value_range *, value_range *); | |
2d3cd5d5 | 108 | static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code, |
6b99f156 JH |
109 | tree, tree, bool, bool *, |
110 | bool *); | |
0bca51f0 | 111 | |
227858d1 DN |
112 | /* Location information for ASSERT_EXPRs. Each instance of this |
113 | structure describes an ASSERT_EXPR for an SSA name. Since a single | |
114 | SSA name may have more than one assertion associated with it, these | |
115 | locations are kept in a linked list attached to the corresponding | |
116 | SSA name. */ | |
ff507401 | 117 | struct assert_locus |
0bca51f0 | 118 | { |
227858d1 DN |
119 | /* Basic block where the assertion would be inserted. */ |
120 | basic_block bb; | |
121 | ||
122 | /* Some assertions need to be inserted on an edge (e.g., assertions | |
123 | generated by COND_EXPRs). In those cases, BB will be NULL. */ | |
124 | edge e; | |
125 | ||
126 | /* Pointer to the statement that generated this assertion. */ | |
726a989a | 127 | gimple_stmt_iterator si; |
227858d1 DN |
128 | |
129 | /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */ | |
130 | enum tree_code comp_code; | |
131 | ||
132 | /* Value being compared against. */ | |
133 | tree val; | |
134 | ||
2ab8dbf4 RG |
135 | /* Expression to compare. */ |
136 | tree expr; | |
137 | ||
227858d1 | 138 | /* Next node in the linked list. */ |
ff507401 | 139 | assert_locus *next; |
227858d1 DN |
140 | }; |
141 | ||
227858d1 DN |
142 | /* If bit I is present, it means that SSA name N_i has a list of |
143 | assertions that should be inserted in the IL. */ | |
144 | static bitmap need_assert_for; | |
145 | ||
146 | /* Array of locations lists where to insert assertions. ASSERTS_FOR[I] | |
147 | holds a list of ASSERT_LOCUS_T nodes that describe where | |
148 | ASSERT_EXPRs for SSA name N_I should be inserted. */ | |
ff507401 | 149 | static assert_locus **asserts_for; |
227858d1 | 150 | |
227858d1 DN |
151 | /* Value range array. After propagation, VR_VALUE[I] holds the range |
152 | of values that SSA name N_I may take. */ | |
d9256277 | 153 | static unsigned num_vr_values; |
526ceb68 | 154 | static value_range **vr_value; |
d9256277 | 155 | static bool values_propagated; |
0bca51f0 | 156 | |
fc6827fe ILT |
157 | /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the |
158 | number of executable edges we saw the last time we visited the | |
159 | node. */ | |
160 | static int *vr_phi_edge_counts; | |
161 | ||
a79683d5 | 162 | struct switch_update { |
538dd0b7 | 163 | gswitch *stmt; |
b7814a18 | 164 | tree vec; |
a79683d5 | 165 | }; |
b7814a18 | 166 | |
9771b263 DN |
167 | static vec<edge> to_remove_edges; |
168 | static vec<switch_update> to_update_switch_stmts; | |
b7814a18 | 169 | |
0bca51f0 | 170 | |
84fb43a1 | 171 | /* Return the maximum value for TYPE. */ |
70b7b037 RG |
172 | |
173 | static inline tree | |
174 | vrp_val_max (const_tree type) | |
175 | { | |
176 | if (!INTEGRAL_TYPE_P (type)) | |
177 | return NULL_TREE; | |
178 | ||
70b7b037 RG |
179 | return TYPE_MAX_VALUE (type); |
180 | } | |
181 | ||
84fb43a1 | 182 | /* Return the minimum value for TYPE. */ |
70b7b037 RG |
183 | |
184 | static inline tree | |
185 | vrp_val_min (const_tree type) | |
186 | { | |
187 | if (!INTEGRAL_TYPE_P (type)) | |
188 | return NULL_TREE; | |
189 | ||
70b7b037 RG |
190 | return TYPE_MIN_VALUE (type); |
191 | } | |
192 | ||
193 | /* Return whether VAL is equal to the maximum value of its type. This | |
194 | will be true for a positive overflow infinity. We can't do a | |
195 | simple equality comparison with TYPE_MAX_VALUE because C typedefs | |
196 | and Ada subtypes can produce types whose TYPE_MAX_VALUE is not == | |
197 | to the integer constant with the same value in the type. */ | |
198 | ||
199 | static inline bool | |
200 | vrp_val_is_max (const_tree val) | |
201 | { | |
202 | tree type_max = vrp_val_max (TREE_TYPE (val)); | |
203 | return (val == type_max | |
204 | || (type_max != NULL_TREE | |
205 | && operand_equal_p (val, type_max, 0))); | |
206 | } | |
207 | ||
208 | /* Return whether VAL is equal to the minimum value of its type. This | |
209 | will be true for a negative overflow infinity. */ | |
210 | ||
211 | static inline bool | |
212 | vrp_val_is_min (const_tree val) | |
213 | { | |
214 | tree type_min = vrp_val_min (TREE_TYPE (val)); | |
215 | return (val == type_min | |
216 | || (type_min != NULL_TREE | |
217 | && operand_equal_p (val, type_min, 0))); | |
218 | } | |
219 | ||
220 | ||
12df8a7e ILT |
221 | /* Return whether TYPE should use an overflow infinity distinct from |
222 | TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to | |
223 | represent a signed overflow during VRP computations. An infinity | |
224 | is distinct from a half-range, which will go from some number to | |
225 | TYPE_{MIN,MAX}_VALUE. */ | |
226 | ||
227 | static inline bool | |
58f9752a | 228 | needs_overflow_infinity (const_tree type) |
12df8a7e | 229 | { |
84fb43a1 | 230 | return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type); |
12df8a7e ILT |
231 | } |
232 | ||
233 | /* Return whether TYPE can support our overflow infinity | |
234 | representation: we use the TREE_OVERFLOW flag, which only exists | |
235 | for constants. If TYPE doesn't support this, we don't optimize | |
236 | cases which would require signed overflow--we drop them to | |
237 | VARYING. */ | |
238 | ||
239 | static inline bool | |
58f9752a | 240 | supports_overflow_infinity (const_tree type) |
12df8a7e | 241 | { |
70b7b037 | 242 | tree min = vrp_val_min (type), max = vrp_val_max (type); |
b2b29377 | 243 | gcc_checking_assert (needs_overflow_infinity (type)); |
70b7b037 RG |
244 | return (min != NULL_TREE |
245 | && CONSTANT_CLASS_P (min) | |
246 | && max != NULL_TREE | |
247 | && CONSTANT_CLASS_P (max)); | |
12df8a7e ILT |
248 | } |
249 | ||
250 | /* VAL is the maximum or minimum value of a type. Return a | |
251 | corresponding overflow infinity. */ | |
252 | ||
253 | static inline tree | |
254 | make_overflow_infinity (tree val) | |
255 | { | |
77a74ed7 | 256 | gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val)); |
12df8a7e ILT |
257 | val = copy_node (val); |
258 | TREE_OVERFLOW (val) = 1; | |
259 | return val; | |
260 | } | |
261 | ||
262 | /* Return a negative overflow infinity for TYPE. */ | |
263 | ||
264 | static inline tree | |
265 | negative_overflow_infinity (tree type) | |
266 | { | |
77a74ed7 | 267 | gcc_checking_assert (supports_overflow_infinity (type)); |
70b7b037 | 268 | return make_overflow_infinity (vrp_val_min (type)); |
12df8a7e ILT |
269 | } |
270 | ||
271 | /* Return a positive overflow infinity for TYPE. */ | |
272 | ||
273 | static inline tree | |
274 | positive_overflow_infinity (tree type) | |
275 | { | |
77a74ed7 | 276 | gcc_checking_assert (supports_overflow_infinity (type)); |
70b7b037 | 277 | return make_overflow_infinity (vrp_val_max (type)); |
12df8a7e ILT |
278 | } |
279 | ||
280 | /* Return whether VAL is a negative overflow infinity. */ | |
281 | ||
282 | static inline bool | |
58f9752a | 283 | is_negative_overflow_infinity (const_tree val) |
12df8a7e | 284 | { |
cb460086 RB |
285 | return (TREE_OVERFLOW_P (val) |
286 | && needs_overflow_infinity (TREE_TYPE (val)) | |
70b7b037 | 287 | && vrp_val_is_min (val)); |
12df8a7e ILT |
288 | } |
289 | ||
290 | /* Return whether VAL is a positive overflow infinity. */ | |
291 | ||
292 | static inline bool | |
58f9752a | 293 | is_positive_overflow_infinity (const_tree val) |
12df8a7e | 294 | { |
cb460086 RB |
295 | return (TREE_OVERFLOW_P (val) |
296 | && needs_overflow_infinity (TREE_TYPE (val)) | |
70b7b037 | 297 | && vrp_val_is_max (val)); |
12df8a7e ILT |
298 | } |
299 | ||
300 | /* Return whether VAL is a positive or negative overflow infinity. */ | |
301 | ||
302 | static inline bool | |
58f9752a | 303 | is_overflow_infinity (const_tree val) |
12df8a7e | 304 | { |
cb460086 RB |
305 | return (TREE_OVERFLOW_P (val) |
306 | && needs_overflow_infinity (TREE_TYPE (val)) | |
70b7b037 | 307 | && (vrp_val_is_min (val) || vrp_val_is_max (val))); |
12df8a7e ILT |
308 | } |
309 | ||
726a989a RB |
310 | /* Return whether STMT has a constant rhs that is_overflow_infinity. */ |
311 | ||
312 | static inline bool | |
355fe088 | 313 | stmt_overflow_infinity (gimple *stmt) |
726a989a RB |
314 | { |
315 | if (is_gimple_assign (stmt) | |
316 | && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) == | |
317 | GIMPLE_SINGLE_RHS) | |
318 | return is_overflow_infinity (gimple_assign_rhs1 (stmt)); | |
319 | return false; | |
320 | } | |
321 | ||
b80cca7b ILT |
322 | /* If VAL is now an overflow infinity, return VAL. Otherwise, return |
323 | the same value with TREE_OVERFLOW clear. This can be used to avoid | |
324 | confusing a regular value with an overflow value. */ | |
325 | ||
326 | static inline tree | |
327 | avoid_overflow_infinity (tree val) | |
328 | { | |
329 | if (!is_overflow_infinity (val)) | |
330 | return val; | |
331 | ||
70b7b037 RG |
332 | if (vrp_val_is_max (val)) |
333 | return vrp_val_max (TREE_TYPE (val)); | |
b80cca7b ILT |
334 | else |
335 | { | |
77a74ed7 | 336 | gcc_checking_assert (vrp_val_is_min (val)); |
70b7b037 | 337 | return vrp_val_min (TREE_TYPE (val)); |
b80cca7b ILT |
338 | } |
339 | } | |
340 | ||
12df8a7e | 341 | |
ef5ad3b7 RG |
342 | /* Set value range VR to VR_UNDEFINED. */ |
343 | ||
344 | static inline void | |
526ceb68 | 345 | set_value_range_to_undefined (value_range *vr) |
ef5ad3b7 RG |
346 | { |
347 | vr->type = VR_UNDEFINED; | |
348 | vr->min = vr->max = NULL_TREE; | |
349 | if (vr->equiv) | |
350 | bitmap_clear (vr->equiv); | |
351 | } | |
352 | ||
353 | ||
2ab8dbf4 RG |
354 | /* Set value range VR to VR_VARYING. */ |
355 | ||
356 | static inline void | |
526ceb68 | 357 | set_value_range_to_varying (value_range *vr) |
2ab8dbf4 RG |
358 | { |
359 | vr->type = VR_VARYING; | |
360 | vr->min = vr->max = NULL_TREE; | |
361 | if (vr->equiv) | |
362 | bitmap_clear (vr->equiv); | |
363 | } | |
364 | ||
365 | ||
227858d1 DN |
366 | /* Set value range VR to {T, MIN, MAX, EQUIV}. */ |
367 | ||
368 | static void | |
526ceb68 | 369 | set_value_range (value_range *vr, enum value_range_type t, tree min, |
227858d1 | 370 | tree max, bitmap equiv) |
0bca51f0 | 371 | { |
227858d1 | 372 | /* Check the validity of the range. */ |
b2b29377 MM |
373 | if (flag_checking |
374 | && (t == VR_RANGE || t == VR_ANTI_RANGE)) | |
0bca51f0 DN |
375 | { |
376 | int cmp; | |
377 | ||
378 | gcc_assert (min && max); | |
379 | ||
635bfae0 RB |
380 | gcc_assert ((!TREE_OVERFLOW_P (min) || is_overflow_infinity (min)) |
381 | && (!TREE_OVERFLOW_P (max) || is_overflow_infinity (max))); | |
382 | ||
0bca51f0 | 383 | if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE) |
e1f28918 | 384 | gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max)); |
0bca51f0 DN |
385 | |
386 | cmp = compare_values (min, max); | |
387 | gcc_assert (cmp == 0 || cmp == -1 || cmp == -2); | |
8cf781f0 ILT |
388 | |
389 | if (needs_overflow_infinity (TREE_TYPE (min))) | |
390 | gcc_assert (!is_overflow_infinity (min) | |
391 | || !is_overflow_infinity (max)); | |
0bca51f0 | 392 | } |
0bca51f0 | 393 | |
b2b29377 MM |
394 | if (flag_checking |
395 | && (t == VR_UNDEFINED || t == VR_VARYING)) | |
396 | { | |
397 | gcc_assert (min == NULL_TREE && max == NULL_TREE); | |
398 | gcc_assert (equiv == NULL || bitmap_empty_p (equiv)); | |
399 | } | |
0bca51f0 DN |
400 | |
401 | vr->type = t; | |
402 | vr->min = min; | |
403 | vr->max = max; | |
227858d1 DN |
404 | |
405 | /* Since updating the equivalence set involves deep copying the | |
406 | bitmaps, only do it if absolutely necessary. */ | |
f5052e29 RG |
407 | if (vr->equiv == NULL |
408 | && equiv != NULL) | |
227858d1 DN |
409 | vr->equiv = BITMAP_ALLOC (NULL); |
410 | ||
411 | if (equiv != vr->equiv) | |
412 | { | |
413 | if (equiv && !bitmap_empty_p (equiv)) | |
414 | bitmap_copy (vr->equiv, equiv); | |
415 | else | |
416 | bitmap_clear (vr->equiv); | |
417 | } | |
0bca51f0 DN |
418 | } |
419 | ||
420 | ||
2ab8dbf4 RG |
421 | /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}. |
422 | This means adjusting T, MIN and MAX representing the case of a | |
423 | wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX] | |
424 | as anti-rage ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges. | |
425 | In corner cases where MAX+1 or MIN-1 wraps this will fall back | |
426 | to varying. | |
427 | This routine exists to ease canonicalization in the case where we | |
428 | extract ranges from var + CST op limit. */ | |
0bca51f0 | 429 | |
2ab8dbf4 | 430 | static void |
526ceb68 | 431 | set_and_canonicalize_value_range (value_range *vr, enum value_range_type t, |
2ab8dbf4 | 432 | tree min, tree max, bitmap equiv) |
0bca51f0 | 433 | { |
ef5ad3b7 RG |
434 | /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */ |
435 | if (t == VR_UNDEFINED) | |
436 | { | |
437 | set_value_range_to_undefined (vr); | |
438 | return; | |
439 | } | |
440 | else if (t == VR_VARYING) | |
441 | { | |
442 | set_value_range_to_varying (vr); | |
443 | return; | |
444 | } | |
445 | ||
446 | /* Nothing to canonicalize for symbolic ranges. */ | |
447 | if (TREE_CODE (min) != INTEGER_CST | |
70b7b037 | 448 | || TREE_CODE (max) != INTEGER_CST) |
2ab8dbf4 RG |
449 | { |
450 | set_value_range (vr, t, min, max, equiv); | |
451 | return; | |
452 | } | |
12df8a7e | 453 | |
2ab8dbf4 RG |
454 | /* Wrong order for min and max, to swap them and the VR type we need |
455 | to adjust them. */ | |
2ab8dbf4 RG |
456 | if (tree_int_cst_lt (max, min)) |
457 | { | |
5717e1f6 JJ |
458 | tree one, tmp; |
459 | ||
460 | /* For one bit precision if max < min, then the swapped | |
461 | range covers all values, so for VR_RANGE it is varying and | |
462 | for VR_ANTI_RANGE empty range, so drop to varying as well. */ | |
463 | if (TYPE_PRECISION (TREE_TYPE (min)) == 1) | |
464 | { | |
465 | set_value_range_to_varying (vr); | |
466 | return; | |
467 | } | |
468 | ||
469 | one = build_int_cst (TREE_TYPE (min), 1); | |
470 | tmp = int_const_binop (PLUS_EXPR, max, one); | |
d35936ab | 471 | max = int_const_binop (MINUS_EXPR, min, one); |
70b7b037 RG |
472 | min = tmp; |
473 | ||
474 | /* There's one corner case, if we had [C+1, C] before we now have | |
475 | that again. But this represents an empty value range, so drop | |
476 | to varying in this case. */ | |
477 | if (tree_int_cst_lt (max, min)) | |
478 | { | |
479 | set_value_range_to_varying (vr); | |
480 | return; | |
481 | } | |
482 | ||
483 | t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE; | |
484 | } | |
485 | ||
486 | /* Anti-ranges that can be represented as ranges should be so. */ | |
487 | if (t == VR_ANTI_RANGE) | |
488 | { | |
489 | bool is_min = vrp_val_is_min (min); | |
490 | bool is_max = vrp_val_is_max (max); | |
491 | ||
492 | if (is_min && is_max) | |
493 | { | |
ef5ad3b7 RG |
494 | /* We cannot deal with empty ranges, drop to varying. |
495 | ??? This could be VR_UNDEFINED instead. */ | |
70b7b037 RG |
496 | set_value_range_to_varying (vr); |
497 | return; | |
498 | } | |
5717e1f6 | 499 | else if (TYPE_PRECISION (TREE_TYPE (min)) == 1 |
5717e1f6 JJ |
500 | && (is_min || is_max)) |
501 | { | |
1001fb60 RB |
502 | /* Non-empty boolean ranges can always be represented |
503 | as a singleton range. */ | |
504 | if (is_min) | |
505 | min = max = vrp_val_max (TREE_TYPE (min)); | |
5717e1f6 | 506 | else |
1001fb60 RB |
507 | min = max = vrp_val_min (TREE_TYPE (min)); |
508 | t = VR_RANGE; | |
5717e1f6 | 509 | } |
70b7b037 RG |
510 | else if (is_min |
511 | /* As a special exception preserve non-null ranges. */ | |
512 | && !(TYPE_UNSIGNED (TREE_TYPE (min)) | |
513 | && integer_zerop (max))) | |
514 | { | |
515 | tree one = build_int_cst (TREE_TYPE (max), 1); | |
d35936ab | 516 | min = int_const_binop (PLUS_EXPR, max, one); |
70b7b037 RG |
517 | max = vrp_val_max (TREE_TYPE (max)); |
518 | t = VR_RANGE; | |
519 | } | |
520 | else if (is_max) | |
521 | { | |
522 | tree one = build_int_cst (TREE_TYPE (min), 1); | |
d35936ab | 523 | max = int_const_binop (MINUS_EXPR, min, one); |
70b7b037 RG |
524 | min = vrp_val_min (TREE_TYPE (min)); |
525 | t = VR_RANGE; | |
526 | } | |
2ab8dbf4 RG |
527 | } |
528 | ||
ef5ad3b7 RG |
529 | /* Drop [-INF(OVF), +INF(OVF)] to varying. */ |
530 | if (needs_overflow_infinity (TREE_TYPE (min)) | |
531 | && is_overflow_infinity (min) | |
532 | && is_overflow_infinity (max)) | |
533 | { | |
534 | set_value_range_to_varying (vr); | |
535 | return; | |
536 | } | |
537 | ||
2ab8dbf4 RG |
538 | set_value_range (vr, t, min, max, equiv); |
539 | } | |
540 | ||
541 | /* Copy value range FROM into value range TO. */ | |
b16caf72 JL |
542 | |
543 | static inline void | |
526ceb68 | 544 | copy_value_range (value_range *to, value_range *from) |
b16caf72 | 545 | { |
2ab8dbf4 | 546 | set_value_range (to, from->type, from->min, from->max, from->equiv); |
12df8a7e ILT |
547 | } |
548 | ||
8cf781f0 ILT |
549 | /* Set value range VR to a single value. This function is only called |
550 | with values we get from statements, and exists to clear the | |
551 | TREE_OVERFLOW flag so that we don't think we have an overflow | |
552 | infinity when we shouldn't. */ | |
553 | ||
554 | static inline void | |
526ceb68 | 555 | set_value_range_to_value (value_range *vr, tree val, bitmap equiv) |
8cf781f0 ILT |
556 | { |
557 | gcc_assert (is_gimple_min_invariant (val)); | |
635bfae0 RB |
558 | if (TREE_OVERFLOW_P (val)) |
559 | val = drop_tree_overflow (val); | |
b60b4711 | 560 | set_value_range (vr, VR_RANGE, val, val, equiv); |
8cf781f0 ILT |
561 | } |
562 | ||
12df8a7e | 563 | /* Set value range VR to a non-negative range of type TYPE. |
110abdbc | 564 | OVERFLOW_INFINITY indicates whether to use an overflow infinity |
12df8a7e ILT |
565 | rather than TYPE_MAX_VALUE; this should be true if we determine |
566 | that the range is nonnegative based on the assumption that signed | |
567 | overflow does not occur. */ | |
568 | ||
569 | static inline void | |
526ceb68 | 570 | set_value_range_to_nonnegative (value_range *vr, tree type, |
12df8a7e ILT |
571 | bool overflow_infinity) |
572 | { | |
573 | tree zero; | |
574 | ||
575 | if (overflow_infinity && !supports_overflow_infinity (type)) | |
576 | { | |
577 | set_value_range_to_varying (vr); | |
578 | return; | |
579 | } | |
580 | ||
581 | zero = build_int_cst (type, 0); | |
582 | set_value_range (vr, VR_RANGE, zero, | |
583 | (overflow_infinity | |
584 | ? positive_overflow_infinity (type) | |
585 | : TYPE_MAX_VALUE (type)), | |
586 | vr->equiv); | |
b16caf72 | 587 | } |
227858d1 DN |
588 | |
589 | /* Set value range VR to a non-NULL range of type TYPE. */ | |
590 | ||
591 | static inline void | |
526ceb68 | 592 | set_value_range_to_nonnull (value_range *vr, tree type) |
227858d1 DN |
593 | { |
594 | tree zero = build_int_cst (type, 0); | |
595 | set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv); | |
596 | } | |
597 | ||
598 | ||
599 | /* Set value range VR to a NULL range of type TYPE. */ | |
600 | ||
601 | static inline void | |
526ceb68 | 602 | set_value_range_to_null (value_range *vr, tree type) |
227858d1 | 603 | { |
b60b4711 | 604 | set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv); |
227858d1 DN |
605 | } |
606 | ||
607 | ||
31ab1cc9 RG |
608 | /* Set value range VR to a range of a truthvalue of type TYPE. */ |
609 | ||
610 | static inline void | |
526ceb68 | 611 | set_value_range_to_truthvalue (value_range *vr, tree type) |
31ab1cc9 RG |
612 | { |
613 | if (TYPE_PRECISION (type) == 1) | |
614 | set_value_range_to_varying (vr); | |
615 | else | |
616 | set_value_range (vr, VR_RANGE, | |
617 | build_int_cst (type, 0), build_int_cst (type, 1), | |
618 | vr->equiv); | |
619 | } | |
620 | ||
621 | ||
193a3681 JJ |
622 | /* If abs (min) < abs (max), set VR to [-max, max], if |
623 | abs (min) >= abs (max), set VR to [-min, min]. */ | |
624 | ||
625 | static void | |
526ceb68 | 626 | abs_extent_range (value_range *vr, tree min, tree max) |
193a3681 JJ |
627 | { |
628 | int cmp; | |
629 | ||
630 | gcc_assert (TREE_CODE (min) == INTEGER_CST); | |
631 | gcc_assert (TREE_CODE (max) == INTEGER_CST); | |
632 | gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min))); | |
633 | gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min))); | |
634 | min = fold_unary (ABS_EXPR, TREE_TYPE (min), min); | |
635 | max = fold_unary (ABS_EXPR, TREE_TYPE (max), max); | |
636 | if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max)) | |
637 | { | |
638 | set_value_range_to_varying (vr); | |
639 | return; | |
640 | } | |
641 | cmp = compare_values (min, max); | |
642 | if (cmp == -1) | |
643 | min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max); | |
644 | else if (cmp == 0 || cmp == 1) | |
645 | { | |
646 | max = min; | |
647 | min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min); | |
648 | } | |
649 | else | |
650 | { | |
651 | set_value_range_to_varying (vr); | |
652 | return; | |
653 | } | |
654 | set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL); | |
655 | } | |
656 | ||
657 | ||
b8698a0f | 658 | /* Return value range information for VAR. |
b16caf72 JL |
659 | |
660 | If we have no values ranges recorded (ie, VRP is not running), then | |
661 | return NULL. Otherwise create an empty range if none existed for VAR. */ | |
0bca51f0 | 662 | |
526ceb68 | 663 | static value_range * |
58f9752a | 664 | get_value_range (const_tree var) |
0bca51f0 | 665 | { |
526ceb68 | 666 | static const value_range vr_const_varying |
d9256277 | 667 | = { VR_VARYING, NULL_TREE, NULL_TREE, NULL }; |
526ceb68 | 668 | value_range *vr; |
0bca51f0 | 669 | tree sym; |
227858d1 | 670 | unsigned ver = SSA_NAME_VERSION (var); |
0bca51f0 | 671 | |
b16caf72 JL |
672 | /* If we have no recorded ranges, then return NULL. */ |
673 | if (! vr_value) | |
674 | return NULL; | |
675 | ||
d9256277 RG |
676 | /* If we query the range for a new SSA name return an unmodifiable VARYING. |
677 | We should get here at most from the substitute-and-fold stage which | |
678 | will never try to change values. */ | |
679 | if (ver >= num_vr_values) | |
526ceb68 | 680 | return CONST_CAST (value_range *, &vr_const_varying); |
d9256277 | 681 | |
227858d1 | 682 | vr = vr_value[ver]; |
0bca51f0 DN |
683 | if (vr) |
684 | return vr; | |
685 | ||
d9256277 RG |
686 | /* After propagation finished do not allocate new value-ranges. */ |
687 | if (values_propagated) | |
526ceb68 | 688 | return CONST_CAST (value_range *, &vr_const_varying); |
d9256277 | 689 | |
0bca51f0 | 690 | /* Create a default value range. */ |
526ceb68 | 691 | vr_value[ver] = vr = XCNEW (value_range); |
0bca51f0 | 692 | |
f5052e29 RG |
693 | /* Defer allocating the equivalence set. */ |
694 | vr->equiv = NULL; | |
227858d1 | 695 | |
a9b332d4 RG |
696 | /* If VAR is a default definition of a parameter, the variable can |
697 | take any value in VAR's type. */ | |
7a04f01c | 698 | if (SSA_NAME_IS_DEFAULT_DEF (var)) |
462508dd | 699 | { |
6b4a85ad | 700 | sym = SSA_NAME_VAR (var); |
7a04f01c JJ |
701 | if (TREE_CODE (sym) == PARM_DECL) |
702 | { | |
703 | /* Try to use the "nonnull" attribute to create ~[0, 0] | |
704 | anti-ranges for pointers. Note that this is only valid with | |
705 | default definitions of PARM_DECLs. */ | |
706 | if (POINTER_TYPE_P (TREE_TYPE (sym)) | |
707 | && nonnull_arg_p (sym)) | |
708 | set_value_range_to_nonnull (vr, TREE_TYPE (sym)); | |
709 | else | |
710 | set_value_range_to_varying (vr); | |
711 | } | |
712 | else if (TREE_CODE (sym) == RESULT_DECL | |
713 | && DECL_BY_REFERENCE (sym)) | |
462508dd | 714 | set_value_range_to_nonnull (vr, TREE_TYPE (sym)); |
462508dd | 715 | } |
0bca51f0 DN |
716 | |
717 | return vr; | |
718 | } | |
719 | ||
1ce35d26 RG |
720 | /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */ |
721 | ||
722 | static inline bool | |
58f9752a | 723 | vrp_operand_equal_p (const_tree val1, const_tree val2) |
1ce35d26 | 724 | { |
12df8a7e ILT |
725 | if (val1 == val2) |
726 | return true; | |
727 | if (!val1 || !val2 || !operand_equal_p (val1, val2, 0)) | |
728 | return false; | |
cb460086 | 729 | return is_overflow_infinity (val1) == is_overflow_infinity (val2); |
1ce35d26 RG |
730 | } |
731 | ||
732 | /* Return true, if the bitmaps B1 and B2 are equal. */ | |
733 | ||
734 | static inline bool | |
22ea9ec0 | 735 | vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2) |
1ce35d26 RG |
736 | { |
737 | return (b1 == b2 | |
92b1d23b RG |
738 | || ((!b1 || bitmap_empty_p (b1)) |
739 | && (!b2 || bitmap_empty_p (b2))) | |
1ce35d26 RG |
740 | || (b1 && b2 |
741 | && bitmap_equal_p (b1, b2))); | |
742 | } | |
0bca51f0 | 743 | |
227858d1 DN |
744 | /* Update the value range and equivalence set for variable VAR to |
745 | NEW_VR. Return true if NEW_VR is different from VAR's previous | |
746 | value. | |
747 | ||
748 | NOTE: This function assumes that NEW_VR is a temporary value range | |
749 | object created for the sole purpose of updating VAR's range. The | |
750 | storage used by the equivalence set from NEW_VR will be freed by | |
751 | this function. Do not call update_value_range when NEW_VR | |
752 | is the range object associated with another SSA name. */ | |
0bca51f0 DN |
753 | |
754 | static inline bool | |
526ceb68 | 755 | update_value_range (const_tree var, value_range *new_vr) |
0bca51f0 | 756 | { |
526ceb68 | 757 | value_range *old_vr; |
227858d1 DN |
758 | bool is_new; |
759 | ||
755359b7 RB |
760 | /* If there is a value-range on the SSA name from earlier analysis |
761 | factor that in. */ | |
762 | if (INTEGRAL_TYPE_P (TREE_TYPE (var))) | |
763 | { | |
764 | wide_int min, max; | |
765 | value_range_type rtype = get_range_info (var, &min, &max); | |
766 | if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE) | |
767 | { | |
526ceb68 | 768 | value_range nr; |
755359b7 RB |
769 | nr.type = rtype; |
770 | nr.min = wide_int_to_tree (TREE_TYPE (var), min); | |
771 | nr.max = wide_int_to_tree (TREE_TYPE (var), max); | |
772 | nr.equiv = NULL; | |
773 | vrp_intersect_ranges (new_vr, &nr); | |
774 | } | |
775 | } | |
776 | ||
227858d1 DN |
777 | /* Update the value range, if necessary. */ |
778 | old_vr = get_value_range (var); | |
779 | is_new = old_vr->type != new_vr->type | |
1ce35d26 RG |
780 | || !vrp_operand_equal_p (old_vr->min, new_vr->min) |
781 | || !vrp_operand_equal_p (old_vr->max, new_vr->max) | |
782 | || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv); | |
0bca51f0 | 783 | |
227858d1 | 784 | if (is_new) |
43b1bad6 RB |
785 | { |
786 | /* Do not allow transitions up the lattice. The following | |
9c3cb360 | 787 | is slightly more awkward than just new_vr->type < old_vr->type |
43b1bad6 RB |
788 | because VR_RANGE and VR_ANTI_RANGE need to be considered |
789 | the same. We may not have is_new when transitioning to | |
9c3cb360 JJ |
790 | UNDEFINED. If old_vr->type is VARYING, we shouldn't be |
791 | called. */ | |
792 | if (new_vr->type == VR_UNDEFINED) | |
793 | { | |
794 | BITMAP_FREE (new_vr->equiv); | |
795 | set_value_range_to_varying (old_vr); | |
796 | set_value_range_to_varying (new_vr); | |
797 | return true; | |
798 | } | |
43b1bad6 RB |
799 | else |
800 | set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max, | |
801 | new_vr->equiv); | |
802 | } | |
0bca51f0 | 803 | |
227858d1 | 804 | BITMAP_FREE (new_vr->equiv); |
0bca51f0 | 805 | |
227858d1 DN |
806 | return is_new; |
807 | } | |
0bca51f0 | 808 | |
0bca51f0 | 809 | |
f5052e29 RG |
810 | /* Add VAR and VAR's equivalence set to EQUIV. This is the central |
811 | point where equivalence processing can be turned on/off. */ | |
0bca51f0 | 812 | |
227858d1 | 813 | static void |
58f9752a | 814 | add_equivalence (bitmap *equiv, const_tree var) |
227858d1 DN |
815 | { |
816 | unsigned ver = SSA_NAME_VERSION (var); | |
526ceb68 | 817 | value_range *vr = vr_value[ver]; |
0bca51f0 | 818 | |
f5052e29 RG |
819 | if (*equiv == NULL) |
820 | *equiv = BITMAP_ALLOC (NULL); | |
821 | bitmap_set_bit (*equiv, ver); | |
227858d1 | 822 | if (vr && vr->equiv) |
f5052e29 | 823 | bitmap_ior_into (*equiv, vr->equiv); |
0bca51f0 DN |
824 | } |
825 | ||
826 | ||
827 | /* Return true if VR is ~[0, 0]. */ | |
828 | ||
829 | static inline bool | |
526ceb68 | 830 | range_is_nonnull (value_range *vr) |
0bca51f0 DN |
831 | { |
832 | return vr->type == VR_ANTI_RANGE | |
833 | && integer_zerop (vr->min) | |
834 | && integer_zerop (vr->max); | |
835 | } | |
836 | ||
837 | ||
838 | /* Return true if VR is [0, 0]. */ | |
839 | ||
840 | static inline bool | |
526ceb68 | 841 | range_is_null (value_range *vr) |
0bca51f0 DN |
842 | { |
843 | return vr->type == VR_RANGE | |
844 | && integer_zerop (vr->min) | |
845 | && integer_zerop (vr->max); | |
846 | } | |
847 | ||
330af32c JZ |
848 | /* Return true if max and min of VR are INTEGER_CST. It's not necessary |
849 | a singleton. */ | |
850 | ||
851 | static inline bool | |
526ceb68 | 852 | range_int_cst_p (value_range *vr) |
330af32c JZ |
853 | { |
854 | return (vr->type == VR_RANGE | |
855 | && TREE_CODE (vr->max) == INTEGER_CST | |
a75f5017 | 856 | && TREE_CODE (vr->min) == INTEGER_CST); |
330af32c JZ |
857 | } |
858 | ||
859 | /* Return true if VR is a INTEGER_CST singleton. */ | |
860 | ||
861 | static inline bool | |
526ceb68 | 862 | range_int_cst_singleton_p (value_range *vr) |
330af32c JZ |
863 | { |
864 | return (range_int_cst_p (vr) | |
3f5c390d RB |
865 | && !is_overflow_infinity (vr->min) |
866 | && !is_overflow_infinity (vr->max) | |
330af32c JZ |
867 | && tree_int_cst_equal (vr->min, vr->max)); |
868 | } | |
0bca51f0 | 869 | |
227858d1 | 870 | /* Return true if value range VR involves at least one symbol. */ |
0bca51f0 | 871 | |
227858d1 | 872 | static inline bool |
526ceb68 | 873 | symbolic_range_p (value_range *vr) |
0bca51f0 | 874 | { |
227858d1 DN |
875 | return (!is_gimple_min_invariant (vr->min) |
876 | || !is_gimple_min_invariant (vr->max)); | |
0bca51f0 DN |
877 | } |
878 | ||
/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
   otherwise.  We only handle additive operations and set NEG to true if the
   symbol is negated and INV to the invariant part, if any.

   I.e. T is decomposed as '[-]SYM [+-] INV' or plain '[-]SYM'; anything
   that does not match those shapes yields NULL_TREE.  */

static tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
  bool neg_;
  tree inv_;

  /* Peel off the invariant operand of a top-level additive operation.
     The remaining operand must then be the (possibly negated) symbol.  */
  if (TREE_CODE (t) == PLUS_EXPR
      || TREE_CODE (t) == POINTER_PLUS_EXPR
      || TREE_CODE (t) == MINUS_EXPR)
    {
      if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
	{
	  /* INV - SYM: the symbol ends up negated.  */
	  neg_ = (TREE_CODE (t) == MINUS_EXPR);
	  inv_ = TREE_OPERAND (t, 0);
	  t = TREE_OPERAND (t, 1);
	}
      else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	{
	  /* SYM +- INV: note a MINUS_EXPR here does not negate the
	     symbol, only the invariant side differs.  */
	  neg_ = false;
	  inv_ = TREE_OPERAND (t, 1);
	  t = TREE_OPERAND (t, 0);
	}
      else
	/* Neither operand is invariant: more than one symbol.  */
	return NULL_TREE;
    }
  else
    {
      neg_ = false;
      inv_ = NULL_TREE;
    }

  /* An explicit negation flips (or sets) the sign of the symbol.  */
  if (TREE_CODE (t) == NEGATE_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      neg_ = !neg_;
    }

  if (TREE_CODE (t) != SSA_NAME)
    return NULL_TREE;

  /* Only commit the outputs once we know the decomposition succeeded.  */
  *neg = neg_;
  *inv = inv_;
  return t;
}
927 | ||
928 | /* The reverse operation: build a symbolic expression with TYPE | |
929 | from symbol SYM, negated according to NEG, and invariant INV. */ | |
930 | ||
931 | static tree | |
932 | build_symbolic_expr (tree type, tree sym, bool neg, tree inv) | |
933 | { | |
934 | const bool pointer_p = POINTER_TYPE_P (type); | |
935 | tree t = sym; | |
936 | ||
937 | if (neg) | |
938 | t = build1 (NEGATE_EXPR, type, t); | |
939 | ||
940 | if (integer_zerop (inv)) | |
941 | return t; | |
942 | ||
943 | return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv); | |
944 | } | |
945 | ||
946 | /* Return true if value range VR involves exactly one symbol SYM. */ | |
947 | ||
948 | static bool | |
526ceb68 | 949 | symbolic_range_based_on_p (value_range *vr, const_tree sym) |
e76340be EB |
950 | { |
951 | bool neg, min_has_symbol, max_has_symbol; | |
952 | tree inv; | |
953 | ||
954 | if (is_gimple_min_invariant (vr->min)) | |
955 | min_has_symbol = false; | |
956 | else if (get_single_symbol (vr->min, &neg, &inv) == sym) | |
957 | min_has_symbol = true; | |
958 | else | |
959 | return false; | |
960 | ||
961 | if (is_gimple_min_invariant (vr->max)) | |
962 | max_has_symbol = false; | |
963 | else if (get_single_symbol (vr->max, &neg, &inv) == sym) | |
964 | max_has_symbol = true; | |
965 | else | |
966 | return false; | |
967 | ||
968 | return (min_has_symbol || max_has_symbol); | |
969 | } | |
970 | ||
110abdbc | 971 | /* Return true if value range VR uses an overflow infinity. */ |
b16caf72 | 972 | |
12df8a7e | 973 | static inline bool |
526ceb68 | 974 | overflow_infinity_range_p (value_range *vr) |
b16caf72 | 975 | { |
12df8a7e ILT |
976 | return (vr->type == VR_RANGE |
977 | && (is_overflow_infinity (vr->min) | |
978 | || is_overflow_infinity (vr->max))); | |
979 | } | |
6ac01510 | 980 | |
0c948c27 ILT |
981 | /* Return false if we can not make a valid comparison based on VR; |
982 | this will be the case if it uses an overflow infinity and overflow | |
983 | is not undefined (i.e., -fno-strict-overflow is in effect). | |
984 | Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR | |
985 | uses an overflow infinity. */ | |
986 | ||
987 | static bool | |
526ceb68 | 988 | usable_range_p (value_range *vr, bool *strict_overflow_p) |
0c948c27 ILT |
989 | { |
990 | gcc_assert (vr->type == VR_RANGE); | |
991 | if (is_overflow_infinity (vr->min)) | |
992 | { | |
993 | *strict_overflow_p = true; | |
994 | if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min))) | |
995 | return false; | |
996 | } | |
997 | if (is_overflow_infinity (vr->max)) | |
998 | { | |
999 | *strict_overflow_p = true; | |
1000 | if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max))) | |
1001 | return false; | |
1002 | } | |
1003 | return true; | |
1004 | } | |
1005 | ||
726a989a RB |
1006 | /* Return true if the result of assignment STMT is know to be non-zero. |
1007 | If the return value is based on the assumption that signed overflow is | |
1008 | undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change | |
1009 | *STRICT_OVERFLOW_P.*/ | |
1010 | ||
1011 | static bool | |
355fe088 | 1012 | gimple_assign_nonzero_warnv_p (gimple *stmt, bool *strict_overflow_p) |
726a989a RB |
1013 | { |
1014 | enum tree_code code = gimple_assign_rhs_code (stmt); | |
1015 | switch (get_gimple_rhs_class (code)) | |
1016 | { | |
1017 | case GIMPLE_UNARY_RHS: | |
1018 | return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt), | |
1019 | gimple_expr_type (stmt), | |
1020 | gimple_assign_rhs1 (stmt), | |
1021 | strict_overflow_p); | |
1022 | case GIMPLE_BINARY_RHS: | |
1023 | return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt), | |
1024 | gimple_expr_type (stmt), | |
1025 | gimple_assign_rhs1 (stmt), | |
1026 | gimple_assign_rhs2 (stmt), | |
1027 | strict_overflow_p); | |
0354c0c7 BS |
1028 | case GIMPLE_TERNARY_RHS: |
1029 | return false; | |
726a989a RB |
1030 | case GIMPLE_SINGLE_RHS: |
1031 | return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt), | |
1032 | strict_overflow_p); | |
1033 | case GIMPLE_INVALID_RHS: | |
1034 | gcc_unreachable (); | |
1035 | default: | |
1036 | gcc_unreachable (); | |
1037 | } | |
1038 | } | |
1039 | ||
826cacfe | 1040 | /* Return true if STMT is known to compute a non-zero value. |
726a989a RB |
1041 | If the return value is based on the assumption that signed overflow is |
1042 | undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change | |
1043 | *STRICT_OVERFLOW_P.*/ | |
1044 | ||
1045 | static bool | |
355fe088 | 1046 | gimple_stmt_nonzero_warnv_p (gimple *stmt, bool *strict_overflow_p) |
726a989a RB |
1047 | { |
1048 | switch (gimple_code (stmt)) | |
1049 | { | |
1050 | case GIMPLE_ASSIGN: | |
1051 | return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p); | |
1052 | case GIMPLE_CALL: | |
2284b034 MG |
1053 | { |
1054 | tree fndecl = gimple_call_fndecl (stmt); | |
1055 | if (!fndecl) return false; | |
1056 | if (flag_delete_null_pointer_checks && !flag_check_new | |
1057 | && DECL_IS_OPERATOR_NEW (fndecl) | |
1058 | && !TREE_NOTHROW (fndecl)) | |
1059 | return true; | |
10706779 JH |
1060 | /* References are always non-NULL. */ |
1061 | if (flag_delete_null_pointer_checks | |
1062 | && TREE_CODE (TREE_TYPE (fndecl)) == REFERENCE_TYPE) | |
1063 | return true; | |
826cacfe MG |
1064 | if (flag_delete_null_pointer_checks && |
1065 | lookup_attribute ("returns_nonnull", | |
1066 | TYPE_ATTRIBUTES (gimple_call_fntype (stmt)))) | |
1067 | return true; | |
2284b034 MG |
1068 | return gimple_alloca_call_p (stmt); |
1069 | } | |
726a989a RB |
1070 | default: |
1071 | gcc_unreachable (); | |
1072 | } | |
1073 | } | |
1074 | ||
12df8a7e | 1075 | /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges |
227858d1 | 1076 | obtained so far. */ |
0bca51f0 | 1077 | |
227858d1 | 1078 | static bool |
355fe088 | 1079 | vrp_stmt_computes_nonzero (gimple *stmt, bool *strict_overflow_p) |
0bca51f0 | 1080 | { |
726a989a | 1081 | if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p)) |
227858d1 | 1082 | return true; |
0bca51f0 | 1083 | |
227858d1 DN |
1084 | /* If we have an expression of the form &X->a, then the expression |
1085 | is nonnull if X is nonnull. */ | |
726a989a RB |
1086 | if (is_gimple_assign (stmt) |
1087 | && gimple_assign_rhs_code (stmt) == ADDR_EXPR) | |
227858d1 | 1088 | { |
726a989a | 1089 | tree expr = gimple_assign_rhs1 (stmt); |
227858d1 | 1090 | tree base = get_base_address (TREE_OPERAND (expr, 0)); |
0bca51f0 | 1091 | |
227858d1 | 1092 | if (base != NULL_TREE |
70f34814 | 1093 | && TREE_CODE (base) == MEM_REF |
227858d1 DN |
1094 | && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME) |
1095 | { | |
526ceb68 | 1096 | value_range *vr = get_value_range (TREE_OPERAND (base, 0)); |
227858d1 DN |
1097 | if (range_is_nonnull (vr)) |
1098 | return true; | |
1099 | } | |
1100 | } | |
b565d777 | 1101 | |
227858d1 | 1102 | return false; |
b565d777 DN |
1103 | } |
1104 | ||
04dce5a4 ZD |
1105 | /* Returns true if EXPR is a valid value (as expected by compare_values) -- |
1106 | a gimple invariant, or SSA_NAME +- CST. */ | |
1107 | ||
1108 | static bool | |
1109 | valid_value_p (tree expr) | |
1110 | { | |
1111 | if (TREE_CODE (expr) == SSA_NAME) | |
1112 | return true; | |
1113 | ||
1114 | if (TREE_CODE (expr) == PLUS_EXPR | |
1115 | || TREE_CODE (expr) == MINUS_EXPR) | |
1116 | return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME | |
1117 | && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST); | |
b8698a0f | 1118 | |
04dce5a4 ZD |
1119 | return is_gimple_min_invariant (expr); |
1120 | } | |
b565d777 | 1121 | |
/* Return
   1 if VAL < VAL2
   0 if !(VAL < VAL2)
   -2 if those are incomparable.  */
static inline int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    return tree_int_cst_lt (val, val2);
  else
    {
      tree tcmp;

      /* Folding may warn about overflow; we do not want user-visible
	 warnings from this internal query.  */
      fold_defer_overflow_warnings ();

      tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

      fold_undefer_and_ignore_overflow_warnings ();

      /* Folding failed or did not produce a constant: incomparable.  */
      if (!tcmp
	  || TREE_CODE (tcmp) != INTEGER_CST)
	return -2;

      if (!integer_zerop (tcmp))
	return 1;
    }

  /* val >= val2, not considering overflow infinity.  */
  /* An overflow -INF is strictly below everything except another
     overflow -INF; likewise an overflow +INF is strictly above
     everything except another overflow +INF.  */
  if (is_negative_overflow_infinity (val))
    return is_negative_overflow_infinity (val2) ? 0 : 1;
  else if (is_positive_overflow_infinity (val2))
    return is_positive_overflow_infinity (val) ? 0 : 1;

  return 0;
}
1158 | ||
/* Compare two values VAL1 and VAL2.  Return

   -2 if VAL1 and VAL2 cannot be compared at compile-time,
   -1 if VAL1 < VAL2,
    0 if VAL1 == VAL2,
   +1 if VAL1 > VAL2, and
   +2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

static int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
	      == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  /* Symbolic comparison: both values are of the form
     '[-]NAME [+-] CST' or plain '[-]NAME'.  */
  if ((TREE_CODE (val1) == SSA_NAME
       || (TREE_CODE (val1) == NEGATE_EXPR
	   && TREE_CODE (TREE_OPERAND (val1, 0)) == SSA_NAME)
       || TREE_CODE (val1) == PLUS_EXPR
       || TREE_CODE (val1) == MINUS_EXPR)
      && (TREE_CODE (val2) == SSA_NAME
	  || (TREE_CODE (val2) == NEGATE_EXPR
	      && TREE_CODE (TREE_OPERAND (val2, 0)) == SSA_NAME)
	  || TREE_CODE (val2) == PLUS_EXPR
	  || TREE_CODE (val2) == MINUS_EXPR))
    {
      tree n1, c1, n2, c2;
      enum tree_code code1, code2;

      /* If VAL1 and VAL2 are of the form '[-]NAME [+-] CST' or 'NAME',
	 return -1 or +1 accordingly.  If VAL1 and VAL2 don't use the
	 same name, return -2.  */
      if (TREE_CODE (val1) == SSA_NAME || TREE_CODE (val1) == NEGATE_EXPR)
	{
	  code1 = SSA_NAME;
	  n1 = val1;
	  c1 = NULL_TREE;
	}
      else
	{
	  code1 = TREE_CODE (val1);
	  n1 = TREE_OPERAND (val1, 0);
	  c1 = TREE_OPERAND (val1, 1);
	  /* Canonicalize a negative constant: NAME + -CST becomes
	     NAME - CST and vice versa, so only positive constants
	     need to be compared below.  */
	  if (tree_int_cst_sgn (c1) == -1)
	    {
	      if (is_negative_overflow_infinity (c1))
		return -2;
	      c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
	      if (!c1)
		return -2;
	      code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
	    }
	}

      if (TREE_CODE (val2) == SSA_NAME || TREE_CODE (val2) == NEGATE_EXPR)
	{
	  code2 = SSA_NAME;
	  n2 = val2;
	  c2 = NULL_TREE;
	}
      else
	{
	  code2 = TREE_CODE (val2);
	  n2 = TREE_OPERAND (val2, 0);
	  c2 = TREE_OPERAND (val2, 1);
	  /* Same canonicalization as for C1 above.  */
	  if (tree_int_cst_sgn (c2) == -1)
	    {
	      if (is_negative_overflow_infinity (c2))
		return -2;
	      c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
	      if (!c2)
		return -2;
	      code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
	    }
	}

      /* Both values must use the same name.  */
      if (TREE_CODE (n1) == NEGATE_EXPR && TREE_CODE (n2) == NEGATE_EXPR)
	{
	  n1 = TREE_OPERAND (n1, 0);
	  n2 = TREE_OPERAND (n2, 0);
	}
      if (n1 != n2)
	return -2;

      if (code1 == SSA_NAME && code2 == SSA_NAME)
	/* NAME == NAME  */
	return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
	return -2;

      if (strict_overflow_p != NULL
	  /* Symbolic range building sets TREE_NO_WARNING to declare
	     that overflow doesn't happen.  */
	  && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
	  && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      if (code1 == SSA_NAME)
	{
	  if (code2 == PLUS_EXPR)
	    /* NAME < NAME + CST  */
	    return -1;
	  else if (code2 == MINUS_EXPR)
	    /* NAME > NAME - CST  */
	    return 1;
	}
      else if (code1 == PLUS_EXPR)
	{
	  if (code2 == SSA_NAME)
	    /* NAME + CST > NAME  */
	    return 1;
	  else if (code2 == PLUS_EXPR)
	    /* NAME + CST1 > NAME + CST2, if CST1 > CST2  */
	    return compare_values_warnv (c1, c2, strict_overflow_p);
	  else if (code2 == MINUS_EXPR)
	    /* NAME + CST1 > NAME - CST2  */
	    return 1;
	}
      else if (code1 == MINUS_EXPR)
	{
	  if (code2 == SSA_NAME)
	    /* NAME - CST < NAME  */
	    return -1;
	  else if (code2 == PLUS_EXPR)
	    /* NAME - CST1 < NAME + CST2  */
	    return -1;
	  else if (code2 == MINUS_EXPR)
	    /* NAME - CST1 > NAME - CST2, if CST1 < CST2.  Notice that
	       C1 and C2 are swapped in the call to compare_values.  */
	    return compare_values_warnv (c2, c1, strict_overflow_p);
	}

      gcc_unreachable ();
    }

  /* We cannot compare non-constants.  */
  if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values, except for overflow
	 infinities.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	{
	  if (strict_overflow_p != NULL)
	    *strict_overflow_p = true;
	  /* Overflow infinities sort below/above everything else of
	     their sign; two infinities of the same sign compare equal.  */
	  if (is_negative_overflow_infinity (val1))
	    return is_negative_overflow_infinity (val2) ? 0 : -1;
	  else if (is_negative_overflow_infinity (val2))
	    return 1;
	  else if (is_positive_overflow_infinity (val1))
	    return is_positive_overflow_infinity (val2) ? 0 : 1;
	  else if (is_positive_overflow_infinity (val2))
	    return -1;
	  return -2;
	}

      return tree_int_cst_compare (val1, val2);
    }
  else
    {
      tree t;

      /* First see if VAL1 and VAL2 are not the same.  */
      if (val1 == val2 || operand_equal_p (val1, val2, 0))
	return 0;

      /* If VAL1 is a lower address than VAL2, return -1.  */
      if (operand_less_p (val1, val2) == 1)
	return -1;

      /* If VAL1 is a higher address than VAL2, return +1.  */
      if (operand_less_p (val2, val1) == 1)
	return 1;

      /* If VAL1 is different than VAL2, return +2.
	 For integer constants we either have already returned -1 or 1
	 or they are equivalent.  We still might succeed in proving
	 something about non-trivial operands.  */
      if (TREE_CODE (val1) != INTEGER_CST
	  || TREE_CODE (val2) != INTEGER_CST)
	{
	  t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
	  if (t && integer_onep (t))
	    return 2;
	}

      return -2;
    }
}
1367 | ||
0c948c27 ILT |
1368 | /* Compare values like compare_values_warnv, but treat comparisons of |
1369 | nonconstants which rely on undefined overflow as incomparable. */ | |
12df8a7e ILT |
1370 | |
1371 | static int | |
1372 | compare_values (tree val1, tree val2) | |
1373 | { | |
1374 | bool sop; | |
1375 | int ret; | |
1376 | ||
1377 | sop = false; | |
1378 | ret = compare_values_warnv (val1, val2, &sop); | |
0c948c27 ILT |
1379 | if (sop |
1380 | && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))) | |
12df8a7e ILT |
1381 | ret = -2; |
1382 | return ret; | |
1383 | } | |
1384 | ||
0bca51f0 | 1385 | |
e8f808b3 RG |
1386 | /* Return 1 if VAL is inside value range MIN <= VAL <= MAX, |
1387 | 0 if VAL is not inside [MIN, MAX], | |
c83033e7 DN |
1388 | -2 if we cannot tell either way. |
1389 | ||
6b3c76a9 JH |
1390 | Benchmark compile/20001226-1.c compilation time after changing this |
1391 | function. */ | |
0bca51f0 DN |
1392 | |
1393 | static inline int | |
e8f808b3 | 1394 | value_inside_range (tree val, tree min, tree max) |
0bca51f0 | 1395 | { |
6b3c76a9 | 1396 | int cmp1, cmp2; |
0bca51f0 | 1397 | |
e8f808b3 | 1398 | cmp1 = operand_less_p (val, min); |
6b3c76a9 | 1399 | if (cmp1 == -2) |
0bca51f0 | 1400 | return -2; |
6b3c76a9 JH |
1401 | if (cmp1 == 1) |
1402 | return 0; | |
0bca51f0 | 1403 | |
e8f808b3 | 1404 | cmp2 = operand_less_p (max, val); |
6b3c76a9 | 1405 | if (cmp2 == -2) |
0bca51f0 DN |
1406 | return -2; |
1407 | ||
6b3c76a9 | 1408 | return !cmp2; |
0bca51f0 DN |
1409 | } |
1410 | ||
1411 | ||
1412 | /* Return true if value ranges VR0 and VR1 have a non-empty | |
b8698a0f L |
1413 | intersection. |
1414 | ||
6b3c76a9 JH |
1415 | Benchmark compile/20001226-1.c compilation time after changing this |
1416 | function. | |
1417 | */ | |
0bca51f0 DN |
1418 | |
1419 | static inline bool | |
526ceb68 | 1420 | value_ranges_intersect_p (value_range *vr0, value_range *vr1) |
0bca51f0 | 1421 | { |
5daffcc7 JH |
1422 | /* The value ranges do not intersect if the maximum of the first range is |
1423 | less than the minimum of the second range or vice versa. | |
1424 | When those relations are unknown, we can't do any better. */ | |
1425 | if (operand_less_p (vr0->max, vr1->min) != 0) | |
1426 | return false; | |
1427 | if (operand_less_p (vr1->max, vr0->min) != 0) | |
1428 | return false; | |
1429 | return true; | |
0bca51f0 DN |
1430 | } |
1431 | ||
1432 | ||
e8f808b3 RG |
1433 | /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not |
1434 | include the value zero, -2 if we cannot tell. */ | |
227858d1 | 1435 | |
e8f808b3 RG |
1436 | static inline int |
1437 | range_includes_zero_p (tree min, tree max) | |
227858d1 | 1438 | { |
e8f808b3 RG |
1439 | tree zero = build_int_cst (TREE_TYPE (min), 0); |
1440 | return value_inside_range (zero, min, max); | |
227858d1 DN |
1441 | } |
1442 | ||
4d320da4 RG |
1443 | /* Return true if *VR is know to only contain nonnegative values. */ |
1444 | ||
1445 | static inline bool | |
526ceb68 | 1446 | value_range_nonnegative_p (value_range *vr) |
4d320da4 | 1447 | { |
1a0fcfa9 RG |
1448 | /* Testing for VR_ANTI_RANGE is not useful here as any anti-range |
1449 | which would return a useful value should be encoded as a | |
1450 | VR_RANGE. */ | |
4d320da4 RG |
1451 | if (vr->type == VR_RANGE) |
1452 | { | |
1453 | int result = compare_values (vr->min, integer_zero_node); | |
1454 | return (result == 0 || result == 1); | |
1455 | } | |
4d320da4 RG |
1456 | |
1457 | return false; | |
1458 | } | |
1459 | ||
4d320da4 RG |
1460 | /* If *VR has a value rante that is a single constant value return that, |
1461 | otherwise return NULL_TREE. */ | |
1462 | ||
1463 | static tree | |
526ceb68 | 1464 | value_range_constant_singleton (value_range *vr) |
4d320da4 RG |
1465 | { |
1466 | if (vr->type == VR_RANGE | |
1467 | && operand_equal_p (vr->min, vr->max, 0) | |
1468 | && is_gimple_min_invariant (vr->min)) | |
1469 | return vr->min; | |
1470 | ||
1471 | return NULL_TREE; | |
b16caf72 JL |
1472 | } |
1473 | ||
73019a42 RG |
1474 | /* If OP has a value range with a single constant value return that, |
1475 | otherwise return NULL_TREE. This returns OP itself if OP is a | |
1476 | constant. */ | |
1477 | ||
1478 | static tree | |
1479 | op_with_constant_singleton_value_range (tree op) | |
1480 | { | |
73019a42 RG |
1481 | if (is_gimple_min_invariant (op)) |
1482 | return op; | |
1483 | ||
1484 | if (TREE_CODE (op) != SSA_NAME) | |
1485 | return NULL_TREE; | |
1486 | ||
4d320da4 | 1487 | return value_range_constant_singleton (get_value_range (op)); |
73019a42 RG |
1488 | } |
1489 | ||
7e29ba60 RG |
1490 | /* Return true if op is in a boolean [0, 1] value-range. */ |
1491 | ||
1492 | static bool | |
1493 | op_with_boolean_value_range_p (tree op) | |
1494 | { | |
526ceb68 | 1495 | value_range *vr; |
7e29ba60 RG |
1496 | |
1497 | if (TYPE_PRECISION (TREE_TYPE (op)) == 1) | |
1498 | return true; | |
1499 | ||
1500 | if (integer_zerop (op) | |
1501 | || integer_onep (op)) | |
1502 | return true; | |
1503 | ||
1504 | if (TREE_CODE (op) != SSA_NAME) | |
1505 | return false; | |
1506 | ||
1507 | vr = get_value_range (op); | |
1508 | return (vr->type == VR_RANGE | |
1509 | && integer_zerop (vr->min) | |
1510 | && integer_onep (vr->max)); | |
1511 | } | |
227858d1 | 1512 | |
0bca51f0 DN |
1513 | /* Extract value range information from an ASSERT_EXPR EXPR and store |
1514 | it in *VR_P. */ | |
1515 | ||
1516 | static void | |
526ceb68 | 1517 | extract_range_from_assert (value_range *vr_p, tree expr) |
0bca51f0 | 1518 | { |
227858d1 | 1519 | tree var, cond, limit, min, max, type; |
526ceb68 | 1520 | value_range *limit_vr; |
b565d777 | 1521 | enum tree_code cond_code; |
0bca51f0 DN |
1522 | |
1523 | var = ASSERT_EXPR_VAR (expr); | |
1524 | cond = ASSERT_EXPR_COND (expr); | |
1525 | ||
7da4bf7d | 1526 | gcc_assert (COMPARISON_CLASS_P (cond)); |
0bca51f0 DN |
1527 | |
1528 | /* Find VAR in the ASSERT_EXPR conditional. */ | |
2ab8dbf4 RG |
1529 | if (var == TREE_OPERAND (cond, 0) |
1530 | || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR | |
1531 | || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR) | |
227858d1 DN |
1532 | { |
1533 | /* If the predicate is of the form VAR COMP LIMIT, then we just | |
1534 | take LIMIT from the RHS and use the same comparison code. */ | |
227858d1 | 1535 | cond_code = TREE_CODE (cond); |
2ab8dbf4 RG |
1536 | limit = TREE_OPERAND (cond, 1); |
1537 | cond = TREE_OPERAND (cond, 0); | |
227858d1 DN |
1538 | } |
1539 | else | |
1540 | { | |
1541 | /* If the predicate is of the form LIMIT COMP VAR, then we need | |
1542 | to flip around the comparison code to create the proper range | |
1543 | for VAR. */ | |
09b2f9e8 | 1544 | cond_code = swap_tree_comparison (TREE_CODE (cond)); |
2ab8dbf4 RG |
1545 | limit = TREE_OPERAND (cond, 0); |
1546 | cond = TREE_OPERAND (cond, 1); | |
227858d1 | 1547 | } |
0bca51f0 | 1548 | |
b80cca7b ILT |
1549 | limit = avoid_overflow_infinity (limit); |
1550 | ||
ebbcd0c6 | 1551 | type = TREE_TYPE (var); |
0bca51f0 DN |
1552 | gcc_assert (limit != var); |
1553 | ||
227858d1 DN |
1554 | /* For pointer arithmetic, we only keep track of pointer equality |
1555 | and inequality. */ | |
1556 | if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR) | |
0bca51f0 | 1557 | { |
b565d777 | 1558 | set_value_range_to_varying (vr_p); |
0bca51f0 DN |
1559 | return; |
1560 | } | |
1561 | ||
227858d1 DN |
1562 | /* If LIMIT is another SSA name and LIMIT has a range of its own, |
1563 | try to use LIMIT's range to avoid creating symbolic ranges | |
1564 | unnecessarily. */ | |
1565 | limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL; | |
1566 | ||
1567 | /* LIMIT's range is only interesting if it has any useful information. */ | |
1568 | if (limit_vr | |
1569 | && (limit_vr->type == VR_UNDEFINED | |
1570 | || limit_vr->type == VR_VARYING | |
1571 | || symbolic_range_p (limit_vr))) | |
1572 | limit_vr = NULL; | |
1573 | ||
db3d5328 DN |
1574 | /* Initially, the new range has the same set of equivalences of |
1575 | VAR's range. This will be revised before returning the final | |
1576 | value. Since assertions may be chained via mutually exclusive | |
1577 | predicates, we will need to trim the set of equivalences before | |
1578 | we are done. */ | |
227858d1 | 1579 | gcc_assert (vr_p->equiv == NULL); |
f5052e29 | 1580 | add_equivalence (&vr_p->equiv, var); |
227858d1 DN |
1581 | |
1582 | /* Extract a new range based on the asserted comparison for VAR and | |
1583 | LIMIT's value range. Notice that if LIMIT has an anti-range, we | |
1584 | will only use it for equality comparisons (EQ_EXPR). For any | |
1585 | other kind of assertion, we cannot derive a range from LIMIT's | |
1586 | anti-range that can be used to describe the new range. For | |
1587 | instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10], | |
1588 | then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is | |
1589 | no single range for x_2 that could describe LE_EXPR, so we might | |
2ab8dbf4 RG |
1590 | as well build the range [b_4, +INF] for it. |
1591 | One special case we handle is extracting a range from a | |
1592 | range test encoded as (unsigned)var + CST <= limit. */ | |
1593 | if (TREE_CODE (cond) == NOP_EXPR | |
1594 | || TREE_CODE (cond) == PLUS_EXPR) | |
1595 | { | |
2ab8dbf4 RG |
1596 | if (TREE_CODE (cond) == PLUS_EXPR) |
1597 | { | |
70b7b037 RG |
1598 | min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)), |
1599 | TREE_OPERAND (cond, 1)); | |
d35936ab | 1600 | max = int_const_binop (PLUS_EXPR, limit, min); |
2ab8dbf4 RG |
1601 | cond = TREE_OPERAND (cond, 0); |
1602 | } | |
1603 | else | |
70b7b037 RG |
1604 | { |
1605 | min = build_int_cst (TREE_TYPE (var), 0); | |
1606 | max = limit; | |
1607 | } | |
2ab8dbf4 | 1608 | |
70b7b037 RG |
1609 | /* Make sure to not set TREE_OVERFLOW on the final type |
1610 | conversion. We are willingly interpreting large positive | |
ed986827 | 1611 | unsigned values as negative signed values here. */ |
807e902e KZ |
1612 | min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false); |
1613 | max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false); | |
2ab8dbf4 RG |
1614 | |
1615 | /* We can transform a max, min range to an anti-range or | |
1616 | vice-versa. Use set_and_canonicalize_value_range which does | |
1617 | this for us. */ | |
1618 | if (cond_code == LE_EXPR) | |
1619 | set_and_canonicalize_value_range (vr_p, VR_RANGE, | |
1620 | min, max, vr_p->equiv); | |
1621 | else if (cond_code == GT_EXPR) | |
1622 | set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE, | |
1623 | min, max, vr_p->equiv); | |
1624 | else | |
1625 | gcc_unreachable (); | |
1626 | } | |
1627 | else if (cond_code == EQ_EXPR) | |
227858d1 DN |
1628 | { |
1629 | enum value_range_type range_type; | |
1630 | ||
1631 | if (limit_vr) | |
1632 | { | |
1633 | range_type = limit_vr->type; | |
1634 | min = limit_vr->min; | |
1635 | max = limit_vr->max; | |
1636 | } | |
1637 | else | |
1638 | { | |
1639 | range_type = VR_RANGE; | |
1640 | min = limit; | |
1641 | max = limit; | |
1642 | } | |
1643 | ||
1644 | set_value_range (vr_p, range_type, min, max, vr_p->equiv); | |
1645 | ||
1646 | /* When asserting the equality VAR == LIMIT and LIMIT is another | |
1647 | SSA name, the new range will also inherit the equivalence set | |
1648 | from LIMIT. */ | |
1649 | if (TREE_CODE (limit) == SSA_NAME) | |
f5052e29 | 1650 | add_equivalence (&vr_p->equiv, limit); |
227858d1 DN |
1651 | } |
1652 | else if (cond_code == NE_EXPR) | |
1653 | { | |
1654 | /* As described above, when LIMIT's range is an anti-range and | |
1655 | this assertion is an inequality (NE_EXPR), then we cannot | |
1656 | derive anything from the anti-range. For instance, if | |
1657 | LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does | |
1658 | not imply that VAR's range is [0, 0]. So, in the case of | |
1659 | anti-ranges, we just assert the inequality using LIMIT and | |
fde5c44c JM |
1660 | not its anti-range. |
1661 | ||
1662 | If LIMIT_VR is a range, we can only use it to build a new | |
1663 | anti-range if LIMIT_VR is a single-valued range. For | |
1664 | instance, if LIMIT_VR is [0, 1], the predicate | |
1665 | VAR != [0, 1] does not mean that VAR's range is ~[0, 1]. | |
1666 | Rather, it means that for value 0 VAR should be ~[0, 0] | |
1667 | and for value 1, VAR should be ~[1, 1]. We cannot | |
1668 | represent these ranges. | |
1669 | ||
1670 | The only situation in which we can build a valid | |
1671 | anti-range is when LIMIT_VR is a single-valued range | |
b8698a0f | 1672 | (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case, |
fde5c44c JM |
1673 | build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */ |
1674 | if (limit_vr | |
1675 | && limit_vr->type == VR_RANGE | |
1676 | && compare_values (limit_vr->min, limit_vr->max) == 0) | |
227858d1 | 1677 | { |
fde5c44c JM |
1678 | min = limit_vr->min; |
1679 | max = limit_vr->max; | |
227858d1 DN |
1680 | } |
1681 | else | |
1682 | { | |
fde5c44c JM |
1683 | /* In any other case, we cannot use LIMIT's range to build a |
1684 | valid anti-range. */ | |
1685 | min = max = limit; | |
227858d1 DN |
1686 | } |
1687 | ||
1688 | /* If MIN and MAX cover the whole range for their type, then | |
1689 | just use the original LIMIT. */ | |
1690 | if (INTEGRAL_TYPE_P (type) | |
e1f28918 ILT |
1691 | && vrp_val_is_min (min) |
1692 | && vrp_val_is_max (max)) | |
227858d1 DN |
1693 | min = max = limit; |
1694 | ||
1001fb60 RB |
1695 | set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE, |
1696 | min, max, vr_p->equiv); | |
227858d1 DN |
1697 | } |
1698 | else if (cond_code == LE_EXPR || cond_code == LT_EXPR) | |
0bca51f0 | 1699 | { |
227858d1 DN |
1700 | min = TYPE_MIN_VALUE (type); |
1701 | ||
1702 | if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE) | |
1703 | max = limit; | |
1704 | else | |
1705 | { | |
1706 | /* If LIMIT_VR is of the form [N1, N2], we need to build the | |
1707 | range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for | |
1708 | LT_EXPR. */ | |
1709 | max = limit_vr->max; | |
1710 | } | |
1711 | ||
9d6eefd5 EB |
1712 | /* If the maximum value forces us to be out of bounds, simply punt. |
1713 | It would be pointless to try and do anything more since this | |
1714 | all should be optimized away above us. */ | |
7343ff45 ILT |
1715 | if ((cond_code == LT_EXPR |
1716 | && compare_values (max, min) == 0) | |
3f5c390d | 1717 | || is_overflow_infinity (max)) |
9d6eefd5 EB |
1718 | set_value_range_to_varying (vr_p); |
1719 | else | |
227858d1 | 1720 | { |
9d6eefd5 | 1721 | /* For LT_EXPR, we create the range [MIN, MAX - 1]. */ |
7343ff45 | 1722 | if (cond_code == LT_EXPR) |
9d6eefd5 | 1723 | { |
c360c0fb JJ |
1724 | if (TYPE_PRECISION (TREE_TYPE (max)) == 1 |
1725 | && !TYPE_UNSIGNED (TREE_TYPE (max))) | |
1726 | max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max, | |
1727 | build_int_cst (TREE_TYPE (max), -1)); | |
1728 | else | |
1729 | max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max, | |
1730 | build_int_cst (TREE_TYPE (max), 1)); | |
3fe5bcaf ILT |
1731 | if (EXPR_P (max)) |
1732 | TREE_NO_WARNING (max) = 1; | |
9d6eefd5 | 1733 | } |
227858d1 | 1734 | |
9d6eefd5 EB |
1735 | set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv); |
1736 | } | |
0bca51f0 | 1737 | } |
227858d1 | 1738 | else if (cond_code == GE_EXPR || cond_code == GT_EXPR) |
0bca51f0 | 1739 | { |
227858d1 DN |
1740 | max = TYPE_MAX_VALUE (type); |
1741 | ||
1742 | if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE) | |
1743 | min = limit; | |
1744 | else | |
1745 | { | |
1746 | /* If LIMIT_VR is of the form [N1, N2], we need to build the | |
1747 | range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for | |
1748 | GT_EXPR. */ | |
1749 | min = limit_vr->min; | |
1750 | } | |
1751 | ||
9d6eefd5 EB |
1752 | /* If the minimum value forces us to be out of bounds, simply punt. |
1753 | It would be pointless to try and do anything more since this | |
1754 | all should be optimized away above us. */ | |
7343ff45 ILT |
1755 | if ((cond_code == GT_EXPR |
1756 | && compare_values (min, max) == 0) | |
3f5c390d | 1757 | || is_overflow_infinity (min)) |
9d6eefd5 EB |
1758 | set_value_range_to_varying (vr_p); |
1759 | else | |
227858d1 | 1760 | { |
9d6eefd5 | 1761 | /* For GT_EXPR, we create the range [MIN + 1, MAX]. */ |
7343ff45 | 1762 | if (cond_code == GT_EXPR) |
9d6eefd5 | 1763 | { |
c360c0fb JJ |
1764 | if (TYPE_PRECISION (TREE_TYPE (min)) == 1 |
1765 | && !TYPE_UNSIGNED (TREE_TYPE (min))) | |
1766 | min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min, | |
1767 | build_int_cst (TREE_TYPE (min), -1)); | |
1768 | else | |
1769 | min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min, | |
1770 | build_int_cst (TREE_TYPE (min), 1)); | |
3fe5bcaf ILT |
1771 | if (EXPR_P (min)) |
1772 | TREE_NO_WARNING (min) = 1; | |
9d6eefd5 | 1773 | } |
227858d1 | 1774 | |
9d6eefd5 EB |
1775 | set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv); |
1776 | } | |
0bca51f0 DN |
1777 | } |
1778 | else | |
1779 | gcc_unreachable (); | |
1780 | ||
3928c098 RG |
1781 | /* Finally intersect the new range with what we already know about var. */ |
1782 | vrp_intersect_ranges (vr_p, get_value_range (var)); | |
0bca51f0 DN |
1783 | } |
1784 | ||
1785 | ||
1786 | /* Extract range information from SSA name VAR and store it in VR. If | |
1787 | VAR has an interesting range, use it. Otherwise, create the | |
1788 | range [VAR, VAR] and return it. This is useful in situations where | |
1789 | we may have conditionals testing values of VARYING names. For | |
1790 | instance, | |
1791 | ||
1792 | x_3 = y_5; | |
1793 | if (x_3 > y_5) | |
1794 | ... | |
1795 | ||
1796 | Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is | |
1797 | always false. */ | |
1798 | ||
1799 | static void | |
526ceb68 | 1800 | extract_range_from_ssa_name (value_range *vr, tree var) |
0bca51f0 | 1801 | { |
526ceb68 | 1802 | value_range *var_vr = get_value_range (var); |
0bca51f0 | 1803 | |
6e5799b9 | 1804 | if (var_vr->type != VR_VARYING) |
227858d1 | 1805 | copy_value_range (vr, var_vr); |
0bca51f0 | 1806 | else |
227858d1 DN |
1807 | set_value_range (vr, VR_RANGE, var, var, NULL); |
1808 | ||
f5052e29 | 1809 | add_equivalence (&vr->equiv, var); |
0bca51f0 DN |
1810 | } |
1811 | ||
1812 | ||
9983270b DN |
1813 | /* Wrapper around int_const_binop. If the operation overflows and we |
1814 | are not using wrapping arithmetic, then adjust the result to be | |
12df8a7e ILT |
1815 | -INF or +INF depending on CODE, VAL1 and VAL2. This can return |
1816 | NULL_TREE if we need to use an overflow infinity representation but | |
1817 | the type does not support it. */ | |
9983270b | 1818 | |
12df8a7e | 1819 | static tree |
9983270b DN |
1820 | vrp_int_const_binop (enum tree_code code, tree val1, tree val2) |
1821 | { | |
1822 | tree res; | |
1823 | ||
d35936ab | 1824 | res = int_const_binop (code, val1, val2); |
9983270b | 1825 | |
9605a606 RG |
1826 | /* If we are using unsigned arithmetic, operate symbolically |
1827 | on -INF and +INF as int_const_binop only handles signed overflow. */ | |
1828 | if (TYPE_UNSIGNED (TREE_TYPE (val1))) | |
b17775ab JM |
1829 | { |
1830 | int checkz = compare_values (res, val1); | |
26ef4301 | 1831 | bool overflow = false; |
b17775ab | 1832 | |
7dc32197 | 1833 | /* Ensure that res = val1 [+*] val2 >= val1 |
b17775ab | 1834 | or that res = val1 - val2 <= val1. */ |
26ef4301 | 1835 | if ((code == PLUS_EXPR |
7dc32197 DN |
1836 | && !(checkz == 1 || checkz == 0)) |
1837 | || (code == MINUS_EXPR | |
1838 | && !(checkz == 0 || checkz == -1))) | |
26ef4301 JL |
1839 | { |
1840 | overflow = true; | |
1841 | } | |
1842 | /* Checking for multiplication overflow is done by dividing the | |
1843 | output of the multiplication by the first input of the | |
1844 | multiplication. If the result of that division operation is | |
1845 | not equal to the second input of the multiplication, then the | |
1846 | multiplication overflowed. */ | |
1847 | else if (code == MULT_EXPR && !integer_zerop (val1)) | |
1848 | { | |
1849 | tree tmp = int_const_binop (TRUNC_DIV_EXPR, | |
3ea0e1e4 | 1850 | res, |
d35936ab | 1851 | val1); |
26ef4301 JL |
1852 | int check = compare_values (tmp, val2); |
1853 | ||
1854 | if (check != 0) | |
1855 | overflow = true; | |
1856 | } | |
1857 | ||
1858 | if (overflow) | |
b17775ab JM |
1859 | { |
1860 | res = copy_node (res); | |
1861 | TREE_OVERFLOW (res) = 1; | |
1862 | } | |
26ef4301 | 1863 | |
b17775ab | 1864 | } |
5418fe84 RG |
1865 | else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1))) |
1866 | /* If the singed operation wraps then int_const_binop has done | |
1867 | everything we want. */ | |
1868 | ; | |
807e902e KZ |
1869 | /* Signed division of -1/0 overflows and by the time it gets here |
1870 | returns NULL_TREE. */ | |
1871 | else if (!res) | |
1872 | return NULL_TREE; | |
12df8a7e ILT |
1873 | else if ((TREE_OVERFLOW (res) |
1874 | && !TREE_OVERFLOW (val1) | |
1875 | && !TREE_OVERFLOW (val2)) | |
1876 | || is_overflow_infinity (val1) | |
1877 | || is_overflow_infinity (val2)) | |
9983270b | 1878 | { |
7dc32197 DN |
1879 | /* If the operation overflowed but neither VAL1 nor VAL2 are |
1880 | overflown, return -INF or +INF depending on the operation | |
1881 | and the combination of signs of the operands. */ | |
9983270b DN |
1882 | int sgn1 = tree_int_cst_sgn (val1); |
1883 | int sgn2 = tree_int_cst_sgn (val2); | |
1884 | ||
12df8a7e ILT |
1885 | if (needs_overflow_infinity (TREE_TYPE (res)) |
1886 | && !supports_overflow_infinity (TREE_TYPE (res))) | |
1887 | return NULL_TREE; | |
1888 | ||
d7419dec ILT |
1889 | /* We have to punt on adding infinities of different signs, |
1890 | since we can't tell what the sign of the result should be. | |
1891 | Likewise for subtracting infinities of the same sign. */ | |
1892 | if (((code == PLUS_EXPR && sgn1 != sgn2) | |
1893 | || (code == MINUS_EXPR && sgn1 == sgn2)) | |
12df8a7e ILT |
1894 | && is_overflow_infinity (val1) |
1895 | && is_overflow_infinity (val2)) | |
1896 | return NULL_TREE; | |
1897 | ||
d7419dec ILT |
1898 | /* Don't try to handle division or shifting of infinities. */ |
1899 | if ((code == TRUNC_DIV_EXPR | |
1900 | || code == FLOOR_DIV_EXPR | |
1901 | || code == CEIL_DIV_EXPR | |
1902 | || code == EXACT_DIV_EXPR | |
1903 | || code == ROUND_DIV_EXPR | |
1904 | || code == RSHIFT_EXPR) | |
1905 | && (is_overflow_infinity (val1) | |
1906 | || is_overflow_infinity (val2))) | |
1907 | return NULL_TREE; | |
1908 | ||
0d22e81f EB |
1909 | /* Notice that we only need to handle the restricted set of |
1910 | operations handled by extract_range_from_binary_expr. | |
1911 | Among them, only multiplication, addition and subtraction | |
1912 | can yield overflow without overflown operands because we | |
1913 | are working with integral types only... except in the | |
1914 | case VAL1 = -INF and VAL2 = -1 which overflows to +INF | |
1915 | for division too. */ | |
1916 | ||
1917 | /* For multiplication, the sign of the overflow is given | |
1918 | by the comparison of the signs of the operands. */ | |
1919 | if ((code == MULT_EXPR && sgn1 == sgn2) | |
1920 | /* For addition, the operands must be of the same sign | |
1921 | to yield an overflow. Its sign is therefore that | |
d7419dec ILT |
1922 | of one of the operands, for example the first. For |
1923 | infinite operands X + -INF is negative, not positive. */ | |
1924 | || (code == PLUS_EXPR | |
1925 | && (sgn1 >= 0 | |
1926 | ? !is_negative_overflow_infinity (val2) | |
1927 | : is_positive_overflow_infinity (val2))) | |
12df8a7e ILT |
1928 | /* For subtraction, non-infinite operands must be of |
1929 | different signs to yield an overflow. Its sign is | |
1930 | therefore that of the first operand or the opposite of | |
1931 | that of the second operand. A first operand of 0 counts | |
1932 | as positive here, for the corner case 0 - (-INF), which | |
1933 | overflows, but must yield +INF. For infinite operands 0 | |
1934 | - INF is negative, not positive. */ | |
1935 | || (code == MINUS_EXPR | |
1936 | && (sgn1 >= 0 | |
1937 | ? !is_positive_overflow_infinity (val2) | |
1938 | : is_negative_overflow_infinity (val2))) | |
13338552 RG |
1939 | /* We only get in here with positive shift count, so the |
1940 | overflow direction is the same as the sign of val1. | |
1941 | Actually rshift does not overflow at all, but we only | |
1942 | handle the case of shifting overflowed -INF and +INF. */ | |
1943 | || (code == RSHIFT_EXPR | |
1944 | && sgn1 >= 0) | |
0d22e81f EB |
1945 | /* For division, the only case is -INF / -1 = +INF. */ |
1946 | || code == TRUNC_DIV_EXPR | |
1947 | || code == FLOOR_DIV_EXPR | |
1948 | || code == CEIL_DIV_EXPR | |
1949 | || code == EXACT_DIV_EXPR | |
1950 | || code == ROUND_DIV_EXPR) | |
12df8a7e ILT |
1951 | return (needs_overflow_infinity (TREE_TYPE (res)) |
1952 | ? positive_overflow_infinity (TREE_TYPE (res)) | |
1953 | : TYPE_MAX_VALUE (TREE_TYPE (res))); | |
9983270b | 1954 | else |
12df8a7e ILT |
1955 | return (needs_overflow_infinity (TREE_TYPE (res)) |
1956 | ? negative_overflow_infinity (TREE_TYPE (res)) | |
1957 | : TYPE_MIN_VALUE (TREE_TYPE (res))); | |
9983270b DN |
1958 | } |
1959 | ||
1960 | return res; | |
1961 | } | |
1962 | ||
1963 | ||
807e902e | 1964 | /* For range VR compute two wide_int bitmasks. In *MAY_BE_NONZERO |
85e693aa JJ |
1965 | bitmask if some bit is unset, it means for all numbers in the range |
1966 | the bit is 0, otherwise it might be 0 or 1. In *MUST_BE_NONZERO | |
1967 | bitmask if some bit is set, it means for all numbers in the range | |
1968 | the bit is 1, otherwise it might be 0 or 1. */ | |
1969 | ||
1970 | static bool | |
807e902e | 1971 | zero_nonzero_bits_from_vr (const tree expr_type, |
526ceb68 | 1972 | value_range *vr, |
807e902e KZ |
1973 | wide_int *may_be_nonzero, |
1974 | wide_int *must_be_nonzero) | |
85e693aa | 1975 | { |
807e902e KZ |
1976 | *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type)); |
1977 | *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type)); | |
a75f5017 | 1978 | if (!range_int_cst_p (vr) |
3f5c390d RB |
1979 | || is_overflow_infinity (vr->min) |
1980 | || is_overflow_infinity (vr->max)) | |
4001900f RG |
1981 | return false; |
1982 | ||
1983 | if (range_int_cst_singleton_p (vr)) | |
1984 | { | |
807e902e | 1985 | *may_be_nonzero = vr->min; |
4001900f RG |
1986 | *must_be_nonzero = *may_be_nonzero; |
1987 | } | |
1988 | else if (tree_int_cst_sgn (vr->min) >= 0 | |
1989 | || tree_int_cst_sgn (vr->max) < 0) | |
85e693aa | 1990 | { |
807e902e KZ |
1991 | wide_int xor_mask = wi::bit_xor (vr->min, vr->max); |
1992 | *may_be_nonzero = wi::bit_or (vr->min, vr->max); | |
1993 | *must_be_nonzero = wi::bit_and (vr->min, vr->max); | |
1994 | if (xor_mask != 0) | |
85e693aa | 1995 | { |
807e902e KZ |
1996 | wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false, |
1997 | may_be_nonzero->get_precision ()); | |
1998 | *may_be_nonzero = *may_be_nonzero | mask; | |
1999 | *must_be_nonzero = must_be_nonzero->and_not (mask); | |
85e693aa JJ |
2000 | } |
2001 | } | |
4001900f RG |
2002 | |
2003 | return true; | |
85e693aa JJ |
2004 | } |
2005 | ||
3c9c79e8 RG |
2006 | /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR |
2007 | so that *VR0 U *VR1 == *AR. Returns true if that is possible, | |
2008 | false otherwise. If *AR can be represented with a single range | |
2009 | *VR1 will be VR_UNDEFINED. */ | |
2010 | ||
2011 | static bool | |
526ceb68 TS |
2012 | ranges_from_anti_range (value_range *ar, |
2013 | value_range *vr0, value_range *vr1) | |
3c9c79e8 RG |
2014 | { |
2015 | tree type = TREE_TYPE (ar->min); | |
2016 | ||
2017 | vr0->type = VR_UNDEFINED; | |
2018 | vr1->type = VR_UNDEFINED; | |
2019 | ||
2020 | if (ar->type != VR_ANTI_RANGE | |
2021 | || TREE_CODE (ar->min) != INTEGER_CST | |
2022 | || TREE_CODE (ar->max) != INTEGER_CST | |
2023 | || !vrp_val_min (type) | |
2024 | || !vrp_val_max (type)) | |
2025 | return false; | |
2026 | ||
2027 | if (!vrp_val_is_min (ar->min)) | |
2028 | { | |
2029 | vr0->type = VR_RANGE; | |
2030 | vr0->min = vrp_val_min (type); | |
807e902e | 2031 | vr0->max = wide_int_to_tree (type, wi::sub (ar->min, 1)); |
3c9c79e8 RG |
2032 | } |
2033 | if (!vrp_val_is_max (ar->max)) | |
2034 | { | |
2035 | vr1->type = VR_RANGE; | |
807e902e | 2036 | vr1->min = wide_int_to_tree (type, wi::add (ar->max, 1)); |
3c9c79e8 RG |
2037 | vr1->max = vrp_val_max (type); |
2038 | } | |
2039 | if (vr0->type == VR_UNDEFINED) | |
2040 | { | |
2041 | *vr0 = *vr1; | |
2042 | vr1->type = VR_UNDEFINED; | |
2043 | } | |
2044 | ||
2045 | return vr0->type != VR_UNDEFINED; | |
2046 | } | |
2047 | ||
a1bc7628 RG |
2048 | /* Helper to extract a value-range *VR for a multiplicative operation |
2049 | *VR0 CODE *VR1. */ | |
2050 | ||
2051 | static void | |
526ceb68 | 2052 | extract_range_from_multiplicative_op_1 (value_range *vr, |
a1bc7628 | 2053 | enum tree_code code, |
526ceb68 | 2054 | value_range *vr0, value_range *vr1) |
a1bc7628 RG |
2055 | { |
2056 | enum value_range_type type; | |
2057 | tree val[4]; | |
2058 | size_t i; | |
2059 | tree min, max; | |
2060 | bool sop; | |
2061 | int cmp; | |
2062 | ||
2063 | /* Multiplications, divisions and shifts are a bit tricky to handle, | |
2064 | depending on the mix of signs we have in the two ranges, we | |
2065 | need to operate on different values to get the minimum and | |
2066 | maximum values for the new range. One approach is to figure | |
2067 | out all the variations of range combinations and do the | |
2068 | operations. | |
2069 | ||
2070 | However, this involves several calls to compare_values and it | |
2071 | is pretty convoluted. It's simpler to do the 4 operations | |
2072 | (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP | |
2073 | MAX1) and then figure the smallest and largest values to form | |
2074 | the new range. */ | |
2075 | gcc_assert (code == MULT_EXPR | |
2076 | || code == TRUNC_DIV_EXPR | |
2077 | || code == FLOOR_DIV_EXPR | |
2078 | || code == CEIL_DIV_EXPR | |
2079 | || code == EXACT_DIV_EXPR | |
2080 | || code == ROUND_DIV_EXPR | |
25722436 TV |
2081 | || code == RSHIFT_EXPR |
2082 | || code == LSHIFT_EXPR); | |
a1bc7628 RG |
2083 | gcc_assert ((vr0->type == VR_RANGE |
2084 | || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE)) | |
2085 | && vr0->type == vr1->type); | |
2086 | ||
2087 | type = vr0->type; | |
2088 | ||
2089 | /* Compute the 4 cross operations. */ | |
2090 | sop = false; | |
2091 | val[0] = vrp_int_const_binop (code, vr0->min, vr1->min); | |
2092 | if (val[0] == NULL_TREE) | |
2093 | sop = true; | |
2094 | ||
2095 | if (vr1->max == vr1->min) | |
2096 | val[1] = NULL_TREE; | |
2097 | else | |
2098 | { | |
2099 | val[1] = vrp_int_const_binop (code, vr0->min, vr1->max); | |
2100 | if (val[1] == NULL_TREE) | |
2101 | sop = true; | |
2102 | } | |
2103 | ||
2104 | if (vr0->max == vr0->min) | |
2105 | val[2] = NULL_TREE; | |
2106 | else | |
2107 | { | |
2108 | val[2] = vrp_int_const_binop (code, vr0->max, vr1->min); | |
2109 | if (val[2] == NULL_TREE) | |
2110 | sop = true; | |
2111 | } | |
2112 | ||
2113 | if (vr0->min == vr0->max || vr1->min == vr1->max) | |
2114 | val[3] = NULL_TREE; | |
2115 | else | |
2116 | { | |
2117 | val[3] = vrp_int_const_binop (code, vr0->max, vr1->max); | |
2118 | if (val[3] == NULL_TREE) | |
2119 | sop = true; | |
2120 | } | |
2121 | ||
2122 | if (sop) | |
2123 | { | |
2124 | set_value_range_to_varying (vr); | |
2125 | return; | |
2126 | } | |
2127 | ||
2128 | /* Set MIN to the minimum of VAL[i] and MAX to the maximum | |
2129 | of VAL[i]. */ | |
2130 | min = val[0]; | |
2131 | max = val[0]; | |
2132 | for (i = 1; i < 4; i++) | |
2133 | { | |
2134 | if (!is_gimple_min_invariant (min) | |
2135 | || (TREE_OVERFLOW (min) && !is_overflow_infinity (min)) | |
2136 | || !is_gimple_min_invariant (max) | |
2137 | || (TREE_OVERFLOW (max) && !is_overflow_infinity (max))) | |
2138 | break; | |
2139 | ||
2140 | if (val[i]) | |
2141 | { | |
2142 | if (!is_gimple_min_invariant (val[i]) | |
2143 | || (TREE_OVERFLOW (val[i]) | |
2144 | && !is_overflow_infinity (val[i]))) | |
2145 | { | |
2146 | /* If we found an overflowed value, set MIN and MAX | |
2147 | to it so that we set the resulting range to | |
2148 | VARYING. */ | |
2149 | min = max = val[i]; | |
2150 | break; | |
2151 | } | |
2152 | ||
2153 | if (compare_values (val[i], min) == -1) | |
2154 | min = val[i]; | |
2155 | ||
2156 | if (compare_values (val[i], max) == 1) | |
2157 | max = val[i]; | |
2158 | } | |
2159 | } | |
2160 | ||
2161 | /* If either MIN or MAX overflowed, then set the resulting range to | |
2162 | VARYING. But we do accept an overflow infinity | |
2163 | representation. */ | |
2164 | if (min == NULL_TREE | |
2165 | || !is_gimple_min_invariant (min) | |
2166 | || (TREE_OVERFLOW (min) && !is_overflow_infinity (min)) | |
2167 | || max == NULL_TREE | |
2168 | || !is_gimple_min_invariant (max) | |
2169 | || (TREE_OVERFLOW (max) && !is_overflow_infinity (max))) | |
2170 | { | |
2171 | set_value_range_to_varying (vr); | |
2172 | return; | |
2173 | } | |
2174 | ||
2175 | /* We punt if: | |
2176 | 1) [-INF, +INF] | |
2177 | 2) [-INF, +-INF(OVF)] | |
2178 | 3) [+-INF(OVF), +INF] | |
2179 | 4) [+-INF(OVF), +-INF(OVF)] | |
2180 | We learn nothing when we have INF and INF(OVF) on both sides. | |
2181 | Note that we do accept [-INF, -INF] and [+INF, +INF] without | |
2182 | overflow. */ | |
2183 | if ((vrp_val_is_min (min) || is_overflow_infinity (min)) | |
2184 | && (vrp_val_is_max (max) || is_overflow_infinity (max))) | |
2185 | { | |
2186 | set_value_range_to_varying (vr); | |
2187 | return; | |
2188 | } | |
2189 | ||
2190 | cmp = compare_values (min, max); | |
2191 | if (cmp == -2 || cmp == 1) | |
2192 | { | |
2193 | /* If the new range has its limits swapped around (MIN > MAX), | |
2194 | then the operation caused one of them to wrap around, mark | |
2195 | the new range VARYING. */ | |
2196 | set_value_range_to_varying (vr); | |
2197 | } | |
2198 | else | |
2199 | set_value_range (vr, type, min, max, NULL); | |
2200 | } | |
85e693aa | 2201 | |
4d320da4 | 2202 | /* Extract range information from a binary operation CODE based on |
e76340be | 2203 | the ranges of each of its operands *VR0 and *VR1 with resulting |
4d320da4 | 2204 | type EXPR_TYPE. The resulting range is stored in *VR. */ |
0bca51f0 DN |
2205 | |
2206 | static void | |
526ceb68 | 2207 | extract_range_from_binary_expr_1 (value_range *vr, |
4d320da4 | 2208 | enum tree_code code, tree expr_type, |
526ceb68 | 2209 | value_range *vr0_, value_range *vr1_) |
0bca51f0 | 2210 | { |
526ceb68 TS |
2211 | value_range vr0 = *vr0_, vr1 = *vr1_; |
2212 | value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER; | |
4e2d94a9 | 2213 | enum value_range_type type; |
a1bc7628 | 2214 | tree min = NULL_TREE, max = NULL_TREE; |
0bca51f0 DN |
2215 | int cmp; |
2216 | ||
a1bc7628 RG |
2217 | if (!INTEGRAL_TYPE_P (expr_type) |
2218 | && !POINTER_TYPE_P (expr_type)) | |
2219 | { | |
2220 | set_value_range_to_varying (vr); | |
2221 | return; | |
2222 | } | |
2223 | ||
0bca51f0 DN |
2224 | /* Not all binary expressions can be applied to ranges in a |
2225 | meaningful way. Handle only arithmetic operations. */ | |
2226 | if (code != PLUS_EXPR | |
2227 | && code != MINUS_EXPR | |
5be014d5 | 2228 | && code != POINTER_PLUS_EXPR |
0bca51f0 DN |
2229 | && code != MULT_EXPR |
2230 | && code != TRUNC_DIV_EXPR | |
2231 | && code != FLOOR_DIV_EXPR | |
2232 | && code != CEIL_DIV_EXPR | |
2233 | && code != EXACT_DIV_EXPR | |
2234 | && code != ROUND_DIV_EXPR | |
bab4d587 | 2235 | && code != TRUNC_MOD_EXPR |
6569e716 | 2236 | && code != RSHIFT_EXPR |
8c1f1d42 | 2237 | && code != LSHIFT_EXPR |
0bca51f0 | 2238 | && code != MIN_EXPR |
227858d1 | 2239 | && code != MAX_EXPR |
29c8f8c2 | 2240 | && code != BIT_AND_EXPR |
0f36b2da RG |
2241 | && code != BIT_IOR_EXPR |
2242 | && code != BIT_XOR_EXPR) | |
0bca51f0 | 2243 | { |
b565d777 | 2244 | set_value_range_to_varying (vr); |
0bca51f0 DN |
2245 | return; |
2246 | } | |
2247 | ||
a9b332d4 RG |
2248 | /* If both ranges are UNDEFINED, so is the result. */ |
2249 | if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED) | |
0bca51f0 | 2250 | { |
227858d1 | 2251 | set_value_range_to_undefined (vr); |
0bca51f0 DN |
2252 | return; |
2253 | } | |
a9b332d4 RG |
2254 | /* If one of the ranges is UNDEFINED drop it to VARYING for the following |
2255 | code. At some point we may want to special-case operations that | |
2256 | have UNDEFINED result for all or some value-ranges of the not UNDEFINED | |
2257 | operand. */ | |
2258 | else if (vr0.type == VR_UNDEFINED) | |
2259 | set_value_range_to_varying (&vr0); | |
2260 | else if (vr1.type == VR_UNDEFINED) | |
2261 | set_value_range_to_varying (&vr1); | |
0bca51f0 | 2262 | |
3c9c79e8 RG |
2263 | /* Now canonicalize anti-ranges to ranges when they are not symbolic |
2264 | and express ~[] op X as ([]' op X) U ([]'' op X). */ | |
2265 | if (vr0.type == VR_ANTI_RANGE | |
2266 | && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1)) | |
2267 | { | |
2268 | extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_); | |
2269 | if (vrtem1.type != VR_UNDEFINED) | |
2270 | { | |
526ceb68 | 2271 | value_range vrres = VR_INITIALIZER; |
3c9c79e8 RG |
2272 | extract_range_from_binary_expr_1 (&vrres, code, expr_type, |
2273 | &vrtem1, vr1_); | |
2274 | vrp_meet (vr, &vrres); | |
2275 | } | |
2276 | return; | |
2277 | } | |
2278 | /* Likewise for X op ~[]. */ | |
2279 | if (vr1.type == VR_ANTI_RANGE | |
2280 | && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1)) | |
2281 | { | |
2282 | extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0); | |
2283 | if (vrtem1.type != VR_UNDEFINED) | |
2284 | { | |
526ceb68 | 2285 | value_range vrres = VR_INITIALIZER; |
3c9c79e8 RG |
2286 | extract_range_from_binary_expr_1 (&vrres, code, expr_type, |
2287 | vr0_, &vrtem1); | |
2288 | vrp_meet (vr, &vrres); | |
2289 | } | |
2290 | return; | |
2291 | } | |
2292 | ||
4e2d94a9 KH |
2293 | /* The type of the resulting value range defaults to VR0.TYPE. */ |
2294 | type = vr0.type; | |
2295 | ||
227858d1 | 2296 | /* Refuse to operate on VARYING ranges, ranges of different kinds |
e76340be | 2297 | and symbolic ranges. As an exception, we allow BIT_{AND,IOR} |
29c8f8c2 | 2298 | because we may be able to derive a useful range even if one of |
193a3681 | 2299 | the operands is VR_VARYING or symbolic range. Similarly for |
e76340be EB |
2300 | divisions, MIN/MAX and PLUS/MINUS. |
2301 | ||
2302 | TODO, we may be able to derive anti-ranges in some cases. */ | |
29c8f8c2 | 2303 | if (code != BIT_AND_EXPR |
aebf4828 | 2304 | && code != BIT_IOR_EXPR |
193a3681 JJ |
2305 | && code != TRUNC_DIV_EXPR |
2306 | && code != FLOOR_DIV_EXPR | |
2307 | && code != CEIL_DIV_EXPR | |
2308 | && code != EXACT_DIV_EXPR | |
2309 | && code != ROUND_DIV_EXPR | |
bab4d587 | 2310 | && code != TRUNC_MOD_EXPR |
83ede847 RB |
2311 | && code != MIN_EXPR |
2312 | && code != MAX_EXPR | |
e76340be EB |
2313 | && code != PLUS_EXPR |
2314 | && code != MINUS_EXPR | |
4c57980f | 2315 | && code != RSHIFT_EXPR |
29c8f8c2 KH |
2316 | && (vr0.type == VR_VARYING |
2317 | || vr1.type == VR_VARYING | |
2318 | || vr0.type != vr1.type | |
2319 | || symbolic_range_p (&vr0) | |
2320 | || symbolic_range_p (&vr1))) | |
0bca51f0 | 2321 | { |
b565d777 | 2322 | set_value_range_to_varying (vr); |
0bca51f0 DN |
2323 | return; |
2324 | } | |
2325 | ||
2326 | /* Now evaluate the expression to determine the new range. */ | |
4d320da4 | 2327 | if (POINTER_TYPE_P (expr_type)) |
0bca51f0 | 2328 | { |
4d320da4 | 2329 | if (code == MIN_EXPR || code == MAX_EXPR) |
e57f2b41 | 2330 | { |
5be014d5 AP |
2331 | /* For MIN/MAX expressions with pointers, we only care about |
2332 | nullness, if both are non null, then the result is nonnull. | |
2333 | If both are null, then the result is null. Otherwise they | |
2334 | are varying. */ | |
2335 | if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1)) | |
2d3cd5d5 | 2336 | set_value_range_to_nonnull (vr, expr_type); |
e57f2b41 | 2337 | else if (range_is_null (&vr0) && range_is_null (&vr1)) |
2d3cd5d5 | 2338 | set_value_range_to_null (vr, expr_type); |
e57f2b41 KH |
2339 | else |
2340 | set_value_range_to_varying (vr); | |
2341 | } | |
4d320da4 | 2342 | else if (code == POINTER_PLUS_EXPR) |
fca821b5 RG |
2343 | { |
2344 | /* For pointer types, we are really only interested in asserting | |
2345 | whether the expression evaluates to non-NULL. */ | |
2346 | if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1)) | |
2347 | set_value_range_to_nonnull (vr, expr_type); | |
2348 | else if (range_is_null (&vr0) && range_is_null (&vr1)) | |
2349 | set_value_range_to_null (vr, expr_type); | |
2350 | else | |
2351 | set_value_range_to_varying (vr); | |
2352 | } | |
2353 | else if (code == BIT_AND_EXPR) | |
2354 | { | |
2355 | /* For pointer types, we are really only interested in asserting | |
2356 | whether the expression evaluates to non-NULL. */ | |
2357 | if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1)) | |
2358 | set_value_range_to_nonnull (vr, expr_type); | |
2359 | else if (range_is_null (&vr0) || range_is_null (&vr1)) | |
2360 | set_value_range_to_null (vr, expr_type); | |
2361 | else | |
2362 | set_value_range_to_varying (vr); | |
2363 | } | |
0bca51f0 | 2364 | else |
4d320da4 | 2365 | set_value_range_to_varying (vr); |
0bca51f0 DN |
2366 | |
2367 | return; | |
2368 | } | |
2369 | ||
2370 | /* For integer ranges, apply the operation to each end of the | |
2371 | range and see what we end up with. */ | |
933a2c39 | 2372 | if (code == PLUS_EXPR || code == MINUS_EXPR) |
0bca51f0 | 2373 | { |
e76340be EB |
2374 | const bool minus_p = (code == MINUS_EXPR); |
2375 | tree min_op0 = vr0.min; | |
2376 | tree min_op1 = minus_p ? vr1.max : vr1.min; | |
2377 | tree max_op0 = vr0.max; | |
2378 | tree max_op1 = minus_p ? vr1.min : vr1.max; | |
2379 | tree sym_min_op0 = NULL_TREE; | |
2380 | tree sym_min_op1 = NULL_TREE; | |
2381 | tree sym_max_op0 = NULL_TREE; | |
2382 | tree sym_max_op1 = NULL_TREE; | |
2383 | bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1; | |
2384 | ||
2385 | /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or | |
2386 | single-symbolic ranges, try to compute the precise resulting range, | |
2387 | but only if we know that this resulting range will also be constant | |
2388 | or single-symbolic. */ | |
2389 | if (vr0.type == VR_RANGE && vr1.type == VR_RANGE | |
2390 | && (TREE_CODE (min_op0) == INTEGER_CST | |
2391 | || (sym_min_op0 | |
2392 | = get_single_symbol (min_op0, &neg_min_op0, &min_op0))) | |
2393 | && (TREE_CODE (min_op1) == INTEGER_CST | |
2394 | || (sym_min_op1 | |
2395 | = get_single_symbol (min_op1, &neg_min_op1, &min_op1))) | |
2396 | && (!(sym_min_op0 && sym_min_op1) | |
2397 | || (sym_min_op0 == sym_min_op1 | |
2398 | && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1))) | |
2399 | && (TREE_CODE (max_op0) == INTEGER_CST | |
2400 | || (sym_max_op0 | |
2401 | = get_single_symbol (max_op0, &neg_max_op0, &max_op0))) | |
2402 | && (TREE_CODE (max_op1) == INTEGER_CST | |
2403 | || (sym_max_op1 | |
2404 | = get_single_symbol (max_op1, &neg_max_op1, &max_op1))) | |
2405 | && (!(sym_max_op0 && sym_max_op1) | |
2406 | || (sym_max_op0 == sym_max_op1 | |
2407 | && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1)))) | |
807e902e | 2408 | { |
e76340be EB |
2409 | const signop sgn = TYPE_SIGN (expr_type); |
2410 | const unsigned int prec = TYPE_PRECISION (expr_type); | |
2411 | wide_int type_min, type_max, wmin, wmax; | |
933a2c39 MG |
2412 | int min_ovf = 0; |
2413 | int max_ovf = 0; | |
a75f5017 | 2414 | |
e76340be EB |
2415 | /* Get the lower and upper bounds of the type. */ |
2416 | if (TYPE_OVERFLOW_WRAPS (expr_type)) | |
2417 | { | |
2418 | type_min = wi::min_value (prec, sgn); | |
2419 | type_max = wi::max_value (prec, sgn); | |
2420 | } | |
2421 | else | |
933a2c39 | 2422 | { |
e76340be EB |
2423 | type_min = vrp_val_min (expr_type); |
2424 | type_max = vrp_val_max (expr_type); | |
933a2c39 | 2425 | } |
e76340be EB |
2426 | |
2427 | /* Combine the lower bounds, if any. */ | |
2428 | if (min_op0 && min_op1) | |
933a2c39 | 2429 | { |
e76340be EB |
2430 | if (minus_p) |
2431 | { | |
2432 | wmin = wi::sub (min_op0, min_op1); | |
27bcd47c | 2433 | |
e76340be EB |
2434 | /* Check for overflow. */ |
2435 | if (wi::cmp (0, min_op1, sgn) | |
2436 | != wi::cmp (wmin, min_op0, sgn)) | |
2437 | min_ovf = wi::cmp (min_op0, min_op1, sgn); | |
2438 | } | |
2439 | else | |
2440 | { | |
2441 | wmin = wi::add (min_op0, min_op1); | |
2442 | ||
2443 | /* Check for overflow. */ | |
2444 | if (wi::cmp (min_op1, 0, sgn) | |
2445 | != wi::cmp (wmin, min_op0, sgn)) | |
2446 | min_ovf = wi::cmp (min_op0, wmin, sgn); | |
2447 | } | |
933a2c39 | 2448 | } |
e76340be EB |
2449 | else if (min_op0) |
2450 | wmin = min_op0; | |
2451 | else if (min_op1) | |
2452 | wmin = minus_p ? wi::neg (min_op1) : min_op1; | |
2453 | else | |
2454 | wmin = wi::shwi (0, prec); | |
933a2c39 | 2455 | |
e76340be EB |
2456 | /* Combine the upper bounds, if any. */ |
2457 | if (max_op0 && max_op1) | |
933a2c39 | 2458 | { |
e76340be EB |
2459 | if (minus_p) |
2460 | { | |
2461 | wmax = wi::sub (max_op0, max_op1); | |
2462 | ||
2463 | /* Check for overflow. */ | |
2464 | if (wi::cmp (0, max_op1, sgn) | |
2465 | != wi::cmp (wmax, max_op0, sgn)) | |
2466 | max_ovf = wi::cmp (max_op0, max_op1, sgn); | |
2467 | } | |
2468 | else | |
2469 | { | |
2470 | wmax = wi::add (max_op0, max_op1); | |
2471 | ||
2472 | if (wi::cmp (max_op1, 0, sgn) | |
2473 | != wi::cmp (wmax, max_op0, sgn)) | |
2474 | max_ovf = wi::cmp (max_op0, wmax, sgn); | |
2475 | } | |
933a2c39 | 2476 | } |
e76340be EB |
2477 | else if (max_op0) |
2478 | wmax = max_op0; | |
2479 | else if (max_op1) | |
2480 | wmax = minus_p ? wi::neg (max_op1) : max_op1; | |
2481 | else | |
2482 | wmax = wi::shwi (0, prec); | |
933a2c39 MG |
2483 | |
2484 | /* Check for type overflow. */ | |
2485 | if (min_ovf == 0) | |
2486 | { | |
807e902e | 2487 | if (wi::cmp (wmin, type_min, sgn) == -1) |
933a2c39 | 2488 | min_ovf = -1; |
807e902e | 2489 | else if (wi::cmp (wmin, type_max, sgn) == 1) |
933a2c39 MG |
2490 | min_ovf = 1; |
2491 | } | |
2492 | if (max_ovf == 0) | |
2493 | { | |
807e902e | 2494 | if (wi::cmp (wmax, type_min, sgn) == -1) |
933a2c39 | 2495 | max_ovf = -1; |
807e902e | 2496 | else if (wi::cmp (wmax, type_max, sgn) == 1) |
933a2c39 MG |
2497 | max_ovf = 1; |
2498 | } | |
a75f5017 | 2499 | |
e76340be EB |
2500 | /* If we have overflow for the constant part and the resulting |
2501 | range will be symbolic, drop to VR_VARYING. */ | |
2502 | if ((min_ovf && sym_min_op0 != sym_min_op1) | |
2503 | || (max_ovf && sym_max_op0 != sym_max_op1)) | |
2504 | { | |
2505 | set_value_range_to_varying (vr); | |
2506 | return; | |
2507 | } | |
2508 | ||
a75f5017 RG |
2509 | if (TYPE_OVERFLOW_WRAPS (expr_type)) |
2510 | { | |
2511 | /* If overflow wraps, truncate the values and adjust the | |
2512 | range kind and bounds appropriately. */ | |
807e902e KZ |
2513 | wide_int tmin = wide_int::from (wmin, prec, sgn); |
2514 | wide_int tmax = wide_int::from (wmax, prec, sgn); | |
933a2c39 | 2515 | if (min_ovf == max_ovf) |
a75f5017 RG |
2516 | { |
2517 | /* No overflow or both overflow or underflow. The | |
2518 | range kind stays VR_RANGE. */ | |
807e902e KZ |
2519 | min = wide_int_to_tree (expr_type, tmin); |
2520 | max = wide_int_to_tree (expr_type, tmax); | |
a75f5017 | 2521 | } |
e76340be | 2522 | else if (min_ovf == -1 && max_ovf == 1) |
a75f5017 RG |
2523 | { |
2524 | /* Underflow and overflow, drop to VR_VARYING. */ | |
2525 | set_value_range_to_varying (vr); | |
2526 | return; | |
2527 | } | |
2528 | else | |
2529 | { | |
2530 | /* Min underflow or max overflow. The range kind | |
2531 | changes to VR_ANTI_RANGE. */ | |
d9c6ca85 | 2532 | bool covers = false; |
807e902e | 2533 | wide_int tem = tmin; |
933a2c39 MG |
2534 | gcc_assert ((min_ovf == -1 && max_ovf == 0) |
2535 | || (max_ovf == 1 && min_ovf == 0)); | |
a75f5017 | 2536 | type = VR_ANTI_RANGE; |
807e902e KZ |
2537 | tmin = tmax + 1; |
2538 | if (wi::cmp (tmin, tmax, sgn) < 0) | |
d9c6ca85 | 2539 | covers = true; |
807e902e KZ |
2540 | tmax = tem - 1; |
2541 | if (wi::cmp (tmax, tem, sgn) > 0) | |
d9c6ca85 | 2542 | covers = true; |
a75f5017 RG |
2543 | /* If the anti-range would cover nothing, drop to varying. |
2544 | Likewise if the anti-range bounds are outside of the | |
2545 | type's values. */ | |
807e902e | 2546 | if (covers || wi::cmp (tmin, tmax, sgn) > 0) |
a75f5017 RG |
2547 | { |
2548 | set_value_range_to_varying (vr); | |
2549 | return; | |
2550 | } | |
807e902e KZ |
2551 | min = wide_int_to_tree (expr_type, tmin); |
2552 | max = wide_int_to_tree (expr_type, tmax); | |
a75f5017 RG |
2553 | } |
2554 | } | |
2555 | else | |
2556 | { | |
a75f5017 RG |
2557 | /* If overflow does not wrap, saturate to the types min/max |
2558 | value. */ | |
933a2c39 | 2559 | if (min_ovf == -1) |
a75f5017 RG |
2560 | { |
2561 | if (needs_overflow_infinity (expr_type) | |
2562 | && supports_overflow_infinity (expr_type)) | |
2563 | min = negative_overflow_infinity (expr_type); | |
2564 | else | |
807e902e | 2565 | min = wide_int_to_tree (expr_type, type_min); |
a75f5017 | 2566 | } |
933a2c39 | 2567 | else if (min_ovf == 1) |
a75f5017 RG |
2568 | { |
2569 | if (needs_overflow_infinity (expr_type) | |
2570 | && supports_overflow_infinity (expr_type)) | |
2571 | min = positive_overflow_infinity (expr_type); | |
2572 | else | |
807e902e | 2573 | min = wide_int_to_tree (expr_type, type_max); |
a75f5017 RG |
2574 | } |
2575 | else | |
807e902e | 2576 | min = wide_int_to_tree (expr_type, wmin); |
a75f5017 | 2577 | |
933a2c39 | 2578 | if (max_ovf == -1) |
a75f5017 RG |
2579 | { |
2580 | if (needs_overflow_infinity (expr_type) | |
2581 | && supports_overflow_infinity (expr_type)) | |
2582 | max = negative_overflow_infinity (expr_type); | |
2583 | else | |
807e902e | 2584 | max = wide_int_to_tree (expr_type, type_min); |
a75f5017 | 2585 | } |
933a2c39 | 2586 | else if (max_ovf == 1) |
a75f5017 RG |
2587 | { |
2588 | if (needs_overflow_infinity (expr_type) | |
2589 | && supports_overflow_infinity (expr_type)) | |
2590 | max = positive_overflow_infinity (expr_type); | |
2591 | else | |
807e902e | 2592 | max = wide_int_to_tree (expr_type, type_max); |
a75f5017 RG |
2593 | } |
2594 | else | |
807e902e | 2595 | max = wide_int_to_tree (expr_type, wmax); |
a75f5017 | 2596 | } |
e76340be | 2597 | |
a75f5017 RG |
2598 | if (needs_overflow_infinity (expr_type) |
2599 | && supports_overflow_infinity (expr_type)) | |
2600 | { | |
e76340be EB |
2601 | if ((min_op0 && is_negative_overflow_infinity (min_op0)) |
2602 | || (min_op1 | |
2603 | && (minus_p | |
2604 | ? is_positive_overflow_infinity (min_op1) | |
2605 | : is_negative_overflow_infinity (min_op1)))) | |
a75f5017 | 2606 | min = negative_overflow_infinity (expr_type); |
e76340be EB |
2607 | if ((max_op0 && is_positive_overflow_infinity (max_op0)) |
2608 | || (max_op1 | |
2609 | && (minus_p | |
2610 | ? is_negative_overflow_infinity (max_op1) | |
2611 | : is_positive_overflow_infinity (max_op1)))) | |
a75f5017 RG |
2612 | max = positive_overflow_infinity (expr_type); |
2613 | } | |
e76340be EB |
2614 | |
2615 | /* If the result lower bound is constant, we're done; | |
2616 | otherwise, build the symbolic lower bound. */ | |
2617 | if (sym_min_op0 == sym_min_op1) | |
2618 | ; | |
2619 | else if (sym_min_op0) | |
2620 | min = build_symbolic_expr (expr_type, sym_min_op0, | |
2621 | neg_min_op0, min); | |
2622 | else if (sym_min_op1) | |
2623 | min = build_symbolic_expr (expr_type, sym_min_op1, | |
2624 | neg_min_op1 ^ minus_p, min); | |
2625 | ||
2626 | /* Likewise for the upper bound. */ | |
2627 | if (sym_max_op0 == sym_max_op1) | |
2628 | ; | |
2629 | else if (sym_max_op0) | |
2630 | max = build_symbolic_expr (expr_type, sym_max_op0, | |
2631 | neg_max_op0, max); | |
2632 | else if (sym_max_op1) | |
2633 | max = build_symbolic_expr (expr_type, sym_max_op1, | |
2634 | neg_max_op1 ^ minus_p, max); | |
a75f5017 RG |
2635 | } |
2636 | else | |
567fb660 | 2637 | { |
a75f5017 RG |
2638 | /* For other cases, for example if we have a PLUS_EXPR with two |
2639 | VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort | |
2640 | to compute a precise range for such a case. | |
2641 | ??? General even mixed range kind operations can be expressed | |
2642 | by for example transforming ~[3, 5] + [1, 2] to range-only | |
2643 | operations and a union primitive: | |
2644 | [-INF, 2] + [1, 2] U [5, +INF] + [1, 2] | |
2645 | [-INF+1, 4] U [6, +INF(OVF)] | |
2646 | though usually the union is not exactly representable with | |
2647 | a single range or anti-range as the above is | |
2648 | [-INF+1, +INF(OVF)] intersected with ~[5, 5] | |
2649 | but one could use a scheme similar to equivalences for this. */ | |
a1bc7628 RG |
2650 | set_value_range_to_varying (vr); |
2651 | return; | |
567fb660 | 2652 | } |
0bca51f0 | 2653 | } |
a1bc7628 RG |
2654 | else if (code == MIN_EXPR |
2655 | || code == MAX_EXPR) | |
2656 | { | |
83ede847 RB |
2657 | if (vr0.type == VR_RANGE |
2658 | && !symbolic_range_p (&vr0)) | |
2659 | { | |
2660 | type = VR_RANGE; | |
2661 | if (vr1.type == VR_RANGE | |
2662 | && !symbolic_range_p (&vr1)) | |
2663 | { | |
2664 | /* For operations that make the resulting range directly | |
2665 | proportional to the original ranges, apply the operation to | |
2666 | the same end of each range. */ | |
2667 | min = vrp_int_const_binop (code, vr0.min, vr1.min); | |
2668 | max = vrp_int_const_binop (code, vr0.max, vr1.max); | |
2669 | } | |
2670 | else if (code == MIN_EXPR) | |
2671 | { | |
2672 | min = vrp_val_min (expr_type); | |
2673 | max = vr0.max; | |
2674 | } | |
2675 | else if (code == MAX_EXPR) | |
2676 | { | |
2677 | min = vr0.min; | |
2678 | max = vrp_val_max (expr_type); | |
2679 | } | |
2680 | } | |
2681 | else if (vr1.type == VR_RANGE | |
2682 | && !symbolic_range_p (&vr1)) | |
a1bc7628 | 2683 | { |
83ede847 RB |
2684 | type = VR_RANGE; |
2685 | if (code == MIN_EXPR) | |
2686 | { | |
2687 | min = vrp_val_min (expr_type); | |
2688 | max = vr1.max; | |
2689 | } | |
2690 | else if (code == MAX_EXPR) | |
2691 | { | |
2692 | min = vr1.min; | |
2693 | max = vrp_val_max (expr_type); | |
2694 | } | |
a1bc7628 RG |
2695 | } |
2696 | else | |
2697 | { | |
83ede847 RB |
2698 | set_value_range_to_varying (vr); |
2699 | return; | |
a1bc7628 RG |
2700 | } |
2701 | } | |
2702 | else if (code == MULT_EXPR) | |
0bca51f0 | 2703 | { |
4e7c4b73 | 2704 | /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not |
807e902e KZ |
2705 | drop to varying. This test requires 2*prec bits if both |
2706 | operands are signed and 2*prec + 2 bits if either is not. */ | |
2707 | ||
2708 | signop sign = TYPE_SIGN (expr_type); | |
2709 | unsigned int prec = TYPE_PRECISION (expr_type); | |
2710 | ||
4e7c4b73 MG |
2711 | if (range_int_cst_p (&vr0) |
2712 | && range_int_cst_p (&vr1) | |
2713 | && TYPE_OVERFLOW_WRAPS (expr_type)) | |
2714 | { | |
807e902e KZ |
2715 | typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int; |
2716 | typedef generic_wide_int | |
2717 | <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst; | |
2718 | vrp_int sizem1 = wi::mask <vrp_int> (prec, false); | |
2719 | vrp_int size = sizem1 + 1; | |
2720 | ||
2721 | /* Extend the values using the sign of the result to PREC2. | |
2722 | From here on out, everything is just signed math no matter | |
2723 | what the input types were. */ | |
2724 | vrp_int min0 = vrp_int_cst (vr0.min); | |
2725 | vrp_int max0 = vrp_int_cst (vr0.max); | |
2726 | vrp_int min1 = vrp_int_cst (vr1.min); | |
2727 | vrp_int max1 = vrp_int_cst (vr1.max); | |
4e7c4b73 | 2728 | /* Canonicalize the intervals. */ |
807e902e | 2729 | if (sign == UNSIGNED) |
4e7c4b73 | 2730 | { |
807e902e | 2731 | if (wi::ltu_p (size, min0 + max0)) |
4e7c4b73 | 2732 | { |
807e902e | 2733 | min0 -= size; |
27bcd47c | 2734 | max0 -= size; |
4e7c4b73 MG |
2735 | } |
2736 | ||
807e902e | 2737 | if (wi::ltu_p (size, min1 + max1)) |
4e7c4b73 | 2738 | { |
807e902e | 2739 | min1 -= size; |
27bcd47c | 2740 | max1 -= size; |
4e7c4b73 MG |
2741 | } |
2742 | } | |
4e7c4b73 | 2743 | |
807e902e KZ |
2744 | vrp_int prod0 = min0 * min1; |
2745 | vrp_int prod1 = min0 * max1; | |
2746 | vrp_int prod2 = max0 * min1; | |
2747 | vrp_int prod3 = max0 * max1; | |
2748 | ||
2749 | /* Sort the 4 products so that min is in prod0 and max is in | |
2750 | prod3. */ | |
2751 | /* min0min1 > max0max1 */ | |
2752 | if (wi::gts_p (prod0, prod3)) | |
6b4db501 | 2753 | std::swap (prod0, prod3); |
807e902e KZ |
2754 | |
2755 | /* min0max1 > max0min1 */ | |
2756 | if (wi::gts_p (prod1, prod2)) | |
6b4db501 | 2757 | std::swap (prod1, prod2); |
4e7c4b73 | 2758 | |
807e902e | 2759 | if (wi::gts_p (prod0, prod1)) |
6b4db501 | 2760 | std::swap (prod0, prod1); |
807e902e KZ |
2761 | |
2762 | if (wi::gts_p (prod2, prod3)) | |
6b4db501 | 2763 | std::swap (prod2, prod3); |
807e902e KZ |
2764 | |
2765 | /* diff = max - min. */ | |
2766 | prod2 = prod3 - prod0; | |
2767 | if (wi::geu_p (prod2, sizem1)) | |
4e7c4b73 MG |
2768 | { |
2769 | /* the range covers all values. */ | |
2770 | set_value_range_to_varying (vr); | |
2771 | return; | |
2772 | } | |
2773 | ||
2774 | /* The following should handle the wrapping and selecting | |
2775 | VR_ANTI_RANGE for us. */ | |
807e902e KZ |
2776 | min = wide_int_to_tree (expr_type, prod0); |
2777 | max = wide_int_to_tree (expr_type, prod3); | |
4e7c4b73 MG |
2778 | set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL); |
2779 | return; | |
2780 | } | |
2781 | ||
567fb660 KH |
2782 | /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs, |
2783 | drop to VR_VARYING. It would take more effort to compute a | |
2784 | precise range for such a case. For example, if we have | |
2785 | op0 == 65536 and op1 == 65536 with their ranges both being | |
2786 | ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so | |
2787 | we cannot claim that the product is in ~[0,0]. Note that we | |
2788 | are guaranteed to have vr0.type == vr1.type at this | |
2789 | point. */ | |
a1bc7628 | 2790 | if (vr0.type == VR_ANTI_RANGE |
4d320da4 | 2791 | && !TYPE_OVERFLOW_UNDEFINED (expr_type)) |
567fb660 KH |
2792 | { |
2793 | set_value_range_to_varying (vr); | |
2794 | return; | |
2795 | } | |
2796 | ||
a1bc7628 RG |
2797 | extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); |
2798 | return; | |
2799 | } | |
a2872983 RG |
2800 | else if (code == RSHIFT_EXPR |
2801 | || code == LSHIFT_EXPR) | |
a1bc7628 | 2802 | { |
af33044f RH |
2803 | /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1], |
2804 | then drop to VR_VARYING. Outside of this range we get undefined | |
7fa7289d | 2805 | behavior from the shift operation. We cannot even trust |
af33044f RH |
2806 | SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl |
2807 | shifts, and the operation at the tree level may be widened. */ | |
a2872983 RG |
2808 | if (range_int_cst_p (&vr1) |
2809 | && compare_tree_int (vr1.min, 0) >= 0 | |
2810 | && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1) | |
13338552 | 2811 | { |
a2872983 RG |
2812 | if (code == RSHIFT_EXPR) |
2813 | { | |
4c57980f JJ |
2814 | /* Even if vr0 is VARYING or otherwise not usable, we can derive |
2815 | useful ranges just from the shift count. E.g. | |
2816 | x >> 63 for signed 64-bit x is always [-1, 0]. */ | |
2817 | if (vr0.type != VR_RANGE || symbolic_range_p (&vr0)) | |
2818 | { | |
2819 | vr0.type = type = VR_RANGE; | |
2820 | vr0.min = vrp_val_min (expr_type); | |
2821 | vr0.max = vrp_val_max (expr_type); | |
2822 | } | |
a2872983 RG |
2823 | extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); |
2824 | return; | |
2825 | } | |
2826 | /* We can map lshifts by constants to MULT_EXPR handling. */ | |
2827 | else if (code == LSHIFT_EXPR | |
2828 | && range_int_cst_singleton_p (&vr1)) | |
2829 | { | |
2830 | bool saved_flag_wrapv; | |
526ceb68 | 2831 | value_range vr1p = VR_INITIALIZER; |
a2872983 | 2832 | vr1p.type = VR_RANGE; |
807e902e KZ |
2833 | vr1p.min = (wide_int_to_tree |
2834 | (expr_type, | |
2835 | wi::set_bit_in_zero (tree_to_shwi (vr1.min), | |
2836 | TYPE_PRECISION (expr_type)))); | |
a2872983 RG |
2837 | vr1p.max = vr1p.min; |
2838 | /* We have to use a wrapping multiply though as signed overflow | |
2839 | on lshifts is implementation defined in C89. */ | |
2840 | saved_flag_wrapv = flag_wrapv; | |
2841 | flag_wrapv = 1; | |
2842 | extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type, | |
2843 | &vr0, &vr1p); | |
2844 | flag_wrapv = saved_flag_wrapv; | |
2845 | return; | |
2846 | } | |
25722436 TV |
2847 | else if (code == LSHIFT_EXPR |
2848 | && range_int_cst_p (&vr0)) | |
2849 | { | |
b25d9e22 TV |
2850 | int prec = TYPE_PRECISION (expr_type); |
2851 | int overflow_pos = prec; | |
25722436 | 2852 | int bound_shift; |
807e902e | 2853 | wide_int low_bound, high_bound; |
b25d9e22 TV |
2854 | bool uns = TYPE_UNSIGNED (expr_type); |
2855 | bool in_bounds = false; | |
25722436 | 2856 | |
b25d9e22 | 2857 | if (!uns) |
25722436 TV |
2858 | overflow_pos -= 1; |
2859 | ||
807e902e KZ |
2860 | bound_shift = overflow_pos - tree_to_shwi (vr1.max); |
2861 | /* If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can | |
b25d9e22 TV |
2862 | overflow. However, for that to happen, vr1.max needs to be |
2863 | zero, which means vr1 is a singleton range of zero, which | |
2864 | means it should be handled by the previous LSHIFT_EXPR | |
2865 | if-clause. */ | |
807e902e KZ |
2866 | wide_int bound = wi::set_bit_in_zero (bound_shift, prec); |
2867 | wide_int complement = ~(bound - 1); | |
b25d9e22 TV |
2868 | |
2869 | if (uns) | |
2870 | { | |
807e902e KZ |
2871 | low_bound = bound; |
2872 | high_bound = complement; | |
2873 | if (wi::ltu_p (vr0.max, low_bound)) | |
b25d9e22 TV |
2874 | { |
2875 | /* [5, 6] << [1, 2] == [10, 24]. */ | |
2876 | /* We're shifting out only zeroes, the value increases | |
2877 | monotonically. */ | |
2878 | in_bounds = true; | |
2879 | } | |
807e902e | 2880 | else if (wi::ltu_p (high_bound, vr0.min)) |
b25d9e22 TV |
2881 | { |
2882 | /* [0xffffff00, 0xffffffff] << [1, 2] | |
2883 | == [0xfffffc00, 0xfffffffe]. */ | |
2884 | /* We're shifting out only ones, the value decreases | |
2885 | monotonically. */ | |
2886 | in_bounds = true; | |
2887 | } | |
2888 | } | |
2889 | else | |
2890 | { | |
2891 | /* [-1, 1] << [1, 2] == [-4, 4]. */ | |
807e902e | 2892 | low_bound = complement; |
b25d9e22 | 2893 | high_bound = bound; |
807e902e KZ |
2894 | if (wi::lts_p (vr0.max, high_bound) |
2895 | && wi::lts_p (low_bound, vr0.min)) | |
b25d9e22 TV |
2896 | { |
2897 | /* For non-negative numbers, we're shifting out only | |
2898 | zeroes, the value increases monotonically. | |
2899 | For negative numbers, we're shifting out only ones, the | |
2900 | value decreases monotonically. */ | |
2901 | in_bounds = true; | |
2902 | } | |
2903 | } | |
2904 | ||
2905 | if (in_bounds) | |
25722436 | 2906 | { |
25722436 TV |
2907 | extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); |
2908 | return; | |
2909 | } | |
2910 | } | |
8c1f1d42 | 2911 | } |
8c1f1d42 RG |
2912 | set_value_range_to_varying (vr); |
2913 | return; | |
2914 | } | |
a1bc7628 RG |
2915 | else if (code == TRUNC_DIV_EXPR |
2916 | || code == FLOOR_DIV_EXPR | |
2917 | || code == CEIL_DIV_EXPR | |
2918 | || code == EXACT_DIV_EXPR | |
2919 | || code == ROUND_DIV_EXPR) | |
2920 | { | |
2921 | if (vr0.type != VR_RANGE || symbolic_range_p (&vr0)) | |
193a3681 JJ |
2922 | { |
2923 | /* For division, if op1 has VR_RANGE but op0 does not, something | |
2924 | can be deduced just from that range. Say [min, max] / [4, max] | |
2925 | gives [min / 4, max / 4] range. */ | |
2926 | if (vr1.type == VR_RANGE | |
2927 | && !symbolic_range_p (&vr1) | |
e8f808b3 | 2928 | && range_includes_zero_p (vr1.min, vr1.max) == 0) |
193a3681 JJ |
2929 | { |
2930 | vr0.type = type = VR_RANGE; | |
4d320da4 RG |
2931 | vr0.min = vrp_val_min (expr_type); |
2932 | vr0.max = vrp_val_max (expr_type); | |
193a3681 JJ |
2933 | } |
2934 | else | |
2935 | { | |
2936 | set_value_range_to_varying (vr); | |
2937 | return; | |
2938 | } | |
2939 | } | |
2940 | ||
0e1b8b10 ILT |
2941 | /* For divisions, if flag_non_call_exceptions is true, we must |
2942 | not eliminate a division by zero. */ | |
a1bc7628 | 2943 | if (cfun->can_throw_non_call_exceptions |
0e1b8b10 | 2944 | && (vr1.type != VR_RANGE |
e8f808b3 | 2945 | || range_includes_zero_p (vr1.min, vr1.max) != 0)) |
0e1b8b10 ILT |
2946 | { |
2947 | set_value_range_to_varying (vr); | |
2948 | return; | |
2949 | } | |
2950 | ||
193a3681 JJ |
2951 | /* For divisions, if op0 is VR_RANGE, we can deduce a range |
2952 | even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can | |
2953 | include 0. */ | |
a1bc7628 | 2954 | if (vr0.type == VR_RANGE |
193a3681 | 2955 | && (vr1.type != VR_RANGE |
e8f808b3 | 2956 | || range_includes_zero_p (vr1.min, vr1.max) != 0)) |
193a3681 JJ |
2957 | { |
2958 | tree zero = build_int_cst (TREE_TYPE (vr0.min), 0); | |
2959 | int cmp; | |
2960 | ||
193a3681 JJ |
2961 | min = NULL_TREE; |
2962 | max = NULL_TREE; | |
4d320da4 RG |
2963 | if (TYPE_UNSIGNED (expr_type) |
2964 | || value_range_nonnegative_p (&vr1)) | |
193a3681 JJ |
2965 | { |
2966 | /* For unsigned division or when divisor is known | |
2967 | to be non-negative, the range has to cover | |
2968 | all numbers from 0 to max for positive max | |
2969 | and all numbers from min to 0 for negative min. */ | |
2970 | cmp = compare_values (vr0.max, zero); | |
2971 | if (cmp == -1) | |
c44b2a4f KV |
2972 | { |
2973 | /* When vr0.max < 0, vr1.min != 0 and value | |
2974 | ranges for dividend and divisor are available. */ | |
2975 | if (vr1.type == VR_RANGE | |
2976 | && !symbolic_range_p (&vr0) | |
2977 | && !symbolic_range_p (&vr1) | |
41bfbbb6 | 2978 | && compare_values (vr1.min, zero) != 0) |
c44b2a4f KV |
2979 | max = int_const_binop (code, vr0.max, vr1.min); |
2980 | else | |
2981 | max = zero; | |
2982 | } | |
193a3681 JJ |
2983 | else if (cmp == 0 || cmp == 1) |
2984 | max = vr0.max; | |
2985 | else | |
2986 | type = VR_VARYING; | |
2987 | cmp = compare_values (vr0.min, zero); | |
2988 | if (cmp == 1) | |
c44b2a4f KV |
2989 | { |
2990 | /* For unsigned division when value ranges for dividend | |
2991 | and divisor are available. */ | |
2992 | if (vr1.type == VR_RANGE | |
2993 | && !symbolic_range_p (&vr0) | |
2994 | && !symbolic_range_p (&vr1)) | |
2995 | min = int_const_binop (code, vr0.min, vr1.max); | |
2996 | else | |
2997 | min = zero; | |
2998 | } | |
193a3681 JJ |
2999 | else if (cmp == 0 || cmp == -1) |
3000 | min = vr0.min; | |
3001 | else | |
3002 | type = VR_VARYING; | |
3003 | } | |
3004 | else | |
3005 | { | |
3006 | /* Otherwise the range is -max .. max or min .. -min | |
3007 | depending on which bound is bigger in absolute value, | |
3008 | as the division can change the sign. */ | |
3009 | abs_extent_range (vr, vr0.min, vr0.max); | |
3010 | return; | |
3011 | } | |
3012 | if (type == VR_VARYING) | |
3013 | { | |
3014 | set_value_range_to_varying (vr); | |
3015 | return; | |
3016 | } | |
3017 | } | |
78275c8b | 3018 | else if (!symbolic_range_p (&vr0) && !symbolic_range_p (&vr1)) |
12df8a7e | 3019 | { |
a1bc7628 RG |
3020 | extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); |
3021 | return; | |
227858d1 DN |
3022 | } |
3023 | } | |
622d360e | 3024 | else if (code == TRUNC_MOD_EXPR) |
bab4d587 | 3025 | { |
441898b2 | 3026 | if (range_is_null (&vr1)) |
bab4d587 | 3027 | { |
441898b2 | 3028 | set_value_range_to_undefined (vr); |
bab4d587 RG |
3029 | return; |
3030 | } | |
441898b2 MG |
3031 | /* ABS (A % B) < ABS (B) and either |
3032 | 0 <= A % B <= A or A <= A % B <= 0. */ | |
bab4d587 | 3033 | type = VR_RANGE; |
441898b2 MG |
3034 | signop sgn = TYPE_SIGN (expr_type); |
3035 | unsigned int prec = TYPE_PRECISION (expr_type); | |
3036 | wide_int wmin, wmax, tmp; | |
3037 | wide_int zero = wi::zero (prec); | |
3038 | wide_int one = wi::one (prec); | |
3039 | if (vr1.type == VR_RANGE && !symbolic_range_p (&vr1)) | |
3040 | { | |
3041 | wmax = wi::sub (vr1.max, one); | |
3042 | if (sgn == SIGNED) | |
3043 | { | |
3044 | tmp = wi::sub (wi::minus_one (prec), vr1.min); | |
3045 | wmax = wi::smax (wmax, tmp); | |
3046 | } | |
3047 | } | |
3048 | else | |
3049 | { | |
3050 | wmax = wi::max_value (prec, sgn); | |
3051 | /* X % INT_MIN may be INT_MAX. */ | |
3052 | if (sgn == UNSIGNED) | |
3053 | wmax = wmax - one; | |
3054 | } | |
3055 | ||
3056 | if (sgn == UNSIGNED) | |
3057 | wmin = zero; | |
bab4d587 | 3058 | else |
441898b2 MG |
3059 | { |
3060 | wmin = -wmax; | |
3061 | if (vr0.type == VR_RANGE && TREE_CODE (vr0.min) == INTEGER_CST) | |
3062 | { | |
3063 | tmp = vr0.min; | |
3064 | if (wi::gts_p (tmp, zero)) | |
3065 | tmp = zero; | |
3066 | wmin = wi::smax (wmin, tmp); | |
3067 | } | |
3068 | } | |
3069 | ||
3070 | if (vr0.type == VR_RANGE && TREE_CODE (vr0.max) == INTEGER_CST) | |
3071 | { | |
3072 | tmp = vr0.max; | |
3073 | if (sgn == SIGNED && wi::neg_p (tmp)) | |
3074 | tmp = zero; | |
3075 | wmax = wi::min (wmax, tmp, sgn); | |
3076 | } | |
3077 | ||
3078 | min = wide_int_to_tree (expr_type, wmin); | |
3079 | max = wide_int_to_tree (expr_type, wmax); | |
bab4d587 | 3080 | } |
0f36b2da | 3081 | else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR) |
29c8f8c2 | 3082 | { |
85e693aa | 3083 | bool int_cst_range0, int_cst_range1; |
807e902e KZ |
3084 | wide_int may_be_nonzero0, may_be_nonzero1; |
3085 | wide_int must_be_nonzero0, must_be_nonzero1; | |
330af32c | 3086 | |
807e902e KZ |
3087 | int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0, |
3088 | &may_be_nonzero0, | |
85e693aa | 3089 | &must_be_nonzero0); |
807e902e KZ |
3090 | int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1, |
3091 | &may_be_nonzero1, | |
85e693aa | 3092 | &must_be_nonzero1); |
330af32c | 3093 | |
85e693aa | 3094 | type = VR_RANGE; |
8b201bc5 | 3095 | if (code == BIT_AND_EXPR) |
ac285648 | 3096 | { |
807e902e KZ |
3097 | min = wide_int_to_tree (expr_type, |
3098 | must_be_nonzero0 & must_be_nonzero1); | |
3099 | wide_int wmax = may_be_nonzero0 & may_be_nonzero1; | |
4001900f RG |
3100 | /* If both input ranges contain only negative values we can |
3101 | truncate the result range maximum to the minimum of the | |
3102 | input range maxima. */ | |
3103 | if (int_cst_range0 && int_cst_range1 | |
3104 | && tree_int_cst_sgn (vr0.max) < 0 | |
3105 | && tree_int_cst_sgn (vr1.max) < 0) | |
ac285648 | 3106 | { |
807e902e KZ |
3107 | wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type)); |
3108 | wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type)); | |
ac285648 | 3109 | } |
4001900f RG |
3110 | /* If either input range contains only non-negative values |
3111 | we can truncate the result range maximum to the respective | |
3112 | maximum of the input range. */ | |
3113 | if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0) | |
807e902e | 3114 | wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type)); |
85e693aa | 3115 | if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0) |
807e902e KZ |
3116 | wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type)); |
3117 | max = wide_int_to_tree (expr_type, wmax); | |
29c8f8c2 | 3118 | } |
8b201bc5 | 3119 | else if (code == BIT_IOR_EXPR) |
30821654 | 3120 | { |
807e902e KZ |
3121 | max = wide_int_to_tree (expr_type, |
3122 | may_be_nonzero0 | may_be_nonzero1); | |
3123 | wide_int wmin = must_be_nonzero0 | must_be_nonzero1; | |
4001900f RG |
3124 | /* If the input ranges contain only positive values we can |
3125 | truncate the minimum of the result range to the maximum | |
3126 | of the input range minima. */ | |
3127 | if (int_cst_range0 && int_cst_range1 | |
3128 | && tree_int_cst_sgn (vr0.min) >= 0 | |
3129 | && tree_int_cst_sgn (vr1.min) >= 0) | |
8b201bc5 | 3130 | { |
807e902e KZ |
3131 | wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type)); |
3132 | wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type)); | |
8b201bc5 | 3133 | } |
4001900f RG |
3134 | /* If either input range contains only negative values |
3135 | we can truncate the minimum of the result range to the | |
3136 | respective minimum range. */ | |
3137 | if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0) | |
807e902e | 3138 | wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type)); |
4001900f | 3139 | if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0) |
807e902e KZ |
3140 | wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type)); |
3141 | min = wide_int_to_tree (expr_type, wmin); | |
8b201bc5 | 3142 | } |
0f36b2da RG |
3143 | else if (code == BIT_XOR_EXPR) |
3144 | { | |
807e902e KZ |
3145 | wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1) |
3146 | | ~(may_be_nonzero0 | may_be_nonzero1)); | |
3147 | wide_int result_one_bits | |
3148 | = (must_be_nonzero0.and_not (may_be_nonzero1) | |
3149 | | must_be_nonzero1.and_not (may_be_nonzero0)); | |
3150 | max = wide_int_to_tree (expr_type, ~result_zero_bits); | |
3151 | min = wide_int_to_tree (expr_type, result_one_bits); | |
4001900f RG |
3152 | /* If the range has all positive or all negative values the |
3153 | result is better than VARYING. */ | |
3154 | if (tree_int_cst_sgn (min) < 0 | |
3155 | || tree_int_cst_sgn (max) >= 0) | |
3156 | ; | |
0f36b2da | 3157 | else |
0f36b2da RG |
3158 | max = min = NULL_TREE; |
3159 | } | |
30821654 | 3160 | } |
227858d1 DN |
3161 | else |
3162 | gcc_unreachable (); | |
fda05890 | 3163 | |
9983270b | 3164 | /* If either MIN or MAX overflowed, then set the resulting range to |
e76340be | 3165 | VARYING. But we do accept an overflow infinity representation. */ |
12df8a7e | 3166 | if (min == NULL_TREE |
e76340be | 3167 | || (TREE_OVERFLOW_P (min) && !is_overflow_infinity (min)) |
12df8a7e | 3168 | || max == NULL_TREE |
e76340be | 3169 | || (TREE_OVERFLOW_P (max) && !is_overflow_infinity (max))) |
12df8a7e ILT |
3170 | { |
3171 | set_value_range_to_varying (vr); | |
3172 | return; | |
3173 | } | |
3174 | ||
fa633851 ILT |
3175 | /* We punt if: |
3176 | 1) [-INF, +INF] | |
3177 | 2) [-INF, +-INF(OVF)] | |
3178 | 3) [+-INF(OVF), +INF] | |
3179 | 4) [+-INF(OVF), +-INF(OVF)] | |
3180 | We learn nothing when we have INF and INF(OVF) on both sides. | |
3181 | Note that we do accept [-INF, -INF] and [+INF, +INF] without | |
3182 | overflow. */ | |
e1f28918 ILT |
3183 | if ((vrp_val_is_min (min) || is_overflow_infinity (min)) |
3184 | && (vrp_val_is_max (max) || is_overflow_infinity (max))) | |
227858d1 | 3185 | { |
9983270b DN |
3186 | set_value_range_to_varying (vr); |
3187 | return; | |
fda05890 KH |
3188 | } |
3189 | ||
227858d1 DN |
3190 | cmp = compare_values (min, max); |
3191 | if (cmp == -2 || cmp == 1) | |
3192 | { | |
3193 | /* If the new range has its limits swapped around (MIN > MAX), | |
3194 | then the operation caused one of them to wrap around, mark | |
3195 | the new range VARYING. */ | |
3196 | set_value_range_to_varying (vr); | |
3197 | } | |
3198 | else | |
4e2d94a9 | 3199 | set_value_range (vr, type, min, max, NULL); |
fda05890 KH |
3200 | } |
3201 | ||
4d320da4 RG |
3202 | /* Extract range information from a binary expression OP0 CODE OP1 based on |
3203 | the ranges of each of its operands with resulting type EXPR_TYPE. | |
3204 | The resulting range is stored in *VR. */ | |
3205 | ||
3206 | static void | |
526ceb68 | 3207 | extract_range_from_binary_expr (value_range *vr, |
4d320da4 RG |
3208 | enum tree_code code, |
3209 | tree expr_type, tree op0, tree op1) | |
3210 | { | |
526ceb68 TS |
3211 | value_range vr0 = VR_INITIALIZER; |
3212 | value_range vr1 = VR_INITIALIZER; | |
4d320da4 RG |
3213 | |
3214 | /* Get value ranges for each operand. For constant operands, create | |
3215 | a new value range with the operand to simplify processing. */ | |
3216 | if (TREE_CODE (op0) == SSA_NAME) | |
3217 | vr0 = *(get_value_range (op0)); | |
3218 | else if (is_gimple_min_invariant (op0)) | |
3219 | set_value_range_to_value (&vr0, op0, NULL); | |
3220 | else | |
3221 | set_value_range_to_varying (&vr0); | |
3222 | ||
3223 | if (TREE_CODE (op1) == SSA_NAME) | |
3224 | vr1 = *(get_value_range (op1)); | |
3225 | else if (is_gimple_min_invariant (op1)) | |
3226 | set_value_range_to_value (&vr1, op1, NULL); | |
3227 | else | |
3228 | set_value_range_to_varying (&vr1); | |
3229 | ||
3230 | extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1); | |
e76340be EB |
3231 | |
3232 | /* Try harder for PLUS and MINUS if the range of one operand is symbolic | |
3233 | and based on the other operand, for example if it was deduced from a | |
3234 | symbolic comparison. When a bound of the range of the first operand | |
3235 | is invariant, we set the corresponding bound of the new range to INF | |
3236 | in order to avoid recursing on the range of the second operand. */ | |
3237 | if (vr->type == VR_VARYING | |
3238 | && (code == PLUS_EXPR || code == MINUS_EXPR) | |
3239 | && TREE_CODE (op1) == SSA_NAME | |
3240 | && vr0.type == VR_RANGE | |
3241 | && symbolic_range_based_on_p (&vr0, op1)) | |
3242 | { | |
3243 | const bool minus_p = (code == MINUS_EXPR); | |
526ceb68 | 3244 | value_range n_vr1 = VR_INITIALIZER; |
e76340be EB |
3245 | |
3246 | /* Try with VR0 and [-INF, OP1]. */ | |
3247 | if (is_gimple_min_invariant (minus_p ? vr0.max : vr0.min)) | |
3248 | set_value_range (&n_vr1, VR_RANGE, vrp_val_min (expr_type), op1, NULL); | |
3249 | ||
3250 | /* Try with VR0 and [OP1, +INF]. */ | |
3251 | else if (is_gimple_min_invariant (minus_p ? vr0.min : vr0.max)) | |
3252 | set_value_range (&n_vr1, VR_RANGE, op1, vrp_val_max (expr_type), NULL); | |
3253 | ||
3254 | /* Try with VR0 and [OP1, OP1]. */ | |
3255 | else | |
3256 | set_value_range (&n_vr1, VR_RANGE, op1, op1, NULL); | |
3257 | ||
3258 | extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &n_vr1); | |
3259 | } | |
3260 | ||
3261 | if (vr->type == VR_VARYING | |
3262 | && (code == PLUS_EXPR || code == MINUS_EXPR) | |
3263 | && TREE_CODE (op0) == SSA_NAME | |
3264 | && vr1.type == VR_RANGE | |
3265 | && symbolic_range_based_on_p (&vr1, op0)) | |
3266 | { | |
3267 | const bool minus_p = (code == MINUS_EXPR); | |
526ceb68 | 3268 | value_range n_vr0 = VR_INITIALIZER; |
e76340be EB |
3269 | |
3270 | /* Try with [-INF, OP0] and VR1. */ | |
3271 | if (is_gimple_min_invariant (minus_p ? vr1.max : vr1.min)) | |
3272 | set_value_range (&n_vr0, VR_RANGE, vrp_val_min (expr_type), op0, NULL); | |
3273 | ||
3274 | /* Try with [OP0, +INF] and VR1. */ | |
3275 | else if (is_gimple_min_invariant (minus_p ? vr1.min : vr1.max)) | |
3276 | set_value_range (&n_vr0, VR_RANGE, op0, vrp_val_max (expr_type), NULL); | |
3277 | ||
3278 | /* Try with [OP0, OP0] and VR1. */ | |
3279 | else | |
3280 | set_value_range (&n_vr0, VR_RANGE, op0, op0, NULL); | |
3281 | ||
3282 | extract_range_from_binary_expr_1 (vr, code, expr_type, &n_vr0, &vr1); | |
3283 | } | |
4d320da4 | 3284 | } |
fda05890 | 3285 | |
ce6bfa50 RG |
/* Extract range information from a unary operation CODE based on
   the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
   The resulting range is stored in *VR.  Recurses on itself (via
   ranges_from_anti_range) to decompose anti-ranges, and reuses the
   binary-expression machinery for NEGATE_EXPR and BIT_NOT_EXPR.  */

static void
extract_range_from_unary_expr_1 (value_range *vr,
				 enum tree_code code, tree type,
				 value_range *vr0_, tree op0_type)
{
  /* Work on a local copy so we can canonicalize VR0 without touching
     the caller's range.  */
  value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;

  /* VRP only operates on integral and pointer types.  */
  if (!(INTEGRAL_TYPE_P (op0_type)
	|| POINTER_TYPE_P (op0_type))
      || !(INTEGRAL_TYPE_P (type)
	   || POINTER_TYPE_P (type)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* If VR0 is UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }

  /* Handle operations that we express in terms of others.  */
  if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
    {
      /* PAREN_EXPR and OBJ_TYPE_REF are simple copies.  */
      copy_value_range (vr, &vr0);
      return;
    }
  else if (code == NEGATE_EXPR)
    {
      /* -X is simply 0 - X, so re-use existing code that also handles
	 anti-ranges fine.  */
      value_range zero = VR_INITIALIZER;
      set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
      extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
      return;
    }
  else if (code == BIT_NOT_EXPR)
    {
      /* ~X is simply -1 - X, so re-use existing code that also handles
	 anti-ranges fine.  */
      value_range minusone = VR_INITIALIZER;
      set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
      extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
					type, &minusone, &vr0);
      return;
    }

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express op ~[]  as (op []') U (op []'').  The two pieces are
     processed recursively and joined with vrp_meet.  */
  if (vr0.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_unary_expr_1 (vr, code, type, &vrtem0, op0_type);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range vrres = VR_INITIALIZER;
	  extract_range_from_unary_expr_1 (&vrres, code, type,
					   &vrtem1, op0_type);
	  vrp_meet (vr, &vrres);
	}
      return;
    }

  /* Handle type conversions (NOP_EXPR / CONVERT_EXPR).  */
  if (CONVERT_EXPR_CODE_P (code))
    {
      tree inner_type = op0_type;
      tree outer_type = type;

      /* If the expression evaluates to a pointer, we are only interested in
	 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]).  */
      if (POINTER_TYPE_P (type))
	{
	  if (range_is_nonnull (&vr0))
	    set_value_range_to_nonnull (vr, type);
	  else if (range_is_null (&vr0))
	    set_value_range_to_null (vr, type);
	  else
	    set_value_range_to_varying (vr);
	  return;
	}

      /* If VR0 is varying and we increase the type precision, assume
	 a full range for the following transformation.  */
      if (vr0.type == VR_VARYING
	  && INTEGRAL_TYPE_P (inner_type)
	  && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
	{
	  vr0.type = VR_RANGE;
	  vr0.min = TYPE_MIN_VALUE (inner_type);
	  vr0.max = TYPE_MAX_VALUE (inner_type);
	}

      /* If VR0 is a constant range or anti-range and the conversion is
	 not truncating we can convert the min and max values and
	 canonicalize the resulting range.  Otherwise we can do the
	 conversion if the size of the range is less than what the
	 precision of the target type can represent and the range is
	 not an anti-range.  Overflow infinities are only kept when the
	 widening conversion can represent them in the outer type.  */
      if ((vr0.type == VR_RANGE
	   || vr0.type == VR_ANTI_RANGE)
	  && TREE_CODE (vr0.min) == INTEGER_CST
	  && TREE_CODE (vr0.max) == INTEGER_CST
	  && (!is_overflow_infinity (vr0.min)
	      || (vr0.type == VR_RANGE
		  && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
		  && needs_overflow_infinity (outer_type)
		  && supports_overflow_infinity (outer_type)))
	  && (!is_overflow_infinity (vr0.max)
	      || (vr0.type == VR_RANGE
		  && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
		  && needs_overflow_infinity (outer_type)
		  && supports_overflow_infinity (outer_type)))
	  && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
	      || (vr0.type == VR_RANGE
		  && integer_zerop (int_const_binop (RSHIFT_EXPR,
		       int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
		       size_int (TYPE_PRECISION (outer_type)))))))
	{
	  tree new_min, new_max;
	  if (is_overflow_infinity (vr0.min))
	    new_min = negative_overflow_infinity (outer_type);
	  else
	    new_min = force_fit_type (outer_type, wi::to_widest (vr0.min),
				      0, false);
	  if (is_overflow_infinity (vr0.max))
	    new_max = positive_overflow_infinity (outer_type);
	  else
	    new_max = force_fit_type (outer_type, wi::to_widest (vr0.max),
				      0, false);
	  set_and_canonicalize_value_range (vr, vr0.type,
					    new_min, new_max, NULL);
	  return;
	}

      set_value_range_to_varying (vr);
      return;
    }
  else if (code == ABS_EXPR)
    {
      tree min, max;
      int cmp;

      /* Pass through vr0 in the easy cases.  */
      if (TYPE_UNSIGNED (type)
	  || value_range_nonnegative_p (&vr0))
	{
	  copy_value_range (vr, &vr0);
	  return;
	}

      /* For the remaining varying or symbolic ranges we can't do anything
	 useful.  */
      if (vr0.type == VR_VARYING
	  || symbolic_range_p (&vr0))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
         useful range.  */
      if (!TYPE_OVERFLOW_UNDEFINED (type)
	  && ((vr0.type == VR_RANGE
	       && vrp_val_is_min (vr0.min))
	      || (vr0.type == VR_ANTI_RANGE
		  && !vrp_val_is_min (vr0.min))))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* ABS_EXPR may flip the range around, if the original range
	 included negative values.  Compute |vr0.min|: when vr0.min is
	 TYPE_MIN_VALUE its absolute value does not fit the type, so
	 saturate to TYPE_MAX_VALUE or a positive overflow infinity.  */
      if (is_overflow_infinity (vr0.min))
	min = positive_overflow_infinity (type);
      else if (!vrp_val_is_min (vr0.min))
	min = fold_unary_to_constant (code, type, vr0.min);
      else if (!needs_overflow_infinity (type))
	min = TYPE_MAX_VALUE (type);
      else if (supports_overflow_infinity (type))
	min = positive_overflow_infinity (type);
      else
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* Likewise |vr0.max|, with the same saturation at TYPE_MIN_VALUE.  */
      if (is_overflow_infinity (vr0.max))
	max = positive_overflow_infinity (type);
      else if (!vrp_val_is_min (vr0.max))
	max = fold_unary_to_constant (code, type, vr0.max);
      else if (!needs_overflow_infinity (type))
	max = TYPE_MAX_VALUE (type);
      else if (supports_overflow_infinity (type)
	       /* We shouldn't generate [+INF, +INF] as set_value_range
		  doesn't like this and ICEs.  */
	       && !is_positive_overflow_infinity (min))
	max = positive_overflow_infinity (type);
      else
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      cmp = compare_values (min, max);

      /* If a VR_ANTI_RANGEs contains zero, then we have
	 ~[-INF, min(MIN, MAX)].  */
      if (vr0.type == VR_ANTI_RANGE)
	{
	  if (range_includes_zero_p (vr0.min, vr0.max) == 1)
	    {
	      /* Take the lower of the two values.  */
	      if (cmp != 1)
		max = min;

	      /* Create ~[-INF, min (abs(MIN), abs(MAX))]
	         or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
		 flag_wrapv is set and the original anti-range doesn't include
	         TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE.  */
	      if (TYPE_OVERFLOW_WRAPS (type))
		{
		  tree type_min_value = TYPE_MIN_VALUE (type);

		  min = (vr0.min != type_min_value
			 ? int_const_binop (PLUS_EXPR, type_min_value,
					    build_int_cst (TREE_TYPE (type_min_value), 1))
			 : type_min_value);
		}
	      else
		{
		  if (overflow_infinity_range_p (&vr0))
		    min = negative_overflow_infinity (type);
		  else
		    min = TYPE_MIN_VALUE (type);
		}
	    }
	  else
	    {
	      /* All else has failed, so create the range [0, INF], even for
	         flag_wrapv since TYPE_MIN_VALUE is in the original
	         anti-range.  */
	      vr0.type = VR_RANGE;
	      min = build_int_cst (type, 0);
	      if (needs_overflow_infinity (type))
		{
		  if (supports_overflow_infinity (type))
		    max = positive_overflow_infinity (type);
		  else
		    {
		      set_value_range_to_varying (vr);
		      return;
		    }
		}
	      else
		max = TYPE_MAX_VALUE (type);
	    }
	}

      /* If the range contains zero then we know that the minimum value in the
         range will be zero.  */
      else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
	{
	  if (cmp == 1)
	    max = min;
	  min = build_int_cst (type, 0);
	}
      else
	{
	  /* If the range was reversed, swap MIN and MAX.  */
	  if (cmp == 1)
	    std::swap (min, max);
	}

      cmp = compare_values (min, max);
      if (cmp == -2 || cmp == 1)
	{
	  /* If the new range has its limits swapped around (MIN > MAX),
	     then the operation caused one of them to wrap around, mark
	     the new range VARYING.  */
	  set_value_range_to_varying (vr);
	}
      else
	set_value_range (vr, vr0.type, min, max, NULL);
      return;
    }

  /* For unhandled operations fall back to varying.  */
  set_value_range_to_varying (vr);
  return;
}
3585 | ||
3586 | ||
ce6bfa50 RG |
3587 | /* Extract range information from a unary expression CODE OP0 based on |
3588 | the range of its operand with resulting type TYPE. | |
3589 | The resulting range is stored in *VR. */ | |
3590 | ||
3591 | static void | |
526ceb68 | 3592 | extract_range_from_unary_expr (value_range *vr, enum tree_code code, |
ce6bfa50 RG |
3593 | tree type, tree op0) |
3594 | { | |
526ceb68 | 3595 | value_range vr0 = VR_INITIALIZER; |
ce6bfa50 RG |
3596 | |
3597 | /* Get value ranges for the operand. For constant operands, create | |
3598 | a new value range with the operand to simplify processing. */ | |
3599 | if (TREE_CODE (op0) == SSA_NAME) | |
3600 | vr0 = *(get_value_range (op0)); | |
3601 | else if (is_gimple_min_invariant (op0)) | |
3602 | set_value_range_to_value (&vr0, op0, NULL); | |
3603 | else | |
3604 | set_value_range_to_varying (&vr0); | |
3605 | ||
3606 | extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0)); | |
3607 | } | |
3608 | ||
3609 | ||
4e71066d | 3610 | /* Extract range information from a conditional expression STMT based on |
f255541f RC |
3611 | the ranges of each of its operands and the expression code. */ |
3612 | ||
3613 | static void | |
526ceb68 | 3614 | extract_range_from_cond_expr (value_range *vr, gassign *stmt) |
f255541f RC |
3615 | { |
3616 | tree op0, op1; | |
526ceb68 TS |
3617 | value_range vr0 = VR_INITIALIZER; |
3618 | value_range vr1 = VR_INITIALIZER; | |
f255541f RC |
3619 | |
3620 | /* Get value ranges for each operand. For constant operands, create | |
3621 | a new value range with the operand to simplify processing. */ | |
4e71066d | 3622 | op0 = gimple_assign_rhs2 (stmt); |
f255541f RC |
3623 | if (TREE_CODE (op0) == SSA_NAME) |
3624 | vr0 = *(get_value_range (op0)); | |
3625 | else if (is_gimple_min_invariant (op0)) | |
b60b4711 | 3626 | set_value_range_to_value (&vr0, op0, NULL); |
f255541f RC |
3627 | else |
3628 | set_value_range_to_varying (&vr0); | |
3629 | ||
4e71066d | 3630 | op1 = gimple_assign_rhs3 (stmt); |
f255541f RC |
3631 | if (TREE_CODE (op1) == SSA_NAME) |
3632 | vr1 = *(get_value_range (op1)); | |
3633 | else if (is_gimple_min_invariant (op1)) | |
b60b4711 | 3634 | set_value_range_to_value (&vr1, op1, NULL); |
f255541f RC |
3635 | else |
3636 | set_value_range_to_varying (&vr1); | |
3637 | ||
3638 | /* The resulting value range is the union of the operand ranges */ | |
f255541f | 3639 | copy_value_range (vr, &vr0); |
0d5a9e78 | 3640 | vrp_meet (vr, &vr1); |
f255541f RC |
3641 | } |
3642 | ||
3643 | ||
227858d1 DN |
3644 | /* Extract range information from a comparison expression EXPR based |
3645 | on the range of its operand and the expression code. */ | |
3646 | ||
3647 | static void | |
526ceb68 | 3648 | extract_range_from_comparison (value_range *vr, enum tree_code code, |
2d3cd5d5 | 3649 | tree type, tree op0, tree op1) |
227858d1 | 3650 | { |
12df8a7e | 3651 | bool sop = false; |
726a989a | 3652 | tree val; |
b8698a0f | 3653 | |
6b99f156 JH |
3654 | val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop, |
3655 | NULL); | |
12df8a7e ILT |
3656 | |
3657 | /* A disadvantage of using a special infinity as an overflow | |
3658 | representation is that we lose the ability to record overflow | |
3659 | when we don't have an infinity. So we have to ignore a result | |
3660 | which relies on overflow. */ | |
3661 | ||
3662 | if (val && !is_overflow_infinity (val) && !sop) | |
227858d1 DN |
3663 | { |
3664 | /* Since this expression was found on the RHS of an assignment, | |
3665 | its type may be different from _Bool. Convert VAL to EXPR's | |
3666 | type. */ | |
2d3cd5d5 | 3667 | val = fold_convert (type, val); |
b60b4711 ILT |
3668 | if (is_gimple_min_invariant (val)) |
3669 | set_value_range_to_value (vr, val, vr->equiv); | |
3670 | else | |
3671 | set_value_range (vr, VR_RANGE, val, val, vr->equiv); | |
227858d1 DN |
3672 | } |
3673 | else | |
31ab1cc9 | 3674 | /* The result of a comparison is always true or false. */ |
2d3cd5d5 | 3675 | set_value_range_to_truthvalue (vr, type); |
0bca51f0 DN |
3676 | } |
3677 | ||
1304953e JJ |
3678 | /* Helper function for simplify_internal_call_using_ranges and |
3679 | extract_range_basic. Return true if OP0 SUBCODE OP1 for | |
3680 | SUBCODE {PLUS,MINUS,MULT}_EXPR is known to never overflow or | |
3681 | always overflow. Set *OVF to true if it is known to always | |
3682 | overflow. */ | |
3683 | ||
3684 | static bool | |
3685 | check_for_binary_op_overflow (enum tree_code subcode, tree type, | |
3686 | tree op0, tree op1, bool *ovf) | |
3687 | { | |
526ceb68 TS |
3688 | value_range vr0 = VR_INITIALIZER; |
3689 | value_range vr1 = VR_INITIALIZER; | |
1304953e JJ |
3690 | if (TREE_CODE (op0) == SSA_NAME) |
3691 | vr0 = *get_value_range (op0); | |
3692 | else if (TREE_CODE (op0) == INTEGER_CST) | |
3693 | set_value_range_to_value (&vr0, op0, NULL); | |
3694 | else | |
3695 | set_value_range_to_varying (&vr0); | |
3696 | ||
3697 | if (TREE_CODE (op1) == SSA_NAME) | |
3698 | vr1 = *get_value_range (op1); | |
3699 | else if (TREE_CODE (op1) == INTEGER_CST) | |
3700 | set_value_range_to_value (&vr1, op1, NULL); | |
3701 | else | |
3702 | set_value_range_to_varying (&vr1); | |
3703 | ||
3704 | if (!range_int_cst_p (&vr0) | |
3705 | || TREE_OVERFLOW (vr0.min) | |
3706 | || TREE_OVERFLOW (vr0.max)) | |
3707 | { | |
3708 | vr0.min = vrp_val_min (TREE_TYPE (op0)); | |
3709 | vr0.max = vrp_val_max (TREE_TYPE (op0)); | |
3710 | } | |
3711 | if (!range_int_cst_p (&vr1) | |
3712 | || TREE_OVERFLOW (vr1.min) | |
3713 | || TREE_OVERFLOW (vr1.max)) | |
3714 | { | |
3715 | vr1.min = vrp_val_min (TREE_TYPE (op1)); | |
3716 | vr1.max = vrp_val_max (TREE_TYPE (op1)); | |
3717 | } | |
3718 | *ovf = arith_overflowed_p (subcode, type, vr0.min, | |
3719 | subcode == MINUS_EXPR ? vr1.max : vr1.min); | |
3720 | if (arith_overflowed_p (subcode, type, vr0.max, | |
3721 | subcode == MINUS_EXPR ? vr1.min : vr1.max) != *ovf) | |
3722 | return false; | |
3723 | if (subcode == MULT_EXPR) | |
3724 | { | |
3725 | if (arith_overflowed_p (subcode, type, vr0.min, vr1.max) != *ovf | |
3726 | || arith_overflowed_p (subcode, type, vr0.max, vr1.min) != *ovf) | |
3727 | return false; | |
3728 | } | |
3729 | if (*ovf) | |
3730 | { | |
3731 | /* So far we found that there is an overflow on the boundaries. | |
3732 | That doesn't prove that there is an overflow even for all values | |
3733 | in between the boundaries. For that compute widest_int range | |
3734 | of the result and see if it doesn't overlap the range of | |
3735 | type. */ | |
3736 | widest_int wmin, wmax; | |
3737 | widest_int w[4]; | |
3738 | int i; | |
3739 | w[0] = wi::to_widest (vr0.min); | |
3740 | w[1] = wi::to_widest (vr0.max); | |
3741 | w[2] = wi::to_widest (vr1.min); | |
3742 | w[3] = wi::to_widest (vr1.max); | |
3743 | for (i = 0; i < 4; i++) | |
3744 | { | |
3745 | widest_int wt; | |
3746 | switch (subcode) | |
3747 | { | |
3748 | case PLUS_EXPR: | |
3749 | wt = wi::add (w[i & 1], w[2 + (i & 2) / 2]); | |
3750 | break; | |
3751 | case MINUS_EXPR: | |
3752 | wt = wi::sub (w[i & 1], w[2 + (i & 2) / 2]); | |
3753 | break; | |
3754 | case MULT_EXPR: | |
3755 | wt = wi::mul (w[i & 1], w[2 + (i & 2) / 2]); | |
3756 | break; | |
3757 | default: | |
3758 | gcc_unreachable (); | |
3759 | } | |
3760 | if (i == 0) | |
3761 | { | |
3762 | wmin = wt; | |
3763 | wmax = wt; | |
3764 | } | |
3765 | else | |
3766 | { | |
3767 | wmin = wi::smin (wmin, wt); | |
3768 | wmax = wi::smax (wmax, wt); | |
3769 | } | |
3770 | } | |
3771 | /* The result of op0 CODE op1 is known to be in range | |
3772 | [wmin, wmax]. */ | |
3773 | widest_int wtmin = wi::to_widest (vrp_val_min (type)); | |
3774 | widest_int wtmax = wi::to_widest (vrp_val_max (type)); | |
3775 | /* If all values in [wmin, wmax] are smaller than | |
3776 | [wtmin, wtmax] or all are larger than [wtmin, wtmax], | |
3777 | the arithmetic operation will always overflow. */ | |
3778 | if (wi::lts_p (wmax, wtmin) || wi::gts_p (wmin, wtmax)) | |
3779 | return true; | |
3780 | return false; | |
3781 | } | |
3782 | return true; | |
3783 | } | |
3784 | ||
726a989a RB |
3785 | /* Try to derive a nonnegative or nonzero range out of STMT relying |
3786 | primarily on generic routines in fold in conjunction with range data. | |
3787 | Store the result in *VR */ | |
0bca51f0 | 3788 | |
726a989a | 3789 | static void |
526ceb68 | 3790 | extract_range_basic (value_range *vr, gimple *stmt) |
726a989a RB |
3791 | { |
3792 | bool sop = false; | |
3793 | tree type = gimple_expr_type (stmt); | |
3794 | ||
9c0a9e12 | 3795 | if (is_gimple_call (stmt)) |
1f6eac90 | 3796 | { |
9c0a9e12 | 3797 | tree arg; |
1f6eac90 | 3798 | int mini, maxi, zerov = 0, prec; |
9c0a9e12 RS |
3799 | enum tree_code subcode = ERROR_MARK; |
3800 | combined_fn cfn = gimple_call_combined_fn (stmt); | |
1f6eac90 | 3801 | |
9c0a9e12 | 3802 | switch (cfn) |
1f6eac90 | 3803 | { |
9c0a9e12 | 3804 | case CFN_BUILT_IN_CONSTANT_P: |
1f6eac90 JJ |
3805 | /* If the call is __builtin_constant_p and the argument is a |
3806 | function parameter resolve it to false. This avoids bogus | |
3807 | array bound warnings. | |
3808 | ??? We could do this as early as inlining is finished. */ | |
3809 | arg = gimple_call_arg (stmt, 0); | |
3810 | if (TREE_CODE (arg) == SSA_NAME | |
3811 | && SSA_NAME_IS_DEFAULT_DEF (arg) | |
3812 | && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL) | |
3813 | { | |
3814 | set_value_range_to_null (vr, type); | |
3815 | return; | |
3816 | } | |
3817 | break; | |
3818 | /* Both __builtin_ffs* and __builtin_popcount return | |
3819 | [0, prec]. */ | |
9c0a9e12 RS |
3820 | CASE_CFN_FFS: |
3821 | CASE_CFN_POPCOUNT: | |
1f6eac90 JJ |
3822 | arg = gimple_call_arg (stmt, 0); |
3823 | prec = TYPE_PRECISION (TREE_TYPE (arg)); | |
3824 | mini = 0; | |
3825 | maxi = prec; | |
3826 | if (TREE_CODE (arg) == SSA_NAME) | |
3827 | { | |
526ceb68 | 3828 | value_range *vr0 = get_value_range (arg); |
1f6eac90 JJ |
3829 | /* If arg is non-zero, then ffs or popcount |
3830 | are non-zero. */ | |
3831 | if (((vr0->type == VR_RANGE | |
b48e3948 | 3832 | && range_includes_zero_p (vr0->min, vr0->max) == 0) |
1f6eac90 | 3833 | || (vr0->type == VR_ANTI_RANGE |
b48e3948 JJ |
3834 | && range_includes_zero_p (vr0->min, vr0->max) == 1)) |
3835 | && !is_overflow_infinity (vr0->min) | |
3836 | && !is_overflow_infinity (vr0->max)) | |
1f6eac90 JJ |
3837 | mini = 1; |
3838 | /* If some high bits are known to be zero, | |
3839 | we can decrease the maximum. */ | |
3840 | if (vr0->type == VR_RANGE | |
3841 | && TREE_CODE (vr0->max) == INTEGER_CST | |
b48e3948 JJ |
3842 | && !operand_less_p (vr0->min, |
3843 | build_zero_cst (TREE_TYPE (vr0->min))) | |
3f5c390d | 3844 | && !is_overflow_infinity (vr0->max)) |
1f6eac90 JJ |
3845 | maxi = tree_floor_log2 (vr0->max) + 1; |
3846 | } | |
3847 | goto bitop_builtin; | |
3848 | /* __builtin_parity* returns [0, 1]. */ | |
9c0a9e12 | 3849 | CASE_CFN_PARITY: |
1f6eac90 JJ |
3850 | mini = 0; |
3851 | maxi = 1; | |
3852 | goto bitop_builtin; | |
3853 | /* __builtin_c[lt]z* return [0, prec-1], except for | |
3854 | when the argument is 0, but that is undefined behavior. | |
3855 | On many targets where the CLZ RTL or optab value is defined | |
3856 | for 0 the value is prec, so include that in the range | |
3857 | by default. */ | |
9c0a9e12 | 3858 | CASE_CFN_CLZ: |
1f6eac90 JJ |
3859 | arg = gimple_call_arg (stmt, 0); |
3860 | prec = TYPE_PRECISION (TREE_TYPE (arg)); | |
3861 | mini = 0; | |
3862 | maxi = prec; | |
3863 | if (optab_handler (clz_optab, TYPE_MODE (TREE_TYPE (arg))) | |
3864 | != CODE_FOR_nothing | |
3865 | && CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)), | |
3866 | zerov) | |
3867 | /* Handle only the single common value. */ | |
3868 | && zerov != prec) | |
3869 | /* Magic value to give up, unless vr0 proves | |
3870 | arg is non-zero. */ | |
3871 | mini = -2; | |
3872 | if (TREE_CODE (arg) == SSA_NAME) | |
3873 | { | |
526ceb68 | 3874 | value_range *vr0 = get_value_range (arg); |
1f6eac90 JJ |
3875 | /* From clz of VR_RANGE minimum we can compute |
3876 | result maximum. */ | |
3877 | if (vr0->type == VR_RANGE | |
3878 | && TREE_CODE (vr0->min) == INTEGER_CST | |
3f5c390d | 3879 | && !is_overflow_infinity (vr0->min)) |
1f6eac90 JJ |
3880 | { |
3881 | maxi = prec - 1 - tree_floor_log2 (vr0->min); | |
3882 | if (maxi != prec) | |
3883 | mini = 0; | |
3884 | } | |
3885 | else if (vr0->type == VR_ANTI_RANGE | |
3886 | && integer_zerop (vr0->min) | |
3f5c390d | 3887 | && !is_overflow_infinity (vr0->min)) |
1f6eac90 JJ |
3888 | { |
3889 | maxi = prec - 1; | |
3890 | mini = 0; | |
3891 | } | |
3892 | if (mini == -2) | |
3893 | break; | |
3894 | /* From clz of VR_RANGE maximum we can compute | |
3895 | result minimum. */ | |
3896 | if (vr0->type == VR_RANGE | |
3897 | && TREE_CODE (vr0->max) == INTEGER_CST | |
3f5c390d | 3898 | && !is_overflow_infinity (vr0->max)) |
1f6eac90 JJ |
3899 | { |
3900 | mini = prec - 1 - tree_floor_log2 (vr0->max); | |
3901 | if (mini == prec) | |
3902 | break; | |
3903 | } | |
3904 | } | |
3905 | if (mini == -2) | |
3906 | break; | |
3907 | goto bitop_builtin; | |
3908 | /* __builtin_ctz* return [0, prec-1], except for | |
3909 | when the argument is 0, but that is undefined behavior. | |
3910 | If there is a ctz optab for this mode and | |
3911 | CTZ_DEFINED_VALUE_AT_ZERO, include that in the range, | |
3912 | otherwise just assume 0 won't be seen. */ | |
9c0a9e12 | 3913 | CASE_CFN_CTZ: |
1f6eac90 JJ |
3914 | arg = gimple_call_arg (stmt, 0); |
3915 | prec = TYPE_PRECISION (TREE_TYPE (arg)); | |
3916 | mini = 0; | |
3917 | maxi = prec - 1; | |
3918 | if (optab_handler (ctz_optab, TYPE_MODE (TREE_TYPE (arg))) | |
3919 | != CODE_FOR_nothing | |
3920 | && CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)), | |
3921 | zerov)) | |
3922 | { | |
3923 | /* Handle only the two common values. */ | |
3924 | if (zerov == -1) | |
3925 | mini = -1; | |
3926 | else if (zerov == prec) | |
3927 | maxi = prec; | |
3928 | else | |
3929 | /* Magic value to give up, unless vr0 proves | |
3930 | arg is non-zero. */ | |
3931 | mini = -2; | |
3932 | } | |
3933 | if (TREE_CODE (arg) == SSA_NAME) | |
3934 | { | |
526ceb68 | 3935 | value_range *vr0 = get_value_range (arg); |
1f6eac90 JJ |
3936 | /* If arg is non-zero, then use [0, prec - 1]. */ |
3937 | if (((vr0->type == VR_RANGE | |
3938 | && integer_nonzerop (vr0->min)) | |
3939 | || (vr0->type == VR_ANTI_RANGE | |
3940 | && integer_zerop (vr0->min))) | |
3f5c390d | 3941 | && !is_overflow_infinity (vr0->min)) |
1f6eac90 JJ |
3942 | { |
3943 | mini = 0; | |
3944 | maxi = prec - 1; | |
3945 | } | |
3946 | /* If some high bits are known to be zero, | |
3947 | we can decrease the result maximum. */ | |
3948 | if (vr0->type == VR_RANGE | |
3949 | && TREE_CODE (vr0->max) == INTEGER_CST | |
3f5c390d | 3950 | && !is_overflow_infinity (vr0->max)) |
1f6eac90 JJ |
3951 | { |
3952 | maxi = tree_floor_log2 (vr0->max); | |
3953 | /* For vr0 [0, 0] give up. */ | |
3954 | if (maxi == -1) | |
3955 | break; | |
3956 | } | |
3957 | } | |
3958 | if (mini == -2) | |
3959 | break; | |
3960 | goto bitop_builtin; | |
3961 | /* __builtin_clrsb* returns [0, prec-1]. */ | |
9c0a9e12 | 3962 | CASE_CFN_CLRSB: |
1f6eac90 JJ |
3963 | arg = gimple_call_arg (stmt, 0); |
3964 | prec = TYPE_PRECISION (TREE_TYPE (arg)); | |
3965 | mini = 0; | |
3966 | maxi = prec - 1; | |
3967 | goto bitop_builtin; | |
3968 | bitop_builtin: | |
3969 | set_value_range (vr, VR_RANGE, build_int_cst (type, mini), | |
3970 | build_int_cst (type, maxi), NULL); | |
3971 | return; | |
9c0a9e12 | 3972 | case CFN_UBSAN_CHECK_ADD: |
31e071ae MP |
3973 | subcode = PLUS_EXPR; |
3974 | break; | |
9c0a9e12 | 3975 | case CFN_UBSAN_CHECK_SUB: |
31e071ae MP |
3976 | subcode = MINUS_EXPR; |
3977 | break; | |
9c0a9e12 | 3978 | case CFN_UBSAN_CHECK_MUL: |
31e071ae MP |
3979 | subcode = MULT_EXPR; |
3980 | break; | |
9c0a9e12 RS |
3981 | case CFN_GOACC_DIM_SIZE: |
3982 | case CFN_GOACC_DIM_POS: | |
bd751975 NS |
3983 | /* Optimizing these two internal functions helps the loop |
3984 | optimizer eliminate outer comparisons. Size is [1,N] | |
3985 | and pos is [0,N-1]. */ | |
3986 | { | |
9c0a9e12 | 3987 | bool is_pos = cfn == CFN_GOACC_DIM_POS; |
bd751975 NS |
3988 | int axis = get_oacc_ifn_dim_arg (stmt); |
3989 | int size = get_oacc_fn_dim_size (current_function_decl, axis); | |
3990 | ||
3991 | if (!size) | |
3992 | /* If it's dynamic, the backend might know a hardware | |
3993 | limitation. */ | |
3994 | size = targetm.goacc.dim_limit (axis); | |
3995 | ||
3996 | tree type = TREE_TYPE (gimple_call_lhs (stmt)); | |
3997 | set_value_range (vr, VR_RANGE, | |
3998 | build_int_cst (type, is_pos ? 0 : 1), | |
3999 | size ? build_int_cst (type, size - is_pos) | |
4000 | : vrp_val_max (type), NULL); | |
4001 | } | |
4002 | return; | |
31e071ae MP |
4003 | default: |
4004 | break; | |
4005 | } | |
4006 | if (subcode != ERROR_MARK) | |
4007 | { | |
4008 | bool saved_flag_wrapv = flag_wrapv; | |
4009 | /* Pretend the arithmetics is wrapping. If there is | |
4010 | any overflow, we'll complain, but will actually do | |
4011 | wrapping operation. */ | |
4012 | flag_wrapv = 1; | |
4013 | extract_range_from_binary_expr (vr, subcode, type, | |
4014 | gimple_call_arg (stmt, 0), | |
4015 | gimple_call_arg (stmt, 1)); | |
4016 | flag_wrapv = saved_flag_wrapv; | |
4017 | ||
4018 | /* If for both arguments vrp_valueize returned non-NULL, | |
4019 | this should have been already folded and if not, it | |
4020 | wasn't folded because of overflow. Avoid removing the | |
4021 | UBSAN_CHECK_* calls in that case. */ | |
4022 | if (vr->type == VR_RANGE | |
4023 | && (vr->min == vr->max | |
4024 | || operand_equal_p (vr->min, vr->max, 0))) | |
4025 | set_value_range_to_varying (vr); | |
4026 | return; | |
4027 | } | |
4028 | } | |
1304953e JJ |
4029 | /* Handle extraction of the two results (result of arithmetics and |
4030 | a flag whether arithmetics overflowed) from {ADD,SUB,MUL}_OVERFLOW | |
4031 | internal function. */ | |
4032 | else if (is_gimple_assign (stmt) | |
4033 | && (gimple_assign_rhs_code (stmt) == REALPART_EXPR | |
4034 | || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR) | |
4035 | && INTEGRAL_TYPE_P (type)) | |
4036 | { | |
4037 | enum tree_code code = gimple_assign_rhs_code (stmt); | |
4038 | tree op = gimple_assign_rhs1 (stmt); | |
4039 | if (TREE_CODE (op) == code && TREE_CODE (TREE_OPERAND (op, 0)) == SSA_NAME) | |
4040 | { | |
355fe088 | 4041 | gimple *g = SSA_NAME_DEF_STMT (TREE_OPERAND (op, 0)); |
1304953e JJ |
4042 | if (is_gimple_call (g) && gimple_call_internal_p (g)) |
4043 | { | |
4044 | enum tree_code subcode = ERROR_MARK; | |
4045 | switch (gimple_call_internal_fn (g)) | |
4046 | { | |
4047 | case IFN_ADD_OVERFLOW: | |
4048 | subcode = PLUS_EXPR; | |
4049 | break; | |
4050 | case IFN_SUB_OVERFLOW: | |
4051 | subcode = MINUS_EXPR; | |
4052 | break; | |
4053 | case IFN_MUL_OVERFLOW: | |
4054 | subcode = MULT_EXPR; | |
4055 | break; | |
4056 | default: | |
4057 | break; | |
4058 | } | |
4059 | if (subcode != ERROR_MARK) | |
4060 | { | |
4061 | tree op0 = gimple_call_arg (g, 0); | |
4062 | tree op1 = gimple_call_arg (g, 1); | |
4063 | if (code == IMAGPART_EXPR) | |
4064 | { | |
4065 | bool ovf = false; | |
4066 | if (check_for_binary_op_overflow (subcode, type, | |
4067 | op0, op1, &ovf)) | |
4068 | set_value_range_to_value (vr, | |
4069 | build_int_cst (type, ovf), | |
4070 | NULL); | |
4071 | else | |
4072 | set_value_range (vr, VR_RANGE, build_int_cst (type, 0), | |
4073 | build_int_cst (type, 1), NULL); | |
4074 | } | |
4075 | else if (types_compatible_p (type, TREE_TYPE (op0)) | |
4076 | && types_compatible_p (type, TREE_TYPE (op1))) | |
4077 | { | |
4078 | bool saved_flag_wrapv = flag_wrapv; | |
4079 | /* Pretend the arithmetics is wrapping. If there is | |
4080 | any overflow, IMAGPART_EXPR will be set. */ | |
4081 | flag_wrapv = 1; | |
4082 | extract_range_from_binary_expr (vr, subcode, type, | |
4083 | op0, op1); | |
4084 | flag_wrapv = saved_flag_wrapv; | |
4085 | } | |
4086 | else | |
4087 | { | |
526ceb68 TS |
4088 | value_range vr0 = VR_INITIALIZER; |
4089 | value_range vr1 = VR_INITIALIZER; | |
1304953e JJ |
4090 | bool saved_flag_wrapv = flag_wrapv; |
4091 | /* Pretend the arithmetics is wrapping. If there is | |
4092 | any overflow, IMAGPART_EXPR will be set. */ | |
4093 | flag_wrapv = 1; | |
4094 | extract_range_from_unary_expr (&vr0, NOP_EXPR, | |
4095 | type, op0); | |
4096 | extract_range_from_unary_expr (&vr1, NOP_EXPR, | |
4097 | type, op1); | |
4098 | extract_range_from_binary_expr_1 (vr, subcode, type, | |
4099 | &vr0, &vr1); | |
4100 | flag_wrapv = saved_flag_wrapv; | |
4101 | } | |
4102 | return; | |
4103 | } | |
4104 | } | |
4105 | } | |
4106 | } | |
1f6eac90 JJ |
4107 | if (INTEGRAL_TYPE_P (type) |
4108 | && gimple_stmt_nonnegative_warnv_p (stmt, &sop)) | |
726a989a RB |
4109 | set_value_range_to_nonnegative (vr, type, |
4110 | sop || stmt_overflow_infinity (stmt)); | |
4111 | else if (vrp_stmt_computes_nonzero (stmt, &sop) | |
4112 | && !sop) | |
4113 | set_value_range_to_nonnull (vr, type); | |
4114 | else | |
4115 | set_value_range_to_varying (vr); | |
4116 | } | |
4117 | ||
4118 | ||
4119 | /* Try to compute a useful range out of assignment STMT and store it | |
227858d1 | 4120 | in *VR. */ |
0bca51f0 DN |
4121 | |
4122 | static void | |
526ceb68 | 4123 | extract_range_from_assignment (value_range *vr, gassign *stmt) |
0bca51f0 | 4124 | { |
726a989a | 4125 | enum tree_code code = gimple_assign_rhs_code (stmt); |
0bca51f0 DN |
4126 | |
4127 | if (code == ASSERT_EXPR) | |
726a989a | 4128 | extract_range_from_assert (vr, gimple_assign_rhs1 (stmt)); |
0bca51f0 | 4129 | else if (code == SSA_NAME) |
726a989a | 4130 | extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt)); |
aebf4828 | 4131 | else if (TREE_CODE_CLASS (code) == tcc_binary) |
726a989a RB |
4132 | extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt), |
4133 | gimple_expr_type (stmt), | |
4134 | gimple_assign_rhs1 (stmt), | |
4135 | gimple_assign_rhs2 (stmt)); | |
0bca51f0 | 4136 | else if (TREE_CODE_CLASS (code) == tcc_unary) |
726a989a RB |
4137 | extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt), |
4138 | gimple_expr_type (stmt), | |
4139 | gimple_assign_rhs1 (stmt)); | |
f255541f | 4140 | else if (code == COND_EXPR) |
4e71066d | 4141 | extract_range_from_cond_expr (vr, stmt); |
227858d1 | 4142 | else if (TREE_CODE_CLASS (code) == tcc_comparison) |
726a989a RB |
4143 | extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt), |
4144 | gimple_expr_type (stmt), | |
4145 | gimple_assign_rhs1 (stmt), | |
4146 | gimple_assign_rhs2 (stmt)); | |
4147 | else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS | |
4148 | && is_gimple_min_invariant (gimple_assign_rhs1 (stmt))) | |
4149 | set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL); | |
0bca51f0 | 4150 | else |
b565d777 | 4151 | set_value_range_to_varying (vr); |
b16caf72 | 4152 | |
b16caf72 | 4153 | if (vr->type == VR_VARYING) |
726a989a | 4154 | extract_range_basic (vr, stmt); |
0bca51f0 DN |
4155 | } |
4156 | ||
1e8552eb | 4157 | /* Given a range VR, a LOOP and a variable VAR, determine whether it |
0bca51f0 DN |
4158 | would be profitable to adjust VR using scalar evolution information |
4159 | for VAR. If so, update VR with the new limits. */ | |
4160 | ||
4161 | static void | |
526ceb68 | 4162 | adjust_range_with_scev (value_range *vr, struct loop *loop, |
355fe088 | 4163 | gimple *stmt, tree var) |
0bca51f0 | 4164 | { |
1936a7d4 | 4165 | tree init, step, chrec, tmin, tmax, min, max, type, tem; |
d7f5de76 | 4166 | enum ev_direction dir; |
0bca51f0 DN |
4167 | |
4168 | /* TODO. Don't adjust anti-ranges. An anti-range may provide | |
4169 | better opportunities than a regular range, but I'm not sure. */ | |
4170 | if (vr->type == VR_ANTI_RANGE) | |
4171 | return; | |
4172 | ||
d7770457 | 4173 | chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var)); |
6f1c9cd0 SP |
4174 | |
4175 | /* Like in PR19590, scev can return a constant function. */ | |
4176 | if (is_gimple_min_invariant (chrec)) | |
4177 | { | |
cdc64612 | 4178 | set_value_range_to_value (vr, chrec, vr->equiv); |
6f1c9cd0 SP |
4179 | return; |
4180 | } | |
4181 | ||
0bca51f0 DN |
4182 | if (TREE_CODE (chrec) != POLYNOMIAL_CHREC) |
4183 | return; | |
4184 | ||
d7770457 | 4185 | init = initial_condition_in_loop_num (chrec, loop->num); |
1936a7d4 RG |
4186 | tem = op_with_constant_singleton_value_range (init); |
4187 | if (tem) | |
4188 | init = tem; | |
d7770457 | 4189 | step = evolution_part_in_loop_num (chrec, loop->num); |
1936a7d4 RG |
4190 | tem = op_with_constant_singleton_value_range (step); |
4191 | if (tem) | |
4192 | step = tem; | |
0bca51f0 DN |
4193 | |
4194 | /* If STEP is symbolic, we can't know whether INIT will be the | |
04dce5a4 ZD |
4195 | minimum or maximum value in the range. Also, unless INIT is |
4196 | a simple expression, compare_values and possibly other functions | |
4197 | in tree-vrp won't be able to handle it. */ | |
d7770457 | 4198 | if (step == NULL_TREE |
04dce5a4 ZD |
4199 | || !is_gimple_min_invariant (step) |
4200 | || !valid_value_p (init)) | |
0bca51f0 DN |
4201 | return; |
4202 | ||
d7f5de76 ZD |
4203 | dir = scev_direction (chrec); |
4204 | if (/* Do not adjust ranges if we do not know whether the iv increases | |
4205 | or decreases, ... */ | |
4206 | dir == EV_DIR_UNKNOWN | |
4207 | /* ... or if it may wrap. */ | |
42fd6772 | 4208 | || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec), |
d7f5de76 | 4209 | true)) |
227858d1 DN |
4210 | return; |
4211 | ||
12df8a7e ILT |
4212 | /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of |
4213 | negative_overflow_infinity and positive_overflow_infinity, | |
4214 | because we have concluded that the loop probably does not | |
4215 | wrap. */ | |
4216 | ||
20527215 ZD |
4217 | type = TREE_TYPE (var); |
4218 | if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type)) | |
4219 | tmin = lower_bound_in_type (type, type); | |
4220 | else | |
4221 | tmin = TYPE_MIN_VALUE (type); | |
4222 | if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type)) | |
4223 | tmax = upper_bound_in_type (type, type); | |
4224 | else | |
4225 | tmax = TYPE_MAX_VALUE (type); | |
4226 | ||
e3488283 | 4227 | /* Try to use estimated number of iterations for the loop to constrain the |
b4a9343c | 4228 | final value in the evolution. */ |
e3488283 | 4229 | if (TREE_CODE (step) == INTEGER_CST |
e3488283 RG |
4230 | && is_gimple_val (init) |
4231 | && (TREE_CODE (init) != SSA_NAME | |
4232 | || get_value_range (init)->type == VR_RANGE)) | |
4233 | { | |
807e902e | 4234 | widest_int nit; |
b4a9343c | 4235 | |
7c98ec60 RG |
4236 | /* We are only entering here for loop header PHI nodes, so using |
4237 | the number of latch executions is the correct thing to use. */ | |
4238 | if (max_loop_iterations (loop, &nit)) | |
b4a9343c | 4239 | { |
526ceb68 | 4240 | value_range maxvr = VR_INITIALIZER; |
807e902e KZ |
4241 | signop sgn = TYPE_SIGN (TREE_TYPE (step)); |
4242 | bool overflow; | |
b4a9343c | 4243 | |
807e902e KZ |
4244 | widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn, |
4245 | &overflow); | |
b4a9343c ZD |
4246 | /* If the multiplication overflowed we can't do a meaningful |
4247 | adjustment. Likewise if the result doesn't fit in the type | |
4248 | of the induction variable. For a signed type we have to | |
4249 | check whether the result has the expected signedness which | |
4250 | is that of the step as number of iterations is unsigned. */ | |
4251 | if (!overflow | |
807e902e KZ |
4252 | && wi::fits_to_tree_p (wtmp, TREE_TYPE (init)) |
4253 | && (sgn == UNSIGNED | |
4254 | || wi::gts_p (wtmp, 0) == wi::gts_p (step, 0))) | |
e3488283 | 4255 | { |
807e902e | 4256 | tem = wide_int_to_tree (TREE_TYPE (init), wtmp); |
b4a9343c ZD |
4257 | extract_range_from_binary_expr (&maxvr, PLUS_EXPR, |
4258 | TREE_TYPE (init), init, tem); | |
4259 | /* Likewise if the addition did. */ | |
4260 | if (maxvr.type == VR_RANGE) | |
4261 | { | |
c446cf07 BC |
4262 | value_range initvr = VR_INITIALIZER; |
4263 | ||
4264 | if (TREE_CODE (init) == SSA_NAME) | |
4265 | initvr = *(get_value_range (init)); | |
4266 | else if (is_gimple_min_invariant (init)) | |
4267 | set_value_range_to_value (&initvr, init, NULL); | |
4268 | else | |
4269 | return; | |
4270 | ||
4271 | /* Check if init + nit * step overflows. Though we checked | |
4272 | scev {init, step}_loop doesn't wrap, it is not enough | |
4273 | because the loop may exit immediately. Overflow could | |
4274 | happen in the plus expression in this case. */ | |
4275 | if ((dir == EV_DIR_DECREASES | |
4276 | && (is_negative_overflow_infinity (maxvr.min) | |
4277 | || compare_values (maxvr.min, initvr.min) != -1)) | |
4278 | || (dir == EV_DIR_GROWS | |
4279 | && (is_positive_overflow_infinity (maxvr.max) | |
4280 | || compare_values (maxvr.max, initvr.max) != 1))) | |
4281 | return; | |
4282 | ||
b4a9343c ZD |
4283 | tmin = maxvr.min; |
4284 | tmax = maxvr.max; | |
4285 | } | |
e3488283 RG |
4286 | } |
4287 | } | |
4288 | } | |
4289 | ||
20527215 | 4290 | if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED) |
0bca51f0 | 4291 | { |
20527215 ZD |
4292 | min = tmin; |
4293 | max = tmax; | |
4294 | ||
0bca51f0 DN |
4295 | /* For VARYING or UNDEFINED ranges, just about anything we get |
4296 | from scalar evolutions should be better. */ | |
4f67dfcf | 4297 | |
d7f5de76 | 4298 | if (dir == EV_DIR_DECREASES) |
4f67dfcf | 4299 | max = init; |
0bca51f0 | 4300 | else |
4f67dfcf | 4301 | min = init; |
0bca51f0 DN |
4302 | } |
4303 | else if (vr->type == VR_RANGE) | |
4304 | { | |
20527215 ZD |
4305 | min = vr->min; |
4306 | max = vr->max; | |
d5448566 | 4307 | |
d7f5de76 | 4308 | if (dir == EV_DIR_DECREASES) |
0bca51f0 | 4309 | { |
d5448566 KH |
4310 | /* INIT is the maximum value. If INIT is lower than VR->MAX |
4311 | but no smaller than VR->MIN, set VR->MAX to INIT. */ | |
4312 | if (compare_values (init, max) == -1) | |
e3488283 | 4313 | max = init; |
9a46cc16 ILT |
4314 | |
4315 | /* According to the loop information, the variable does not | |
4316 | overflow. If we think it does, probably because of an | |
4317 | overflow due to arithmetic on a different INF value, | |
4318 | reset now. */ | |
e3488283 RG |
4319 | if (is_negative_overflow_infinity (min) |
4320 | || compare_values (min, tmin) == -1) | |
9a46cc16 | 4321 | min = tmin; |
e3488283 | 4322 | |
0bca51f0 DN |
4323 | } |
4324 | else | |
4325 | { | |
4326 | /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */ | |
d5448566 | 4327 | if (compare_values (init, min) == 1) |
e3488283 | 4328 | min = init; |
9a46cc16 | 4329 | |
e3488283 RG |
4330 | if (is_positive_overflow_infinity (max) |
4331 | || compare_values (tmax, max) == -1) | |
9a46cc16 | 4332 | max = tmax; |
0bca51f0 | 4333 | } |
9e9f6bf0 RB |
4334 | } |
4335 | else | |
4336 | return; | |
d5448566 | 4337 | |
9e9f6bf0 RB |
4338 | /* If we just created an invalid range with the minimum |
4339 | greater than the maximum, we fail conservatively. | |
4340 | This should happen only in unreachable | |
4341 | parts of code, or for invalid programs. */ | |
4342 | if (compare_values (min, max) == 1 | |
4343 | || (is_negative_overflow_infinity (min) | |
4344 | && is_positive_overflow_infinity (max))) | |
4345 | return; | |
e3488283 | 4346 | |
f7b492ea JW |
4347 | /* Even for valid range info, sometimes overflow flag will leak in. |
4348 | As GIMPLE IL should have no constants with TREE_OVERFLOW set, we | |
4349 | drop them except for +-overflow_infinity which still need special | |
4350 | handling in vrp pass. */ | |
4351 | if (TREE_OVERFLOW_P (min) | |
4352 | && ! is_negative_overflow_infinity (min)) | |
4353 | min = drop_tree_overflow (min); | |
4354 | if (TREE_OVERFLOW_P (max) | |
4355 | && ! is_positive_overflow_infinity (max)) | |
4356 | max = drop_tree_overflow (max); | |
4357 | ||
9e9f6bf0 | 4358 | set_value_range (vr, VR_RANGE, min, max, vr->equiv); |
0bca51f0 DN |
4359 | } |
4360 | ||
4361 | ||
4362 | /* Given two numeric value ranges VR0, VR1 and a comparison code COMP: | |
b8698a0f | 4363 | |
227858d1 DN |
4364 | - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for |
4365 | all the values in the ranges. | |
0bca51f0 DN |
4366 | |
4367 | - Return BOOLEAN_FALSE_NODE if the comparison always returns false. | |
4368 | ||
227858d1 | 4369 | - Return NULL_TREE if it is not always possible to determine the |
12df8a7e ILT |
4370 | value of the comparison. |
4371 | ||
4372 | Also set *STRICT_OVERFLOW_P to indicate whether a range with an | |
4373 | overflow infinity was used in the test. */ | |
227858d1 | 4374 | |
0bca51f0 DN |
4375 | |
4376 | static tree | |
526ceb68 | 4377 | compare_ranges (enum tree_code comp, value_range *vr0, value_range *vr1, |
12df8a7e | 4378 | bool *strict_overflow_p) |
0bca51f0 DN |
4379 | { |
4380 | /* VARYING or UNDEFINED ranges cannot be compared. */ | |
4381 | if (vr0->type == VR_VARYING | |
4382 | || vr0->type == VR_UNDEFINED | |
4383 | || vr1->type == VR_VARYING | |
4384 | || vr1->type == VR_UNDEFINED) | |
4385 | return NULL_TREE; | |
4386 | ||
4387 | /* Anti-ranges need to be handled separately. */ | |
4388 | if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE) | |
4389 | { | |
4390 | /* If both are anti-ranges, then we cannot compute any | |
4391 | comparison. */ | |
4392 | if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE) | |
4393 | return NULL_TREE; | |
4394 | ||
4395 | /* These comparisons are never statically computable. */ | |
4396 | if (comp == GT_EXPR | |
4397 | || comp == GE_EXPR | |
4398 | || comp == LT_EXPR | |
4399 | || comp == LE_EXPR) | |
4400 | return NULL_TREE; | |
4401 | ||
4402 | /* Equality can be computed only between a range and an | |
4403 | anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */ | |
4404 | if (vr0->type == VR_RANGE) | |
4405 | { | |
4406 | /* To simplify processing, make VR0 the anti-range. */ | |
526ceb68 | 4407 | value_range *tmp = vr0; |
0bca51f0 DN |
4408 | vr0 = vr1; |
4409 | vr1 = tmp; | |
4410 | } | |
4411 | ||
4412 | gcc_assert (comp == NE_EXPR || comp == EQ_EXPR); | |
4413 | ||
12df8a7e ILT |
4414 | if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0 |
4415 | && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0) | |
0bca51f0 DN |
4416 | return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node; |
4417 | ||
4418 | return NULL_TREE; | |
4419 | } | |
4420 | ||
0c948c27 ILT |
4421 | if (!usable_range_p (vr0, strict_overflow_p) |
4422 | || !usable_range_p (vr1, strict_overflow_p)) | |
4423 | return NULL_TREE; | |
4424 | ||
0bca51f0 DN |
4425 | /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the |
4426 | operands around and change the comparison code. */ | |
4427 | if (comp == GT_EXPR || comp == GE_EXPR) | |
4428 | { | |
0bca51f0 | 4429 | comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR; |
fab27f52 | 4430 | std::swap (vr0, vr1); |
0bca51f0 DN |
4431 | } |
4432 | ||
4433 | if (comp == EQ_EXPR) | |
4434 | { | |
4435 | /* Equality may only be computed if both ranges represent | |
4436 | exactly one value. */ | |
12df8a7e ILT |
4437 | if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0 |
4438 | && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0) | |
0bca51f0 | 4439 | { |
12df8a7e ILT |
4440 | int cmp_min = compare_values_warnv (vr0->min, vr1->min, |
4441 | strict_overflow_p); | |
4442 | int cmp_max = compare_values_warnv (vr0->max, vr1->max, | |
4443 | strict_overflow_p); | |
0bca51f0 DN |
4444 | if (cmp_min == 0 && cmp_max == 0) |
4445 | return boolean_true_node; | |
4446 | else if (cmp_min != -2 && cmp_max != -2) | |
4447 | return boolean_false_node; | |
4448 | } | |
7ab1122a | 4449 | /* If [V0_MIN, V1_MAX] < [V1_MIN, V1_MAX] then V0 != V1. */ |
12df8a7e ILT |
4450 | else if (compare_values_warnv (vr0->min, vr1->max, |
4451 | strict_overflow_p) == 1 | |
4452 | || compare_values_warnv (vr1->min, vr0->max, | |
4453 | strict_overflow_p) == 1) | |
7ab1122a | 4454 | return boolean_false_node; |
0bca51f0 DN |
4455 | |
4456 | return NULL_TREE; | |
4457 | } | |
4458 | else if (comp == NE_EXPR) | |
4459 | { | |
4460 | int cmp1, cmp2; | |
4461 | ||
4462 | /* If VR0 is completely to the left or completely to the right | |
4463 | of VR1, they are always different. Notice that we need to | |
4464 | make sure that both comparisons yield similar results to | |
4465 | avoid comparing values that cannot be compared at | |
4466 | compile-time. */ | |
12df8a7e ILT |
4467 | cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p); |
4468 | cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p); | |
0bca51f0 DN |
4469 | if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1)) |
4470 | return boolean_true_node; | |
4471 | ||
4472 | /* If VR0 and VR1 represent a single value and are identical, | |
4473 | return false. */ | |
12df8a7e ILT |
4474 | else if (compare_values_warnv (vr0->min, vr0->max, |
4475 | strict_overflow_p) == 0 | |
4476 | && compare_values_warnv (vr1->min, vr1->max, | |
4477 | strict_overflow_p) == 0 | |
4478 | && compare_values_warnv (vr0->min, vr1->min, | |
4479 | strict_overflow_p) == 0 | |
4480 | && compare_values_warnv (vr0->max, vr1->max, | |
4481 | strict_overflow_p) == 0) | |
0bca51f0 DN |
4482 | return boolean_false_node; |
4483 | ||
4484 | /* Otherwise, they may or may not be different. */ | |
4485 | else | |
4486 | return NULL_TREE; | |
4487 | } | |
4488 | else if (comp == LT_EXPR || comp == LE_EXPR) | |
4489 | { | |
4490 | int tst; | |
4491 | ||
4492 | /* If VR0 is to the left of VR1, return true. */ | |
12df8a7e | 4493 | tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p); |
0bca51f0 DN |
4494 | if ((comp == LT_EXPR && tst == -1) |
4495 | || (comp == LE_EXPR && (tst == -1 || tst == 0))) | |
12df8a7e ILT |
4496 | { |
4497 | if (overflow_infinity_range_p (vr0) | |
4498 | || overflow_infinity_range_p (vr1)) | |
4499 | *strict_overflow_p = true; | |
4500 | return boolean_true_node; | |
4501 | } | |
0bca51f0 DN |
4502 | |
4503 | /* If VR0 is to the right of VR1, return false. */ | |
12df8a7e | 4504 | tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p); |
0bca51f0 DN |
4505 | if ((comp == LT_EXPR && (tst == 0 || tst == 1)) |
4506 | || (comp == LE_EXPR && tst == 1)) | |
12df8a7e ILT |
4507 | { |
4508 | if (overflow_infinity_range_p (vr0) | |
4509 | || overflow_infinity_range_p (vr1)) | |
4510 | *strict_overflow_p = true; | |
4511 | return boolean_false_node; | |
4512 | } | |
0bca51f0 DN |
4513 | |
4514 | /* Otherwise, we don't know. */ | |
4515 | return NULL_TREE; | |
4516 | } | |
b8698a0f | 4517 | |
0bca51f0 DN |
4518 | gcc_unreachable (); |
4519 | } | |
4520 | ||
4521 | ||
4522 | /* Given a value range VR, a value VAL and a comparison code COMP, return | |
227858d1 | 4523 | BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the |
0bca51f0 DN |
4524 | values in VR. Return BOOLEAN_FALSE_NODE if the comparison |
4525 | always returns false. Return NULL_TREE if it is not always | |
12df8a7e ILT |
4526 | possible to determine the value of the comparison. Also set |
4527 | *STRICT_OVERFLOW_P to indicate whether a range with an overflow | |
4528 | infinity was used in the test. */ | |
0bca51f0 DN |
4529 | |
4530 | static tree | |
526ceb68 | 4531 | compare_range_with_value (enum tree_code comp, value_range *vr, tree val, |
12df8a7e | 4532 | bool *strict_overflow_p) |
0bca51f0 DN |
4533 | { |
4534 | if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED) | |
4535 | return NULL_TREE; | |
4536 | ||
4537 | /* Anti-ranges need to be handled separately. */ | |
4538 | if (vr->type == VR_ANTI_RANGE) | |
4539 | { | |
4540 | /* For anti-ranges, the only predicates that we can compute at | |
4541 | compile time are equality and inequality. */ | |
4542 | if (comp == GT_EXPR | |
4543 | || comp == GE_EXPR | |
4544 | || comp == LT_EXPR | |
4545 | || comp == LE_EXPR) | |
4546 | return NULL_TREE; | |
4547 | ||
d2f3ffba | 4548 | /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */ |
e8f808b3 | 4549 | if (value_inside_range (val, vr->min, vr->max) == 1) |
0bca51f0 DN |
4550 | return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node; |
4551 | ||
4552 | return NULL_TREE; | |
4553 | } | |
4554 | ||
0c948c27 ILT |
4555 | if (!usable_range_p (vr, strict_overflow_p)) |
4556 | return NULL_TREE; | |
4557 | ||
0bca51f0 DN |
4558 | if (comp == EQ_EXPR) |
4559 | { | |
4560 | /* EQ_EXPR may only be computed if VR represents exactly | |
4561 | one value. */ | |
12df8a7e | 4562 | if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0) |
0bca51f0 | 4563 | { |
12df8a7e | 4564 | int cmp = compare_values_warnv (vr->min, val, strict_overflow_p); |
0bca51f0 DN |
4565 | if (cmp == 0) |
4566 | return boolean_true_node; | |
4567 | else if (cmp == -1 || cmp == 1 || cmp == 2) | |
4568 | return boolean_false_node; | |
4569 | } | |
12df8a7e ILT |
4570 | else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1 |
4571 | || compare_values_warnv (vr->max, val, strict_overflow_p) == -1) | |
5de2df7b | 4572 | return boolean_false_node; |
0bca51f0 DN |
4573 | |
4574 | return NULL_TREE; | |
4575 | } | |
4576 | else if (comp == NE_EXPR) | |
4577 | { | |
4578 | /* If VAL is not inside VR, then they are always different. */ | |
12df8a7e ILT |
4579 | if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1 |
4580 | || compare_values_warnv (vr->min, val, strict_overflow_p) == 1) | |
0bca51f0 DN |
4581 | return boolean_true_node; |
4582 | ||
4583 | /* If VR represents exactly one value equal to VAL, then return | |
4584 | false. */ | |
12df8a7e ILT |
4585 | if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0 |
4586 | && compare_values_warnv (vr->min, val, strict_overflow_p) == 0) | |
0bca51f0 DN |
4587 | return boolean_false_node; |
4588 | ||
4589 | /* Otherwise, they may or may not be different. */ | |
4590 | return NULL_TREE; | |
4591 | } | |
4592 | else if (comp == LT_EXPR || comp == LE_EXPR) | |
4593 | { | |
4594 | int tst; | |
4595 | ||
4596 | /* If VR is to the left of VAL, return true. */ | |
12df8a7e | 4597 | tst = compare_values_warnv (vr->max, val, strict_overflow_p); |
0bca51f0 DN |
4598 | if ((comp == LT_EXPR && tst == -1) |
4599 | || (comp == LE_EXPR && (tst == -1 || tst == 0))) | |
12df8a7e ILT |
4600 | { |
4601 | if (overflow_infinity_range_p (vr)) | |
4602 | *strict_overflow_p = true; | |
4603 | return boolean_true_node; | |
4604 | } | |
0bca51f0 DN |
4605 | |
4606 | /* If VR is to the right of VAL, return false. */ | |
12df8a7e | 4607 | tst = compare_values_warnv (vr->min, val, strict_overflow_p); |
0bca51f0 DN |
4608 | if ((comp == LT_EXPR && (tst == 0 || tst == 1)) |
4609 | || (comp == LE_EXPR && tst == 1)) | |
12df8a7e ILT |
4610 | { |
4611 | if (overflow_infinity_range_p (vr)) | |
4612 | *strict_overflow_p = true; | |
4613 | return boolean_false_node; | |
4614 | } | |
0bca51f0 DN |
4615 | |
4616 | /* Otherwise, we don't know. */ | |
4617 | return NULL_TREE; | |
4618 | } | |
4619 | else if (comp == GT_EXPR || comp == GE_EXPR) | |
4620 | { | |
4621 | int tst; | |
4622 | ||
4623 | /* If VR is to the right of VAL, return true. */ | |
12df8a7e | 4624 | tst = compare_values_warnv (vr->min, val, strict_overflow_p); |
0bca51f0 DN |
4625 | if ((comp == GT_EXPR && tst == 1) |
4626 | || (comp == GE_EXPR && (tst == 0 || tst == 1))) | |
12df8a7e ILT |
4627 | { |
4628 | if (overflow_infinity_range_p (vr)) | |
4629 | *strict_overflow_p = true; | |
4630 | return boolean_true_node; | |
4631 | } | |
0bca51f0 DN |
4632 | |
4633 | /* If VR is to the left of VAL, return false. */ | |
12df8a7e | 4634 | tst = compare_values_warnv (vr->max, val, strict_overflow_p); |
0bca51f0 DN |
4635 | if ((comp == GT_EXPR && (tst == -1 || tst == 0)) |
4636 | || (comp == GE_EXPR && tst == -1)) | |
12df8a7e ILT |
4637 | { |
4638 | if (overflow_infinity_range_p (vr)) | |
4639 | *strict_overflow_p = true; | |
4640 | return boolean_false_node; | |
4641 | } | |
0bca51f0 DN |
4642 | |
4643 | /* Otherwise, we don't know. */ | |
4644 | return NULL_TREE; | |
4645 | } | |
4646 | ||
4647 | gcc_unreachable (); | |
4648 | } | |
4649 | ||
4650 | ||
4651 | /* Debugging dumps. */ | |
4652 | ||
526ceb68 TS |
4653 | void dump_value_range (FILE *, value_range *); |
4654 | void debug_value_range (value_range *); | |
227858d1 DN |
4655 | void dump_all_value_ranges (FILE *); |
4656 | void debug_all_value_ranges (void); | |
4657 | void dump_vr_equiv (FILE *, bitmap); | |
4658 | void debug_vr_equiv (bitmap); | |
4659 | ||
4660 | ||
4661 | /* Dump value range VR to FILE. */ | |
4662 | ||
0bca51f0 | 4663 | void |
526ceb68 | 4664 | dump_value_range (FILE *file, value_range *vr) |
0bca51f0 DN |
4665 | { |
4666 | if (vr == NULL) | |
4667 | fprintf (file, "[]"); | |
4668 | else if (vr->type == VR_UNDEFINED) | |
4669 | fprintf (file, "UNDEFINED"); | |
4670 | else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE) | |
4671 | { | |
227858d1 DN |
4672 | tree type = TREE_TYPE (vr->min); |
4673 | ||
0bca51f0 | 4674 | fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : ""); |
227858d1 | 4675 | |
e1f28918 | 4676 | if (is_negative_overflow_infinity (vr->min)) |
12df8a7e | 4677 | fprintf (file, "-INF(OVF)"); |
e1f28918 ILT |
4678 | else if (INTEGRAL_TYPE_P (type) |
4679 | && !TYPE_UNSIGNED (type) | |
4680 | && vrp_val_is_min (vr->min)) | |
4681 | fprintf (file, "-INF"); | |
227858d1 DN |
4682 | else |
4683 | print_generic_expr (file, vr->min, 0); | |
4684 | ||
0bca51f0 | 4685 | fprintf (file, ", "); |
227858d1 | 4686 | |
e1f28918 | 4687 | if (is_positive_overflow_infinity (vr->max)) |
12df8a7e | 4688 | fprintf (file, "+INF(OVF)"); |
e1f28918 ILT |
4689 | else if (INTEGRAL_TYPE_P (type) |
4690 | && vrp_val_is_max (vr->max)) | |
4691 | fprintf (file, "+INF"); | |
227858d1 DN |
4692 | else |
4693 | print_generic_expr (file, vr->max, 0); | |
4694 | ||
0bca51f0 | 4695 | fprintf (file, "]"); |
227858d1 DN |
4696 | |
4697 | if (vr->equiv) | |
4698 | { | |
4699 | bitmap_iterator bi; | |
4700 | unsigned i, c = 0; | |
4701 | ||
4702 | fprintf (file, " EQUIVALENCES: { "); | |
4703 | ||
4704 | EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi) | |
4705 | { | |
4706 | print_generic_expr (file, ssa_name (i), 0); | |
4707 | fprintf (file, " "); | |
4708 | c++; | |
4709 | } | |
4710 | ||
4711 | fprintf (file, "} (%u elements)", c); | |
4712 | } | |
0bca51f0 DN |
4713 | } |
4714 | else if (vr->type == VR_VARYING) | |
4715 | fprintf (file, "VARYING"); | |
4716 | else | |
4717 | fprintf (file, "INVALID RANGE"); | |
4718 | } | |
4719 | ||
4720 | ||
4721 | /* Dump value range VR to stderr. */ | |
4722 | ||
24e47c76 | 4723 | DEBUG_FUNCTION void |
526ceb68 | 4724 | debug_value_range (value_range *vr) |
0bca51f0 DN |
4725 | { |
4726 | dump_value_range (stderr, vr); | |
96644aba | 4727 | fprintf (stderr, "\n"); |
0bca51f0 DN |
4728 | } |
4729 | ||
4730 | ||
4731 | /* Dump value ranges of all SSA_NAMEs to FILE. */ | |
4732 | ||
4733 | void | |
4734 | dump_all_value_ranges (FILE *file) | |
4735 | { | |
4736 | size_t i; | |
4737 | ||
d9256277 | 4738 | for (i = 0; i < num_vr_values; i++) |
0bca51f0 | 4739 | { |
227858d1 | 4740 | if (vr_value[i]) |
0bca51f0 | 4741 | { |
227858d1 | 4742 | print_generic_expr (file, ssa_name (i), 0); |
0bca51f0 | 4743 | fprintf (file, ": "); |
227858d1 | 4744 | dump_value_range (file, vr_value[i]); |
0bca51f0 DN |
4745 | fprintf (file, "\n"); |
4746 | } | |
4747 | } | |
4748 | ||
4749 | fprintf (file, "\n"); | |
4750 | } | |
4751 | ||
4752 | ||
4753 | /* Dump all value ranges to stderr. */ | |
4754 | ||
24e47c76 | 4755 | DEBUG_FUNCTION void |
0bca51f0 DN |
4756 | debug_all_value_ranges (void) |
4757 | { | |
4758 | dump_all_value_ranges (stderr); | |
4759 | } | |
4760 | ||
4761 | ||
0bca51f0 DN |
4762 | /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V, |
4763 | create a new SSA name N and return the assertion assignment | |
36f291f7 | 4764 | 'N = ASSERT_EXPR <V, V OP W>'. */ |
0bca51f0 | 4765 | |
355fe088 | 4766 | static gimple * |
0bca51f0 DN |
4767 | build_assert_expr_for (tree cond, tree v) |
4768 | { | |
45db3141 | 4769 | tree a; |
538dd0b7 | 4770 | gassign *assertion; |
0bca51f0 | 4771 | |
45db3141 RG |
4772 | gcc_assert (TREE_CODE (v) == SSA_NAME |
4773 | && COMPARISON_CLASS_P (cond)); | |
0bca51f0 | 4774 | |
45db3141 RG |
4775 | a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond); |
4776 | assertion = gimple_build_assign (NULL_TREE, a); | |
0bca51f0 DN |
4777 | |
4778 | /* The new ASSERT_EXPR, creates a new SSA name that replaces the | |
45db3141 RG |
4779 | operand of the ASSERT_EXPR. Create it so the new name and the old one |
4780 | are registered in the replacement table so that we can fix the SSA web | |
4781 | after adding all the ASSERT_EXPRs. */ | |
4782 | create_new_def_for (v, assertion, NULL); | |
0bca51f0 DN |
4783 | |
4784 | return assertion; | |
4785 | } | |
4786 | ||
4787 | ||
/* Return true if STMT is a GIMPLE_COND whose comparison involves
   floating point values.  (The historical comment said "Return
   false ..." — the code actually returns TRUE exactly when the
   predicate compares floats.)  */

static inline bool
fp_predicate (gimple *stmt)
{
  /* Verifies STMT is a conditional jump before inspecting it.  */
  GIMPLE_CHECK (stmt, GIMPLE_COND);

  /* Checking the LHS type suffices: a valid GIMPLE comparison has
     operands of compatible types.  */
  return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
}
4798 | ||
227858d1 DN |
/* If the range of values taken by OP can be inferred after STMT executes,
   return the comparison code (COMP_CODE_P) and value (VAL_P) that
   describes the inferred range.  Return true if a range could be
   inferred.  */

static bool
infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
{
  /* Default to "nothing inferred" so callers can rely on the outputs
     even on the false paths below.  */
  *val_p = NULL_TREE;
  *comp_code_p = ERROR_MARK;

  /* Do not attempt to infer anything in names that flow through
     abnormal edges.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
    return false;

  /* Similarly, don't infer anything from statements that may throw
     exceptions.  ??? Relax this requirement?  */
  if (stmt_could_throw_p (stmt))
    return false;

  /* If STMT is the last statement of a basic block with no normal
     successors, there is no point inferring anything about any of its
     operands.  We would not be able to find a proper insertion point
     for the assertion, anyway.  */
  if (stmt_ends_bb_p (stmt))
    {
      edge_iterator ei;
      edge e;

      /* NOTE: FOR_EACH_EDGE leaves E == NULL when the loop runs to
	 completion, i.e. when every successor edge is abnormal.  */
      FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
	if (!(e->flags & EDGE_ABNORMAL))
	  break;
      if (e == NULL)
	return false;
    }

  /* The only inference currently made: a dereference (or similar use)
     of OP in STMT implies OP != 0 afterwards.  */
  if (infer_nonnull_range (stmt, op))
    {
      *val_p = build_int_cst (TREE_TYPE (op), 0);
      *comp_code_p = NE_EXPR;
      return true;
    }

  return false;
}
4845 | ||
4846 | ||
227858d1 DN |
4847 | void dump_asserts_for (FILE *, tree); |
4848 | void debug_asserts_for (tree); | |
4849 | void dump_all_asserts (FILE *); | |
4850 | void debug_all_asserts (void); | |
4851 | ||
4852 | /* Dump all the registered assertions for NAME to FILE. */ | |
4853 | ||
4854 | void | |
4855 | dump_asserts_for (FILE *file, tree name) | |
4856 | { | |
ff507401 | 4857 | assert_locus *loc; |
227858d1 DN |
4858 | |
4859 | fprintf (file, "Assertions to be inserted for "); | |
4860 | print_generic_expr (file, name, 0); | |
4861 | fprintf (file, "\n"); | |
4862 | ||
4863 | loc = asserts_for[SSA_NAME_VERSION (name)]; | |
4864 | while (loc) | |
4865 | { | |
4866 | fprintf (file, "\t"); | |
726a989a | 4867 | print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0); |
227858d1 DN |
4868 | fprintf (file, "\n\tBB #%d", loc->bb->index); |
4869 | if (loc->e) | |
4870 | { | |
4871 | fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index, | |
4872 | loc->e->dest->index); | |
a315c44c | 4873 | dump_edge_info (file, loc->e, dump_flags, 0); |
227858d1 DN |
4874 | } |
4875 | fprintf (file, "\n\tPREDICATE: "); | |
4876 | print_generic_expr (file, name, 0); | |
5806f481 | 4877 | fprintf (file, " %s ", get_tree_code_name (loc->comp_code)); |
227858d1 DN |
4878 | print_generic_expr (file, loc->val, 0); |
4879 | fprintf (file, "\n\n"); | |
4880 | loc = loc->next; | |
4881 | } | |
4882 | ||
4883 | fprintf (file, "\n"); | |
4884 | } | |
4885 | ||
4886 | ||
4887 | /* Dump all the registered assertions for NAME to stderr. */ | |
4888 | ||
24e47c76 | 4889 | DEBUG_FUNCTION void |
227858d1 DN |
4890 | debug_asserts_for (tree name) |
4891 | { | |
4892 | dump_asserts_for (stderr, name); | |
4893 | } | |
4894 | ||
4895 | ||
4896 | /* Dump all the registered assertions for all the names to FILE. */ | |
4897 | ||
4898 | void | |
4899 | dump_all_asserts (FILE *file) | |
4900 | { | |
4901 | unsigned i; | |
4902 | bitmap_iterator bi; | |
4903 | ||
4904 | fprintf (file, "\nASSERT_EXPRs to be inserted\n\n"); | |
4905 | EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi) | |
4906 | dump_asserts_for (file, ssa_name (i)); | |
4907 | fprintf (file, "\n"); | |
4908 | } | |
4909 | ||
4910 | ||
4911 | /* Dump all the registered assertions for all the names to stderr. */ | |
4912 | ||
24e47c76 | 4913 | DEBUG_FUNCTION void |
227858d1 DN |
4914 | debug_all_asserts (void) |
4915 | { | |
4916 | dump_all_asserts (stderr); | |
4917 | } | |
4918 | ||
4919 | ||
/* If NAME doesn't have an ASSERT_EXPR registered for asserting
   'EXPR COMP_CODE VAL' at a location that dominates block BB or
   E->DEST, then register this location as a possible insertion point
   for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.

   BB, E and SI provide the exact insertion point for the new
   ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
   on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
   BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
   must not be NULL.  */

static void
register_new_assert_for (tree name, tree expr,
			 enum tree_code comp_code,
			 tree val,
			 basic_block bb,
			 edge e,
			 gimple_stmt_iterator si)
{
  assert_locus *n, *loc, *last_loc;
  basic_block dest_bb;

  /* BB and E are mutually exclusive insertion specifications.  */
  gcc_checking_assert (bb == NULL || e == NULL);

  /* Control statements must use edge insertion, since the assertion
     has to land on one particular outgoing path.  */
  if (e == NULL)
    gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);

  /* Never build an assert comparing against an integer constant with
     TREE_OVERFLOW set.  This confuses our undefined overflow warning
     machinery.  */
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);

  /* The new assertion A will be inserted at BB or E.  We need to
     determine if the new location is dominated by a previously
     registered location for A.  If we are doing an edge insertion,
     assume that A will be inserted at E->DEST.  Note that this is not
     necessarily true.

     If E is a critical edge, it will be split.  But even if E is
     split, the new block will dominate the same set of blocks that
     E->DEST dominates.

     The reverse, however, is not true, blocks dominated by E->DEST
     will not be dominated by the new block created to split E.  So,
     if the insertion location is on a critical edge, we will not use
     the new location to move another assertion previously registered
     at a block dominated by E->DEST.  */
  dest_bb = (bb) ? bb : e->dest;

  /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
     VAL at a block dominating DEST_BB, then we don't need to insert a new
     one.  Similarly, if the same assertion already exists at a block
     dominated by DEST_BB and the new location is not on a critical
     edge, then update the existing location for the assertion (i.e.,
     move the assertion up in the dominance tree).

     Note, this is implemented as a simple linked list because there
     should not be more than a handful of assertions registered per
     name.  If this becomes a performance problem, a table hashed by
     COMP_CODE and VAL could be implemented.  */
  loc = asserts_for[SSA_NAME_VERSION (name)];
  last_loc = loc;
  while (loc)
    {
      /* Same predicate?  Compare by pointer first (cheap), then
	 structurally.  */
      if (loc->comp_code == comp_code
	  && (loc->val == val
	      || operand_equal_p (loc->val, val, 0))
	  && (loc->expr == expr
	      || operand_equal_p (loc->expr, expr, 0)))
	{
	  /* If E is not a critical edge and DEST_BB
	     dominates the existing location for the assertion, move
	     the assertion up in the dominance tree by updating its
	     location information.  */
	  if ((e == NULL || !EDGE_CRITICAL_P (e))
	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
	    {
	      loc->bb = dest_bb;
	      loc->e = e;
	      loc->si = si;
	      return;
	    }
	}

      /* Update the last node of the list and move to the next one.  */
      last_loc = loc;
      loc = loc->next;
    }

  /* If we didn't find an assertion already registered for
     NAME COMP_CODE VAL, add a new one at the end of the list of
     assertions associated with NAME.  */
  n = XNEW (struct assert_locus);
  n->bb = dest_bb;
  n->e = e;
  n->si = si;
  n->comp_code = comp_code;
  n->val = val;
  n->expr = expr;
  n->next = NULL;

  if (last_loc)
    last_loc->next = n;
  else
    asserts_for[SSA_NAME_VERSION (name)] = n;

  /* Record that NAME needs assertion processing at all.  */
  bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
}
5030 | ||
a26a02d7 RAE |
5031 | /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME. |
5032 | Extract a suitable test code and value and store them into *CODE_P and | |
5033 | *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P. | |
5034 | ||
5035 | If no extraction was possible, return FALSE, otherwise return TRUE. | |
5036 | ||
5037 | If INVERT is true, then we invert the result stored into *CODE_P. */ | |
764a79ed RAE |
5038 | |
5039 | static bool | |
5040 | extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code, | |
5041 | tree cond_op0, tree cond_op1, | |
5042 | bool invert, enum tree_code *code_p, | |
5043 | tree *val_p) | |
5044 | { | |
5045 | enum tree_code comp_code; | |
5046 | tree val; | |
5047 | ||
5048 | /* Otherwise, we have a comparison of the form NAME COMP VAL | |
5049 | or VAL COMP NAME. */ | |
5050 | if (name == cond_op1) | |
5051 | { | |
5052 | /* If the predicate is of the form VAL COMP NAME, flip | |
5053 | COMP around because we need to register NAME as the | |
5054 | first operand in the predicate. */ | |
5055 | comp_code = swap_tree_comparison (cond_code); | |
5056 | val = cond_op0; | |
5057 | } | |
5058 | else | |
5059 | { | |
5060 | /* The comparison is of the form NAME COMP VAL, so the | |
5061 | comparison code remains unchanged. */ | |
5062 | comp_code = cond_code; | |
5063 | val = cond_op1; | |
5064 | } | |
5065 | ||
5066 | /* Invert the comparison code as necessary. */ | |
5067 | if (invert) | |
5068 | comp_code = invert_tree_comparison (comp_code, 0); | |
5069 | ||
305708ce YR |
5070 | /* VRP only handles integral and pointer types. */ |
5071 | if (! INTEGRAL_TYPE_P (TREE_TYPE (val)) | |
5072 | && ! POINTER_TYPE_P (TREE_TYPE (val))) | |
764a79ed RAE |
5073 | return false; |
5074 | ||
5075 | /* Do not register always-false predicates. | |
5076 | FIXME: this works around a limitation in fold() when dealing with | |
5077 | enumerations. Given 'enum { N1, N2 } x;', fold will not | |
5078 | fold 'if (x > N2)' to 'if (0)'. */ | |
5079 | if ((comp_code == GT_EXPR || comp_code == LT_EXPR) | |
5080 | && INTEGRAL_TYPE_P (TREE_TYPE (val))) | |
5081 | { | |
5082 | tree min = TYPE_MIN_VALUE (TREE_TYPE (val)); | |
5083 | tree max = TYPE_MAX_VALUE (TREE_TYPE (val)); | |
5084 | ||
5085 | if (comp_code == GT_EXPR | |
5086 | && (!max | |
5087 | || compare_values (val, max) == 0)) | |
5088 | return false; | |
5089 | ||
5090 | if (comp_code == LT_EXPR | |
5091 | && (!min | |
5092 | || compare_values (val, min) == 0)) | |
5093 | return false; | |
5094 | } | |
5095 | *code_p = comp_code; | |
5096 | *val_p = val; | |
5097 | return true; | |
5098 | } | |
279f3eb5 | 5099 | |
ad193f32 JJ |
/* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
   (otherwise return VAL).  VAL and MASK must be zero-extended for
   precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
   (to transform signed values into unsigned) and at the end xor
   SGNBIT back.  */

static wide_int
masked_increment (const wide_int &val_in, const wide_int &mask,
		  const wide_int &sgnbit, unsigned int prec)
{
  wide_int bit = wi::one (prec), res;
  unsigned int i;

  /* Map signed ordering to unsigned ordering by flipping the sign bit
     (SGNBIT is zero in the unsigned case, making this a no-op).  */
  wide_int val = val_in ^ sgnbit;
  /* Try each set bit of MASK from least to most significant: BIT
     doubles each iteration, scanning candidate increment positions.  */
  for (i = 0; i < prec; i++, bit += bit)
    {
      res = mask;
      /* Skip positions where MASK has a zero bit; incrementing there
	 cannot yield a value with (RES & MASK) == RES.  */
      if ((res & bit) == 0)
	continue;
      /* Add BIT to VAL, clear all bits below BIT, and keep only bits
	 of MASK — the smallest candidate above VAL at this position.  */
      res = bit - 1;
      res = (val + bit).and_not (res);
      res &= mask;
      /* First candidate strictly greater than VAL (unsigned compare,
	 thanks to the SGNBIT transform) is the answer.  */
      if (wi::gtu_p (res, val))
	return res ^ sgnbit;
    }
  /* No such value exists: return VAL_IN unchanged (sign bit restored).  */
  return val ^ sgnbit;
}
5127 | ||
2ab8dbf4 RG |
5128 | /* Try to register an edge assertion for SSA name NAME on edge E for |
5129 | the condition COND contributing to the conditional jump pointed to by BSI. | |
d476245d | 5130 | Invert the condition COND if INVERT is true. */ |
2ab8dbf4 | 5131 | |
d476245d | 5132 | static void |
726a989a | 5133 | register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, |
a26a02d7 RAE |
5134 | enum tree_code cond_code, |
5135 | tree cond_op0, tree cond_op1, bool invert) | |
2ab8dbf4 RG |
5136 | { |
5137 | tree val; | |
5138 | enum tree_code comp_code; | |
2ab8dbf4 | 5139 | |
a26a02d7 RAE |
5140 | if (!extract_code_and_val_from_cond_with_ops (name, cond_code, |
5141 | cond_op0, | |
5142 | cond_op1, | |
5143 | invert, &comp_code, &val)) | |
d476245d | 5144 | return; |
2ab8dbf4 RG |
5145 | |
5146 | /* Only register an ASSERT_EXPR if NAME was found in the sub-graph | |
5147 | reachable from E. */ | |
c4ab2baa | 5148 | if (live_on_edge (e, name) |
2ab8dbf4 | 5149 | && !has_single_use (name)) |
d476245d | 5150 | register_new_assert_for (name, name, comp_code, val, NULL, e, bsi); |
2ab8dbf4 RG |
5151 | |
5152 | /* In the case of NAME <= CST and NAME being defined as | |
5153 | NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2 | |
5154 | and NAME2 <= CST - CST2. We can do the same for NAME > CST. | |
5155 | This catches range and anti-range tests. */ | |
5156 | if ((comp_code == LE_EXPR | |
5157 | || comp_code == GT_EXPR) | |
5158 | && TREE_CODE (val) == INTEGER_CST | |
5159 | && TYPE_UNSIGNED (TREE_TYPE (val))) | |
5160 | { | |
355fe088 | 5161 | gimple *def_stmt = SSA_NAME_DEF_STMT (name); |
70b7b037 | 5162 | tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE; |
2ab8dbf4 RG |
5163 | |
5164 | /* Extract CST2 from the (optional) addition. */ | |
726a989a RB |
5165 | if (is_gimple_assign (def_stmt) |
5166 | && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR) | |
2ab8dbf4 | 5167 | { |
726a989a RB |
5168 | name2 = gimple_assign_rhs1 (def_stmt); |
5169 | cst2 = gimple_assign_rhs2 (def_stmt); | |
2ab8dbf4 RG |
5170 | if (TREE_CODE (name2) == SSA_NAME |
5171 | && TREE_CODE (cst2) == INTEGER_CST) | |
5172 | def_stmt = SSA_NAME_DEF_STMT (name2); | |
5173 | } | |
5174 | ||
70b7b037 | 5175 | /* Extract NAME2 from the (optional) sign-changing cast. */ |
726a989a | 5176 | if (gimple_assign_cast_p (def_stmt)) |
70b7b037 | 5177 | { |
1a87cf0c | 5178 | if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)) |
726a989a RB |
5179 | && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt))) |
5180 | && (TYPE_PRECISION (gimple_expr_type (def_stmt)) | |
5181 | == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))) | |
5182 | name3 = gimple_assign_rhs1 (def_stmt); | |
70b7b037 | 5183 | } |
2ab8dbf4 | 5184 | |
70b7b037 RG |
5185 | /* If name3 is used later, create an ASSERT_EXPR for it. */ |
5186 | if (name3 != NULL_TREE | |
5187 | && TREE_CODE (name3) == SSA_NAME | |
2ab8dbf4 RG |
5188 | && (cst2 == NULL_TREE |
5189 | || TREE_CODE (cst2) == INTEGER_CST) | |
70b7b037 | 5190 | && INTEGRAL_TYPE_P (TREE_TYPE (name3)) |
c4ab2baa | 5191 | && live_on_edge (e, name3) |
70b7b037 RG |
5192 | && !has_single_use (name3)) |
5193 | { | |
5194 | tree tmp; | |
5195 | ||
5196 | /* Build an expression for the range test. */ | |
5197 | tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3); | |
5198 | if (cst2 != NULL_TREE) | |
5199 | tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); | |
5200 | ||
5201 | if (dump_file) | |
5202 | { | |
5203 | fprintf (dump_file, "Adding assert for "); | |
5204 | print_generic_expr (dump_file, name3, 0); | |
5205 | fprintf (dump_file, " from "); | |
5206 | print_generic_expr (dump_file, tmp, 0); | |
5207 | fprintf (dump_file, "\n"); | |
5208 | } | |
5209 | ||
5210 | register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi); | |
70b7b037 RG |
5211 | } |
5212 | ||
5213 | /* If name2 is used later, create an ASSERT_EXPR for it. */ | |
5214 | if (name2 != NULL_TREE | |
5215 | && TREE_CODE (name2) == SSA_NAME | |
5216 | && TREE_CODE (cst2) == INTEGER_CST | |
5217 | && INTEGRAL_TYPE_P (TREE_TYPE (name2)) | |
c4ab2baa | 5218 | && live_on_edge (e, name2) |
2ab8dbf4 RG |
5219 | && !has_single_use (name2)) |
5220 | { | |
5221 | tree tmp; | |
5222 | ||
5223 | /* Build an expression for the range test. */ | |
5224 | tmp = name2; | |
5225 | if (TREE_TYPE (name) != TREE_TYPE (name2)) | |
5226 | tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp); | |
5227 | if (cst2 != NULL_TREE) | |
5228 | tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); | |
5229 | ||
5230 | if (dump_file) | |
5231 | { | |
5232 | fprintf (dump_file, "Adding assert for "); | |
5233 | print_generic_expr (dump_file, name2, 0); | |
5234 | fprintf (dump_file, " from "); | |
5235 | print_generic_expr (dump_file, tmp, 0); | |
5236 | fprintf (dump_file, "\n"); | |
5237 | } | |
5238 | ||
5239 | register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi); | |
2ab8dbf4 RG |
5240 | } |
5241 | } | |
5242 | ||
83ede847 RB |
5243 | /* In the case of post-in/decrement tests like if (i++) ... and uses |
5244 | of the in/decremented value on the edge the extra name we want to | |
5245 | assert for is not on the def chain of the name compared. Instead | |
4fe65172 RB |
5246 | it is in the set of use stmts. |
5247 | Similar cases happen for conversions that were simplified through | |
5248 | fold_{sign_changed,widened}_comparison. */ | |
83ede847 RB |
5249 | if ((comp_code == NE_EXPR |
5250 | || comp_code == EQ_EXPR) | |
5251 | && TREE_CODE (val) == INTEGER_CST) | |
5252 | { | |
5253 | imm_use_iterator ui; | |
355fe088 | 5254 | gimple *use_stmt; |
83ede847 RB |
5255 | FOR_EACH_IMM_USE_STMT (use_stmt, ui, name) |
5256 | { | |
83ede847 RB |
5257 | if (!is_gimple_assign (use_stmt)) |
5258 | continue; | |
5259 | ||
4fe65172 RB |
5260 | /* Cut off to use-stmts that are dominating the predecessor. */ |
5261 | if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt))) | |
83ede847 RB |
5262 | continue; |
5263 | ||
4fe65172 RB |
5264 | tree name2 = gimple_assign_lhs (use_stmt); |
5265 | if (TREE_CODE (name2) != SSA_NAME | |
5266 | || !live_on_edge (e, name2)) | |
83ede847 RB |
5267 | continue; |
5268 | ||
4fe65172 RB |
5269 | enum tree_code code = gimple_assign_rhs_code (use_stmt); |
5270 | tree cst; | |
5271 | if (code == PLUS_EXPR | |
5272 | || code == MINUS_EXPR) | |
83ede847 | 5273 | { |
4fe65172 RB |
5274 | cst = gimple_assign_rhs2 (use_stmt); |
5275 | if (TREE_CODE (cst) != INTEGER_CST) | |
5276 | continue; | |
83ede847 | 5277 | cst = int_const_binop (code, val, cst); |
83ede847 | 5278 | } |
4fe65172 | 5279 | else if (CONVERT_EXPR_CODE_P (code)) |
fe9acb3a | 5280 | { |
ef3b59ac | 5281 | /* For truncating conversions we cannot record |
fe9acb3a RB |
5282 | an inequality. */ |
5283 | if (comp_code == NE_EXPR | |
5284 | && (TYPE_PRECISION (TREE_TYPE (name2)) | |
ef3b59ac | 5285 | < TYPE_PRECISION (TREE_TYPE (name)))) |
fe9acb3a RB |
5286 | continue; |
5287 | cst = fold_convert (TREE_TYPE (name2), val); | |
5288 | } | |
4fe65172 RB |
5289 | else |
5290 | continue; | |
5291 | ||
5292 | if (TREE_OVERFLOW_P (cst)) | |
5293 | cst = drop_tree_overflow (cst); | |
5294 | register_new_assert_for (name2, name2, comp_code, cst, | |
5295 | NULL, e, bsi); | |
83ede847 RB |
5296 | } |
5297 | } | |
5298 | ||
3877a6a6 JJ |
5299 | if (TREE_CODE_CLASS (comp_code) == tcc_comparison |
5300 | && TREE_CODE (val) == INTEGER_CST) | |
5301 | { | |
355fe088 | 5302 | gimple *def_stmt = SSA_NAME_DEF_STMT (name); |
ad193f32 | 5303 | tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE; |
3877a6a6 | 5304 | tree val2 = NULL_TREE; |
01c1f20d | 5305 | unsigned int prec = TYPE_PRECISION (TREE_TYPE (val)); |
807e902e | 5306 | wide_int mask = wi::zero (prec); |
440d3472 JJ |
5307 | unsigned int nprec = prec; |
5308 | enum tree_code rhs_code = ERROR_MARK; | |
5309 | ||
5310 | if (is_gimple_assign (def_stmt)) | |
5311 | rhs_code = gimple_assign_rhs_code (def_stmt); | |
3877a6a6 | 5312 | |
7b5c5139 JJ |
5313 | /* Add asserts for NAME cmp CST and NAME being defined |
5314 | as NAME = (int) NAME2. */ | |
5315 | if (!TYPE_UNSIGNED (TREE_TYPE (val)) | |
5316 | && (comp_code == LE_EXPR || comp_code == LT_EXPR | |
5317 | || comp_code == GT_EXPR || comp_code == GE_EXPR) | |
5318 | && gimple_assign_cast_p (def_stmt)) | |
5319 | { | |
5320 | name2 = gimple_assign_rhs1 (def_stmt); | |
440d3472 | 5321 | if (CONVERT_EXPR_CODE_P (rhs_code) |
7b5c5139 JJ |
5322 | && INTEGRAL_TYPE_P (TREE_TYPE (name2)) |
5323 | && TYPE_UNSIGNED (TREE_TYPE (name2)) | |
5324 | && prec == TYPE_PRECISION (TREE_TYPE (name2)) | |
5325 | && (comp_code == LE_EXPR || comp_code == GT_EXPR | |
5326 | || !tree_int_cst_equal (val, | |
5327 | TYPE_MIN_VALUE (TREE_TYPE (val)))) | |
5328 | && live_on_edge (e, name2) | |
5329 | && !has_single_use (name2)) | |
5330 | { | |
5331 | tree tmp, cst; | |
5332 | enum tree_code new_comp_code = comp_code; | |
5333 | ||
5334 | cst = fold_convert (TREE_TYPE (name2), | |
5335 | TYPE_MIN_VALUE (TREE_TYPE (val))); | |
5336 | /* Build an expression for the range test. */ | |
5337 | tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst); | |
5338 | cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst, | |
5339 | fold_convert (TREE_TYPE (name2), val)); | |
5340 | if (comp_code == LT_EXPR || comp_code == GE_EXPR) | |
5341 | { | |
5342 | new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR; | |
5343 | cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst, | |
5344 | build_int_cst (TREE_TYPE (name2), 1)); | |
5345 | } | |
5346 | ||
5347 | if (dump_file) | |
5348 | { | |
5349 | fprintf (dump_file, "Adding assert for "); | |
5350 | print_generic_expr (dump_file, name2, 0); | |
5351 | fprintf (dump_file, " from "); | |
5352 | print_generic_expr (dump_file, tmp, 0); | |
5353 | fprintf (dump_file, "\n"); | |
5354 | } | |
5355 | ||
5356 | register_new_assert_for (name2, tmp, new_comp_code, cst, NULL, | |
5357 | e, bsi); | |
7b5c5139 JJ |
5358 | } |
5359 | } | |
5360 | ||
5361 | /* Add asserts for NAME cmp CST and NAME being defined as | |
5362 | NAME = NAME2 >> CST2. | |
5363 | ||
5364 | Extract CST2 from the right shift. */ | |
440d3472 | 5365 | if (rhs_code == RSHIFT_EXPR) |
3877a6a6 JJ |
5366 | { |
5367 | name2 = gimple_assign_rhs1 (def_stmt); | |
5368 | cst2 = gimple_assign_rhs2 (def_stmt); | |
5369 | if (TREE_CODE (name2) == SSA_NAME | |
cc269bb6 | 5370 | && tree_fits_uhwi_p (cst2) |
3877a6a6 | 5371 | && INTEGRAL_TYPE_P (TREE_TYPE (name2)) |
ae7e9ddd | 5372 | && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1) |
0ea62d93 | 5373 | && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val))) |
3877a6a6 JJ |
5374 | && live_on_edge (e, name2) |
5375 | && !has_single_use (name2)) | |
5376 | { | |
807e902e | 5377 | mask = wi::mask (tree_to_uhwi (cst2), false, prec); |
3877a6a6 JJ |
5378 | val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2); |
5379 | } | |
5380 | } | |
3877a6a6 JJ |
5381 | if (val2 != NULL_TREE |
5382 | && TREE_CODE (val2) == INTEGER_CST | |
5383 | && simple_cst_equal (fold_build2 (RSHIFT_EXPR, | |
5384 | TREE_TYPE (val), | |
5385 | val2, cst2), val)) | |
5386 | { | |
5387 | enum tree_code new_comp_code = comp_code; | |
5388 | tree tmp, new_val; | |
5389 | ||
5390 | tmp = name2; | |
5391 | if (comp_code == EQ_EXPR || comp_code == NE_EXPR) | |
5392 | { | |
5393 | if (!TYPE_UNSIGNED (TREE_TYPE (val))) | |
5394 | { | |
3877a6a6 JJ |
5395 | tree type = build_nonstandard_integer_type (prec, 1); |
5396 | tmp = build1 (NOP_EXPR, type, name2); | |
5397 | val2 = fold_convert (type, val2); | |
5398 | } | |
5399 | tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2); | |
807e902e | 5400 | new_val = wide_int_to_tree (TREE_TYPE (tmp), mask); |
3877a6a6 JJ |
5401 | new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR; |
5402 | } | |
5403 | else if (comp_code == LT_EXPR || comp_code == GE_EXPR) | |
4c445590 | 5404 | { |
807e902e KZ |
5405 | wide_int minval |
5406 | = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val))); | |
4c445590 | 5407 | new_val = val2; |
807e902e | 5408 | if (minval == new_val) |
4c445590 JJ |
5409 | new_val = NULL_TREE; |
5410 | } | |
3877a6a6 JJ |
5411 | else |
5412 | { | |
807e902e KZ |
5413 | wide_int maxval |
5414 | = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val))); | |
5415 | mask |= val2; | |
27bcd47c | 5416 | if (mask == maxval) |
01c1f20d JJ |
5417 | new_val = NULL_TREE; |
5418 | else | |
807e902e | 5419 | new_val = wide_int_to_tree (TREE_TYPE (val2), mask); |
3877a6a6 JJ |
5420 | } |
5421 | ||
01c1f20d | 5422 | if (new_val) |
3877a6a6 | 5423 | { |
01c1f20d JJ |
5424 | if (dump_file) |
5425 | { | |
5426 | fprintf (dump_file, "Adding assert for "); | |
5427 | print_generic_expr (dump_file, name2, 0); | |
5428 | fprintf (dump_file, " from "); | |
5429 | print_generic_expr (dump_file, tmp, 0); | |
5430 | fprintf (dump_file, "\n"); | |
5431 | } | |
3877a6a6 | 5432 | |
01c1f20d JJ |
5433 | register_new_assert_for (name2, tmp, new_comp_code, new_val, |
5434 | NULL, e, bsi); | |
01c1f20d | 5435 | } |
3877a6a6 | 5436 | } |
ad193f32 JJ |
5437 | |
5438 | /* Add asserts for NAME cmp CST and NAME being defined as | |
5439 | NAME = NAME2 & CST2. | |
5440 | ||
440d3472 JJ |
5441 | Extract CST2 from the and. |
5442 | ||
5443 | Also handle | |
5444 | NAME = (unsigned) NAME2; | |
5445 | casts where NAME's type is unsigned and has smaller precision | |
5446 | than NAME2's type as if it was NAME = NAME2 & MASK. */ | |
ad193f32 JJ |
5447 | names[0] = NULL_TREE; |
5448 | names[1] = NULL_TREE; | |
5449 | cst2 = NULL_TREE; | |
440d3472 JJ |
5450 | if (rhs_code == BIT_AND_EXPR |
5451 | || (CONVERT_EXPR_CODE_P (rhs_code) | |
5452 | && TREE_CODE (TREE_TYPE (val)) == INTEGER_TYPE | |
5453 | && TYPE_UNSIGNED (TREE_TYPE (val)) | |
5454 | && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt))) | |
d476245d | 5455 | > prec)) |
ad193f32 JJ |
5456 | { |
5457 | name2 = gimple_assign_rhs1 (def_stmt); | |
440d3472 JJ |
5458 | if (rhs_code == BIT_AND_EXPR) |
5459 | cst2 = gimple_assign_rhs2 (def_stmt); | |
5460 | else | |
5461 | { | |
5462 | cst2 = TYPE_MAX_VALUE (TREE_TYPE (val)); | |
5463 | nprec = TYPE_PRECISION (TREE_TYPE (name2)); | |
5464 | } | |
ad193f32 JJ |
5465 | if (TREE_CODE (name2) == SSA_NAME |
5466 | && INTEGRAL_TYPE_P (TREE_TYPE (name2)) | |
5467 | && TREE_CODE (cst2) == INTEGER_CST | |
5468 | && !integer_zerop (cst2) | |
440d3472 | 5469 | && (nprec > 1 |
ad193f32 JJ |
5470 | || TYPE_UNSIGNED (TREE_TYPE (val)))) |
5471 | { | |
355fe088 | 5472 | gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2); |
ad193f32 JJ |
5473 | if (gimple_assign_cast_p (def_stmt2)) |
5474 | { | |
5475 | names[1] = gimple_assign_rhs1 (def_stmt2); | |
5476 | if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2)) | |
5477 | || !INTEGRAL_TYPE_P (TREE_TYPE (names[1])) | |
5478 | || (TYPE_PRECISION (TREE_TYPE (name2)) | |
5479 | != TYPE_PRECISION (TREE_TYPE (names[1]))) | |
5480 | || !live_on_edge (e, names[1]) | |
5481 | || has_single_use (names[1])) | |
5482 | names[1] = NULL_TREE; | |
5483 | } | |
5484 | if (live_on_edge (e, name2) | |
5485 | && !has_single_use (name2)) | |
5486 | names[0] = name2; | |
5487 | } | |
5488 | } | |
5489 | if (names[0] || names[1]) | |
5490 | { | |
807e902e KZ |
5491 | wide_int minv, maxv, valv, cst2v; |
5492 | wide_int tem, sgnbit; | |
5493 | bool valid_p = false, valn, cst2n; | |
ad193f32 JJ |
5494 | enum tree_code ccode = comp_code; |
5495 | ||
807e902e KZ |
5496 | valv = wide_int::from (val, nprec, UNSIGNED); |
5497 | cst2v = wide_int::from (cst2, nprec, UNSIGNED); | |
5498 | valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val))); | |
5499 | cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val))); | |
ad193f32 JJ |
5500 | /* If CST2 doesn't have most significant bit set, |
5501 | but VAL is negative, we have comparison like | |
5502 | if ((x & 0x123) > -4) (always true). Just give up. */ | |
5503 | if (!cst2n && valn) | |
5504 | ccode = ERROR_MARK; | |
5505 | if (cst2n) | |
807e902e | 5506 | sgnbit = wi::set_bit_in_zero (nprec - 1, nprec); |
ad193f32 | 5507 | else |
807e902e | 5508 | sgnbit = wi::zero (nprec); |
27bcd47c | 5509 | minv = valv & cst2v; |
ad193f32 JJ |
5510 | switch (ccode) |
5511 | { | |
5512 | case EQ_EXPR: | |
5513 | /* Minimum unsigned value for equality is VAL & CST2 | |
5514 | (should be equal to VAL, otherwise we probably should | |
5515 | have folded the comparison into false) and | |
5516 | maximum unsigned value is VAL | ~CST2. */ | |
27bcd47c | 5517 | maxv = valv | ~cst2v; |
ad193f32 JJ |
5518 | valid_p = true; |
5519 | break; | |
807e902e | 5520 | |
ad193f32 | 5521 | case NE_EXPR: |
27bcd47c | 5522 | tem = valv | ~cst2v; |
ad193f32 | 5523 | /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */ |
807e902e | 5524 | if (valv == 0) |
ad193f32 JJ |
5525 | { |
5526 | cst2n = false; | |
807e902e | 5527 | sgnbit = wi::zero (nprec); |
ad193f32 JJ |
5528 | goto gt_expr; |
5529 | } | |
5530 | /* If (VAL | ~CST2) is all ones, handle it as | |
5531 | (X & CST2) < VAL. */ | |
807e902e | 5532 | if (tem == -1) |
ad193f32 JJ |
5533 | { |
5534 | cst2n = false; | |
5535 | valn = false; | |
807e902e | 5536 | sgnbit = wi::zero (nprec); |
ad193f32 JJ |
5537 | goto lt_expr; |
5538 | } | |
807e902e KZ |
5539 | if (!cst2n && wi::neg_p (cst2v)) |
5540 | sgnbit = wi::set_bit_in_zero (nprec - 1, nprec); | |
5541 | if (sgnbit != 0) | |
ad193f32 | 5542 | { |
27bcd47c | 5543 | if (valv == sgnbit) |
ad193f32 JJ |
5544 | { |
5545 | cst2n = true; | |
5546 | valn = true; | |
5547 | goto gt_expr; | |
5548 | } | |
807e902e | 5549 | if (tem == wi::mask (nprec - 1, false, nprec)) |
ad193f32 JJ |
5550 | { |
5551 | cst2n = true; | |
5552 | goto lt_expr; | |
5553 | } | |
5554 | if (!cst2n) | |
807e902e | 5555 | sgnbit = wi::zero (nprec); |
ad193f32 JJ |
5556 | } |
5557 | break; | |
807e902e | 5558 | |
ad193f32 JJ |
5559 | case GE_EXPR: |
5560 | /* Minimum unsigned value for >= if (VAL & CST2) == VAL | |
5561 | is VAL and maximum unsigned value is ~0. For signed | |
5562 | comparison, if CST2 doesn't have most significant bit | |
5563 | set, handle it similarly. If CST2 has MSB set, | |
5564 | the minimum is the same, and maximum is ~0U/2. */ | |
27bcd47c | 5565 | if (minv != valv) |
ad193f32 JJ |
5566 | { |
5567 | /* If (VAL & CST2) != VAL, X & CST2 can't be equal to | |
5568 | VAL. */ | |
440d3472 | 5569 | minv = masked_increment (valv, cst2v, sgnbit, nprec); |
27bcd47c | 5570 | if (minv == valv) |
ad193f32 JJ |
5571 | break; |
5572 | } | |
807e902e | 5573 | maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec); |
ad193f32 JJ |
5574 | valid_p = true; |
5575 | break; | |
807e902e | 5576 | |
ad193f32 JJ |
5577 | case GT_EXPR: |
5578 | gt_expr: | |
5579 | /* Find out smallest MINV where MINV > VAL | |
5580 | && (MINV & CST2) == MINV, if any. If VAL is signed and | |
440d3472 JJ |
5581 | CST2 has MSB set, compute it biased by 1 << (nprec - 1). */ |
5582 | minv = masked_increment (valv, cst2v, sgnbit, nprec); | |
27bcd47c | 5583 | if (minv == valv) |
ad193f32 | 5584 | break; |
807e902e | 5585 | maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec); |
ad193f32 JJ |
5586 | valid_p = true; |
5587 | break; | |
807e902e | 5588 | |
ad193f32 JJ |
5589 | case LE_EXPR: |
5590 | /* Minimum unsigned value for <= is 0 and maximum | |
5591 | unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL. | |
5592 | Otherwise, find smallest VAL2 where VAL2 > VAL | |
5593 | && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2 | |
5594 | as maximum. | |
5595 | For signed comparison, if CST2 doesn't have most | |
5596 | significant bit set, handle it similarly. If CST2 has | |
5597 | MSB set, the maximum is the same and minimum is INT_MIN. */ | |
27bcd47c | 5598 | if (minv == valv) |
ad193f32 JJ |
5599 | maxv = valv; |
5600 | else | |
5601 | { | |
440d3472 | 5602 | maxv = masked_increment (valv, cst2v, sgnbit, nprec); |
27bcd47c | 5603 | if (maxv == valv) |
ad193f32 | 5604 | break; |
807e902e | 5605 | maxv -= 1; |
ad193f32 | 5606 | } |
27bcd47c | 5607 | maxv |= ~cst2v; |
ad193f32 JJ |
5608 | minv = sgnbit; |
5609 | valid_p = true; | |
5610 | break; | |
807e902e | 5611 | |
ad193f32 JJ |
5612 | case LT_EXPR: |
5613 | lt_expr: | |
5614 | /* Minimum unsigned value for < is 0 and maximum | |
5615 | unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL. | |
5616 | Otherwise, find smallest VAL2 where VAL2 > VAL | |
5617 | && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2 | |
5618 | as maximum. | |
5619 | For signed comparison, if CST2 doesn't have most | |
5620 | significant bit set, handle it similarly. If CST2 has | |
5621 | MSB set, the maximum is the same and minimum is INT_MIN. */ | |
27bcd47c | 5622 | if (minv == valv) |
ad193f32 | 5623 | { |
27bcd47c | 5624 | if (valv == sgnbit) |
ad193f32 JJ |
5625 | break; |
5626 | maxv = valv; | |
5627 | } | |
5628 | else | |
5629 | { | |
440d3472 | 5630 | maxv = masked_increment (valv, cst2v, sgnbit, nprec); |
27bcd47c | 5631 | if (maxv == valv) |
ad193f32 JJ |
5632 | break; |
5633 | } | |
807e902e | 5634 | maxv -= 1; |
27bcd47c | 5635 | maxv |= ~cst2v; |
ad193f32 JJ |
5636 | minv = sgnbit; |
5637 | valid_p = true; | |
5638 | break; | |
807e902e | 5639 | |
ad193f32 JJ |
5640 | default: |
5641 | break; | |
5642 | } | |
5643 | if (valid_p | |
807e902e | 5644 | && (maxv - minv) != -1) |
ad193f32 JJ |
5645 | { |
5646 | tree tmp, new_val, type; | |
5647 | int i; | |
5648 | ||
5649 | for (i = 0; i < 2; i++) | |
5650 | if (names[i]) | |
5651 | { | |
807e902e | 5652 | wide_int maxv2 = maxv; |
ad193f32 JJ |
5653 | tmp = names[i]; |
5654 | type = TREE_TYPE (names[i]); | |
5655 | if (!TYPE_UNSIGNED (type)) | |
5656 | { | |
440d3472 | 5657 | type = build_nonstandard_integer_type (nprec, 1); |
ad193f32 JJ |
5658 | tmp = build1 (NOP_EXPR, type, names[i]); |
5659 | } | |
807e902e | 5660 | if (minv != 0) |
ad193f32 JJ |
5661 | { |
5662 | tmp = build2 (PLUS_EXPR, type, tmp, | |
807e902e | 5663 | wide_int_to_tree (type, -minv)); |
27bcd47c | 5664 | maxv2 = maxv - minv; |
ad193f32 | 5665 | } |
807e902e | 5666 | new_val = wide_int_to_tree (type, maxv2); |
ad193f32 JJ |
5667 | |
5668 | if (dump_file) | |
5669 | { | |
5670 | fprintf (dump_file, "Adding assert for "); | |
5671 | print_generic_expr (dump_file, names[i], 0); | |
5672 | fprintf (dump_file, " from "); | |
5673 | print_generic_expr (dump_file, tmp, 0); | |
5674 | fprintf (dump_file, "\n"); | |
5675 | } | |
5676 | ||
5677 | register_new_assert_for (names[i], tmp, LE_EXPR, | |
5678 | new_val, NULL, e, bsi); | |
ad193f32 JJ |
5679 | } |
5680 | } | |
5681 | } | |
3877a6a6 | 5682 | } |
2ab8dbf4 RG |
5683 | } |
5684 | ||
279f3eb5 JL |
5685 | /* OP is an operand of a truth value expression which is known to have |
5686 | a particular value. Register any asserts for OP and for any | |
b8698a0f | 5687 | operands in OP's defining statement. |
279f3eb5 JL |
5688 | |
5689 | If CODE is EQ_EXPR, then we want to register OP is zero (false), | |
5690 | if CODE is NE_EXPR, then we want to register OP is nonzero (true). */ | |
5691 | ||
d476245d | 5692 | static void |
279f3eb5 | 5693 | register_edge_assert_for_1 (tree op, enum tree_code code, |
726a989a | 5694 | edge e, gimple_stmt_iterator bsi) |
279f3eb5 | 5695 | { |
355fe088 | 5696 | gimple *op_def; |
726a989a | 5697 | tree val; |
a26a02d7 | 5698 | enum tree_code rhs_code; |
227858d1 | 5699 | |
279f3eb5 JL |
5700 | /* We only care about SSA_NAMEs. */ |
5701 | if (TREE_CODE (op) != SSA_NAME) | |
d476245d | 5702 | return; |
227858d1 | 5703 | |
279f3eb5 | 5704 | /* We know that OP will have a zero or nonzero value. If OP is used |
b24ca895 JJ |
5705 | more than once go ahead and register an assert for OP. */ |
5706 | if (live_on_edge (e, op) | |
5707 | && !has_single_use (op)) | |
279f3eb5 JL |
5708 | { |
5709 | val = build_int_cst (TREE_TYPE (op), 0); | |
2ab8dbf4 | 5710 | register_new_assert_for (op, op, code, val, NULL, e, bsi); |
279f3eb5 JL |
5711 | } |
5712 | ||
5713 | /* Now look at how OP is set. If it's set from a comparison, | |
5714 | a truth operation or some bit operations, then we may be able | |
5715 | to register information about the operands of that assignment. */ | |
5716 | op_def = SSA_NAME_DEF_STMT (op); | |
726a989a | 5717 | if (gimple_code (op_def) != GIMPLE_ASSIGN) |
d476245d | 5718 | return; |
279f3eb5 | 5719 | |
726a989a | 5720 | rhs_code = gimple_assign_rhs_code (op_def); |
279f3eb5 | 5721 | |
726a989a | 5722 | if (TREE_CODE_CLASS (rhs_code) == tcc_comparison) |
227858d1 | 5723 | { |
34fc5065 | 5724 | bool invert = (code == EQ_EXPR ? true : false); |
726a989a RB |
5725 | tree op0 = gimple_assign_rhs1 (op_def); |
5726 | tree op1 = gimple_assign_rhs2 (op_def); | |
227858d1 | 5727 | |
2ab8dbf4 | 5728 | if (TREE_CODE (op0) == SSA_NAME) |
d476245d | 5729 | register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1, invert); |
2ab8dbf4 | 5730 | if (TREE_CODE (op1) == SSA_NAME) |
d476245d | 5731 | register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1, invert); |
279f3eb5 JL |
5732 | } |
5733 | else if ((code == NE_EXPR | |
aebf4828 | 5734 | && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR) |
279f3eb5 | 5735 | || (code == EQ_EXPR |
aebf4828 | 5736 | && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR)) |
279f3eb5 JL |
5737 | { |
5738 | /* Recurse on each operand. */ | |
6b1184ba RB |
5739 | tree op0 = gimple_assign_rhs1 (op_def); |
5740 | tree op1 = gimple_assign_rhs2 (op_def); | |
5741 | if (TREE_CODE (op0) == SSA_NAME | |
5742 | && has_single_use (op0)) | |
d476245d | 5743 | register_edge_assert_for_1 (op0, code, e, bsi); |
6b1184ba RB |
5744 | if (TREE_CODE (op1) == SSA_NAME |
5745 | && has_single_use (op1)) | |
d476245d | 5746 | register_edge_assert_for_1 (op1, code, e, bsi); |
279f3eb5 | 5747 | } |
98958241 KT |
5748 | else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR |
5749 | && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1) | |
279f3eb5 | 5750 | { |
34fc5065 RG |
5751 | /* Recurse, flipping CODE. */ |
5752 | code = invert_tree_comparison (code, false); | |
d476245d | 5753 | register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, bsi); |
279f3eb5 | 5754 | } |
726a989a | 5755 | else if (gimple_assign_rhs_code (op_def) == SSA_NAME) |
279f3eb5 | 5756 | { |
34fc5065 | 5757 | /* Recurse through the copy. */ |
d476245d | 5758 | register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, bsi); |
279f3eb5 | 5759 | } |
1a87cf0c | 5760 | else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def))) |
b8698a0f | 5761 | { |
b168a8df JJ |
5762 | /* Recurse through the type conversion, unless it is a narrowing |
5763 | conversion or conversion from non-integral type. */ | |
5764 | tree rhs = gimple_assign_rhs1 (op_def); | |
5765 | if (INTEGRAL_TYPE_P (TREE_TYPE (rhs)) | |
5766 | && (TYPE_PRECISION (TREE_TYPE (rhs)) | |
5767 | <= TYPE_PRECISION (TREE_TYPE (op)))) | |
d476245d | 5768 | register_edge_assert_for_1 (rhs, code, e, bsi); |
279f3eb5 | 5769 | } |
279f3eb5 | 5770 | } |
da11c5d2 | 5771 | |
279f3eb5 | 5772 | /* Try to register an edge assertion for SSA name NAME on edge E for |
d476245d PP |
5773 | the condition COND contributing to the conditional jump pointed to by |
5774 | SI. */ | |
da11c5d2 | 5775 | |
d476245d | 5776 | static void |
726a989a | 5777 | register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si, |
a26a02d7 RAE |
5778 | enum tree_code cond_code, tree cond_op0, |
5779 | tree cond_op1) | |
279f3eb5 JL |
5780 | { |
5781 | tree val; | |
5782 | enum tree_code comp_code; | |
279f3eb5 JL |
5783 | bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0; |
5784 | ||
5785 | /* Do not attempt to infer anything in names that flow through | |
5786 | abnormal edges. */ | |
5787 | if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name)) | |
d476245d | 5788 | return; |
279f3eb5 | 5789 | |
a26a02d7 RAE |
5790 | if (!extract_code_and_val_from_cond_with_ops (name, cond_code, |
5791 | cond_op0, cond_op1, | |
5792 | is_else_edge, | |
5793 | &comp_code, &val)) | |
d476245d | 5794 | return; |
279f3eb5 | 5795 | |
2ab8dbf4 | 5796 | /* Register ASSERT_EXPRs for name. */ |
d476245d PP |
5797 | register_edge_assert_for_2 (name, e, si, cond_code, cond_op0, |
5798 | cond_op1, is_else_edge); | |
2ab8dbf4 | 5799 | |
279f3eb5 JL |
5800 | |
5801 | /* If COND is effectively an equality test of an SSA_NAME against | |
5802 | the value zero or one, then we may be able to assert values | |
5803 | for SSA_NAMEs which flow into COND. */ | |
5804 | ||
aebf4828 KT |
5805 | /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining |
5806 | statement of NAME we can assert both operands of the BIT_AND_EXPR | |
2f8e468b | 5807 | have nonzero value. */ |
279f3eb5 JL |
5808 | if (((comp_code == EQ_EXPR && integer_onep (val)) |
5809 | || (comp_code == NE_EXPR && integer_zerop (val)))) | |
5810 | { | |
355fe088 | 5811 | gimple *def_stmt = SSA_NAME_DEF_STMT (name); |
279f3eb5 | 5812 | |
726a989a | 5813 | if (is_gimple_assign (def_stmt) |
aebf4828 | 5814 | && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR) |
279f3eb5 | 5815 | { |
726a989a RB |
5816 | tree op0 = gimple_assign_rhs1 (def_stmt); |
5817 | tree op1 = gimple_assign_rhs2 (def_stmt); | |
d476245d PP |
5818 | register_edge_assert_for_1 (op0, NE_EXPR, e, si); |
5819 | register_edge_assert_for_1 (op1, NE_EXPR, e, si); | |
227858d1 DN |
5820 | } |
5821 | } | |
279f3eb5 | 5822 | |
aebf4828 KT |
5823 | /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining |
5824 | statement of NAME we can assert both operands of the BIT_IOR_EXPR | |
279f3eb5 JL |
5825 | have zero value. */ |
5826 | if (((comp_code == EQ_EXPR && integer_zerop (val)) | |
5827 | || (comp_code == NE_EXPR && integer_onep (val)))) | |
227858d1 | 5828 | { |
355fe088 | 5829 | gimple *def_stmt = SSA_NAME_DEF_STMT (name); |
279f3eb5 | 5830 | |
aebf4828 KT |
5831 | /* For BIT_IOR_EXPR only if NAME == 0 both operands have |
5832 | necessarily zero value, or if type-precision is one. */ | |
726a989a | 5833 | if (is_gimple_assign (def_stmt) |
aebf4828 KT |
5834 | && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR |
5835 | && (TYPE_PRECISION (TREE_TYPE (name)) == 1 | |
5836 | || comp_code == EQ_EXPR))) | |
279f3eb5 | 5837 | { |
726a989a RB |
5838 | tree op0 = gimple_assign_rhs1 (def_stmt); |
5839 | tree op1 = gimple_assign_rhs2 (def_stmt); | |
d476245d PP |
5840 | register_edge_assert_for_1 (op0, EQ_EXPR, e, si); |
5841 | register_edge_assert_for_1 (op1, EQ_EXPR, e, si); | |
279f3eb5 | 5842 | } |
227858d1 | 5843 | } |
227858d1 DN |
5844 | } |
5845 | ||
5846 | ||
227858d1 | 5847 | /* Determine whether the outgoing edges of BB should receive an |
279f3eb5 | 5848 | ASSERT_EXPR for each of the operands of BB's LAST statement. |
9bb6aa43 | 5849 | The last statement of BB must be a COND_EXPR. |
227858d1 DN |
5850 | |
5851 | If any of the sub-graphs rooted at BB have an interesting use of | |
5852 | the predicate operands, an assert location node is added to the | |
5853 | list of assertions for the corresponding operands. */ | |
5854 | ||
d476245d | 5855 | static void |
538dd0b7 | 5856 | find_conditional_asserts (basic_block bb, gcond *last) |
227858d1 | 5857 | { |
726a989a | 5858 | gimple_stmt_iterator bsi; |
279f3eb5 | 5859 | tree op; |
227858d1 DN |
5860 | edge_iterator ei; |
5861 | edge e; | |
5862 | ssa_op_iter iter; | |
5863 | ||
726a989a | 5864 | bsi = gsi_for_stmt (last); |
227858d1 DN |
5865 | |
5866 | /* Look for uses of the operands in each of the sub-graphs | |
5867 | rooted at BB. We need to check each of the outgoing edges | |
5868 | separately, so that we know what kind of ASSERT_EXPR to | |
5869 | insert. */ | |
5870 | FOR_EACH_EDGE (e, ei, bb->succs) | |
5871 | { | |
5872 | if (e->dest == bb) | |
5873 | continue; | |
5874 | ||
227858d1 DN |
5875 | /* Register the necessary assertions for each operand in the |
5876 | conditional predicate. */ | |
5877 | FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE) | |
d476245d PP |
5878 | register_edge_assert_for (op, e, bsi, |
5879 | gimple_cond_code (last), | |
5880 | gimple_cond_lhs (last), | |
5881 | gimple_cond_rhs (last)); | |
227858d1 | 5882 | } |
227858d1 DN |
5883 | } |
5884 | ||
1aa9438f JJ |
5885 | struct case_info |
5886 | { | |
5887 | tree expr; | |
5888 | basic_block bb; | |
5889 | }; | |
5890 | ||
5891 | /* Compare two case labels sorting first by the destination bb index | |
9bb6aa43 RG |
5892 | and then by the case value. */ |
5893 | ||
5894 | static int | |
5895 | compare_case_labels (const void *p1, const void *p2) | |
5896 | { | |
1aa9438f JJ |
5897 | const struct case_info *ci1 = (const struct case_info *) p1; |
5898 | const struct case_info *ci2 = (const struct case_info *) p2; | |
5899 | int idx1 = ci1->bb->index; | |
5900 | int idx2 = ci2->bb->index; | |
9bb6aa43 | 5901 | |
1aa9438f | 5902 | if (idx1 < idx2) |
9bb6aa43 | 5903 | return -1; |
1aa9438f | 5904 | else if (idx1 == idx2) |
9bb6aa43 RG |
5905 | { |
5906 | /* Make sure the default label is first in a group. */ | |
1aa9438f | 5907 | if (!CASE_LOW (ci1->expr)) |
9bb6aa43 | 5908 | return -1; |
1aa9438f | 5909 | else if (!CASE_LOW (ci2->expr)) |
9bb6aa43 RG |
5910 | return 1; |
5911 | else | |
1aa9438f JJ |
5912 | return tree_int_cst_compare (CASE_LOW (ci1->expr), |
5913 | CASE_LOW (ci2->expr)); | |
9bb6aa43 RG |
5914 | } |
5915 | else | |
5916 | return 1; | |
5917 | } | |
5918 | ||
5919 | /* Determine whether the outgoing edges of BB should receive an | |
5920 | ASSERT_EXPR for each of the operands of BB's LAST statement. | |
5921 | The last statement of BB must be a SWITCH_EXPR. | |
5922 | ||
5923 | If any of the sub-graphs rooted at BB have an interesting use of | |
5924 | the predicate operands, an assert location node is added to the | |
5925 | list of assertions for the corresponding operands. */ | |
5926 | ||
d476245d | 5927 | static void |
538dd0b7 | 5928 | find_switch_asserts (basic_block bb, gswitch *last) |
9bb6aa43 | 5929 | { |
726a989a | 5930 | gimple_stmt_iterator bsi; |
a26a02d7 | 5931 | tree op; |
9bb6aa43 | 5932 | edge e; |
1aa9438f JJ |
5933 | struct case_info *ci; |
5934 | size_t n = gimple_switch_num_labels (last); | |
109e637b | 5935 | #if GCC_VERSION >= 4000 |
9bb6aa43 | 5936 | unsigned int idx; |
109e637b JM |
5937 | #else |
5938 | /* Work around GCC 3.4 bug (PR 37086). */ | |
5939 | volatile unsigned int idx; | |
5940 | #endif | |
9bb6aa43 | 5941 | |
726a989a RB |
5942 | bsi = gsi_for_stmt (last); |
5943 | op = gimple_switch_index (last); | |
9bb6aa43 | 5944 | if (TREE_CODE (op) != SSA_NAME) |
d476245d | 5945 | return; |
9bb6aa43 RG |
5946 | |
5947 | /* Build a vector of case labels sorted by destination label. */ | |
1aa9438f | 5948 | ci = XNEWVEC (struct case_info, n); |
9bb6aa43 | 5949 | for (idx = 0; idx < n; ++idx) |
1aa9438f JJ |
5950 | { |
5951 | ci[idx].expr = gimple_switch_label (last, idx); | |
5952 | ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr)); | |
5953 | } | |
5954 | qsort (ci, n, sizeof (struct case_info), compare_case_labels); | |
9bb6aa43 RG |
5955 | |
5956 | for (idx = 0; idx < n; ++idx) | |
5957 | { | |
5958 | tree min, max; | |
1aa9438f JJ |
5959 | tree cl = ci[idx].expr; |
5960 | basic_block cbb = ci[idx].bb; | |
9bb6aa43 RG |
5961 | |
5962 | min = CASE_LOW (cl); | |
5963 | max = CASE_HIGH (cl); | |
5964 | ||
5965 | /* If there are multiple case labels with the same destination | |
5966 | we need to combine them to a single value range for the edge. */ | |
1aa9438f | 5967 | if (idx + 1 < n && cbb == ci[idx + 1].bb) |
9bb6aa43 RG |
5968 | { |
5969 | /* Skip labels until the last of the group. */ | |
5970 | do { | |
5971 | ++idx; | |
1aa9438f | 5972 | } while (idx < n && cbb == ci[idx].bb); |
9bb6aa43 RG |
5973 | --idx; |
5974 | ||
5975 | /* Pick up the maximum of the case label range. */ | |
1aa9438f JJ |
5976 | if (CASE_HIGH (ci[idx].expr)) |
5977 | max = CASE_HIGH (ci[idx].expr); | |
9bb6aa43 | 5978 | else |
1aa9438f | 5979 | max = CASE_LOW (ci[idx].expr); |
9bb6aa43 RG |
5980 | } |
5981 | ||
5982 | /* Nothing to do if the range includes the default label until we | |
5983 | can register anti-ranges. */ | |
5984 | if (min == NULL_TREE) | |
5985 | continue; | |
5986 | ||
5987 | /* Find the edge to register the assert expr on. */ | |
1aa9438f | 5988 | e = find_edge (bb, cbb); |
9bb6aa43 | 5989 | |
9bb6aa43 RG |
5990 | /* Register the necessary assertions for the operand in the |
5991 | SWITCH_EXPR. */ | |
d476245d PP |
5992 | register_edge_assert_for (op, e, bsi, |
5993 | max ? GE_EXPR : EQ_EXPR, | |
5994 | op, fold_convert (TREE_TYPE (op), min)); | |
9bb6aa43 | 5995 | if (max) |
d476245d PP |
5996 | register_edge_assert_for (op, e, bsi, LE_EXPR, op, |
5997 | fold_convert (TREE_TYPE (op), max)); | |
9bb6aa43 RG |
5998 | } |
5999 | ||
1aa9438f | 6000 | XDELETEVEC (ci); |
9bb6aa43 RG |
6001 | } |
6002 | ||
227858d1 DN |
6003 | |
6004 | /* Traverse all the statements in block BB looking for statements that | |
6005 | may generate useful assertions for the SSA names in their operand. | |
6006 | If a statement produces a useful assertion A for name N_i, then the | |
6007 | list of assertions already generated for N_i is scanned to | |
6008 | determine if A is actually needed. | |
b8698a0f | 6009 | |
227858d1 DN |
6010 | If N_i already had the assertion A at a location dominating the |
6011 | current location, then nothing needs to be done. Otherwise, the | |
6012 | new location for A is recorded instead. | |
6013 | ||
6014 | 1- For every statement S in BB, all the variables used by S are | |
6015 | added to bitmap FOUND_IN_SUBGRAPH. | |
6016 | ||
6017 | 2- If statement S uses an operand N in a way that exposes a known | |
6018 | value range for N, then if N was not already generated by an | |
6019 | ASSERT_EXPR, create a new assert location for N. For instance, | |
6020 | if N is a pointer and the statement dereferences it, we can | |
6021 | assume that N is not NULL. | |
6022 | ||
6023 | 3- COND_EXPRs are a special case of #2. We can derive range | |
6024 | information from the predicate but need to insert different | |
6025 | ASSERT_EXPRs for each of the sub-graphs rooted at the | |
6026 | conditional block. If the last statement of BB is a conditional | |
6027 | expression of the form 'X op Y', then | |
6028 | ||
6029 | a) Remove X and Y from the set FOUND_IN_SUBGRAPH. | |
6030 | ||
6031 | b) If the conditional is the only entry point to the sub-graph | |
6032 | corresponding to the THEN_CLAUSE, recurse into it. On | |
6033 | return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then | |
6034 | an ASSERT_EXPR is added for the corresponding variable. | |
6035 | ||
6036 | c) Repeat step (b) on the ELSE_CLAUSE. | |
6037 | ||
6038 | d) Mark X and Y in FOUND_IN_SUBGRAPH. | |
6039 | ||
6040 | For instance, | |
6041 | ||
6042 | if (a == 9) | |
6043 | b = a; | |
6044 | else | |
6045 | b = c + 1; | |
6046 | ||
6047 | In this case, an assertion on the THEN clause is useful to | |
6048 | determine that 'a' is always 9 on that edge. However, an assertion | |
6049 | on the ELSE clause would be unnecessary. | |
6050 | ||
6051 | 4- If BB does not end in a conditional expression, then we recurse | |
6052 | into BB's dominator children. | |
b8698a0f | 6053 | |
227858d1 DN |
6054 | At the end of the recursive traversal, every SSA name will have a |
6055 | list of locations where ASSERT_EXPRs should be added. When a new | |
6056 | location for name N is found, it is registered by calling | |
6057 | register_new_assert_for. That function keeps track of all the | |
6058 | registered assertions to prevent adding unnecessary assertions. | |
6059 | For instance, if a pointer P_4 is dereferenced more than once in a | |
6060 | dominator tree, only the location dominating all the dereference of | |
d476245d | 6061 | P_4 will receive an ASSERT_EXPR. */ |
227858d1 | 6062 | |
d476245d | 6063 | static void |
c4ab2baa | 6064 | find_assert_locations_1 (basic_block bb, sbitmap live) |
227858d1 | 6065 | { |
355fe088 | 6066 | gimple *last; |
227858d1 | 6067 | |
c4ab2baa | 6068 | last = last_stmt (bb); |
227858d1 | 6069 | |
c4ab2baa RG |
6070 | /* If BB's last statement is a conditional statement involving integer |
6071 | operands, determine if we need to add ASSERT_EXPRs. */ | |
6072 | if (last | |
6073 | && gimple_code (last) == GIMPLE_COND | |
6074 | && !fp_predicate (last) | |
6075 | && !ZERO_SSA_OPERANDS (last, SSA_OP_USE)) | |
538dd0b7 | 6076 | find_conditional_asserts (bb, as_a <gcond *> (last)); |
227858d1 | 6077 | |
c4ab2baa RG |
6078 | /* If BB's last statement is a switch statement involving integer |
6079 | operands, determine if we need to add ASSERT_EXPRs. */ | |
6080 | if (last | |
6081 | && gimple_code (last) == GIMPLE_SWITCH | |
6082 | && !ZERO_SSA_OPERANDS (last, SSA_OP_USE)) | |
538dd0b7 | 6083 | find_switch_asserts (bb, as_a <gswitch *> (last)); |
227858d1 DN |
6084 | |
6085 | /* Traverse all the statements in BB marking used names and looking | |
6086 | for statements that may infer assertions for their used operands. */ | |
538dd0b7 DM |
6087 | for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si); |
6088 | gsi_prev (&si)) | |
227858d1 | 6089 | { |
355fe088 | 6090 | gimple *stmt; |
726a989a | 6091 | tree op; |
227858d1 DN |
6092 | ssa_op_iter i; |
6093 | ||
726a989a | 6094 | stmt = gsi_stmt (si); |
227858d1 | 6095 | |
b5b8b0ac AO |
6096 | if (is_gimple_debug (stmt)) |
6097 | continue; | |
6098 | ||
227858d1 DN |
6099 | /* See if we can derive an assertion for any of STMT's operands. */ |
6100 | FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE) | |
6101 | { | |
6102 | tree value; | |
6103 | enum tree_code comp_code; | |
6104 | ||
f7a39c55 RG |
6105 | /* If op is not live beyond this stmt, do not bother to insert |
6106 | asserts for it. */ | |
d7c028c0 | 6107 | if (!bitmap_bit_p (live, SSA_NAME_VERSION (op))) |
f7a39c55 | 6108 | continue; |
227858d1 | 6109 | |
227858d1 DN |
6110 | /* If OP is used in such a way that we can infer a value |
6111 | range for it, and we don't find a previous assertion for | |
6112 | it, create a new assertion location node for OP. */ | |
6113 | if (infer_value_range (stmt, op, &comp_code, &value)) | |
6114 | { | |
917f1b7e | 6115 | /* If we are able to infer a nonzero value range for OP, |
60c9ad46 JL |
6116 | then walk backwards through the use-def chain to see if OP |
6117 | was set via a typecast. | |
6118 | ||
6119 | If so, then we can also infer a nonzero value range | |
6120 | for the operand of the NOP_EXPR. */ | |
6121 | if (comp_code == NE_EXPR && integer_zerop (value)) | |
6122 | { | |
6123 | tree t = op; | |
355fe088 | 6124 | gimple *def_stmt = SSA_NAME_DEF_STMT (t); |
b8698a0f | 6125 | |
726a989a | 6126 | while (is_gimple_assign (def_stmt) |
625a9766 RB |
6127 | && CONVERT_EXPR_CODE_P |
6128 | (gimple_assign_rhs_code (def_stmt)) | |
07beea0d | 6129 | && TREE_CODE |
726a989a | 6130 | (gimple_assign_rhs1 (def_stmt)) == SSA_NAME |
07beea0d | 6131 | && POINTER_TYPE_P |
726a989a | 6132 | (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))) |
60c9ad46 | 6133 | { |
726a989a | 6134 | t = gimple_assign_rhs1 (def_stmt); |
60c9ad46 JL |
6135 | def_stmt = SSA_NAME_DEF_STMT (t); |
6136 | ||
6137 | /* Note we want to register the assert for the | |
6138 | operand of the NOP_EXPR after SI, not after the | |
6139 | conversion. */ | |
6140 | if (! has_single_use (t)) | |
d476245d PP |
6141 | register_new_assert_for (t, t, comp_code, value, |
6142 | bb, NULL, si); | |
60c9ad46 JL |
6143 | } |
6144 | } | |
6145 | ||
f7a39c55 | 6146 | register_new_assert_for (op, op, comp_code, value, bb, NULL, si); |
0bca51f0 DN |
6147 | } |
6148 | } | |
f7a39c55 RG |
6149 | |
6150 | /* Update live. */ | |
6151 | FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE) | |
d7c028c0 | 6152 | bitmap_set_bit (live, SSA_NAME_VERSION (op)); |
f7a39c55 | 6153 | FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF) |
d7c028c0 | 6154 | bitmap_clear_bit (live, SSA_NAME_VERSION (op)); |
0bca51f0 DN |
6155 | } |
6156 | ||
f7a39c55 | 6157 | /* Traverse all PHI nodes in BB, updating live. */ |
538dd0b7 DM |
6158 | for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si); |
6159 | gsi_next (&si)) | |
c4ab2baa RG |
6160 | { |
6161 | use_operand_p arg_p; | |
6162 | ssa_op_iter i; | |
538dd0b7 | 6163 | gphi *phi = si.phi (); |
f7a39c55 RG |
6164 | tree res = gimple_phi_result (phi); |
6165 | ||
6166 | if (virtual_operand_p (res)) | |
6167 | continue; | |
9bb6aa43 | 6168 | |
c4ab2baa RG |
6169 | FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE) |
6170 | { | |
6171 | tree arg = USE_FROM_PTR (arg_p); | |
6172 | if (TREE_CODE (arg) == SSA_NAME) | |
d7c028c0 | 6173 | bitmap_set_bit (live, SSA_NAME_VERSION (arg)); |
c4ab2baa | 6174 | } |
f7a39c55 | 6175 | |
d7c028c0 | 6176 | bitmap_clear_bit (live, SSA_NAME_VERSION (res)); |
c4ab2baa | 6177 | } |
227858d1 DN |
6178 | } |
6179 | ||
c4ab2baa | 6180 | /* Do an RPO walk over the function computing SSA name liveness |
d476245d | 6181 | on-the-fly and deciding on assert expressions to insert. */ |
c4ab2baa | 6182 | |
d476245d | 6183 | static void |
c4ab2baa RG |
6184 | find_assert_locations (void) |
6185 | { | |
8b1c6fd7 DM |
6186 | int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun)); |
6187 | int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun)); | |
6188 | int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun)); | |
c4ab2baa | 6189 | int rpo_cnt, i; |
c4ab2baa | 6190 | |
8b1c6fd7 | 6191 | live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun)); |
c4ab2baa RG |
6192 | rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false); |
6193 | for (i = 0; i < rpo_cnt; ++i) | |
6194 | bb_rpo[rpo[i]] = i; | |
6195 | ||
d23c0a32 JJ |
6196 | /* Pre-seed loop latch liveness from loop header PHI nodes. Due to |
6197 | the order we compute liveness and insert asserts we otherwise | |
6198 | fail to insert asserts into the loop latch. */ | |
6199 | loop_p loop; | |
f0bd40b1 | 6200 | FOR_EACH_LOOP (loop, 0) |
d23c0a32 JJ |
6201 | { |
6202 | i = loop->latch->index; | |
6203 | unsigned int j = single_succ_edge (loop->latch)->dest_idx; | |
538dd0b7 | 6204 | for (gphi_iterator gsi = gsi_start_phis (loop->header); |
d23c0a32 JJ |
6205 | !gsi_end_p (gsi); gsi_next (&gsi)) |
6206 | { | |
538dd0b7 | 6207 | gphi *phi = gsi.phi (); |
d23c0a32 JJ |
6208 | if (virtual_operand_p (gimple_phi_result (phi))) |
6209 | continue; | |
6210 | tree arg = gimple_phi_arg_def (phi, j); | |
6211 | if (TREE_CODE (arg) == SSA_NAME) | |
6212 | { | |
6213 | if (live[i] == NULL) | |
6214 | { | |
6215 | live[i] = sbitmap_alloc (num_ssa_names); | |
6216 | bitmap_clear (live[i]); | |
6217 | } | |
6218 | bitmap_set_bit (live[i], SSA_NAME_VERSION (arg)); | |
6219 | } | |
6220 | } | |
6221 | } | |
6222 | ||
c302207e | 6223 | for (i = rpo_cnt - 1; i >= 0; --i) |
c4ab2baa | 6224 | { |
06e28de2 | 6225 | basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]); |
c4ab2baa RG |
6226 | edge e; |
6227 | edge_iterator ei; | |
6228 | ||
6229 | if (!live[rpo[i]]) | |
6230 | { | |
6231 | live[rpo[i]] = sbitmap_alloc (num_ssa_names); | |
f61e445a | 6232 | bitmap_clear (live[rpo[i]]); |
c4ab2baa RG |
6233 | } |
6234 | ||
6235 | /* Process BB and update the live information with uses in | |
6236 | this block. */ | |
d476245d | 6237 | find_assert_locations_1 (bb, live[rpo[i]]); |
c4ab2baa RG |
6238 | |
6239 | /* Merge liveness into the predecessor blocks and free it. */ | |
f61e445a | 6240 | if (!bitmap_empty_p (live[rpo[i]])) |
c4ab2baa RG |
6241 | { |
6242 | int pred_rpo = i; | |
6243 | FOR_EACH_EDGE (e, ei, bb->preds) | |
6244 | { | |
6245 | int pred = e->src->index; | |
6f723d33 | 6246 | if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK) |
c4ab2baa RG |
6247 | continue; |
6248 | ||
6249 | if (!live[pred]) | |
6250 | { | |
6251 | live[pred] = sbitmap_alloc (num_ssa_names); | |
f61e445a | 6252 | bitmap_clear (live[pred]); |
c4ab2baa | 6253 | } |
f61e445a | 6254 | bitmap_ior (live[pred], live[pred], live[rpo[i]]); |
c4ab2baa RG |
6255 | |
6256 | if (bb_rpo[pred] < pred_rpo) | |
6257 | pred_rpo = bb_rpo[pred]; | |
6258 | } | |
6259 | ||
6260 | /* Record the RPO number of the last visited block that needs | |
6261 | live information from this block. */ | |
6262 | last_rpo[rpo[i]] = pred_rpo; | |
6263 | } | |
6264 | else | |
6265 | { | |
6266 | sbitmap_free (live[rpo[i]]); | |
6267 | live[rpo[i]] = NULL; | |
6268 | } | |
6269 | ||
6270 | /* We can free all successors live bitmaps if all their | |
6271 | predecessors have been visited already. */ | |
6272 | FOR_EACH_EDGE (e, ei, bb->succs) | |
6273 | if (last_rpo[e->dest->index] == i | |
6274 | && live[e->dest->index]) | |
6275 | { | |
6276 | sbitmap_free (live[e->dest->index]); | |
6277 | live[e->dest->index] = NULL; | |
6278 | } | |
6279 | } | |
6280 | ||
6281 | XDELETEVEC (rpo); | |
6282 | XDELETEVEC (bb_rpo); | |
6283 | XDELETEVEC (last_rpo); | |
8b1c6fd7 | 6284 | for (i = 0; i < last_basic_block_for_fn (cfun); ++i) |
c4ab2baa RG |
6285 | if (live[i]) |
6286 | sbitmap_free (live[i]); | |
6287 | XDELETEVEC (live); | |
c4ab2baa | 6288 | } |
227858d1 DN |
6289 | |
6290 | /* Create an ASSERT_EXPR for NAME and insert it in the location | |
6291 | indicated by LOC. Return true if we made any edge insertions. */ | |
6292 | ||
6293 | static bool | |
ff507401 | 6294 | process_assert_insertions_for (tree name, assert_locus *loc) |
227858d1 DN |
6295 | { |
6296 | /* Build the comparison expression NAME_i COMP_CODE VAL. */ | |
355fe088 | 6297 | gimple *stmt; |
726a989a | 6298 | tree cond; |
355fe088 | 6299 | gimple *assert_stmt; |
227858d1 DN |
6300 | edge_iterator ei; |
6301 | edge e; | |
6302 | ||
ff0a0c1d RG |
6303 | /* If we have X <=> X do not insert an assert expr for that. */ |
6304 | if (loc->expr == loc->val) | |
6305 | return false; | |
6306 | ||
2ab8dbf4 | 6307 | cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val); |
726a989a | 6308 | assert_stmt = build_assert_expr_for (cond, name); |
227858d1 | 6309 | if (loc->e) |
0bca51f0 | 6310 | { |
227858d1 DN |
6311 | /* We have been asked to insert the assertion on an edge. This |
6312 | is used only by COND_EXPR and SWITCH_EXPR assertions. */ | |
77a74ed7 NF |
6313 | gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND |
6314 | || (gimple_code (gsi_stmt (loc->si)) | |
6315 | == GIMPLE_SWITCH)); | |
0bca51f0 | 6316 | |
726a989a | 6317 | gsi_insert_on_edge (loc->e, assert_stmt); |
227858d1 DN |
6318 | return true; |
6319 | } | |
9fabf0d4 | 6320 | |
227858d1 DN |
6321 | /* Otherwise, we can insert right after LOC->SI iff the |
6322 | statement must not be the last statement in the block. */ | |
726a989a | 6323 | stmt = gsi_stmt (loc->si); |
227858d1 DN |
6324 | if (!stmt_ends_bb_p (stmt)) |
6325 | { | |
726a989a | 6326 | gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT); |
227858d1 DN |
6327 | return false; |
6328 | } | |
9fabf0d4 | 6329 | |
227858d1 DN |
6330 | /* If STMT must be the last statement in BB, we can only insert new |
6331 | assertions on the non-abnormal edge out of BB. Note that since | |
6332 | STMT is not control flow, there may only be one non-abnormal edge | |
6333 | out of BB. */ | |
6334 | FOR_EACH_EDGE (e, ei, loc->bb->succs) | |
6335 | if (!(e->flags & EDGE_ABNORMAL)) | |
6336 | { | |
726a989a | 6337 | gsi_insert_on_edge (e, assert_stmt); |
227858d1 DN |
6338 | return true; |
6339 | } | |
0bca51f0 | 6340 | |
227858d1 DN |
6341 | gcc_unreachable (); |
6342 | } | |
0bca51f0 | 6343 | |
0bca51f0 | 6344 | |
227858d1 DN |
6345 | /* Process all the insertions registered for every name N_i registered |
6346 | in NEED_ASSERT_FOR. The list of assertions to be inserted are | |
6347 | found in ASSERTS_FOR[i]. */ | |
0bca51f0 | 6348 | |
227858d1 DN |
6349 | static void |
6350 | process_assert_insertions (void) | |
6351 | { | |
6352 | unsigned i; | |
6353 | bitmap_iterator bi; | |
6354 | bool update_edges_p = false; | |
6355 | int num_asserts = 0; | |
0bca51f0 | 6356 | |
227858d1 DN |
6357 | if (dump_file && (dump_flags & TDF_DETAILS)) |
6358 | dump_all_asserts (dump_file); | |
60b4ccde | 6359 | |
227858d1 DN |
6360 | EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi) |
6361 | { | |
ff507401 | 6362 | assert_locus *loc = asserts_for[i]; |
227858d1 DN |
6363 | gcc_assert (loc); |
6364 | ||
6365 | while (loc) | |
60b4ccde | 6366 | { |
ff507401 | 6367 | assert_locus *next = loc->next; |
227858d1 DN |
6368 | update_edges_p |= process_assert_insertions_for (ssa_name (i), loc); |
6369 | free (loc); | |
6370 | loc = next; | |
6371 | num_asserts++; | |
60b4ccde | 6372 | } |
0bca51f0 | 6373 | } |
0bca51f0 | 6374 | |
227858d1 | 6375 | if (update_edges_p) |
726a989a | 6376 | gsi_commit_edge_inserts (); |
0bca51f0 | 6377 | |
01902653 RG |
6378 | statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted", |
6379 | num_asserts); | |
0bca51f0 DN |
6380 | } |
6381 | ||
6382 | ||
6383 | /* Traverse the flowgraph looking for conditional jumps to insert range | |
6384 | expressions. These range expressions are meant to provide information | |
6385 | to optimizations that need to reason in terms of value ranges. They | |
6386 | will not be expanded into RTL. For instance, given: | |
6387 | ||
6388 | x = ... | |
6389 | y = ... | |
6390 | if (x < y) | |
6391 | y = x - 2; | |
6392 | else | |
6393 | x = y + 3; | |
6394 | ||
6395 | this pass will transform the code into: | |
6396 | ||
6397 | x = ... | |
6398 | y = ... | |
6399 | if (x < y) | |
6400 | { | |
6401 | x = ASSERT_EXPR <x, x < y> | |
6402 | y = x - 2 | |
6403 | } | |
6404 | else | |
6405 | { | |
36f291f7 | 6406 | y = ASSERT_EXPR <y, x >= y> |
0bca51f0 DN |
6407 | x = y + 3 |
6408 | } | |
6409 | ||
6410 | The idea is that once copy and constant propagation have run, other | |
6411 | optimizations will be able to determine what ranges of values can 'x' | |
6412 | take in different paths of the code, simply by checking the reaching | |
6413 | definition of 'x'. */ | |
6414 | ||
6415 | static void | |
6416 | insert_range_assertions (void) | |
6417 | { | |
227858d1 | 6418 | need_assert_for = BITMAP_ALLOC (NULL); |
ff507401 | 6419 | asserts_for = XCNEWVEC (assert_locus *, num_ssa_names); |
0bca51f0 DN |
6420 | |
6421 | calculate_dominance_info (CDI_DOMINATORS); | |
6422 | ||
d476245d PP |
6423 | find_assert_locations (); |
6424 | if (!bitmap_empty_p (need_assert_for)) | |
0bca51f0 | 6425 | { |
227858d1 | 6426 | process_assert_insertions (); |
0bca51f0 DN |
6427 | update_ssa (TODO_update_ssa_no_phi); |
6428 | } | |
6429 | ||
6430 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
6431 | { | |
6432 | fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n"); | |
6433 | dump_function_to_file (current_function_decl, dump_file, dump_flags); | |
6434 | } | |
6435 | ||
227858d1 DN |
6436 | free (asserts_for); |
6437 | BITMAP_FREE (need_assert_for); | |
0bca51f0 DN |
6438 | } |
6439 | ||
590b1f2d DM |
6440 | /* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays |
6441 | and "struct" hacks. If VRP can determine that the | |
9f5ed61a | 6442 | array subscript is a constant, check if it is outside valid |
590b1f2d DM |
6443 | range. If the array subscript is a RANGE, warn if it is |
6444 | non-overlapping with valid range. | |
6445 | IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */ | |
6446 | ||
6447 | static void | |
c2255bc4 | 6448 | check_array_ref (location_t location, tree ref, bool ignore_off_by_one) |
590b1f2d | 6449 | { |
526ceb68 | 6450 | value_range *vr = NULL; |
590b1f2d | 6451 | tree low_sub, up_sub; |
12bd5a1e RG |
6452 | tree low_bound, up_bound, up_bound_p1; |
6453 | tree base; | |
6454 | ||
6455 | if (TREE_NO_WARNING (ref)) | |
6456 | return; | |
590b1f2d DM |
6457 | |
6458 | low_sub = up_sub = TREE_OPERAND (ref, 1); | |
12bd5a1e | 6459 | up_bound = array_ref_up_bound (ref); |
590b1f2d | 6460 | |
db8800bc | 6461 | /* Can not check flexible arrays. */ |
12bd5a1e | 6462 | if (!up_bound |
db8800bc | 6463 | || TREE_CODE (up_bound) != INTEGER_CST) |
590b1f2d DM |
6464 | return; |
6465 | ||
12bd5a1e RG |
6466 | /* Accesses to trailing arrays via pointers may access storage |
6467 | beyond the types array bounds. */ | |
6468 | base = get_base_address (ref); | |
de1b5c17 MU |
6469 | if ((warn_array_bounds < 2) |
6470 | && base && TREE_CODE (base) == MEM_REF) | |
12bd5a1e RG |
6471 | { |
6472 | tree cref, next = NULL_TREE; | |
6473 | ||
6474 | if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF) | |
6475 | return; | |
6476 | ||
6477 | cref = TREE_OPERAND (ref, 0); | |
6478 | if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE) | |
910ad8de | 6479 | for (next = DECL_CHAIN (TREE_OPERAND (cref, 1)); |
12bd5a1e | 6480 | next && TREE_CODE (next) != FIELD_DECL; |
910ad8de | 6481 | next = DECL_CHAIN (next)) |
12bd5a1e RG |
6482 | ; |
6483 | ||
6484 | /* If this is the last field in a struct type or a field in a | |
6485 | union type do not warn. */ | |
6486 | if (!next) | |
6487 | return; | |
6488 | } | |
6489 | ||
590b1f2d | 6490 | low_bound = array_ref_low_bound (ref); |
807e902e KZ |
6491 | up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, |
6492 | build_int_cst (TREE_TYPE (up_bound), 1)); | |
590b1f2d | 6493 | |
f8269ad4 RB |
6494 | /* Empty array. */ |
6495 | if (tree_int_cst_equal (low_bound, up_bound_p1)) | |
6496 | { | |
6497 | warning_at (location, OPT_Warray_bounds, | |
6498 | "array subscript is above array bounds"); | |
6499 | TREE_NO_WARNING (ref) = 1; | |
6500 | } | |
6501 | ||
590b1f2d DM |
6502 | if (TREE_CODE (low_sub) == SSA_NAME) |
6503 | { | |
6504 | vr = get_value_range (low_sub); | |
6505 | if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE) | |
6506 | { | |
6507 | low_sub = vr->type == VR_RANGE ? vr->max : vr->min; | |
6508 | up_sub = vr->type == VR_RANGE ? vr->min : vr->max; | |
6509 | } | |
6510 | } | |
6511 | ||
6512 | if (vr && vr->type == VR_ANTI_RANGE) | |
6513 | { | |
6514 | if (TREE_CODE (up_sub) == INTEGER_CST | |
f8269ad4 RB |
6515 | && (ignore_off_by_one |
6516 | ? tree_int_cst_lt (up_bound, up_sub) | |
6517 | : tree_int_cst_le (up_bound, up_sub)) | |
590b1f2d | 6518 | && TREE_CODE (low_sub) == INTEGER_CST |
f8269ad4 | 6519 | && tree_int_cst_le (low_sub, low_bound)) |
590b1f2d | 6520 | { |
92ef7fb1 MLI |
6521 | warning_at (location, OPT_Warray_bounds, |
6522 | "array subscript is outside array bounds"); | |
590b1f2d DM |
6523 | TREE_NO_WARNING (ref) = 1; |
6524 | } | |
6525 | } | |
6526 | else if (TREE_CODE (up_sub) == INTEGER_CST | |
12bd5a1e | 6527 | && (ignore_off_by_one |
f8269ad4 RB |
6528 | ? !tree_int_cst_le (up_sub, up_bound_p1) |
6529 | : !tree_int_cst_le (up_sub, up_bound))) | |
590b1f2d | 6530 | { |
83ede847 RB |
6531 | if (dump_file && (dump_flags & TDF_DETAILS)) |
6532 | { | |
6533 | fprintf (dump_file, "Array bound warning for "); | |
6534 | dump_generic_expr (MSG_NOTE, TDF_SLIM, ref); | |
b4a4b56d | 6535 | fprintf (dump_file, "\n"); |
83ede847 | 6536 | } |
92ef7fb1 MLI |
6537 | warning_at (location, OPT_Warray_bounds, |
6538 | "array subscript is above array bounds"); | |
590b1f2d DM |
6539 | TREE_NO_WARNING (ref) = 1; |
6540 | } | |
6541 | else if (TREE_CODE (low_sub) == INTEGER_CST | |
6542 | && tree_int_cst_lt (low_sub, low_bound)) | |
6543 | { | |
83ede847 RB |
6544 | if (dump_file && (dump_flags & TDF_DETAILS)) |
6545 | { | |
6546 | fprintf (dump_file, "Array bound warning for "); | |
6547 | dump_generic_expr (MSG_NOTE, TDF_SLIM, ref); | |
b4a4b56d | 6548 | fprintf (dump_file, "\n"); |
83ede847 | 6549 | } |
92ef7fb1 MLI |
6550 | warning_at (location, OPT_Warray_bounds, |
6551 | "array subscript is below array bounds"); | |
590b1f2d DM |
6552 | TREE_NO_WARNING (ref) = 1; |
6553 | } | |
6554 | } | |
6555 | ||
05fb69e4 DM |
6556 | /* Searches if the expr T, located at LOCATION computes |
6557 | address of an ARRAY_REF, and call check_array_ref on it. */ | |
6558 | ||
6559 | static void | |
92ef7fb1 | 6560 | search_for_addr_array (tree t, location_t location) |
05fb69e4 | 6561 | { |
05fb69e4 | 6562 | /* Check each ARRAY_REFs in the reference chain. */ |
b8698a0f | 6563 | do |
05fb69e4 DM |
6564 | { |
6565 | if (TREE_CODE (t) == ARRAY_REF) | |
c2255bc4 | 6566 | check_array_ref (location, t, true /*ignore_off_by_one*/); |
05fb69e4 | 6567 | |
9968d233 | 6568 | t = TREE_OPERAND (t, 0); |
05fb69e4 DM |
6569 | } |
6570 | while (handled_component_p (t)); | |
70f34814 RG |
6571 | |
6572 | if (TREE_CODE (t) == MEM_REF | |
6573 | && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR | |
6574 | && !TREE_NO_WARNING (t)) | |
6575 | { | |
6576 | tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0); | |
6577 | tree low_bound, up_bound, el_sz; | |
807e902e | 6578 | offset_int idx; |
70f34814 RG |
6579 | if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE |
6580 | || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE | |
6581 | || !TYPE_DOMAIN (TREE_TYPE (tem))) | |
6582 | return; | |
6583 | ||
6584 | low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem))); | |
6585 | up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem))); | |
6586 | el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem))); | |
6587 | if (!low_bound | |
6588 | || TREE_CODE (low_bound) != INTEGER_CST | |
6589 | || !up_bound | |
6590 | || TREE_CODE (up_bound) != INTEGER_CST | |
6591 | || !el_sz | |
6592 | || TREE_CODE (el_sz) != INTEGER_CST) | |
6593 | return; | |
6594 | ||
6595 | idx = mem_ref_offset (t); | |
807e902e KZ |
6596 | idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz)); |
6597 | if (wi::lts_p (idx, 0)) | |
70f34814 | 6598 | { |
83ede847 RB |
6599 | if (dump_file && (dump_flags & TDF_DETAILS)) |
6600 | { | |
6601 | fprintf (dump_file, "Array bound warning for "); | |
6602 | dump_generic_expr (MSG_NOTE, TDF_SLIM, t); | |
b4a4b56d | 6603 | fprintf (dump_file, "\n"); |
83ede847 | 6604 | } |
70f34814 RG |
6605 | warning_at (location, OPT_Warray_bounds, |
6606 | "array subscript is below array bounds"); | |
6607 | TREE_NO_WARNING (t) = 1; | |
6608 | } | |
807e902e KZ |
6609 | else if (wi::gts_p (idx, (wi::to_offset (up_bound) |
6610 | - wi::to_offset (low_bound) + 1))) | |
70f34814 | 6611 | { |
83ede847 RB |
6612 | if (dump_file && (dump_flags & TDF_DETAILS)) |
6613 | { | |
6614 | fprintf (dump_file, "Array bound warning for "); | |
6615 | dump_generic_expr (MSG_NOTE, TDF_SLIM, t); | |
b4a4b56d | 6616 | fprintf (dump_file, "\n"); |
83ede847 | 6617 | } |
70f34814 RG |
6618 | warning_at (location, OPT_Warray_bounds, |
6619 | "array subscript is above array bounds"); | |
6620 | TREE_NO_WARNING (t) = 1; | |
6621 | } | |
6622 | } | |
05fb69e4 DM |
6623 | } |
6624 | ||
590b1f2d DM |
6625 | /* walk_tree() callback that checks if *TP is |
6626 | an ARRAY_REF inside an ADDR_EXPR (in which an array | |
6627 | subscript one outside the valid range is allowed). Call | |
b8698a0f | 6628 | check_array_ref for each ARRAY_REF found. The location is |
590b1f2d DM |
6629 | passed in DATA. */ |
6630 | ||
6631 | static tree | |
6632 | check_array_bounds (tree *tp, int *walk_subtree, void *data) | |
6633 | { | |
6634 | tree t = *tp; | |
726a989a | 6635 | struct walk_stmt_info *wi = (struct walk_stmt_info *) data; |
c2255bc4 AH |
6636 | location_t location; |
6637 | ||
6638 | if (EXPR_HAS_LOCATION (t)) | |
6639 | location = EXPR_LOCATION (t); | |
6640 | else | |
6641 | { | |
6642 | location_t *locp = (location_t *) wi->info; | |
6643 | location = *locp; | |
6644 | } | |
88df9da1 | 6645 | |
590b1f2d DM |
6646 | *walk_subtree = TRUE; |
6647 | ||
6648 | if (TREE_CODE (t) == ARRAY_REF) | |
c2255bc4 | 6649 | check_array_ref (location, t, false /*ignore_off_by_one*/); |
1eb7b049 | 6650 | |
f8269ad4 RB |
6651 | else if (TREE_CODE (t) == ADDR_EXPR) |
6652 | { | |
6653 | search_for_addr_array (t, location); | |
6654 | *walk_subtree = FALSE; | |
6655 | } | |
05fb69e4 | 6656 | |
590b1f2d DM |
6657 | return NULL_TREE; |
6658 | } | |
6659 | ||
6660 | /* Walk over all statements of all reachable BBs and call check_array_bounds | |
6661 | on them. */ | |
6662 | ||
6663 | static void | |
6664 | check_all_array_refs (void) | |
6665 | { | |
6666 | basic_block bb; | |
726a989a | 6667 | gimple_stmt_iterator si; |
590b1f2d | 6668 | |
11cd3bed | 6669 | FOR_EACH_BB_FN (bb, cfun) |
590b1f2d | 6670 | { |
1d86f5e9 RG |
6671 | edge_iterator ei; |
6672 | edge e; | |
6673 | bool executable = false; | |
92ef7fb1 | 6674 | |
1d86f5e9 RG |
6675 | /* Skip blocks that were found to be unreachable. */ |
6676 | FOR_EACH_EDGE (e, ei, bb->preds) | |
6677 | executable |= !!(e->flags & EDGE_EXECUTABLE); | |
6678 | if (!executable) | |
6679 | continue; | |
590b1f2d | 6680 | |
726a989a RB |
6681 | for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) |
6682 | { | |
355fe088 | 6683 | gimple *stmt = gsi_stmt (si); |
726a989a | 6684 | struct walk_stmt_info wi; |
f8269ad4 RB |
6685 | if (!gimple_has_location (stmt) |
6686 | || is_gimple_debug (stmt)) | |
726a989a RB |
6687 | continue; |
6688 | ||
f8269ad4 | 6689 | memset (&wi, 0, sizeof (wi)); |
b2b91e85 TS |
6690 | |
6691 | location_t loc = gimple_location (stmt); | |
6692 | wi.info = &loc; | |
726a989a | 6693 | |
f8269ad4 RB |
6694 | walk_gimple_op (gsi_stmt (si), |
6695 | check_array_bounds, | |
6696 | &wi); | |
726a989a | 6697 | } |
590b1f2d DM |
6698 | } |
6699 | } | |
0bca51f0 | 6700 | |
d8202b84 JJ |
6701 | /* Return true if all imm uses of VAR are either in STMT, or |
6702 | feed (optionally through a chain of single imm uses) GIMPLE_COND | |
6703 | in basic block COND_BB. */ | |
6704 | ||
6705 | static bool | |
355fe088 | 6706 | all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb) |
d8202b84 JJ |
6707 | { |
6708 | use_operand_p use_p, use2_p; | |
6709 | imm_use_iterator iter; | |
6710 | ||
6711 | FOR_EACH_IMM_USE_FAST (use_p, iter, var) | |
6712 | if (USE_STMT (use_p) != stmt) | |
6713 | { | |
355fe088 | 6714 | gimple *use_stmt = USE_STMT (use_p), *use_stmt2; |
d8202b84 JJ |
6715 | if (is_gimple_debug (use_stmt)) |
6716 | continue; | |
6717 | while (is_gimple_assign (use_stmt) | |
7e8c8abc | 6718 | && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME |
d8202b84 | 6719 | && single_imm_use (gimple_assign_lhs (use_stmt), |
7e8c8abc JJ |
6720 | &use2_p, &use_stmt2)) |
6721 | use_stmt = use_stmt2; | |
d8202b84 JJ |
6722 | if (gimple_code (use_stmt) != GIMPLE_COND |
6723 | || gimple_bb (use_stmt) != cond_bb) | |
6724 | return false; | |
6725 | } | |
6726 | return true; | |
6727 | } | |
6728 | ||
1e99c6e0 JJ |
6729 | /* Handle |
6730 | _4 = x_3 & 31; | |
6731 | if (_4 != 0) | |
6732 | goto <bb 6>; | |
6733 | else | |
6734 | goto <bb 7>; | |
6735 | <bb 6>: | |
6736 | __builtin_unreachable (); | |
6737 | <bb 7>: | |
6738 | x_5 = ASSERT_EXPR <x_3, ...>; | |
6739 | If x_3 has no other immediate uses (checked by caller), | |
6740 | var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits | |
6741 | from the non-zero bitmask. */ | |
6742 | ||
6743 | static void | |
6744 | maybe_set_nonzero_bits (basic_block bb, tree var) | |
6745 | { | |
6746 | edge e = single_pred_edge (bb); | |
6747 | basic_block cond_bb = e->src; | |
355fe088 | 6748 | gimple *stmt = last_stmt (cond_bb); |
1e99c6e0 JJ |
6749 | tree cst; |
6750 | ||
6751 | if (stmt == NULL | |
6752 | || gimple_code (stmt) != GIMPLE_COND | |
6753 | || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE) | |
6754 | ? EQ_EXPR : NE_EXPR) | |
6755 | || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME | |
6756 | || !integer_zerop (gimple_cond_rhs (stmt))) | |
6757 | return; | |
6758 | ||
6759 | stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt)); | |
6760 | if (!is_gimple_assign (stmt) | |
6761 | || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR | |
6762 | || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST) | |
6763 | return; | |
6764 | if (gimple_assign_rhs1 (stmt) != var) | |
6765 | { | |
355fe088 | 6766 | gimple *stmt2; |
1e99c6e0 JJ |
6767 | |
6768 | if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME) | |
6769 | return; | |
6770 | stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt)); | |
6771 | if (!gimple_assign_cast_p (stmt2) | |
6772 | || gimple_assign_rhs1 (stmt2) != var | |
6773 | || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2)) | |
6774 | || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt))) | |
6775 | != TYPE_PRECISION (TREE_TYPE (var)))) | |
6776 | return; | |
6777 | } | |
6778 | cst = gimple_assign_rhs2 (stmt); | |
807e902e | 6779 | set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var), cst)); |
1e99c6e0 JJ |
6780 | } |
6781 | ||
94908762 JL |
6782 | /* Convert range assertion expressions into the implied copies and |
6783 | copy propagate away the copies. Doing the trivial copy propagation | |
6784 | here avoids the need to run the full copy propagation pass after | |
b8698a0f L |
6785 | VRP. |
6786 | ||
227858d1 DN |
6787 | FIXME, this will eventually lead to copy propagation removing the |
6788 | names that had useful range information attached to them. For | |
6789 | instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>, | |
6790 | then N_i will have the range [3, +INF]. | |
b8698a0f | 6791 | |
227858d1 DN |
6792 | However, by converting the assertion into the implied copy |
6793 | operation N_i = N_j, we will then copy-propagate N_j into the uses | |
6794 | of N_i and lose the range information. We may want to hold on to | |
6795 | ASSERT_EXPRs a little while longer as the ranges could be used in | |
6796 | things like jump threading. | |
b8698a0f | 6797 | |
227858d1 | 6798 | The problem with keeping ASSERT_EXPRs around is that passes after |
b8698a0f | 6799 | VRP need to handle them appropriately. |
94908762 JL |
6800 | |
6801 | Another approach would be to make the range information a first | |
6802 | class property of the SSA_NAME so that it can be queried from | |
6803 | any pass. This is made somewhat more complex by the need for | |
6804 | multiple ranges to be associated with one SSA_NAME. */ | |
0bca51f0 DN |
6805 | |
6806 | static void | |
6807 | remove_range_assertions (void) | |
6808 | { | |
6809 | basic_block bb; | |
726a989a | 6810 | gimple_stmt_iterator si; |
d8202b84 JJ |
6811 | /* 1 if looking at ASSERT_EXPRs immediately at the beginning of |
6812 | a basic block preceeded by GIMPLE_COND branching to it and | |
6813 | __builtin_trap, -1 if not yet checked, 0 otherwise. */ | |
6814 | int is_unreachable; | |
0bca51f0 | 6815 | |
94908762 JL |
6816 | /* Note that the BSI iterator bump happens at the bottom of the |
6817 | loop and no bump is necessary if we're removing the statement | |
6818 | referenced by the current BSI. */ | |
11cd3bed | 6819 | FOR_EACH_BB_FN (bb, cfun) |
d8202b84 | 6820 | for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);) |
0bca51f0 | 6821 | { |
355fe088 TS |
6822 | gimple *stmt = gsi_stmt (si); |
6823 | gimple *use_stmt; | |
0bca51f0 | 6824 | |
726a989a RB |
6825 | if (is_gimple_assign (stmt) |
6826 | && gimple_assign_rhs_code (stmt) == ASSERT_EXPR) | |
0bca51f0 | 6827 | { |
d8202b84 | 6828 | tree lhs = gimple_assign_lhs (stmt); |
726a989a RB |
6829 | tree rhs = gimple_assign_rhs1 (stmt); |
6830 | tree var; | |
94908762 JL |
6831 | use_operand_p use_p; |
6832 | imm_use_iterator iter; | |
6833 | ||
701b8964 | 6834 | var = ASSERT_EXPR_VAR (rhs); |
d8202b84 JJ |
6835 | gcc_assert (TREE_CODE (var) == SSA_NAME); |
6836 | ||
6837 | if (!POINTER_TYPE_P (TREE_TYPE (lhs)) | |
6838 | && SSA_NAME_RANGE_INFO (lhs)) | |
6839 | { | |
6840 | if (is_unreachable == -1) | |
6841 | { | |
6842 | is_unreachable = 0; | |
6843 | if (single_pred_p (bb) | |
6844 | && assert_unreachable_fallthru_edge_p | |
6845 | (single_pred_edge (bb))) | |
6846 | is_unreachable = 1; | |
6847 | } | |
6848 | /* Handle | |
6849 | if (x_7 >= 10 && x_7 < 20) | |
6850 | __builtin_unreachable (); | |
6851 | x_8 = ASSERT_EXPR <x_7, ...>; | |
6852 | if the only uses of x_7 are in the ASSERT_EXPR and | |
6853 | in the condition. In that case, we can copy the | |
6854 | range info from x_8 computed in this pass also | |
6855 | for x_7. */ | |
6856 | if (is_unreachable | |
6857 | && all_imm_uses_in_stmt_or_feed_cond (var, stmt, | |
6858 | single_pred (bb))) | |
1e99c6e0 | 6859 | { |
f5c8b24c | 6860 | set_range_info (var, SSA_NAME_RANGE_TYPE (lhs), |
807e902e KZ |
6861 | SSA_NAME_RANGE_INFO (lhs)->get_min (), |
6862 | SSA_NAME_RANGE_INFO (lhs)->get_max ()); | |
1e99c6e0 JJ |
6863 | maybe_set_nonzero_bits (bb, var); |
6864 | } | |
d8202b84 JJ |
6865 | } |
6866 | ||
6867 | /* Propagate the RHS into every use of the LHS. */ | |
6868 | FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs) | |
6c00f606 | 6869 | FOR_EACH_IMM_USE_ON_STMT (use_p, iter) |
d8202b84 | 6870 | SET_USE (use_p, var); |
94908762 JL |
6871 | |
6872 | /* And finally, remove the copy, it is not needed. */ | |
726a989a | 6873 | gsi_remove (&si, true); |
b8698a0f | 6874 | release_defs (stmt); |
0bca51f0 | 6875 | } |
94908762 | 6876 | else |
d8202b84 | 6877 | { |
82bb9245 AM |
6878 | if (!is_gimple_debug (gsi_stmt (si))) |
6879 | is_unreachable = 0; | |
d8202b84 | 6880 | gsi_next (&si); |
d8202b84 | 6881 | } |
0bca51f0 DN |
6882 | } |
6883 | } | |
6884 | ||
6885 | ||
6886 | /* Return true if STMT is interesting for VRP. */ | |
6887 | ||
6888 | static bool | |
355fe088 | 6889 | stmt_interesting_for_vrp (gimple *stmt) |
0bca51f0 | 6890 | { |
ea057359 RG |
6891 | if (gimple_code (stmt) == GIMPLE_PHI) |
6892 | { | |
6893 | tree res = gimple_phi_result (stmt); | |
6894 | return (!virtual_operand_p (res) | |
6895 | && (INTEGRAL_TYPE_P (TREE_TYPE (res)) | |
6896 | || POINTER_TYPE_P (TREE_TYPE (res)))); | |
6897 | } | |
726a989a | 6898 | else if (is_gimple_assign (stmt) || is_gimple_call (stmt)) |
0bca51f0 | 6899 | { |
726a989a | 6900 | tree lhs = gimple_get_lhs (stmt); |
0bca51f0 | 6901 | |
2bbec6d9 JL |
6902 | /* In general, assignments with virtual operands are not useful |
6903 | for deriving ranges, with the obvious exception of calls to | |
6904 | builtin functions. */ | |
726a989a | 6905 | if (lhs && TREE_CODE (lhs) == SSA_NAME |
0bca51f0 DN |
6906 | && (INTEGRAL_TYPE_P (TREE_TYPE (lhs)) |
6907 | || POINTER_TYPE_P (TREE_TYPE (lhs))) | |
826cacfe | 6908 | && (is_gimple_call (stmt) |
5006671f | 6909 | || !gimple_vuse (stmt))) |
0bca51f0 | 6910 | return true; |
09877e13 JJ |
6911 | else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt)) |
6912 | switch (gimple_call_internal_fn (stmt)) | |
6913 | { | |
6914 | case IFN_ADD_OVERFLOW: | |
6915 | case IFN_SUB_OVERFLOW: | |
6916 | case IFN_MUL_OVERFLOW: | |
6917 | /* These internal calls return _Complex integer type, | |
6918 | but are interesting to VRP nevertheless. */ | |
6919 | if (lhs && TREE_CODE (lhs) == SSA_NAME) | |
6920 | return true; | |
6921 | break; | |
6922 | default: | |
6923 | break; | |
6924 | } | |
0bca51f0 | 6925 | } |
726a989a RB |
6926 | else if (gimple_code (stmt) == GIMPLE_COND |
6927 | || gimple_code (stmt) == GIMPLE_SWITCH) | |
0bca51f0 DN |
6928 | return true; |
6929 | ||
6930 | return false; | |
6931 | } | |
6932 | ||
6933 | ||
87e71ff4 | 6934 | /* Initialize local data structures for VRP. */ |
0bca51f0 | 6935 | |
227858d1 | 6936 | static void |
0bca51f0 DN |
6937 | vrp_initialize (void) |
6938 | { | |
6939 | basic_block bb; | |
0bca51f0 | 6940 | |
d9256277 RG |
6941 | values_propagated = false; |
6942 | num_vr_values = num_ssa_names; | |
526ceb68 | 6943 | vr_value = XCNEWVEC (value_range *, num_vr_values); |
fc6827fe | 6944 | vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names); |
0bca51f0 | 6945 | |
11cd3bed | 6946 | FOR_EACH_BB_FN (bb, cfun) |
0bca51f0 | 6947 | { |
538dd0b7 DM |
6948 | for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si); |
6949 | gsi_next (&si)) | |
0bca51f0 | 6950 | { |
538dd0b7 | 6951 | gphi *phi = si.phi (); |
0bca51f0 DN |
6952 | if (!stmt_interesting_for_vrp (phi)) |
6953 | { | |
6954 | tree lhs = PHI_RESULT (phi); | |
b565d777 | 6955 | set_value_range_to_varying (get_value_range (lhs)); |
726a989a | 6956 | prop_set_simulate_again (phi, false); |
0bca51f0 DN |
6957 | } |
6958 | else | |
726a989a | 6959 | prop_set_simulate_again (phi, true); |
0bca51f0 DN |
6960 | } |
6961 | ||
538dd0b7 DM |
6962 | for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si); |
6963 | gsi_next (&si)) | |
0bca51f0 | 6964 | { |
355fe088 | 6965 | gimple *stmt = gsi_stmt (si); |
0bca51f0 | 6966 | |
cd6ea7a2 RH |
6967 | /* If the statement is a control insn, then we do not |
6968 | want to avoid simulating the statement once. Failure | |
6969 | to do so means that those edges will never get added. */ | |
6970 | if (stmt_ends_bb_p (stmt)) | |
6971 | prop_set_simulate_again (stmt, true); | |
6972 | else if (!stmt_interesting_for_vrp (stmt)) | |
0bca51f0 DN |
6973 | { |
6974 | ssa_op_iter i; | |
6975 | tree def; | |
6976 | FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF) | |
b565d777 | 6977 | set_value_range_to_varying (get_value_range (def)); |
726a989a | 6978 | prop_set_simulate_again (stmt, false); |
0bca51f0 DN |
6979 | } |
6980 | else | |
cd6ea7a2 | 6981 | prop_set_simulate_again (stmt, true); |
0bca51f0 DN |
6982 | } |
6983 | } | |
0bca51f0 DN |
6984 | } |
6985 | ||
cfef45c8 RG |
6986 | /* Return the singleton value-range for NAME or NAME. */ |
6987 | ||
6988 | static inline tree | |
6989 | vrp_valueize (tree name) | |
6990 | { | |
6991 | if (TREE_CODE (name) == SSA_NAME) | |
6992 | { | |
526ceb68 | 6993 | value_range *vr = get_value_range (name); |
cfef45c8 RG |
6994 | if (vr->type == VR_RANGE |
6995 | && (vr->min == vr->max | |
6996 | || operand_equal_p (vr->min, vr->max, 0))) | |
6997 | return vr->min; | |
6998 | } | |
6999 | return name; | |
7000 | } | |
0bca51f0 | 7001 | |
d2a85801 RB |
7002 | /* Return the singleton value-range for NAME if that is a constant |
7003 | but signal to not follow SSA edges. */ | |
7004 | ||
7005 | static inline tree | |
7006 | vrp_valueize_1 (tree name) | |
7007 | { | |
7008 | if (TREE_CODE (name) == SSA_NAME) | |
7009 | { | |
d2a85801 RB |
7010 | /* If the definition may be simulated again we cannot follow |
7011 | this SSA edge as the SSA propagator does not necessarily | |
7012 | re-visit the use. */ | |
355fe088 | 7013 | gimple *def_stmt = SSA_NAME_DEF_STMT (name); |
7dd1f7ac RB |
7014 | if (!gimple_nop_p (def_stmt) |
7015 | && prop_simulate_again_p (def_stmt)) | |
d2a85801 | 7016 | return NULL_TREE; |
526ceb68 | 7017 | value_range *vr = get_value_range (name); |
d94e3e75 RB |
7018 | if (range_int_cst_singleton_p (vr)) |
7019 | return vr->min; | |
d2a85801 RB |
7020 | } |
7021 | return name; | |
7022 | } | |
7023 | ||
0bca51f0 DN |
7024 | /* Visit assignment STMT. If it produces an interesting range, record |
7025 | the SSA name in *OUTPUT_P. */ | |
7026 | ||
7027 | static enum ssa_prop_result | |
355fe088 | 7028 | vrp_visit_assignment_or_call (gimple *stmt, tree *output_p) |
0bca51f0 | 7029 | { |
726a989a | 7030 | tree def, lhs; |
0bca51f0 | 7031 | ssa_op_iter iter; |
726a989a RB |
7032 | enum gimple_code code = gimple_code (stmt); |
7033 | lhs = gimple_get_lhs (stmt); | |
0bca51f0 DN |
7034 | |
7035 | /* We only keep track of ranges in integral and pointer types. */ | |
7036 | if (TREE_CODE (lhs) == SSA_NAME | |
e260a614 JL |
7037 | && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs)) |
7038 | /* It is valid to have NULL MIN/MAX values on a type. See | |
7039 | build_range_type. */ | |
7040 | && TYPE_MIN_VALUE (TREE_TYPE (lhs)) | |
7041 | && TYPE_MAX_VALUE (TREE_TYPE (lhs))) | |
0bca51f0 DN |
7042 | || POINTER_TYPE_P (TREE_TYPE (lhs)))) |
7043 | { | |
526ceb68 | 7044 | value_range new_vr = VR_INITIALIZER; |
227858d1 | 7045 | |
cfef45c8 | 7046 | /* Try folding the statement to a constant first. */ |
d2a85801 RB |
7047 | tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize, |
7048 | vrp_valueize_1); | |
7049 | if (tem && is_gimple_min_invariant (tem)) | |
635bfae0 | 7050 | set_value_range_to_value (&new_vr, tem, NULL); |
cfef45c8 RG |
7051 | /* Then dispatch to value-range extracting functions. */ |
7052 | else if (code == GIMPLE_CALL) | |
726a989a RB |
7053 | extract_range_basic (&new_vr, stmt); |
7054 | else | |
538dd0b7 | 7055 | extract_range_from_assignment (&new_vr, as_a <gassign *> (stmt)); |
0bca51f0 | 7056 | |
227858d1 | 7057 | if (update_value_range (lhs, &new_vr)) |
0bca51f0 DN |
7058 | { |
7059 | *output_p = lhs; | |
7060 | ||
7061 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
7062 | { | |
227858d1 | 7063 | fprintf (dump_file, "Found new range for "); |
0bca51f0 | 7064 | print_generic_expr (dump_file, lhs, 0); |
227858d1 DN |
7065 | fprintf (dump_file, ": "); |
7066 | dump_value_range (dump_file, &new_vr); | |
6e5799b9 | 7067 | fprintf (dump_file, "\n"); |
0bca51f0 DN |
7068 | } |
7069 | ||
7070 | if (new_vr.type == VR_VARYING) | |
7071 | return SSA_PROP_VARYING; | |
7072 | ||
7073 | return SSA_PROP_INTERESTING; | |
7074 | } | |
7075 | ||
7076 | return SSA_PROP_NOT_INTERESTING; | |
7077 | } | |
09877e13 JJ |
7078 | else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt)) |
7079 | switch (gimple_call_internal_fn (stmt)) | |
7080 | { | |
7081 | case IFN_ADD_OVERFLOW: | |
7082 | case IFN_SUB_OVERFLOW: | |
7083 | case IFN_MUL_OVERFLOW: | |
7084 | /* These internal calls return _Complex integer type, | |
7085 | which VRP does not track, but the immediate uses | |
7086 | thereof might be interesting. */ | |
7087 | if (lhs && TREE_CODE (lhs) == SSA_NAME) | |
7088 | { | |
7089 | imm_use_iterator iter; | |
7090 | use_operand_p use_p; | |
7091 | enum ssa_prop_result res = SSA_PROP_VARYING; | |
7092 | ||
7093 | set_value_range_to_varying (get_value_range (lhs)); | |
7094 | ||
7095 | FOR_EACH_IMM_USE_FAST (use_p, iter, lhs) | |
7096 | { | |
355fe088 | 7097 | gimple *use_stmt = USE_STMT (use_p); |
09877e13 JJ |
7098 | if (!is_gimple_assign (use_stmt)) |
7099 | continue; | |
7100 | enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt); | |
7101 | if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR) | |
7102 | continue; | |
7103 | tree rhs1 = gimple_assign_rhs1 (use_stmt); | |
7104 | tree use_lhs = gimple_assign_lhs (use_stmt); | |
7105 | if (TREE_CODE (rhs1) != rhs_code | |
7106 | || TREE_OPERAND (rhs1, 0) != lhs | |
7107 | || TREE_CODE (use_lhs) != SSA_NAME | |
7108 | || !stmt_interesting_for_vrp (use_stmt) | |
7109 | || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs)) | |
7110 | || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs)) | |
7111 | || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs)))) | |
7112 | continue; | |
7113 | ||
7114 | /* If there is a change in the value range for any of the | |
7115 | REALPART_EXPR/IMAGPART_EXPR immediate uses, return | |
7116 | SSA_PROP_INTERESTING. If there are any REALPART_EXPR | |
7117 | or IMAGPART_EXPR immediate uses, but none of them have | |
7118 | a change in their value ranges, return | |
7119 | SSA_PROP_NOT_INTERESTING. If there are no | |
7120 | {REAL,IMAG}PART_EXPR uses at all, | |
7121 | return SSA_PROP_VARYING. */ | |
526ceb68 | 7122 | value_range new_vr = VR_INITIALIZER; |
09877e13 | 7123 | extract_range_basic (&new_vr, use_stmt); |
526ceb68 | 7124 | value_range *old_vr = get_value_range (use_lhs); |
09877e13 JJ |
7125 | if (old_vr->type != new_vr.type |
7126 | || !vrp_operand_equal_p (old_vr->min, new_vr.min) | |
7127 | || !vrp_operand_equal_p (old_vr->max, new_vr.max) | |
7128 | || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv)) | |
7129 | res = SSA_PROP_INTERESTING; | |
7130 | else | |
7131 | res = SSA_PROP_NOT_INTERESTING; | |
7132 | BITMAP_FREE (new_vr.equiv); | |
7133 | if (res == SSA_PROP_INTERESTING) | |
7134 | { | |
7135 | *output_p = lhs; | |
7136 | return res; | |
7137 | } | |
7138 | } | |
7139 | ||
7140 | return res; | |
7141 | } | |
7142 | break; | |
7143 | default: | |
7144 | break; | |
7145 | } | |
b8698a0f | 7146 | |
227858d1 | 7147 | /* Every other statement produces no useful ranges. */ |
0bca51f0 | 7148 | FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF) |
b565d777 | 7149 | set_value_range_to_varying (get_value_range (def)); |
0bca51f0 DN |
7150 | |
7151 | return SSA_PROP_VARYING; | |
7152 | } | |
7153 | ||
f5052e29 | 7154 | /* Helper that gets the value range of the SSA_NAME with version I |
c80b4100 | 7155 | or a symbolic range containing the SSA_NAME only if the value range |
f5052e29 RG |
7156 | is varying or undefined. */ |
7157 | ||
526ceb68 | 7158 | static inline value_range |
f5052e29 RG |
7159 | get_vr_for_comparison (int i) |
7160 | { | |
526ceb68 | 7161 | value_range vr = *get_value_range (ssa_name (i)); |
f5052e29 RG |
7162 | |
7163 | /* If name N_i does not have a valid range, use N_i as its own | |
7164 | range. This allows us to compare against names that may | |
7165 | have N_i in their ranges. */ | |
7166 | if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED) | |
7167 | { | |
7168 | vr.type = VR_RANGE; | |
7169 | vr.min = ssa_name (i); | |
7170 | vr.max = ssa_name (i); | |
7171 | } | |
7172 | ||
7173 | return vr; | |
7174 | } | |
0bca51f0 | 7175 | |
227858d1 DN |
/* Compare all the value ranges for names equivalent to VAR with VAL
   using comparison code COMP.  Return the same value returned by
   compare_range_with_value, including the setting of
   *STRICT_OVERFLOW_P.  */

static tree
compare_name_with_value (enum tree_code comp, tree var, tree val,
			 bool *strict_overflow_p)
{
  bitmap_iterator bi;
  unsigned i;
  bitmap e;
  tree retval, t;
  int used_strict_overflow;
  bool sop;
  value_range equiv_vr;

  /* Get the set of equivalences for VAR.  */
  e = get_value_range (var)->equiv;

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Compare vars' value range with val.  */
  equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
  sop = false;
  retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
  if (retval)
    used_strict_overflow = sop ? 1 : 0;

  /* If the equiv set is empty we have done all work we need to do.  */
  if (e == NULL)
    {
      if (retval
	  && used_strict_overflow > 0)
	*strict_overflow_p = true;
      return retval;
    }

  /* Otherwise, compare VAL against the range of every member of the
     equivalence set as well; all members must agree on the answer.  */
  EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
    {
      equiv_vr = get_vr_for_comparison (i);
      sop = false;
      t = compare_range_with_value (comp, &equiv_vr, val, &sop);
      if (t)
	{
	  /* If we get different answers from different members
	     of the equivalence set this check must be in a dead
	     code region.  Folding it to a trap representation
	     would be correct here.  For now just return don't-know.  */
	  if (retval != NULL
	      && t != retval)
	    {
	      retval = NULL_TREE;
	      break;
	    }
	  retval = t;

	  /* Track whether every successful comparison so far relied
	     on undefined signed overflow (1), or at least one did
	     not (0).  */
	  if (!sop)
	    used_strict_overflow = 0;
	  else if (used_strict_overflow < 0)
	    used_strict_overflow = 1;
	}
    }

  if (retval
      && used_strict_overflow > 0)
    *strict_overflow_p = true;

  return retval;
}
7248 | ||
7249 | ||
/* Given a comparison code COMP and names N1 and N2, compare all the
   ranges equivalent to N1 against all the ranges equivalent to N2
   to determine the value of N1 COMP N2.  Return the same value
   returned by compare_ranges.  Set *STRICT_OVERFLOW_P to indicate
   whether we relied on an overflow infinity in the comparison.  */

static tree
compare_names (enum tree_code comp, tree n1, tree n2,
	       bool *strict_overflow_p)
{
  tree t, retval;
  bitmap e1, e2;
  bitmap_iterator bi1, bi2;
  unsigned i1, i2;
  int used_strict_overflow;
  /* Lazily-created scratch bitmaps, shared across calls, used as
     stand-ins when N1 or N2 has no equivalence set of its own.  */
  static bitmap_obstack *s_obstack = NULL;
  static bitmap s_e1 = NULL, s_e2 = NULL;

  /* Compare the ranges of every name equivalent to N1 against the
     ranges of every name equivalent to N2.  */
  e1 = get_value_range (n1)->equiv;
  e2 = get_value_range (n2)->equiv;

  /* Use the fake bitmaps if e1 or e2 are not available.  */
  if (s_obstack == NULL)
    {
      s_obstack = XNEW (bitmap_obstack);
      bitmap_obstack_initialize (s_obstack);
      s_e1 = BITMAP_ALLOC (s_obstack);
      s_e2 = BITMAP_ALLOC (s_obstack);
    }
  if (e1 == NULL)
    e1 = s_e1;
  if (e2 == NULL)
    e2 = s_e2;

  /* Add N1 and N2 to their own set of equivalences to avoid
     duplicating the body of the loop just to check N1 and N2
     ranges.  NOTE: these bits are temporary and must be cleared
     again on every exit path below.  */
  bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_set_bit (e2, SSA_NAME_VERSION (n2));

  /* If the equivalence sets have a common intersection, then the two
     names can be compared without checking their ranges.  */
  if (bitmap_intersect_p (e1, e2))
    {
      bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
      bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));

      return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
	     ? boolean_true_node
	     : boolean_false_node;
    }

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Otherwise, compare all the equivalent ranges.  First, add N1 and
     N2 to their own set of equivalences to avoid duplicating the body
     of the loop just to check N1 and N2 ranges.  */
  EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
    {
      value_range vr1 = get_vr_for_comparison (i1);

      t = retval = NULL_TREE;
      EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
	{
	  bool sop = false;

	  value_range vr2 = get_vr_for_comparison (i2);

	  t = compare_ranges (comp, &vr1, &vr2, &sop);
	  if (t)
	    {
	      /* If we get different answers from different members
		 of the equivalence set this check must be in a dead
		 code region.  Folding it to a trap representation
		 would be correct here.  For now just return don't-know.  */
	      if (retval != NULL
		  && t != retval)
		{
		  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
		  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
		  return NULL_TREE;
		}
	      retval = t;

	      if (!sop)
		used_strict_overflow = 0;
	      else if (used_strict_overflow < 0)
		used_strict_overflow = 1;
	    }
	}

      if (retval)
	{
	  /* Remove the temporarily-added bits before returning.  */
	  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
	  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
	  if (used_strict_overflow > 0)
	    *strict_overflow_p = true;
	  return retval;
	}
    }

  /* None of the equivalent ranges are useful in computing this
     comparison.  */
  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
  return NULL_TREE;
}
7362 | ||
da7db2ce NS |
7363 | /* Helper function for vrp_evaluate_conditional_warnv & other |
7364 | optimizers. */ | |
6b99f156 JH |
7365 | |
7366 | static tree | |
7367 | vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code, | |
7368 | tree op0, tree op1, | |
7369 | bool * strict_overflow_p) | |
7370 | { | |
526ceb68 | 7371 | value_range *vr0, *vr1; |
6b99f156 JH |
7372 | |
7373 | vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL; | |
7374 | vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL; | |
7375 | ||
597f5997 | 7376 | tree res = NULL_TREE; |
6b99f156 | 7377 | if (vr0 && vr1) |
597f5997 RB |
7378 | res = compare_ranges (code, vr0, vr1, strict_overflow_p); |
7379 | if (!res && vr0) | |
7380 | res = compare_range_with_value (code, vr0, op1, strict_overflow_p); | |
7381 | if (!res && vr1) | |
7382 | res = (compare_range_with_value | |
6b99f156 | 7383 | (swap_tree_comparison (code), vr1, op0, strict_overflow_p)); |
597f5997 | 7384 | return res; |
6b99f156 JH |
7385 | } |
7386 | ||
2d3cd5d5 RAE |
7387 | /* Helper function for vrp_evaluate_conditional_warnv. */ |
7388 | ||
7389 | static tree | |
7390 | vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0, | |
7391 | tree op1, bool use_equiv_p, | |
6b99f156 | 7392 | bool *strict_overflow_p, bool *only_ranges) |
2d3cd5d5 | 7393 | { |
6b99f156 JH |
7394 | tree ret; |
7395 | if (only_ranges) | |
7396 | *only_ranges = true; | |
7397 | ||
2d3cd5d5 RAE |
7398 | /* We only deal with integral and pointer types. */ |
7399 | if (!INTEGRAL_TYPE_P (TREE_TYPE (op0)) | |
7400 | && !POINTER_TYPE_P (TREE_TYPE (op0))) | |
7401 | return NULL_TREE; | |
7402 | ||
7403 | if (use_equiv_p) | |
7404 | { | |
6b99f156 JH |
7405 | if (only_ranges |
7406 | && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges | |
7407 | (code, op0, op1, strict_overflow_p))) | |
7408 | return ret; | |
7409 | *only_ranges = false; | |
2d3cd5d5 | 7410 | if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME) |
726a989a | 7411 | return compare_names (code, op0, op1, strict_overflow_p); |
2d3cd5d5 | 7412 | else if (TREE_CODE (op0) == SSA_NAME) |
726a989a | 7413 | return compare_name_with_value (code, op0, op1, strict_overflow_p); |
2d3cd5d5 RAE |
7414 | else if (TREE_CODE (op1) == SSA_NAME) |
7415 | return (compare_name_with_value | |
726a989a | 7416 | (swap_tree_comparison (code), op1, op0, strict_overflow_p)); |
2d3cd5d5 RAE |
7417 | } |
7418 | else | |
6b99f156 JH |
7419 | return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1, |
7420 | strict_overflow_p); | |
2d3cd5d5 RAE |
7421 | return NULL_TREE; |
7422 | } | |
227858d1 | 7423 | |
/* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
   information.  Return NULL if the conditional can not be evaluated.
   The ranges of all the names equivalent with the operands in COND
   will be used when trying to compute the value.  If the result is
   based on undefined signed overflow, issue a warning if
   appropriate.  */

static tree
vrp_evaluate_conditional (tree_code code, tree op0, tree op1, gimple *stmt)
{
  bool sop;
  tree ret;
  bool only_ranges;

  /* Some passes and foldings leak constants with overflow flag set
     into the IL.  Avoid doing wrong things with these and bail out.  */
  if ((TREE_CODE (op0) == INTEGER_CST
       && TREE_OVERFLOW (op0))
      || (TREE_CODE (op1) == INTEGER_CST
	  && TREE_OVERFLOW (op1)))
    return NULL_TREE;

  sop = false;
  ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
  						 &only_ranges);

  /* If the folded result relied on signed overflow being undefined,
     possibly emit a -Wstrict-overflow warning, with wording that
     depends on whether we folded to a constant or just simplified.  */
  if (ret && sop)
    {
      enum warn_strict_overflow_code wc;
      const char* warnmsg;

      if (is_gimple_min_invariant (ret))
	{
	  wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
	  warnmsg = G_("assuming signed overflow does not occur when "
		       "simplifying conditional to constant");
	}
      else
	{
	  wc = WARN_STRICT_OVERFLOW_COMPARISON;
	  warnmsg = G_("assuming signed overflow does not occur when "
		       "simplifying conditional");
	}

      if (issue_strict_overflow_warning (wc))
	{
	  location_t location;

	  /* Fall back to the global input location when STMT carries
	     no location of its own.  */
	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);
	  warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
	}
    }

  if (warn_type_limits
      && ret && only_ranges
      && TREE_CODE_CLASS (code) == tcc_comparison
      && TREE_CODE (op0) == SSA_NAME)
    {
      /* If the comparison is being folded and the operand on the LHS
	 is being compared against a constant value that is outside of
	 the natural range of OP0's type, then the predicate will
	 always fold regardless of the value of OP0.  If -Wtype-limits
	 was specified, emit a warning.  */
      tree type = TREE_TYPE (op0);
      value_range *vr0 = get_value_range (op0);

      /* OP0 spanning its type's entire min..max range means the fold
	 was decided by the type bounds alone, not by OP0's value.  */
      if (vr0->type == VR_RANGE
	  && INTEGRAL_TYPE_P (type)
	  && vrp_val_is_min (vr0->min)
	  && vrp_val_is_max (vr0->max)
	  && is_gimple_min_invariant (op1))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);

	  warning_at (location, OPT_Wtype_limits,
		      integer_zerop (ret)
		      ? G_("comparison always false "
			   "due to limited range of data type")
		      : G_("comparison always true "
			   "due to limited range of data type"));
	}
    }

  return ret;
}
7517 | ||
0bca51f0 DN |
7518 | |
/* Visit conditional statement STMT.  If we can determine which edge
   will be taken out of STMT's basic block, record it in
   *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
vrp_visit_cond_stmt (gcond *stmt, edge *taken_edge_p)
{
  tree val;
  bool sop;

  *taken_edge_p = NULL;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      tree use;
      ssa_op_iter i;

      fprintf (dump_file, "\nVisiting conditional with predicate: ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
      fprintf (dump_file, "\nWith known ranges\n");

      FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
	{
	  fprintf (dump_file, "\t");
	  print_generic_expr (dump_file, use, 0);
	  fprintf (dump_file, ": ");
	  dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
	}

      fprintf (dump_file, "\n");
    }

  /* Compute the value of the predicate COND by checking the known
     ranges of each of its operands.

     Note that we cannot evaluate all the equivalent ranges here
     because those ranges may not yet be final and with the current
     propagation strategy, we cannot determine when the value ranges
     of the names in the equivalence set have changed.

     For instance, given the following code fragment

        i_5 = PHI <8, i_13>
	...
     	i_14 = ASSERT_EXPR <i_5, i_5 != 0>
	if (i_14 == 1)
	  ...

     Assume that on the first visit to i_14, i_5 has the temporary
     range [8, 8] because the second argument to the PHI function is
     not yet executable.  We derive the range ~[0, 0] for i_14 and the
     equivalence set { i_5 }.  So, when we visit 'if (i_14 == 1)' for
     the first time, since i_14 is equivalent to the range [8, 8], we
     determine that the predicate is always false.

     On the next round of propagation, i_13 is determined to be
     VARYING, which causes i_5 to drop down to VARYING.  So, another
     visit to i_14 is scheduled.  In this second visit, we compute the
     exact same range and equivalence set for i_14, namely ~[0, 0] and
     { i_5 }.  But we did not have the previous range for i_5
     registered, so vrp_visit_assignment thinks that the range for
     i_14 has not changed.  Therefore, the predicate 'if (i_14 == 1)'
     is not visited again, which stops propagation from visiting
     statements in the THEN clause of that if().

     To properly fix this we would need to keep the previous range
     value for the names in the equivalence set.  This way we would've
     discovered that from one visit to the other i_5 changed from
     range [8, 8] to VR_VARYING.

     However, fixing this apparent limitation may not be worth the
     additional checking.  Testing on several code bases (GCC, DLV,
     MICO, TRAMP3D and SPEC2000) showed that doing this results in
     4 more predicates folded in SPEC.  */
  sop = false;

  val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
						 gimple_cond_lhs (stmt),
						 gimple_cond_rhs (stmt),
						 false, &sop, NULL);
  if (val)
    {
      if (!sop)
	*taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
      else
	{
	  /* Do not fold predicates whose evaluation depends on signed
	     overflow being undefined; just report don't-know.  */
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "\nIgnoring predicate evaluation because "
		     "it assumes that signed overflow is undefined");
	  val = NULL_TREE;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nPredicate evaluates to: ");
      if (val == NULL_TREE)
	fprintf (dump_file, "DON'T KNOW\n");
      else
	print_generic_stmt (dump_file, val, 0);
    }

  return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
}
7625 | ||
b7d8d447 RAE |
7626 | /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL |
7627 | that includes the value VAL. The search is restricted to the range | |
726a989a | 7628 | [START_IDX, n - 1] where n is the size of VEC. |
0bca51f0 | 7629 | |
b7d8d447 RAE |
7630 | If there is a CASE_LABEL for VAL, its index is placed in IDX and true is |
7631 | returned. | |
7632 | ||
92ef7fb1 | 7633 | If there is no CASE_LABEL for VAL and there is one that is larger than VAL, |
b7d8d447 RAE |
7634 | it is placed in IDX and false is returned. |
7635 | ||
726a989a | 7636 | If VAL is larger than any CASE_LABEL, n is placed on IDX and false is |
b7d8d447 | 7637 | returned. */ |
8aea0bf0 RG |
7638 | |
7639 | static bool | |
538dd0b7 | 7640 | find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx) |
8aea0bf0 | 7641 | { |
726a989a | 7642 | size_t n = gimple_switch_num_labels (stmt); |
b7d8d447 RAE |
7643 | size_t low, high; |
7644 | ||
7645 | /* Find case label for minimum of the value range or the next one. | |
7646 | At each iteration we are searching in [low, high - 1]. */ | |
8aea0bf0 | 7647 | |
726a989a | 7648 | for (low = start_idx, high = n; high != low; ) |
8aea0bf0 RG |
7649 | { |
7650 | tree t; | |
7651 | int cmp; | |
726a989a | 7652 | /* Note that i != high, so we never ask for n. */ |
b7d8d447 | 7653 | size_t i = (high + low) / 2; |
726a989a | 7654 | t = gimple_switch_label (stmt, i); |
8aea0bf0 RG |
7655 | |
7656 | /* Cache the result of comparing CASE_LOW and val. */ | |
7657 | cmp = tree_int_cst_compare (CASE_LOW (t), val); | |
7658 | ||
b7d8d447 RAE |
7659 | if (cmp == 0) |
7660 | { | |
7661 | /* Ranges cannot be empty. */ | |
7662 | *idx = i; | |
7663 | return true; | |
7664 | } | |
7665 | else if (cmp > 0) | |
8aea0bf0 RG |
7666 | high = i; |
7667 | else | |
b7d8d447 RAE |
7668 | { |
7669 | low = i + 1; | |
7670 | if (CASE_HIGH (t) != NULL | |
7671 | && tree_int_cst_compare (CASE_HIGH (t), val) >= 0) | |
8aea0bf0 RG |
7672 | { |
7673 | *idx = i; | |
7674 | return true; | |
7675 | } | |
7676 | } | |
7677 | } | |
7678 | ||
b7d8d447 | 7679 | *idx = high; |
8aea0bf0 RG |
7680 | return false; |
7681 | } | |
7682 | ||
b7d8d447 RAE |
/* Searches the case label vector VEC for the range of CASE_LABELs that is used
   for values between MIN and MAX.  The first index is placed in MIN_IDX.  The
   last index is placed in MAX_IDX.  If the range of CASE_LABELs is empty
   then MAX_IDX < MIN_IDX.
   Returns true if the default label is not needed.  */

static bool
find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
		       size_t *max_idx)
{
  size_t i, j;
  /* I is the label covering MIN (or the next larger one); the search
     for MAX can start at I since MAX >= MIN.  */
  bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
  bool max_take_default = !find_case_label_index (stmt, i, max, &j);

  if (i == j
      && min_take_default
      && max_take_default)
    {
      /* Only the default case label reached.
	 Return an empty range.  */
      *min_idx = 1;
      *max_idx = 0;
      return false;
    }
  else
    {
      bool take_default = min_take_default || max_take_default;
      tree low, high;
      size_t k;

      /* J points past the last label <= MAX when MAX itself has no
	 label; step back to the last covered label.  */
      if (max_take_default)
	j--;

      /* If the case label range is continuous, we do not need
	 the default case label.  Verify that.  */
      high = CASE_LOW (gimple_switch_label (stmt, i));
      if (CASE_HIGH (gimple_switch_label (stmt, i)))
	high = CASE_HIGH (gimple_switch_label (stmt, i));
      for (k = i + 1; k <= j; ++k)
	{
	  low = CASE_LOW (gimple_switch_label (stmt, k));
	  /* A gap exists unless this label starts exactly one past
	     the previous label's upper bound.  */
	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
	    {
	      take_default = true;
	      break;
	    }
	  high = low;
	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
	    high = CASE_HIGH (gimple_switch_label (stmt, k));
	}

      *min_idx = i;
      *max_idx = j;
      return !take_default;
    }
}
7739 | ||
8bb37e9a TV |
/* Searches the case label vector VEC for the ranges of CASE_LABELs that are
   used in range VR.  The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and
   MAX_IDX2.  If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
   Returns true if the default label is not needed.  */

static bool
find_case_label_ranges (gswitch *stmt, value_range *vr, size_t *min_idx1,
			size_t *max_idx1, size_t *min_idx2,
			size_t *max_idx2)
{
  size_t i, j, k, l;
  unsigned int n = gimple_switch_num_labels (stmt);
  bool take_default;
  tree case_low, case_high;
  tree min = vr->min, max = vr->max;

  gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);

  take_default = !find_case_label_range (stmt, min, max, &i, &j);

  /* Set second range to empty.  */
  *min_idx2 = 1;
  *max_idx2 = 0;

  /* For a plain range, the single label span [i, j] is the answer.  */
  if (vr->type == VR_RANGE)
    {
      *min_idx1 = i;
      *max_idx1 = j;
      return !take_default;
    }

  /* VR is an anti-range ~[MIN, MAX]: the reachable labels are the
     complement of [i, j].  Start from all labels.  */

  /* Set first range to all case labels.  */
  *min_idx1 = 1;
  *max_idx1 = n - 1;

  if (i > j)
    return false;

  /* Make sure all the values of case labels [i , j] are contained in
     range [MIN, MAX].  Labels only partially inside the excluded
     range stay reachable, so shrink [i, j] accordingly.  */
  case_low = CASE_LOW (gimple_switch_label (stmt, i));
  case_high = CASE_HIGH (gimple_switch_label (stmt, j));
  if (tree_int_cst_compare (case_low, min) < 0)
    i += 1;
  if (case_high != NULL_TREE
      && tree_int_cst_compare (max, case_high) < 0)
    j -= 1;

  if (i > j)
    return false;

  /* If the range spans case labels [i, j], the corresponding anti-range spans
     the labels [1, i - 1] and [j + 1, n - 1].  */
  k = j + 1;
  l = n - 1;
  if (k > l)
    {
      k = 1;
      l = 0;
    }

  j = i - 1;
  i = 1;
  /* If the first piece [1, i-1] is empty, promote the second piece to
     be the first and mark the second empty.  */
  if (i > j)
    {
      i = k;
      j = l;
      k = 1;
      l = 0;
    }

  *min_idx1 = i;
  *max_idx1 = j;
  *min_idx2 = k;
  *max_idx2 = l;
  return false;
}
7817 | ||
8aea0bf0 RG |
/* Visit switch statement STMT.  If we can determine which edge
   will be taken out of STMT's basic block, record it in
   *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p)
{
  tree op, val;
  value_range *vr;
  size_t i = 0, j = 0, k, l;
  bool take_default;

  *taken_edge_p = NULL;
  op = gimple_switch_index (stmt);
  /* Only an SSA-name index can have a recorded range.  */
  if (TREE_CODE (op) != SSA_NAME)
    return SSA_PROP_VARYING;

  vr = get_value_range (op);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting switch expression with operand ");
      print_generic_expr (dump_file, op, 0);
      fprintf (dump_file, " with known range ");
      dump_value_range (dump_file, vr);
      fprintf (dump_file, "\n");
    }

  /* Need a constant (anti-)range to reason about the labels.  */
  if ((vr->type != VR_RANGE
       && vr->type != VR_ANTI_RANGE)
      || symbolic_range_p (vr))
    return SSA_PROP_VARYING;

  /* Find the single edge that is taken from the switch expression.
     [i, j] and [k, l] are the (up to two) label spans reachable
     under VR.  */
  take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);

  /* Check if the range spans no CASE_LABEL.  If so, we only reach the default
     label.  */
  if (j < i)
    {
      gcc_assert (take_default);
      val = gimple_switch_default_label (stmt);
    }
  else
    {
      /* Check if labels with index i to j and maybe the default label
	 are all reaching the same label.  */

      val = gimple_switch_label (stmt, i);
      if (take_default
	  && CASE_LABEL (gimple_switch_default_label (stmt))
	     != CASE_LABEL (val))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "  not a single destination for this "
		     "range\n");
	  return SSA_PROP_VARYING;
	}
      for (++i; i <= j; ++i)
	{
	  if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "  not a single destination for this "
			 "range\n");
	      return SSA_PROP_VARYING;
	    }
	}
      /* Also require the second label span to reach the same label.  */
      for (; k <= l; ++k)
	{
	  if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "  not a single destination for this "
			 "range\n");
	      return SSA_PROP_VARYING;
	    }
	}
    }

  *taken_edge_p = find_edge (gimple_bb (stmt),
			     label_to_block (CASE_LABEL (val)));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "  will take edge to ");
      print_generic_stmt (dump_file, CASE_LABEL (val), 0);
    }

  return SSA_PROP_INTERESTING;
}
7909 | ||
7910 | ||
0bca51f0 DN |
7911 | /* Evaluate statement STMT. If the statement produces a useful range, |
7912 | return SSA_PROP_INTERESTING and record the SSA name with the | |
7913 | interesting range into *OUTPUT_P. | |
7914 | ||
7915 | If STMT is a conditional branch and we can determine its truth | |
7916 | value, the taken edge is recorded in *TAKEN_EDGE_P. | |
7917 | ||
7918 | If STMT produces a varying value, return SSA_PROP_VARYING. */ | |
7919 | ||
7920 | static enum ssa_prop_result | |
355fe088 | 7921 | vrp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p) |
0bca51f0 DN |
7922 | { |
7923 | tree def; | |
7924 | ssa_op_iter iter; | |
0bca51f0 DN |
7925 | |
7926 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
7927 | { | |
7928 | fprintf (dump_file, "\nVisiting statement:\n"); | |
726a989a | 7929 | print_gimple_stmt (dump_file, stmt, 0, dump_flags); |
0bca51f0 DN |
7930 | } |
7931 | ||
cd6ea7a2 RH |
7932 | if (!stmt_interesting_for_vrp (stmt)) |
7933 | gcc_assert (stmt_ends_bb_p (stmt)); | |
7934 | else if (is_gimple_assign (stmt) || is_gimple_call (stmt)) | |
2284b034 | 7935 | return vrp_visit_assignment_or_call (stmt, output_p); |
726a989a | 7936 | else if (gimple_code (stmt) == GIMPLE_COND) |
538dd0b7 | 7937 | return vrp_visit_cond_stmt (as_a <gcond *> (stmt), taken_edge_p); |
726a989a | 7938 | else if (gimple_code (stmt) == GIMPLE_SWITCH) |
538dd0b7 | 7939 | return vrp_visit_switch_stmt (as_a <gswitch *> (stmt), taken_edge_p); |
0bca51f0 DN |
7940 | |
7941 | /* All other statements produce nothing of interest for VRP, so mark | |
7942 | their outputs varying and prevent further simulation. */ | |
7943 | FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF) | |
b565d777 | 7944 | set_value_range_to_varying (get_value_range (def)); |
0bca51f0 DN |
7945 | |
7946 | return SSA_PROP_VARYING; | |
7947 | } | |
7948 | ||
b54e19c2 RG |
7949 | /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and |
7950 | { VR1TYPE, VR0MIN, VR0MAX } and store the result | |
7951 | in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest | |
7952 | possible such range. The resulting range is not canonicalized. */ | |
7953 | ||
7954 | static void | |
7955 | union_ranges (enum value_range_type *vr0type, | |
7956 | tree *vr0min, tree *vr0max, | |
7957 | enum value_range_type vr1type, | |
7958 | tree vr1min, tree vr1max) | |
7959 | { | |
7960 | bool mineq = operand_equal_p (*vr0min, vr1min, 0); | |
7961 | bool maxeq = operand_equal_p (*vr0max, vr1max, 0); | |
7962 | ||
7963 | /* [] is vr0, () is vr1 in the following classification comments. */ | |
7964 | if (mineq && maxeq) | |
7965 | { | |
7966 | /* [( )] */ | |
7967 | if (*vr0type == vr1type) | |
7968 | /* Nothing to do for equal ranges. */ | |
7969 | ; | |
7970 | else if ((*vr0type == VR_RANGE | |
7971 | && vr1type == VR_ANTI_RANGE) | |
7972 | || (*vr0type == VR_ANTI_RANGE | |
7973 | && vr1type == VR_RANGE)) | |
7974 | { | |
7975 | /* For anti-range with range union the result is varying. */ | |
7976 | goto give_up; | |
7977 | } | |
7978 | else | |
7979 | gcc_unreachable (); | |
7980 | } | |
7981 | else if (operand_less_p (*vr0max, vr1min) == 1 | |
7982 | || operand_less_p (vr1max, *vr0min) == 1) | |
7983 | { | |
7984 | /* [ ] ( ) or ( ) [ ] | |
7985 | If the ranges have an empty intersection, result of the union | |
7986 | operation is the anti-range or if both are anti-ranges | |
7987 | it covers all. */ | |
7988 | if (*vr0type == VR_ANTI_RANGE | |
7989 | && vr1type == VR_ANTI_RANGE) | |
7990 | goto give_up; | |
7991 | else if (*vr0type == VR_ANTI_RANGE | |
7992 | && vr1type == VR_RANGE) | |
7993 | ; | |
7994 | else if (*vr0type == VR_RANGE | |
7995 | && vr1type == VR_ANTI_RANGE) | |
7996 | { | |
7997 | *vr0type = vr1type; | |
7998 | *vr0min = vr1min; | |
7999 | *vr0max = vr1max; | |
8000 | } | |
8001 | else if (*vr0type == VR_RANGE | |
8002 | && vr1type == VR_RANGE) | |
8003 | { | |
8004 | /* The result is the convex hull of both ranges. */ | |
8005 | if (operand_less_p (*vr0max, vr1min) == 1) | |
8006 | { | |
8007 | /* If the result can be an anti-range, create one. */ | |
8008 | if (TREE_CODE (*vr0max) == INTEGER_CST | |
8009 | && TREE_CODE (vr1min) == INTEGER_CST | |
8010 | && vrp_val_is_min (*vr0min) | |
8011 | && vrp_val_is_max (vr1max)) | |
8012 | { | |
8013 | tree min = int_const_binop (PLUS_EXPR, | |
807e902e KZ |
8014 | *vr0max, |
8015 | build_int_cst (TREE_TYPE (*vr0max), 1)); | |
b54e19c2 | 8016 | tree max = int_const_binop (MINUS_EXPR, |
807e902e KZ |
8017 | vr1min, |
8018 | build_int_cst (TREE_TYPE (vr1min), 1)); | |
b54e19c2 RG |
8019 | if (!operand_less_p (max, min)) |
8020 | { | |
8021 | *vr0type = VR_ANTI_RANGE; | |
8022 | *vr0min = min; | |
8023 | *vr0max = max; | |
8024 | } | |
8025 | else | |
8026 | *vr0max = vr1max; | |
8027 | } | |
8028 | else | |
8029 | *vr0max = vr1max; | |
8030 | } | |
8031 | else | |
8032 | { | |
8033 | /* If the result can be an anti-range, create one. */ | |
8034 | if (TREE_CODE (vr1max) == INTEGER_CST | |
8035 | && TREE_CODE (*vr0min) == INTEGER_CST | |
8036 | && vrp_val_is_min (vr1min) | |
8037 | && vrp_val_is_max (*vr0max)) | |
8038 | { | |
8039 | tree min = int_const_binop (PLUS_EXPR, | |
807e902e KZ |
8040 | vr1max, |
8041 | build_int_cst (TREE_TYPE (vr1max), 1)); | |
b54e19c2 | 8042 | tree max = int_const_binop (MINUS_EXPR, |
807e902e KZ |
8043 | *vr0min, |
8044 | build_int_cst (TREE_TYPE (*vr0min), 1)); | |
b54e19c2 RG |
8045 | if (!operand_less_p (max, min)) |
8046 | { | |
8047 | *vr0type = VR_ANTI_RANGE; | |
8048 | *vr0min = min; | |
8049 | *vr0max = max; | |
8050 | } | |
8051 | else | |
8052 | *vr0min = vr1min; | |
8053 | } | |
8054 | else | |
8055 | *vr0min = vr1min; | |
8056 | } | |
8057 | } | |
8058 | else | |
8059 | gcc_unreachable (); | |
8060 | } | |
8061 | else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1) | |
8062 | && (mineq || operand_less_p (*vr0min, vr1min) == 1)) | |
8063 | { | |
8064 | /* [ ( ) ] or [( ) ] or [ ( )] */ | |
8065 | if (*vr0type == VR_RANGE | |
8066 | && vr1type == VR_RANGE) | |
8067 | ; | |
8068 | else if (*vr0type == VR_ANTI_RANGE | |
8069 | && vr1type == VR_ANTI_RANGE) | |
8070 | { | |
8071 | *vr0type = vr1type; | |
8072 | *vr0min = vr1min; | |
8073 | *vr0max = vr1max; | |
8074 | } | |
8075 | else if (*vr0type == VR_ANTI_RANGE | |
8076 | && vr1type == VR_RANGE) | |
8077 | { | |
8078 | /* Arbitrarily choose the right or left gap. */ | |
8079 | if (!mineq && TREE_CODE (vr1min) == INTEGER_CST) | |
807e902e KZ |
8080 | *vr0max = int_const_binop (MINUS_EXPR, vr1min, |
8081 | build_int_cst (TREE_TYPE (vr1min), 1)); | |
b54e19c2 | 8082 | else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST) |
807e902e KZ |
8083 | *vr0min = int_const_binop (PLUS_EXPR, vr1max, |
8084 | build_int_cst (TREE_TYPE (vr1max), 1)); | |
b54e19c2 RG |
8085 | else |
8086 | goto give_up; | |
8087 | } | |
8088 | else if (*vr0type == VR_RANGE | |
8089 | && vr1type == VR_ANTI_RANGE) | |
8090 | /* The result covers everything. */ | |
8091 | goto give_up; | |
8092 | else | |
8093 | gcc_unreachable (); | |
8094 | } | |
8095 | else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1) | |
8096 | && (mineq || operand_less_p (vr1min, *vr0min) == 1)) | |
8097 | { | |
8098 | /* ( [ ] ) or ([ ] ) or ( [ ]) */ | |
8099 | if (*vr0type == VR_RANGE | |
8100 | && vr1type == VR_RANGE) | |
8101 | { | |
8102 | *vr0type = vr1type; | |
8103 | *vr0min = vr1min; | |
8104 | *vr0max = vr1max; | |
8105 | } | |
8106 | else if (*vr0type == VR_ANTI_RANGE | |
8107 | && vr1type == VR_ANTI_RANGE) | |
8108 | ; | |
8109 | else if (*vr0type == VR_RANGE | |
8110 | && vr1type == VR_ANTI_RANGE) | |
8111 | { | |
8112 | *vr0type = VR_ANTI_RANGE; | |
8113 | if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST) | |
8114 | { | |
807e902e KZ |
8115 | *vr0max = int_const_binop (MINUS_EXPR, *vr0min, |
8116 | build_int_cst (TREE_TYPE (*vr0min), 1)); | |
b54e19c2 RG |
8117 | *vr0min = vr1min; |
8118 | } | |
8119 | else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST) | |
8120 | { | |
807e902e KZ |
8121 | *vr0min = int_const_binop (PLUS_EXPR, *vr0max, |
8122 | build_int_cst (TREE_TYPE (*vr0max), 1)); | |
b54e19c2 RG |
8123 | *vr0max = vr1max; |
8124 | } | |
8125 | else | |
8126 | goto give_up; | |
8127 | } | |
8128 | else if (*vr0type == VR_ANTI_RANGE | |
8129 | && vr1type == VR_RANGE) | |
8130 | /* The result covers everything. */ | |
8131 | goto give_up; | |
8132 | else | |
8133 | gcc_unreachable (); | |
8134 | } | |
8135 | else if ((operand_less_p (vr1min, *vr0max) == 1 | |
8136 | || operand_equal_p (vr1min, *vr0max, 0)) | |
5ef0de9b JJ |
8137 | && operand_less_p (*vr0min, vr1min) == 1 |
8138 | && operand_less_p (*vr0max, vr1max) == 1) | |
b54e19c2 RG |
8139 | { |
8140 | /* [ ( ] ) or [ ]( ) */ | |
8141 | if (*vr0type == VR_RANGE | |
8142 | && vr1type == VR_RANGE) | |
8143 | *vr0max = vr1max; | |
8144 | else if (*vr0type == VR_ANTI_RANGE | |
8145 | && vr1type == VR_ANTI_RANGE) | |
8146 | *vr0min = vr1min; | |
8147 | else if (*vr0type == VR_ANTI_RANGE | |
8148 | && vr1type == VR_RANGE) | |
8149 | { | |
8150 | if (TREE_CODE (vr1min) == INTEGER_CST) | |
807e902e KZ |
8151 | *vr0max = int_const_binop (MINUS_EXPR, vr1min, |
8152 | build_int_cst (TREE_TYPE (vr1min), 1)); | |
b54e19c2 RG |
8153 | else |
8154 | goto give_up; | |
8155 | } | |
8156 | else if (*vr0type == VR_RANGE | |
8157 | && vr1type == VR_ANTI_RANGE) | |
8158 | { | |
8159 | if (TREE_CODE (*vr0max) == INTEGER_CST) | |
8160 | { | |
8161 | *vr0type = vr1type; | |
807e902e KZ |
8162 | *vr0min = int_const_binop (PLUS_EXPR, *vr0max, |
8163 | build_int_cst (TREE_TYPE (*vr0max), 1)); | |
b54e19c2 RG |
8164 | *vr0max = vr1max; |
8165 | } | |
8166 | else | |
8167 | goto give_up; | |
8168 | } | |
8169 | else | |
8170 | gcc_unreachable (); | |
8171 | } | |
8172 | else if ((operand_less_p (*vr0min, vr1max) == 1 | |
8173 | || operand_equal_p (*vr0min, vr1max, 0)) | |
5ef0de9b JJ |
8174 | && operand_less_p (vr1min, *vr0min) == 1 |
8175 | && operand_less_p (vr1max, *vr0max) == 1) | |
b54e19c2 RG |
8176 | { |
8177 | /* ( [ ) ] or ( )[ ] */ | |
8178 | if (*vr0type == VR_RANGE | |
8179 | && vr1type == VR_RANGE) | |
8180 | *vr0min = vr1min; | |
8181 | else if (*vr0type == VR_ANTI_RANGE | |
8182 | && vr1type == VR_ANTI_RANGE) | |
8183 | *vr0max = vr1max; | |
8184 | else if (*vr0type == VR_ANTI_RANGE | |
8185 | && vr1type == VR_RANGE) | |
8186 | { | |
8187 | if (TREE_CODE (vr1max) == INTEGER_CST) | |
807e902e KZ |
8188 | *vr0min = int_const_binop (PLUS_EXPR, vr1max, |
8189 | build_int_cst (TREE_TYPE (vr1max), 1)); | |
b54e19c2 RG |
8190 | else |
8191 | goto give_up; | |
8192 | } | |
8193 | else if (*vr0type == VR_RANGE | |
8194 | && vr1type == VR_ANTI_RANGE) | |
8195 | { | |
8196 | if (TREE_CODE (*vr0min) == INTEGER_CST) | |
8197 | { | |
8198 | *vr0type = vr1type; | |
8199 | *vr0min = vr1min; | |
807e902e KZ |
8200 | *vr0max = int_const_binop (MINUS_EXPR, *vr0min, |
8201 | build_int_cst (TREE_TYPE (*vr0min), 1)); | |
b54e19c2 RG |
8202 | } |
8203 | else | |
8204 | goto give_up; | |
8205 | } | |
8206 | else | |
8207 | gcc_unreachable (); | |
8208 | } | |
8209 | else | |
8210 | goto give_up; | |
8211 | ||
8212 | return; | |
8213 | ||
8214 | give_up: | |
8215 | *vr0type = VR_VARYING; | |
8216 | *vr0min = NULL_TREE; | |
8217 | *vr0max = NULL_TREE; | |
8218 | } | |
8219 | ||
3928c098 RG |
/* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
   { VR1TYPE, VR1MIN, VR1MAX } and store the result
   in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
   possible such range.  The resulting range is not canonicalized.  */

static void
intersect_ranges (enum value_range_type *vr0type,
		  tree *vr0min, tree *vr0max,
		  enum value_range_type vr1type,
		  tree vr1min, tree vr1max)
{
  bool mineq = operand_equal_p (*vr0min, vr1min, 0);
  bool maxeq = operand_equal_p (*vr0max, vr1max, 0);

  /* [] is vr0, () is vr1 in the following classification comments.  */
  if (mineq && maxeq)
    {
      /* [(  )] -- identical bounds.  */
      if (*vr0type == vr1type)
	/* Nothing to do for equal ranges.  */
	;
      else if ((*vr0type == VR_RANGE
		&& vr1type == VR_ANTI_RANGE)
	       || (*vr0type == VR_ANTI_RANGE
		   && vr1type == VR_RANGE))
	{
	  /* For anti-range with range intersection the result is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if (operand_less_p (*vr0max, vr1min) == 1
	   || operand_less_p (vr1max, *vr0min) == 1)
    {
      /* [ ] ( ) or ( ) [ ]
	 If the ranges have an empty intersection, the result of the
	 intersect operation is the range for intersecting an
	 anti-range with a range or empty when intersecting two ranges.  */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_ANTI_RANGE)
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Disjoint ranges: the intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If the anti-ranges are adjacent to each other merge them.  */
	  if (TREE_CODE (*vr0max) == INTEGER_CST
	      && TREE_CODE (vr1min) == INTEGER_CST
	      && operand_less_p (*vr0max, vr1min) == 1
	      && integer_onep (int_const_binop (MINUS_EXPR,
						vr1min, *vr0max)))
	    *vr0max = vr1max;
	  else if (TREE_CODE (vr1max) == INTEGER_CST
		   && TREE_CODE (*vr0min) == INTEGER_CST
		   && operand_less_p (vr1max, *vr0min) == 1
		   && integer_onep (int_const_binop (MINUS_EXPR,
						     *vr0min, vr1max)))
	    *vr0min = vr1min;
	  /* Else arbitrarily take VR0.  */
	}
    }
  else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
    {
      /* [ (  ) ] or [(  ) ] or [ (  )] -- vr1 is contained in vr0.  */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	{
	  /* If both are ranges the result is the inner one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* Choose the right gap if the left one is empty.  */
	  if (mineq)
	    {
	      if (TREE_CODE (vr1max) == INTEGER_CST)
		*vr0min = int_const_binop (PLUS_EXPR, vr1max,
					   build_int_cst (TREE_TYPE (vr1max), 1));
	      else
		*vr0min = vr1max;
	    }
	  /* Choose the left gap if the right one is empty.  */
	  else if (maxeq)
	    {
	      if (TREE_CODE (vr1min) == INTEGER_CST)
		*vr0max = int_const_binop (MINUS_EXPR, vr1min,
					   build_int_cst (TREE_TYPE (vr1min), 1));
	      else
		*vr0max = vr1min;
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (*vr0min)
		   && vrp_val_is_max (*vr0max))
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	  /* Else choose the range.  */
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	/* If both are anti-ranges the result is the outer one.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
    {
      /* ( [  ] ) or ([  ] ) or ( [  ]) -- vr0 is contained in vr1.  */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	/* Choose the inner range.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Choose the right gap if the left is empty.  */
	  if (mineq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0max) == INTEGER_CST)
		*vr0min = int_const_binop (PLUS_EXPR, *vr0max,
					   build_int_cst (TREE_TYPE (*vr0max), 1));
	      else
		*vr0min = *vr0max;
	      *vr0max = vr1max;
	    }
	  /* Choose the left gap if the right is empty.  */
	  else if (maxeq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0min) == INTEGER_CST)
		*vr0max = int_const_binop (MINUS_EXPR, *vr0min,
					   build_int_cst (TREE_TYPE (*vr0min), 1));
	      else
		*vr0max = *vr0min;
	      *vr0min = vr1min;
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (vr1min)
		   && vrp_val_is_max (vr1max))
	    ;
	  /* Else choose the range.  */
	  else
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If both are anti-ranges the result is the outer one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (vr1type == VR_ANTI_RANGE
	       && *vr0type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (vr1min, *vr0max) == 1
	    || operand_equal_p (vr1min, *vr0max, 0))
	   && operand_less_p (*vr0min, vr1min) == 1)
    {
      /* [  (  ]  ) or [  ](  ) -- vr1 overlaps or adjoins vr0 on
	 the right.  */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
				       build_int_cst (TREE_TYPE (vr1min), 1));
	  else
	    *vr0max = vr1min;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_RANGE;
	  if (TREE_CODE (*vr0max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
				       build_int_cst (TREE_TYPE (*vr0max), 1));
	  else
	    *vr0min = *vr0max;
	  *vr0max = vr1max;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (*vr0min, vr1max) == 1
	    || operand_equal_p (*vr0min, vr1max, 0))
	   && operand_less_p (vr1min, *vr0min) == 1)
    {
      /* (  [  )  ] or (  )[  ] -- vr1 overlaps or adjoins vr0 on
	 the left.  */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
				       build_int_cst (TREE_TYPE (vr1max), 1));
	  else
	    *vr0min = vr1max;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_RANGE;
	  if (TREE_CODE (*vr0min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
				       build_int_cst (TREE_TYPE (*vr0min), 1));
	  else
	    *vr0max = *vr0min;
	  *vr0min = vr1min;
	}
      else
	gcc_unreachable ();
    }

  /* As a fallback simply use { *VRTYPE, *VR0MIN, *VR0MAX } as
     result for the intersection.  That's always a conservative
     correct estimate.  */

  return;
}
8494 | ||
8495 | ||
8496 | /* Intersect the two value-ranges *VR0 and *VR1 and store the result | |
8497 | in *VR0. This may not be the smallest possible such range. */ | |
8498 | ||
8499 | static void | |
526ceb68 | 8500 | vrp_intersect_ranges_1 (value_range *vr0, value_range *vr1) |
3928c098 | 8501 | { |
526ceb68 | 8502 | value_range saved; |
3928c098 RG |
8503 | |
8504 | /* If either range is VR_VARYING the other one wins. */ | |
8505 | if (vr1->type == VR_VARYING) | |
8506 | return; | |
8507 | if (vr0->type == VR_VARYING) | |
8508 | { | |
8509 | copy_value_range (vr0, vr1); | |
8510 | return; | |
8511 | } | |
8512 | ||
8513 | /* When either range is VR_UNDEFINED the resulting range is | |
8514 | VR_UNDEFINED, too. */ | |
8515 | if (vr0->type == VR_UNDEFINED) | |
8516 | return; | |
8517 | if (vr1->type == VR_UNDEFINED) | |
8518 | { | |
8519 | set_value_range_to_undefined (vr0); | |
8520 | return; | |
8521 | } | |
8522 | ||
8523 | /* Save the original vr0 so we can return it as conservative intersection | |
8524 | result when our worker turns things to varying. */ | |
8525 | saved = *vr0; | |
8526 | intersect_ranges (&vr0->type, &vr0->min, &vr0->max, | |
8527 | vr1->type, vr1->min, vr1->max); | |
8528 | /* Make sure to canonicalize the result though as the inversion of a | |
8529 | VR_RANGE can still be a VR_RANGE. */ | |
8530 | set_and_canonicalize_value_range (vr0, vr0->type, | |
8531 | vr0->min, vr0->max, vr0->equiv); | |
8532 | /* If that failed, use the saved original VR0. */ | |
8533 | if (vr0->type == VR_VARYING) | |
8534 | { | |
8535 | *vr0 = saved; | |
8536 | return; | |
8537 | } | |
8538 | /* If the result is VR_UNDEFINED there is no need to mess with | |
8539 | the equivalencies. */ | |
8540 | if (vr0->type == VR_UNDEFINED) | |
8541 | return; | |
8542 | ||
8543 | /* The resulting set of equivalences for range intersection is the union of | |
8544 | the two sets. */ | |
8545 | if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv) | |
8546 | bitmap_ior_into (vr0->equiv, vr1->equiv); | |
8547 | else if (vr1->equiv && !vr0->equiv) | |
8548 | bitmap_copy (vr0->equiv, vr1->equiv); | |
8549 | } | |
0bca51f0 | 8550 | |
105b7208 | 8551 | static void |
526ceb68 | 8552 | vrp_intersect_ranges (value_range *vr0, value_range *vr1) |
105b7208 RG |
8553 | { |
8554 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
8555 | { | |
8556 | fprintf (dump_file, "Intersecting\n "); | |
8557 | dump_value_range (dump_file, vr0); | |
8558 | fprintf (dump_file, "\nand\n "); | |
8559 | dump_value_range (dump_file, vr1); | |
8560 | fprintf (dump_file, "\n"); | |
8561 | } | |
8562 | vrp_intersect_ranges_1 (vr0, vr1); | |
8563 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
8564 | { | |
8565 | fprintf (dump_file, "to\n "); | |
8566 | dump_value_range (dump_file, vr0); | |
8567 | fprintf (dump_file, "\n"); | |
8568 | } | |
8569 | } | |
8570 | ||
/* Meet operation for value ranges.  Given two value ranges VR0 and
   VR1, store in VR0 a range that contains both VR0 and VR1.  This
   may not be the smallest possible such range.  The equivalence set
   of the result is the intersection of the two input sets.  */

static void
vrp_meet_1 (value_range *vr0, value_range *vr1)
{
  value_range saved;

  /* UNDEFINED is the neutral element of the meet: the other operand
     is the result.  */
  if (vr0->type == VR_UNDEFINED)
    {
      set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
      return;
    }

  if (vr1->type == VR_UNDEFINED)
    {
      /* VR0 already has the resulting range.  */
      return;
    }

  /* VARYING absorbs everything.  */
  if (vr0->type == VR_VARYING)
    {
      /* Nothing to do.  VR0 already has the resulting range.  */
      return;
    }

  if (vr1->type == VR_VARYING)
    {
      set_value_range_to_varying (vr0);
      return;
    }

  saved = *vr0;
  union_ranges (&vr0->type, &vr0->min, &vr0->max,
		vr1->type, vr1->min, vr1->max);
  if (vr0->type == VR_VARYING)
    {
      /* Failed to find an efficient meet.  Before giving up and setting
	 the result to VARYING, see if we can at least derive a useful
	 anti-range.  FIXME, all this nonsense about distinguishing
	 anti-ranges from ranges is necessary because of the odd
	 semantics of range_includes_zero_p and friends.  */
      if (((saved.type == VR_RANGE
	    && range_includes_zero_p (saved.min, saved.max) == 0)
	   || (saved.type == VR_ANTI_RANGE
	       && range_includes_zero_p (saved.min, saved.max) == 1))
	  && ((vr1->type == VR_RANGE
	       && range_includes_zero_p (vr1->min, vr1->max) == 0)
	      || (vr1->type == VR_ANTI_RANGE
		  && range_includes_zero_p (vr1->min, vr1->max) == 1)))
	{
	  /* Both operands exclude zero, so the meet is at least ~[0, 0].  */
	  set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));

	  /* Since this meet operation did not result from the meeting of
	     two equivalent names, VR0 cannot have any equivalences.  */
	  if (vr0->equiv)
	    bitmap_clear (vr0->equiv);
	  return;
	}

      set_value_range_to_varying (vr0);
      return;
    }
  set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
				    vr0->equiv);
  if (vr0->type == VR_VARYING)
    return;

  /* The resulting set of equivalences is always the intersection of
     the two sets.  */
  if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
    bitmap_and_into (vr0->equiv, vr1->equiv);
  else if (vr0->equiv && !vr1->equiv)
    bitmap_clear (vr0->equiv);
}
cf35667e | 8647 | |
b54e19c2 | 8648 | static void |
526ceb68 | 8649 | vrp_meet (value_range *vr0, value_range *vr1) |
b54e19c2 RG |
8650 | { |
8651 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
8652 | { | |
8653 | fprintf (dump_file, "Meeting\n "); | |
8654 | dump_value_range (dump_file, vr0); | |
8655 | fprintf (dump_file, "\nand\n "); | |
8656 | dump_value_range (dump_file, vr1); | |
8657 | fprintf (dump_file, "\n"); | |
8658 | } | |
8659 | vrp_meet_1 (vr0, vr1); | |
8660 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
8661 | { | |
8662 | fprintf (dump_file, "to\n "); | |
8663 | dump_value_range (dump_file, vr0); | |
8664 | fprintf (dump_file, "\n"); | |
e82d7e60 | 8665 | } |
0bca51f0 DN |
8666 | } |
8667 | ||
227858d1 | 8668 | |
0bca51f0 DN |
8669 | /* Visit all arguments for PHI node PHI that flow through executable |
8670 | edges. If a valid value range can be derived from all the incoming | |
8671 | value ranges, set a new range for the LHS of PHI. */ | |
8672 | ||
8673 | static enum ssa_prop_result | |
538dd0b7 | 8674 | vrp_visit_phi_node (gphi *phi) |
0bca51f0 | 8675 | { |
726a989a | 8676 | size_t i; |
0bca51f0 | 8677 | tree lhs = PHI_RESULT (phi); |
526ceb68 TS |
8678 | value_range *lhs_vr = get_value_range (lhs); |
8679 | value_range vr_result = VR_INITIALIZER; | |
0d5a9e78 | 8680 | bool first = true; |
fc6827fe | 8681 | int edges, old_edges; |
b09bae68 | 8682 | struct loop *l; |
227858d1 | 8683 | |
0bca51f0 DN |
8684 | if (dump_file && (dump_flags & TDF_DETAILS)) |
8685 | { | |
8686 | fprintf (dump_file, "\nVisiting PHI node: "); | |
726a989a | 8687 | print_gimple_stmt (dump_file, phi, 0, dump_flags); |
0bca51f0 DN |
8688 | } |
8689 | ||
fc6827fe | 8690 | edges = 0; |
726a989a | 8691 | for (i = 0; i < gimple_phi_num_args (phi); i++) |
0bca51f0 | 8692 | { |
726a989a | 8693 | edge e = gimple_phi_arg_edge (phi, i); |
0bca51f0 DN |
8694 | |
8695 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
8696 | { | |
8697 | fprintf (dump_file, | |
6e5799b9 | 8698 | " Argument #%d (%d -> %d %sexecutable)\n", |
726a989a | 8699 | (int) i, e->src->index, e->dest->index, |
0bca51f0 DN |
8700 | (e->flags & EDGE_EXECUTABLE) ? "" : "not "); |
8701 | } | |
8702 | ||
8703 | if (e->flags & EDGE_EXECUTABLE) | |
8704 | { | |
8705 | tree arg = PHI_ARG_DEF (phi, i); | |
526ceb68 | 8706 | value_range vr_arg; |
0bca51f0 | 8707 | |
fc6827fe ILT |
8708 | ++edges; |
8709 | ||
0bca51f0 | 8710 | if (TREE_CODE (arg) == SSA_NAME) |
31ab1cc9 RG |
8711 | { |
8712 | vr_arg = *(get_value_range (arg)); | |
c25a0c60 RB |
8713 | /* Do not allow equivalences or symbolic ranges to leak in from |
8714 | backedges. That creates invalid equivalencies. | |
8715 | See PR53465 and PR54767. */ | |
6e5799b9 | 8716 | if (e->flags & EDGE_DFS_BACK) |
c25a0c60 | 8717 | { |
6e5799b9 RB |
8718 | if (vr_arg.type == VR_RANGE |
8719 | || vr_arg.type == VR_ANTI_RANGE) | |
c25a0c60 | 8720 | { |
6e5799b9 RB |
8721 | vr_arg.equiv = NULL; |
8722 | if (symbolic_range_p (&vr_arg)) | |
8723 | { | |
8724 | vr_arg.type = VR_VARYING; | |
8725 | vr_arg.min = NULL_TREE; | |
8726 | vr_arg.max = NULL_TREE; | |
8727 | } | |
8728 | } | |
8729 | } | |
8730 | else | |
8731 | { | |
8732 | /* If the non-backedge arguments range is VR_VARYING then | |
8733 | we can still try recording a simple equivalence. */ | |
8734 | if (vr_arg.type == VR_VARYING) | |
8735 | { | |
8736 | vr_arg.type = VR_RANGE; | |
8737 | vr_arg.min = arg; | |
8738 | vr_arg.max = arg; | |
8739 | vr_arg.equiv = NULL; | |
c25a0c60 RB |
8740 | } |
8741 | } | |
31ab1cc9 | 8742 | } |
0bca51f0 DN |
8743 | else |
8744 | { | |
635bfae0 | 8745 | if (TREE_OVERFLOW_P (arg)) |
3f5c390d | 8746 | arg = drop_tree_overflow (arg); |
8cf781f0 | 8747 | |
0bca51f0 DN |
8748 | vr_arg.type = VR_RANGE; |
8749 | vr_arg.min = arg; | |
8750 | vr_arg.max = arg; | |
227858d1 | 8751 | vr_arg.equiv = NULL; |
0bca51f0 DN |
8752 | } |
8753 | ||
8754 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
8755 | { | |
8756 | fprintf (dump_file, "\t"); | |
8757 | print_generic_expr (dump_file, arg, dump_flags); | |
6e5799b9 | 8758 | fprintf (dump_file, ": "); |
0bca51f0 DN |
8759 | dump_value_range (dump_file, &vr_arg); |
8760 | fprintf (dump_file, "\n"); | |
8761 | } | |
8762 | ||
0d5a9e78 JJ |
8763 | if (first) |
8764 | copy_value_range (&vr_result, &vr_arg); | |
8765 | else | |
8766 | vrp_meet (&vr_result, &vr_arg); | |
8767 | first = false; | |
0bca51f0 DN |
8768 | |
8769 | if (vr_result.type == VR_VARYING) | |
8770 | break; | |
8771 | } | |
8772 | } | |
8773 | ||
8774 | if (vr_result.type == VR_VARYING) | |
227858d1 | 8775 | goto varying; |
a9b332d4 RG |
8776 | else if (vr_result.type == VR_UNDEFINED) |
8777 | goto update_range; | |
0bca51f0 | 8778 | |
fc6827fe ILT |
8779 | old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)]; |
8780 | vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges; | |
8781 | ||
0bca51f0 DN |
8782 | /* To prevent infinite iterations in the algorithm, derive ranges |
8783 | when the new value is slightly bigger or smaller than the | |
fc6827fe ILT |
8784 | previous one. We don't do this if we have seen a new executable |
8785 | edge; this helps us avoid an overflow infinity for conditionals | |
2f33158f RG |
8786 | which are not in a loop. If the old value-range was VR_UNDEFINED |
8787 | use the updated range and iterate one more time. */ | |
e3488283 | 8788 | if (edges > 0 |
7bec30e1 | 8789 | && gimple_phi_num_args (phi) > 1 |
2f33158f RG |
8790 | && edges == old_edges |
8791 | && lhs_vr->type != VR_UNDEFINED) | |
e3488283 | 8792 | { |
a896172d RB |
8793 | /* Compare old and new ranges, fall back to varying if the |
8794 | values are not comparable. */ | |
e3488283 | 8795 | int cmp_min = compare_values (lhs_vr->min, vr_result.min); |
a896172d RB |
8796 | if (cmp_min == -2) |
8797 | goto varying; | |
e3488283 | 8798 | int cmp_max = compare_values (lhs_vr->max, vr_result.max); |
a896172d RB |
8799 | if (cmp_max == -2) |
8800 | goto varying; | |
e3488283 RG |
8801 | |
8802 | /* For non VR_RANGE or for pointers fall back to varying if | |
8803 | the range changed. */ | |
8804 | if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE | |
8805 | || POINTER_TYPE_P (TREE_TYPE (lhs))) | |
8806 | && (cmp_min != 0 || cmp_max != 0)) | |
8807 | goto varying; | |
8808 | ||
026c3cfd | 8809 | /* If the new minimum is larger than the previous one |
771c9501 RB |
8810 | retain the old value. If the new minimum value is smaller |
8811 | than the previous one and not -INF go all the way to -INF + 1. | |
8812 | In the first case, to avoid infinite bouncing between different | |
8813 | minimums, and in the other case to avoid iterating millions of | |
8814 | times to reach -INF. Going to -INF + 1 also lets the following | |
8815 | iteration compute whether there will be any overflow, at the | |
8816 | expense of one additional iteration. */ | |
8817 | if (cmp_min < 0) | |
8818 | vr_result.min = lhs_vr->min; | |
8819 | else if (cmp_min > 0 | |
8820 | && !vrp_val_is_min (vr_result.min)) | |
8821 | vr_result.min | |
8822 | = int_const_binop (PLUS_EXPR, | |
8823 | vrp_val_min (TREE_TYPE (vr_result.min)), | |
8824 | build_int_cst (TREE_TYPE (vr_result.min), 1)); | |
8825 | ||
8826 | /* Similarly for the maximum value. */ | |
8827 | if (cmp_max > 0) | |
8828 | vr_result.max = lhs_vr->max; | |
8829 | else if (cmp_max < 0 | |
8830 | && !vrp_val_is_max (vr_result.max)) | |
8831 | vr_result.max | |
8832 | = int_const_binop (MINUS_EXPR, | |
8833 | vrp_val_max (TREE_TYPE (vr_result.min)), | |
8834 | build_int_cst (TREE_TYPE (vr_result.min), 1)); | |
e3488283 RG |
8835 | |
8836 | /* If we dropped either bound to +-INF then if this is a loop | |
8837 | PHI node SCEV may known more about its value-range. */ | |
35e2b6e1 | 8838 | if (cmp_min > 0 || cmp_min < 0 |
e3488283 | 8839 | || cmp_max < 0 || cmp_max > 0) |
35e2b6e1 RB |
8840 | goto scev_check; |
8841 | ||
8842 | goto infinite_check; | |
0bca51f0 DN |
8843 | } |
8844 | ||
8845 | /* If the new range is different than the previous value, keep | |
8846 | iterating. */ | |
a9b332d4 | 8847 | update_range: |
227858d1 | 8848 | if (update_value_range (lhs, &vr_result)) |
1936a7d4 RG |
8849 | { |
8850 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
8851 | { | |
8852 | fprintf (dump_file, "Found new range for "); | |
8853 | print_generic_expr (dump_file, lhs, 0); | |
8854 | fprintf (dump_file, ": "); | |
8855 | dump_value_range (dump_file, &vr_result); | |
6e5799b9 | 8856 | fprintf (dump_file, "\n"); |
1936a7d4 RG |
8857 | } |
8858 | ||
9c3cb360 JJ |
8859 | if (vr_result.type == VR_VARYING) |
8860 | return SSA_PROP_VARYING; | |
8861 | ||
1936a7d4 RG |
8862 | return SSA_PROP_INTERESTING; |
8863 | } | |
0bca51f0 DN |
8864 | |
8865 | /* Nothing changed, don't add outgoing edges. */ | |
8866 | return SSA_PROP_NOT_INTERESTING; | |
227858d1 | 8867 | |
227858d1 | 8868 | varying: |
35e2b6e1 RB |
8869 | set_value_range_to_varying (&vr_result); |
8870 | ||
8871 | scev_check: | |
8872 | /* If this is a loop PHI node SCEV may known more about its value-range. | |
8873 | scev_check can be reached from two paths, one is a fall through from above | |
8874 | "varying" label, the other is direct goto from code block which tries to | |
8875 | avoid infinite simulation. */ | |
8876 | if ((l = loop_containing_stmt (phi)) | |
8877 | && l->header == gimple_bb (phi)) | |
8878 | adjust_range_with_scev (&vr_result, l, phi, lhs); | |
8879 | ||
8880 | infinite_check: | |
8881 | /* If we will end up with a (-INF, +INF) range, set it to | |
8882 | VARYING. Same if the previous max value was invalid for | |
8883 | the type and we end up with vr_result.min > vr_result.max. */ | |
8884 | if ((vr_result.type == VR_RANGE || vr_result.type == VR_ANTI_RANGE) | |
8885 | && !((vrp_val_is_max (vr_result.max) && vrp_val_is_min (vr_result.min)) | |
8886 | || compare_values (vr_result.min, vr_result.max) > 0)) | |
8887 | goto update_range; | |
8888 | ||
8889 | /* No match found. Set the LHS to VARYING. */ | |
227858d1 DN |
8890 | set_value_range_to_varying (lhs_vr); |
8891 | return SSA_PROP_VARYING; | |
0bca51f0 DN |
8892 | } |
8893 | ||
30821654 PB |
8894 | /* Simplify boolean operations if the source is known |
8895 | to be already a boolean. */ | |
8896 | static bool | |
355fe088 | 8897 | simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt) |
30821654 PB |
8898 | { |
8899 | enum tree_code rhs_code = gimple_assign_rhs_code (stmt); | |
7e29ba60 | 8900 | tree lhs, op0, op1; |
30821654 PB |
8901 | bool need_conversion; |
8902 | ||
98958241 KT |
8903 | /* We handle only !=/== case here. */ |
8904 | gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR); | |
8905 | ||
30821654 | 8906 | op0 = gimple_assign_rhs1 (stmt); |
7e29ba60 RG |
8907 | if (!op_with_boolean_value_range_p (op0)) |
8908 | return false; | |
30821654 | 8909 | |
98958241 | 8910 | op1 = gimple_assign_rhs2 (stmt); |
7e29ba60 RG |
8911 | if (!op_with_boolean_value_range_p (op1)) |
8912 | return false; | |
98958241 | 8913 | |
7e29ba60 RG |
8914 | /* Reduce number of cases to handle to NE_EXPR. As there is no |
8915 | BIT_XNOR_EXPR we cannot replace A == B with a single statement. */ | |
8916 | if (rhs_code == EQ_EXPR) | |
30821654 | 8917 | { |
7e29ba60 | 8918 | if (TREE_CODE (op1) == INTEGER_CST) |
807e902e KZ |
8919 | op1 = int_const_binop (BIT_XOR_EXPR, op1, |
8920 | build_int_cst (TREE_TYPE (op1), 1)); | |
30821654 | 8921 | else |
7e29ba60 | 8922 | return false; |
30821654 PB |
8923 | } |
8924 | ||
7e29ba60 RG |
8925 | lhs = gimple_assign_lhs (stmt); |
8926 | need_conversion | |
8927 | = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0)); | |
30821654 | 8928 | |
7e29ba60 | 8929 | /* Make sure to not sign-extend a 1-bit 1 when converting the result. */ |
e61451e8 RG |
8930 | if (need_conversion |
8931 | && !TYPE_UNSIGNED (TREE_TYPE (op0)) | |
7e29ba60 RG |
8932 | && TYPE_PRECISION (TREE_TYPE (op0)) == 1 |
8933 | && TYPE_PRECISION (TREE_TYPE (lhs)) > 1) | |
30821654 PB |
8934 | return false; |
8935 | ||
7e29ba60 RG |
8936 | /* For A != 0 we can substitute A itself. */ |
8937 | if (integer_zerop (op1)) | |
8938 | gimple_assign_set_rhs_with_ops (gsi, | |
8939 | need_conversion | |
00d66391 | 8940 | ? NOP_EXPR : TREE_CODE (op0), op0); |
7e29ba60 RG |
8941 | /* For A != B we substitute A ^ B. Either with conversion. */ |
8942 | else if (need_conversion) | |
8943 | { | |
b731b390 | 8944 | tree tem = make_ssa_name (TREE_TYPE (op0)); |
538dd0b7 | 8945 | gassign *newop |
0d0e4a03 | 8946 | = gimple_build_assign (tem, BIT_XOR_EXPR, op0, op1); |
7e29ba60 | 8947 | gsi_insert_before (gsi, newop, GSI_SAME_STMT); |
00d66391 | 8948 | gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem); |
7e29ba60 RG |
8949 | } |
8950 | /* Or without. */ | |
8951 | else | |
8952 | gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1); | |
30821654 | 8953 | update_stmt (gsi_stmt (*gsi)); |
7e29ba60 | 8954 | |
30821654 PB |
8955 | return true; |
8956 | } | |
8957 | ||
1a557723 JL |
8958 | /* Simplify a division or modulo operator to a right shift or |
8959 | bitwise and if the first operand is unsigned or is greater | |
f51286f2 JJ |
8960 | than zero and the second operand is an exact power of two. |
8961 | For TRUNC_MOD_EXPR op0 % op1 with constant op1, optimize it | |
8962 | into just op0 if op0's range is known to be a subset of | |
8963 | [-op1 + 1, op1 - 1] for signed and [0, op1 - 1] for unsigned | |
8964 | modulo. */ | |
a513fe88 | 8965 | |
30821654 | 8966 | static bool |
20b8d734 | 8967 | simplify_div_or_mod_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt) |
a513fe88 | 8968 | { |
726a989a | 8969 | enum tree_code rhs_code = gimple_assign_rhs_code (stmt); |
1a557723 | 8970 | tree val = NULL; |
726a989a RB |
8971 | tree op0 = gimple_assign_rhs1 (stmt); |
8972 | tree op1 = gimple_assign_rhs2 (stmt); | |
526ceb68 | 8973 | value_range *vr = get_value_range (op0); |
f51286f2 JJ |
8974 | |
8975 | if (rhs_code == TRUNC_MOD_EXPR | |
8976 | && TREE_CODE (op1) == INTEGER_CST | |
8977 | && tree_int_cst_sgn (op1) == 1 | |
8978 | && range_int_cst_p (vr) | |
8979 | && tree_int_cst_lt (vr->max, op1)) | |
8980 | { | |
8981 | if (TYPE_UNSIGNED (TREE_TYPE (op0)) | |
8982 | || tree_int_cst_sgn (vr->min) >= 0 | |
8983 | || tree_int_cst_lt (fold_unary (NEGATE_EXPR, TREE_TYPE (op1), op1), | |
8984 | vr->min)) | |
8985 | { | |
8986 | /* If op0 already has the range op0 % op1 has, | |
8987 | then TRUNC_MOD_EXPR won't change anything. */ | |
8988 | gimple_stmt_iterator gsi = gsi_for_stmt (stmt); | |
8989 | gimple_assign_set_rhs_from_tree (&gsi, op0); | |
8990 | update_stmt (stmt); | |
8991 | return true; | |
8992 | } | |
8993 | } | |
8994 | ||
8995 | if (!integer_pow2p (op1)) | |
1a557723 | 8996 | { |
20b8d734 JJ |
8997 | /* X % -Y can be only optimized into X % Y either if |
8998 | X is not INT_MIN, or Y is not -1. Fold it now, as after | |
8999 | remove_range_assertions the range info might be not available | |
9000 | anymore. */ | |
9001 | if (rhs_code == TRUNC_MOD_EXPR | |
9002 | && fold_stmt (gsi, follow_single_use_edges)) | |
9003 | return true; | |
9004 | return false; | |
1a557723 | 9005 | } |
20b8d734 JJ |
9006 | |
9007 | if (TYPE_UNSIGNED (TREE_TYPE (op0))) | |
9008 | val = integer_one_node; | |
1a557723 JL |
9009 | else |
9010 | { | |
12df8a7e ILT |
9011 | bool sop = false; |
9012 | ||
737b0891 | 9013 | val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop); |
0c948c27 ILT |
9014 | |
9015 | if (val | |
9016 | && sop | |
9017 | && integer_onep (val) | |
9018 | && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC)) | |
9019 | { | |
726a989a | 9020 | location_t location; |
0c948c27 | 9021 | |
726a989a RB |
9022 | if (!gimple_has_location (stmt)) |
9023 | location = input_location; | |
0c948c27 | 9024 | else |
726a989a | 9025 | location = gimple_location (stmt); |
fab922b1 MLI |
9026 | warning_at (location, OPT_Wstrict_overflow, |
9027 | "assuming signed overflow does not occur when " | |
9028 | "simplifying %</%> or %<%%%> to %<>>%> or %<&%>"); | |
0c948c27 | 9029 | } |
1a557723 JL |
9030 | } |
9031 | ||
9032 | if (val && integer_onep (val)) | |
a513fe88 | 9033 | { |
1a557723 | 9034 | tree t; |
a513fe88 | 9035 | |
1a557723 JL |
9036 | if (rhs_code == TRUNC_DIV_EXPR) |
9037 | { | |
45a2c477 | 9038 | t = build_int_cst (integer_type_node, tree_log2 (op1)); |
726a989a RB |
9039 | gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR); |
9040 | gimple_assign_set_rhs1 (stmt, op0); | |
9041 | gimple_assign_set_rhs2 (stmt, t); | |
1a557723 JL |
9042 | } |
9043 | else | |
a513fe88 | 9044 | { |
1a557723 | 9045 | t = build_int_cst (TREE_TYPE (op1), 1); |
d35936ab | 9046 | t = int_const_binop (MINUS_EXPR, op1, t); |
1a557723 | 9047 | t = fold_convert (TREE_TYPE (op0), t); |
726a989a RB |
9048 | |
9049 | gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR); | |
9050 | gimple_assign_set_rhs1 (stmt, op0); | |
9051 | gimple_assign_set_rhs2 (stmt, t); | |
1a557723 JL |
9052 | } |
9053 | ||
1a557723 | 9054 | update_stmt (stmt); |
30821654 | 9055 | return true; |
1a557723 | 9056 | } |
30821654 PB |
9057 | |
9058 | return false; | |
1a557723 | 9059 | } |
a513fe88 | 9060 | |
da7db2ce NS |
9061 | /* Simplify a min or max if the ranges of the two operands are |
9062 | disjoint. Return true if we do simplify. */ | |
9063 | ||
9064 | static bool | |
355fe088 | 9065 | simplify_min_or_max_using_ranges (gimple *stmt) |
da7db2ce NS |
9066 | { |
9067 | tree op0 = gimple_assign_rhs1 (stmt); | |
9068 | tree op1 = gimple_assign_rhs2 (stmt); | |
9069 | bool sop = false; | |
9070 | tree val; | |
9071 | ||
9072 | val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges | |
9073 | (LE_EXPR, op0, op1, &sop)); | |
9074 | if (!val) | |
9075 | { | |
9076 | sop = false; | |
9077 | val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges | |
9078 | (LT_EXPR, op0, op1, &sop)); | |
9079 | } | |
9080 | ||
9081 | if (val) | |
9082 | { | |
9083 | if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC)) | |
9084 | { | |
9085 | location_t location; | |
9086 | ||
9087 | if (!gimple_has_location (stmt)) | |
9088 | location = input_location; | |
9089 | else | |
9090 | location = gimple_location (stmt); | |
9091 | warning_at (location, OPT_Wstrict_overflow, | |
9092 | "assuming signed overflow does not occur when " | |
9093 | "simplifying %<min/max (X,Y)%> to %<X%> or %<Y%>"); | |
9094 | } | |
9095 | ||
9096 | /* VAL == TRUE -> OP0 < or <= op1 | |
9097 | VAL == FALSE -> OP0 > or >= op1. */ | |
9098 | tree res = ((gimple_assign_rhs_code (stmt) == MAX_EXPR) | |
9099 | == integer_zerop (val)) ? op0 : op1; | |
9100 | gimple_stmt_iterator gsi = gsi_for_stmt (stmt); | |
9101 | gimple_assign_set_rhs_from_tree (&gsi, res); | |
9102 | update_stmt (stmt); | |
9103 | return true; | |
9104 | } | |
9105 | ||
9106 | return false; | |
9107 | } | |
9108 | ||
1a557723 JL |
9109 | /* If the operand to an ABS_EXPR is >= 0, then eliminate the |
9110 | ABS_EXPR. If the operand is <= 0, then simplify the | |
9111 | ABS_EXPR into a NEGATE_EXPR. */ | |
9112 | ||
30821654 | 9113 | static bool |
355fe088 | 9114 | simplify_abs_using_ranges (gimple *stmt) |
1a557723 | 9115 | { |
726a989a | 9116 | tree op = gimple_assign_rhs1 (stmt); |
526ceb68 | 9117 | value_range *vr = get_value_range (op); |
1a557723 | 9118 | |
8299dd5c | 9119 | if (vr) |
1a557723 | 9120 | { |
8299dd5c | 9121 | tree val = NULL; |
12df8a7e ILT |
9122 | bool sop = false; |
9123 | ||
9124 | val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop); | |
1a557723 JL |
9125 | if (!val) |
9126 | { | |
8299dd5c NS |
9127 | /* The range is neither <= 0 nor > 0. Now see if it is |
9128 | either < 0 or >= 0. */ | |
12df8a7e | 9129 | sop = false; |
8299dd5c | 9130 | val = compare_range_with_value (LT_EXPR, vr, integer_zero_node, |
12df8a7e | 9131 | &sop); |
1a557723 | 9132 | } |
a513fe88 | 9133 | |
8299dd5c | 9134 | if (val) |
1a557723 | 9135 | { |
0c948c27 ILT |
9136 | if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC)) |
9137 | { | |
726a989a | 9138 | location_t location; |
0c948c27 | 9139 | |
726a989a RB |
9140 | if (!gimple_has_location (stmt)) |
9141 | location = input_location; | |
0c948c27 | 9142 | else |
726a989a | 9143 | location = gimple_location (stmt); |
fab922b1 MLI |
9144 | warning_at (location, OPT_Wstrict_overflow, |
9145 | "assuming signed overflow does not occur when " | |
9146 | "simplifying %<abs (X)%> to %<X%> or %<-X%>"); | |
0c948c27 ILT |
9147 | } |
9148 | ||
726a989a | 9149 | gimple_assign_set_rhs1 (stmt, op); |
8299dd5c | 9150 | if (integer_zerop (val)) |
726a989a | 9151 | gimple_assign_set_rhs_code (stmt, SSA_NAME); |
8299dd5c NS |
9152 | else |
9153 | gimple_assign_set_rhs_code (stmt, NEGATE_EXPR); | |
1a557723 | 9154 | update_stmt (stmt); |
30821654 | 9155 | return true; |
1a557723 JL |
9156 | } |
9157 | } | |
30821654 PB |
9158 | |
9159 | return false; | |
1a557723 JL |
9160 | } |
9161 | ||
8556f58f JJ |
9162 | /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR. |
9163 | If all the bits that are being cleared by & are already | |
9164 | known to be zero from VR, or all the bits that are being | |
9165 | set by | are already known to be one from VR, the bit | |
9166 | operation is redundant. */ | |
9167 | ||
9168 | static bool | |
355fe088 | 9169 | simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt) |
8556f58f JJ |
9170 | { |
9171 | tree op0 = gimple_assign_rhs1 (stmt); | |
9172 | tree op1 = gimple_assign_rhs2 (stmt); | |
9173 | tree op = NULL_TREE; | |
526ceb68 TS |
9174 | value_range vr0 = VR_INITIALIZER; |
9175 | value_range vr1 = VR_INITIALIZER; | |
807e902e KZ |
9176 | wide_int may_be_nonzero0, may_be_nonzero1; |
9177 | wide_int must_be_nonzero0, must_be_nonzero1; | |
9178 | wide_int mask; | |
8556f58f JJ |
9179 | |
9180 | if (TREE_CODE (op0) == SSA_NAME) | |
9181 | vr0 = *(get_value_range (op0)); | |
9182 | else if (is_gimple_min_invariant (op0)) | |
9183 | set_value_range_to_value (&vr0, op0, NULL); | |
9184 | else | |
9185 | return false; | |
9186 | ||
9187 | if (TREE_CODE (op1) == SSA_NAME) | |
9188 | vr1 = *(get_value_range (op1)); | |
9189 | else if (is_gimple_min_invariant (op1)) | |
9190 | set_value_range_to_value (&vr1, op1, NULL); | |
9191 | else | |
9192 | return false; | |
9193 | ||
807e902e KZ |
9194 | if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0, |
9195 | &must_be_nonzero0)) | |
8556f58f | 9196 | return false; |
807e902e KZ |
9197 | if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1, |
9198 | &must_be_nonzero1)) | |
8556f58f JJ |
9199 | return false; |
9200 | ||
9201 | switch (gimple_assign_rhs_code (stmt)) | |
9202 | { | |
9203 | case BIT_AND_EXPR: | |
27bcd47c | 9204 | mask = may_be_nonzero0.and_not (must_be_nonzero1); |
807e902e | 9205 | if (mask == 0) |
8556f58f JJ |
9206 | { |
9207 | op = op0; | |
9208 | break; | |
9209 | } | |
27bcd47c | 9210 | mask = may_be_nonzero1.and_not (must_be_nonzero0); |
807e902e | 9211 | if (mask == 0) |
8556f58f JJ |
9212 | { |
9213 | op = op1; | |
9214 | break; | |
9215 | } | |
9216 | break; | |
9217 | case BIT_IOR_EXPR: | |
27bcd47c | 9218 | mask = may_be_nonzero0.and_not (must_be_nonzero1); |
807e902e | 9219 | if (mask == 0) |
8556f58f JJ |
9220 | { |
9221 | op = op1; | |
9222 | break; | |
9223 | } | |
27bcd47c | 9224 | mask = may_be_nonzero1.and_not (must_be_nonzero0); |
807e902e | 9225 | if (mask == 0) |
8556f58f JJ |
9226 | { |
9227 | op = op0; | |
9228 | break; | |
9229 | } | |
9230 | break; | |
9231 | default: | |
9232 | gcc_unreachable (); | |
9233 | } | |
9234 | ||
9235 | if (op == NULL_TREE) | |
9236 | return false; | |
9237 | ||
00d66391 | 9238 | gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op); |
8556f58f JJ |
9239 | update_stmt (gsi_stmt (*gsi)); |
9240 | return true; | |
9241 | } | |
9242 | ||
d579f20b JL |
9243 | /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has |
9244 | a known value range VR. | |
9245 | ||
9246 | If there is one and only one value which will satisfy the | |
7ac753f9 PP |
9247 | conditional, then return that value. Else return NULL. |
9248 | ||
9249 | If signed overflow must be undefined for the value to satisfy | |
9250 | the conditional, then set *STRICT_OVERFLOW_P to true. */ | |
d579f20b JL |
9251 | |
9252 | static tree | |
9253 | test_for_singularity (enum tree_code cond_code, tree op0, | |
526ceb68 | 9254 | tree op1, value_range *vr, |
7ac753f9 | 9255 | bool *strict_overflow_p) |
d579f20b JL |
9256 | { |
9257 | tree min = NULL; | |
9258 | tree max = NULL; | |
9259 | ||
6af801f5 JJ |
9260 | /* Extract minimum/maximum values which satisfy the conditional as it was |
9261 | written. */ | |
d579f20b JL |
9262 | if (cond_code == LE_EXPR || cond_code == LT_EXPR) |
9263 | { | |
12df8a7e ILT |
9264 | /* This should not be negative infinity; there is no overflow |
9265 | here. */ | |
d579f20b JL |
9266 | min = TYPE_MIN_VALUE (TREE_TYPE (op0)); |
9267 | ||
9268 | max = op1; | |
12df8a7e | 9269 | if (cond_code == LT_EXPR && !is_overflow_infinity (max)) |
d579f20b JL |
9270 | { |
9271 | tree one = build_int_cst (TREE_TYPE (op0), 1); | |
a5ad7269 | 9272 | max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one); |
3fe5bcaf ILT |
9273 | if (EXPR_P (max)) |
9274 | TREE_NO_WARNING (max) = 1; | |
d579f20b JL |
9275 | } |
9276 | } | |
9277 | else if (cond_code == GE_EXPR || cond_code == GT_EXPR) | |
9278 | { | |
12df8a7e ILT |
9279 | /* This should not be positive infinity; there is no overflow |
9280 | here. */ | |
d579f20b JL |
9281 | max = TYPE_MAX_VALUE (TREE_TYPE (op0)); |
9282 | ||
9283 | min = op1; | |
12df8a7e | 9284 | if (cond_code == GT_EXPR && !is_overflow_infinity (min)) |
d579f20b JL |
9285 | { |
9286 | tree one = build_int_cst (TREE_TYPE (op0), 1); | |
f9fe7aed | 9287 | min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one); |
3fe5bcaf ILT |
9288 | if (EXPR_P (min)) |
9289 | TREE_NO_WARNING (min) = 1; | |
d579f20b JL |
9290 | } |
9291 | } | |
9292 | ||
9293 | /* Now refine the minimum and maximum values using any | |
9294 | value range information we have for op0. */ | |
9295 | if (min && max) | |
9296 | { | |
fbd43827 | 9297 | if (compare_values (vr->min, min) == 1) |
d579f20b | 9298 | min = vr->min; |
fbd43827 | 9299 | if (compare_values (vr->max, max) == -1) |
d579f20b JL |
9300 | max = vr->max; |
9301 | ||
f9fe7aed JL |
9302 | /* If the new min/max values have converged to a single value, |
9303 | then there is only one value which can satisfy the condition, | |
9304 | return that value. */ | |
9305 | if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min)) | |
7ac753f9 PP |
9306 | { |
9307 | if ((cond_code == LE_EXPR || cond_code == LT_EXPR) | |
9308 | && is_overflow_infinity (vr->max)) | |
9309 | *strict_overflow_p = true; | |
9310 | if ((cond_code == GE_EXPR || cond_code == GT_EXPR) | |
9311 | && is_overflow_infinity (vr->min)) | |
9312 | *strict_overflow_p = true; | |
9313 | ||
9314 | return min; | |
9315 | } | |
d579f20b JL |
9316 | } |
9317 | return NULL; | |
9318 | } | |
9319 | ||
ebbd90d8 JL |
9320 | /* Return whether the value range *VR fits in an integer type specified |
9321 | by PRECISION and UNSIGNED_P. */ | |
9322 | ||
9323 | static bool | |
526ceb68 | 9324 | range_fits_type_p (value_range *vr, unsigned dest_precision, signop dest_sgn) |
ebbd90d8 JL |
9325 | { |
9326 | tree src_type; | |
9327 | unsigned src_precision; | |
807e902e KZ |
9328 | widest_int tem; |
9329 | signop src_sgn; | |
ebbd90d8 JL |
9330 | |
9331 | /* We can only handle integral and pointer types. */ | |
9332 | src_type = TREE_TYPE (vr->min); | |
9333 | if (!INTEGRAL_TYPE_P (src_type) | |
9334 | && !POINTER_TYPE_P (src_type)) | |
9335 | return false; | |
9336 | ||
807e902e | 9337 | /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED, |
ebbd90d8 JL |
9338 | and so is an identity transform. */ |
9339 | src_precision = TYPE_PRECISION (TREE_TYPE (vr->min)); | |
807e902e KZ |
9340 | src_sgn = TYPE_SIGN (src_type); |
9341 | if ((src_precision < dest_precision | |
9342 | && !(dest_sgn == UNSIGNED && src_sgn == SIGNED)) | |
9343 | || (src_precision == dest_precision && src_sgn == dest_sgn)) | |
ebbd90d8 JL |
9344 | return true; |
9345 | ||
9346 | /* Now we can only handle ranges with constant bounds. */ | |
9347 | if (vr->type != VR_RANGE | |
9348 | || TREE_CODE (vr->min) != INTEGER_CST | |
9349 | || TREE_CODE (vr->max) != INTEGER_CST) | |
9350 | return false; | |
9351 | ||
807e902e | 9352 | /* For sign changes, the MSB of the wide_int has to be clear. |
ebbd90d8 | 9353 | An unsigned value with its MSB set cannot be represented by |
807e902e KZ |
9354 | a signed wide_int, while a negative value cannot be represented |
9355 | by an unsigned wide_int. */ | |
9356 | if (src_sgn != dest_sgn | |
9357 | && (wi::lts_p (vr->min, 0) || wi::lts_p (vr->max, 0))) | |
ebbd90d8 JL |
9358 | return false; |
9359 | ||
9360 | /* Then we can perform the conversion on both ends and compare | |
9361 | the result for equality. */ | |
807e902e KZ |
9362 | tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn); |
9363 | if (tem != wi::to_widest (vr->min)) | |
ebbd90d8 | 9364 | return false; |
807e902e KZ |
9365 | tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn); |
9366 | if (tem != wi::to_widest (vr->max)) | |
ebbd90d8 JL |
9367 | return false; |
9368 | ||
9369 | return true; | |
9370 | } | |
9371 | ||
22deefcb RB |
9372 | /* Simplify a conditional using a relational operator to an equality |
9373 | test if the range information indicates only one value can satisfy | |
9374 | the original conditional. */ | |
1a557723 | 9375 | |
30821654 | 9376 | static bool |
538dd0b7 | 9377 | simplify_cond_using_ranges (gcond *stmt) |
1a557723 | 9378 | { |
726a989a RB |
9379 | tree op0 = gimple_cond_lhs (stmt); |
9380 | tree op1 = gimple_cond_rhs (stmt); | |
9381 | enum tree_code cond_code = gimple_cond_code (stmt); | |
1a557723 | 9382 | |
22deefcb | 9383 | if (cond_code != NE_EXPR |
1a557723 JL |
9384 | && cond_code != EQ_EXPR |
9385 | && TREE_CODE (op0) == SSA_NAME | |
9386 | && INTEGRAL_TYPE_P (TREE_TYPE (op0)) | |
9387 | && is_gimple_min_invariant (op1)) | |
9388 | { | |
526ceb68 | 9389 | value_range *vr = get_value_range (op0); |
b8698a0f | 9390 | |
1a557723 JL |
9391 | /* If we have range information for OP0, then we might be |
9392 | able to simplify this conditional. */ | |
9393 | if (vr->type == VR_RANGE) | |
9394 | { | |
7ac753f9 PP |
9395 | enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON; |
9396 | bool sop = false; | |
9397 | tree new_tree = test_for_singularity (cond_code, op0, op1, vr, &sop); | |
1a557723 | 9398 | |
7ac753f9 PP |
9399 | if (new_tree |
9400 | && (!sop || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0)))) | |
1a557723 | 9401 | { |
d579f20b | 9402 | if (dump_file) |
1a557723 | 9403 | { |
d579f20b | 9404 | fprintf (dump_file, "Simplified relational "); |
726a989a | 9405 | print_gimple_stmt (dump_file, stmt, 0, 0); |
d579f20b | 9406 | fprintf (dump_file, " into "); |
a513fe88 JL |
9407 | } |
9408 | ||
726a989a RB |
9409 | gimple_cond_set_code (stmt, EQ_EXPR); |
9410 | gimple_cond_set_lhs (stmt, op0); | |
82d6e6fc | 9411 | gimple_cond_set_rhs (stmt, new_tree); |
726a989a | 9412 | |
d579f20b JL |
9413 | update_stmt (stmt); |
9414 | ||
9415 | if (dump_file) | |
a513fe88 | 9416 | { |
726a989a | 9417 | print_gimple_stmt (dump_file, stmt, 0, 0); |
d579f20b | 9418 | fprintf (dump_file, "\n"); |
a513fe88 | 9419 | } |
d579f20b | 9420 | |
7ac753f9 PP |
9421 | if (sop && issue_strict_overflow_warning (wc)) |
9422 | { | |
9423 | location_t location = input_location; | |
9424 | if (gimple_has_location (stmt)) | |
9425 | location = gimple_location (stmt); | |
9426 | ||
9427 | warning_at (location, OPT_Wstrict_overflow, | |
9428 | "assuming signed overflow does not occur when " | |
9429 | "simplifying conditional"); | |
9430 | } | |
9431 | ||
30821654 | 9432 | return true; |
a513fe88 JL |
9433 | } |
9434 | ||
d579f20b JL |
9435 | /* Try again after inverting the condition. We only deal |
9436 | with integral types here, so no need to worry about | |
9437 | issues with inverting FP comparisons. */ | |
7ac753f9 PP |
9438 | sop = false; |
9439 | new_tree = test_for_singularity | |
9440 | (invert_tree_comparison (cond_code, false), | |
9441 | op0, op1, vr, &sop); | |
d579f20b | 9442 | |
7ac753f9 PP |
9443 | if (new_tree |
9444 | && (!sop || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0)))) | |
1a557723 | 9445 | { |
d579f20b | 9446 | if (dump_file) |
1a557723 | 9447 | { |
d579f20b | 9448 | fprintf (dump_file, "Simplified relational "); |
726a989a | 9449 | print_gimple_stmt (dump_file, stmt, 0, 0); |
d579f20b | 9450 | fprintf (dump_file, " into "); |
1a557723 | 9451 | } |
d579f20b | 9452 | |
726a989a RB |
9453 | gimple_cond_set_code (stmt, NE_EXPR); |
9454 | gimple_cond_set_lhs (stmt, op0); | |
82d6e6fc | 9455 | gimple_cond_set_rhs (stmt, new_tree); |
726a989a | 9456 | |
d579f20b JL |
9457 | update_stmt (stmt); |
9458 | ||
9459 | if (dump_file) | |
9460 | { | |
726a989a | 9461 | print_gimple_stmt (dump_file, stmt, 0, 0); |
d579f20b JL |
9462 | fprintf (dump_file, "\n"); |
9463 | } | |
d579f20b | 9464 | |
7ac753f9 PP |
9465 | if (sop && issue_strict_overflow_warning (wc)) |
9466 | { | |
9467 | location_t location = input_location; | |
9468 | if (gimple_has_location (stmt)) | |
9469 | location = gimple_location (stmt); | |
9470 | ||
9471 | warning_at (location, OPT_Wstrict_overflow, | |
9472 | "assuming signed overflow does not occur when " | |
9473 | "simplifying conditional"); | |
9474 | } | |
9475 | ||
30821654 | 9476 | return true; |
1a557723 | 9477 | } |
a513fe88 JL |
9478 | } |
9479 | } | |
30821654 | 9480 | |
ebbd90d8 JL |
9481 | /* If we have a comparison of an SSA_NAME (OP0) against a constant, |
9482 | see if OP0 was set by a type conversion where the source of | |
9483 | the conversion is another SSA_NAME with a range that fits | |
9484 | into the range of OP0's type. | |
a32dfe9d | 9485 | |
ebbd90d8 JL |
9486 | If so, the conversion is redundant as the earlier SSA_NAME can be |
9487 | used for the comparison directly if we just massage the constant in the | |
9488 | comparison. */ | |
a32dfe9d | 9489 | if (TREE_CODE (op0) == SSA_NAME |
a32dfe9d JL |
9490 | && TREE_CODE (op1) == INTEGER_CST) |
9491 | { | |
355fe088 | 9492 | gimple *def_stmt = SSA_NAME_DEF_STMT (op0); |
a32dfe9d JL |
9493 | tree innerop; |
9494 | ||
9495 | if (!is_gimple_assign (def_stmt) | |
9496 | || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))) | |
9497 | return false; | |
9498 | ||
9499 | innerop = gimple_assign_rhs1 (def_stmt); | |
9500 | ||
a4ce1258 | 9501 | if (TREE_CODE (innerop) == SSA_NAME |
1014b6f5 | 9502 | && !POINTER_TYPE_P (TREE_TYPE (innerop)) |
1ebd5558 JJ |
9503 | && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop) |
9504 | && desired_pro_or_demotion_p (TREE_TYPE (innerop), TREE_TYPE (op0))) | |
a32dfe9d | 9505 | { |
526ceb68 | 9506 | value_range *vr = get_value_range (innerop); |
a32dfe9d JL |
9507 | |
9508 | if (range_int_cst_p (vr) | |
ebbd90d8 JL |
9509 | && range_fits_type_p (vr, |
9510 | TYPE_PRECISION (TREE_TYPE (op0)), | |
807e902e | 9511 | TYPE_SIGN (TREE_TYPE (op0))) |
2343af65 JL |
9512 | && int_fits_type_p (op1, TREE_TYPE (innerop)) |
9513 | /* The range must not have overflowed, or if it did overflow | |
9514 | we must not be wrapping/trapping overflow and optimizing | |
9515 | with strict overflow semantics. */ | |
9516 | && ((!is_negative_overflow_infinity (vr->min) | |
9517 | && !is_positive_overflow_infinity (vr->max)) | |
9518 | || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (innerop)))) | |
a32dfe9d | 9519 | { |
2343af65 JL |
9520 | /* If the range overflowed and the user has asked for warnings |
9521 | when strict overflow semantics were used to optimize code, | |
9522 | issue an appropriate warning. */ | |
ee68591e RB |
9523 | if (cond_code != EQ_EXPR && cond_code != NE_EXPR |
9524 | && (is_negative_overflow_infinity (vr->min) | |
9525 | || is_positive_overflow_infinity (vr->max)) | |
2343af65 JL |
9526 | && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_CONDITIONAL)) |
9527 | { | |
9528 | location_t location; | |
9529 | ||
9530 | if (!gimple_has_location (stmt)) | |
9531 | location = input_location; | |
9532 | else | |
9533 | location = gimple_location (stmt); | |
9534 | warning_at (location, OPT_Wstrict_overflow, | |
1ebd5558 JJ |
9535 | "assuming signed overflow does not occur when " |
9536 | "simplifying conditional"); | |
2343af65 JL |
9537 | } |
9538 | ||
a32dfe9d JL |
9539 | tree newconst = fold_convert (TREE_TYPE (innerop), op1); |
9540 | gimple_cond_set_lhs (stmt, innerop); | |
9541 | gimple_cond_set_rhs (stmt, newconst); | |
9542 | return true; | |
9543 | } | |
9544 | } | |
9545 | } | |
9546 | ||
30821654 | 9547 | return false; |
a513fe88 JL |
9548 | } |
9549 | ||
b7814a18 RG |
9550 | /* Simplify a switch statement using the value range of the switch |
9551 | argument. */ | |
9552 | ||
30821654 | 9553 | static bool |
538dd0b7 | 9554 | simplify_switch_using_ranges (gswitch *stmt) |
b7814a18 | 9555 | { |
726a989a | 9556 | tree op = gimple_switch_index (stmt); |
526ceb68 | 9557 | value_range *vr; |
b7814a18 RG |
9558 | bool take_default; |
9559 | edge e; | |
9560 | edge_iterator ei; | |
9561 | size_t i = 0, j = 0, n, n2; | |
726a989a | 9562 | tree vec2; |
b7814a18 | 9563 | switch_update su; |
8bb37e9a | 9564 | size_t k = 1, l = 0; |
b7814a18 | 9565 | |
92ef7fb1 MLI |
9566 | if (TREE_CODE (op) == SSA_NAME) |
9567 | { | |
9568 | vr = get_value_range (op); | |
b7814a18 | 9569 | |
92ef7fb1 | 9570 | /* We can only handle integer ranges. */ |
8bb37e9a TV |
9571 | if ((vr->type != VR_RANGE |
9572 | && vr->type != VR_ANTI_RANGE) | |
92ef7fb1 MLI |
9573 | || symbolic_range_p (vr)) |
9574 | return false; | |
b7814a18 | 9575 | |
92ef7fb1 | 9576 | /* Find case label for min/max of the value range. */ |
8bb37e9a | 9577 | take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l); |
92ef7fb1 MLI |
9578 | } |
9579 | else if (TREE_CODE (op) == INTEGER_CST) | |
9580 | { | |
9581 | take_default = !find_case_label_index (stmt, 1, op, &i); | |
9582 | if (take_default) | |
9583 | { | |
9584 | i = 1; | |
9585 | j = 0; | |
9586 | } | |
b8698a0f | 9587 | else |
92ef7fb1 MLI |
9588 | { |
9589 | j = i; | |
9590 | } | |
9591 | } | |
9592 | else | |
30821654 | 9593 | return false; |
b7814a18 | 9594 | |
726a989a | 9595 | n = gimple_switch_num_labels (stmt); |
b7814a18 RG |
9596 | |
9597 | /* Bail out if this is just all edges taken. */ | |
726a989a RB |
9598 | if (i == 1 |
9599 | && j == n - 1 | |
b7814a18 | 9600 | && take_default) |
30821654 | 9601 | return false; |
b7814a18 RG |
9602 | |
9603 | /* Build a new vector of taken case labels. */ | |
8bb37e9a | 9604 | vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default); |
726a989a | 9605 | n2 = 0; |
b7814a18 RG |
9606 | |
9607 | /* Add the default edge, if necessary. */ | |
9608 | if (take_default) | |
726a989a RB |
9609 | TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt); |
9610 | ||
9611 | for (; i <= j; ++i, ++n2) | |
9612 | TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i); | |
b7814a18 | 9613 | |
8bb37e9a TV |
9614 | for (; k <= l; ++k, ++n2) |
9615 | TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k); | |
9616 | ||
b7814a18 RG |
9617 | /* Mark needed edges. */ |
9618 | for (i = 0; i < n2; ++i) | |
9619 | { | |
726a989a | 9620 | e = find_edge (gimple_bb (stmt), |
b7814a18 RG |
9621 | label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i)))); |
9622 | e->aux = (void *)-1; | |
9623 | } | |
9624 | ||
9625 | /* Queue not needed edges for later removal. */ | |
726a989a | 9626 | FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs) |
b7814a18 RG |
9627 | { |
9628 | if (e->aux == (void *)-1) | |
9629 | { | |
9630 | e->aux = NULL; | |
9631 | continue; | |
9632 | } | |
9633 | ||
9634 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
9635 | { | |
9636 | fprintf (dump_file, "removing unreachable case label\n"); | |
9637 | } | |
9771b263 | 9638 | to_remove_edges.safe_push (e); |
1d86f5e9 | 9639 | e->flags &= ~EDGE_EXECUTABLE; |
b7814a18 RG |
9640 | } |
9641 | ||
9642 | /* And queue an update for the stmt. */ | |
9643 | su.stmt = stmt; | |
9644 | su.vec = vec2; | |
9771b263 | 9645 | to_update_switch_stmts.safe_push (su); |
30821654 | 9646 | return false; |
b7814a18 RG |
9647 | } |
9648 | ||
29c5134a RG |
9649 | /* Simplify an integral conversion from an SSA name in STMT. */ |
9650 | ||
9651 | static bool | |
355fe088 | 9652 | simplify_conversion_using_ranges (gimple *stmt) |
29c5134a | 9653 | { |
dcc95c20 | 9654 | tree innerop, middleop, finaltype; |
355fe088 | 9655 | gimple *def_stmt; |
526ceb68 | 9656 | value_range *innervr; |
807e902e | 9657 | signop inner_sgn, middle_sgn, final_sgn; |
6ebbd277 | 9658 | unsigned inner_prec, middle_prec, final_prec; |
807e902e | 9659 | widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax; |
dcc95c20 RG |
9660 | |
9661 | finaltype = TREE_TYPE (gimple_assign_lhs (stmt)); | |
9111c715 RG |
9662 | if (!INTEGRAL_TYPE_P (finaltype)) |
9663 | return false; | |
dcc95c20 RG |
9664 | middleop = gimple_assign_rhs1 (stmt); |
9665 | def_stmt = SSA_NAME_DEF_STMT (middleop); | |
29c5134a RG |
9666 | if (!is_gimple_assign (def_stmt) |
9667 | || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))) | |
9668 | return false; | |
dcc95c20 | 9669 | innerop = gimple_assign_rhs1 (def_stmt); |
999c1171 RB |
9670 | if (TREE_CODE (innerop) != SSA_NAME |
9671 | || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)) | |
29c5134a | 9672 | return false; |
dcc95c20 RG |
9673 | |
9674 | /* Get the value-range of the inner operand. */ | |
9675 | innervr = get_value_range (innerop); | |
9676 | if (innervr->type != VR_RANGE | |
9677 | || TREE_CODE (innervr->min) != INTEGER_CST | |
9678 | || TREE_CODE (innervr->max) != INTEGER_CST) | |
29c5134a | 9679 | return false; |
dcc95c20 RG |
9680 | |
9681 | /* Simulate the conversion chain to check if the result is equal if | |
9682 | the middle conversion is removed. */ | |
807e902e KZ |
9683 | innermin = wi::to_widest (innervr->min); |
9684 | innermax = wi::to_widest (innervr->max); | |
6ebbd277 JR |
9685 | |
9686 | inner_prec = TYPE_PRECISION (TREE_TYPE (innerop)); | |
9687 | middle_prec = TYPE_PRECISION (TREE_TYPE (middleop)); | |
9688 | final_prec = TYPE_PRECISION (finaltype); | |
9689 | ||
9690 | /* If the first conversion is not injective, the second must not | |
9691 | be widening. */ | |
807e902e KZ |
9692 | if (wi::gtu_p (innermax - innermin, |
9693 | wi::mask <widest_int> (middle_prec, false)) | |
6ebbd277 | 9694 | && middle_prec < final_prec) |
29c5134a | 9695 | return false; |
6ebbd277 JR |
9696 | /* We also want a medium value so that we can track the effect that |
9697 | narrowing conversions with sign change have. */ | |
807e902e KZ |
9698 | inner_sgn = TYPE_SIGN (TREE_TYPE (innerop)); |
9699 | if (inner_sgn == UNSIGNED) | |
9700 | innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false); | |
6ebbd277 | 9701 | else |
807e902e KZ |
9702 | innermed = 0; |
9703 | if (wi::cmp (innermin, innermed, inner_sgn) >= 0 | |
9704 | || wi::cmp (innermed, innermax, inner_sgn) >= 0) | |
6ebbd277 JR |
9705 | innermed = innermin; |
9706 | ||
807e902e KZ |
9707 | middle_sgn = TYPE_SIGN (TREE_TYPE (middleop)); |
9708 | middlemin = wi::ext (innermin, middle_prec, middle_sgn); | |
9709 | middlemed = wi::ext (innermed, middle_prec, middle_sgn); | |
9710 | middlemax = wi::ext (innermax, middle_prec, middle_sgn); | |
6ebbd277 | 9711 | |
7d5a0f1b RG |
9712 | /* Require that the final conversion applied to both the original |
9713 | and the intermediate range produces the same result. */ | |
807e902e KZ |
9714 | final_sgn = TYPE_SIGN (finaltype); |
9715 | if (wi::ext (middlemin, final_prec, final_sgn) | |
9716 | != wi::ext (innermin, final_prec, final_sgn) | |
9717 | || wi::ext (middlemed, final_prec, final_sgn) | |
9718 | != wi::ext (innermed, final_prec, final_sgn) | |
9719 | || wi::ext (middlemax, final_prec, final_sgn) | |
9720 | != wi::ext (innermax, final_prec, final_sgn)) | |
dcc95c20 RG |
9721 | return false; |
9722 | ||
9723 | gimple_assign_set_rhs1 (stmt, innerop); | |
29c5134a RG |
9724 | update_stmt (stmt); |
9725 | return true; | |
9726 | } | |
9727 | ||
ebeadd91 RG |
9728 | /* Simplify a conversion from integral SSA name to float in STMT. */ |
9729 | ||
9730 | static bool | |
355fe088 TS |
9731 | simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, |
9732 | gimple *stmt) | |
ebeadd91 RG |
9733 | { |
9734 | tree rhs1 = gimple_assign_rhs1 (stmt); | |
526ceb68 | 9735 | value_range *vr = get_value_range (rhs1); |
ef4bddc2 RS |
9736 | machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt))); |
9737 | machine_mode mode; | |
ebeadd91 | 9738 | tree tem; |
538dd0b7 | 9739 | gassign *conv; |
ebeadd91 RG |
9740 | |
9741 | /* We can only handle constant ranges. */ | |
9742 | if (vr->type != VR_RANGE | |
9743 | || TREE_CODE (vr->min) != INTEGER_CST | |
9744 | || TREE_CODE (vr->max) != INTEGER_CST) | |
9745 | return false; | |
9746 | ||
9747 | /* First check if we can use a signed type in place of an unsigned. */ | |
9748 | if (TYPE_UNSIGNED (TREE_TYPE (rhs1)) | |
9749 | && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0) | |
9750 | != CODE_FOR_nothing) | |
807e902e | 9751 | && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED)) |
ebeadd91 RG |
9752 | mode = TYPE_MODE (TREE_TYPE (rhs1)); |
9753 | /* If we can do the conversion in the current input mode do nothing. */ | |
9754 | else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), | |
5f4e6de3 | 9755 | TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing) |
ebeadd91 RG |
9756 | return false; |
9757 | /* Otherwise search for a mode we can use, starting from the narrowest | |
9758 | integer mode available. */ | |
9759 | else | |
9760 | { | |
9761 | mode = GET_CLASS_NARROWEST_MODE (MODE_INT); | |
9762 | do | |
9763 | { | |
9764 | /* If we cannot do a signed conversion to float from mode | |
9765 | or if the value-range does not fit in the signed type | |
9766 | try with a wider mode. */ | |
9767 | if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing | |
807e902e | 9768 | && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED)) |
ebeadd91 RG |
9769 | break; |
9770 | ||
9771 | mode = GET_MODE_WIDER_MODE (mode); | |
9772 | /* But do not widen the input. Instead leave that to the | |
9773 | optabs expansion code. */ | |
9774 | if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1))) | |
9775 | return false; | |
9776 | } | |
9777 | while (mode != VOIDmode); | |
9778 | if (mode == VOIDmode) | |
9779 | return false; | |
9780 | } | |
9781 | ||
9782 | /* It works, insert a truncation or sign-change before the | |
9783 | float conversion. */ | |
83d5977e | 9784 | tem = make_ssa_name (build_nonstandard_integer_type |
b731b390 | 9785 | (GET_MODE_PRECISION (mode), 0)); |
0d0e4a03 | 9786 | conv = gimple_build_assign (tem, NOP_EXPR, rhs1); |
ebeadd91 RG |
9787 | gsi_insert_before (gsi, conv, GSI_SAME_STMT); |
9788 | gimple_assign_set_rhs1 (stmt, tem); | |
9789 | update_stmt (stmt); | |
9790 | ||
9791 | return true; | |
9792 | } | |
9793 | ||
97286431 JJ |
9794 | /* Simplify an internal fn call using ranges if possible. */ |
9795 | ||
9796 | static bool | |
355fe088 | 9797 | simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt) |
97286431 JJ |
9798 | { |
9799 | enum tree_code subcode; | |
1304953e JJ |
9800 | bool is_ubsan = false; |
9801 | bool ovf = false; | |
97286431 JJ |
9802 | switch (gimple_call_internal_fn (stmt)) |
9803 | { | |
9804 | case IFN_UBSAN_CHECK_ADD: | |
9805 | subcode = PLUS_EXPR; | |
1304953e | 9806 | is_ubsan = true; |
97286431 JJ |
9807 | break; |
9808 | case IFN_UBSAN_CHECK_SUB: | |
9809 | subcode = MINUS_EXPR; | |
1304953e | 9810 | is_ubsan = true; |
97286431 JJ |
9811 | break; |
9812 | case IFN_UBSAN_CHECK_MUL: | |
1304953e JJ |
9813 | subcode = MULT_EXPR; |
9814 | is_ubsan = true; | |
9815 | break; | |
9816 | case IFN_ADD_OVERFLOW: | |
9817 | subcode = PLUS_EXPR; | |
9818 | break; | |
9819 | case IFN_SUB_OVERFLOW: | |
9820 | subcode = MINUS_EXPR; | |
9821 | break; | |
9822 | case IFN_MUL_OVERFLOW: | |
97286431 JJ |
9823 | subcode = MULT_EXPR; |
9824 | break; | |
9825 | default: | |
9826 | return false; | |
9827 | } | |
9828 | ||
97286431 JJ |
9829 | tree op0 = gimple_call_arg (stmt, 0); |
9830 | tree op1 = gimple_call_arg (stmt, 1); | |
1304953e JJ |
9831 | tree type; |
9832 | if (is_ubsan) | |
9833 | type = TREE_TYPE (op0); | |
9834 | else if (gimple_call_lhs (stmt) == NULL_TREE) | |
9835 | return false; | |
97286431 | 9836 | else |
1304953e JJ |
9837 | type = TREE_TYPE (TREE_TYPE (gimple_call_lhs (stmt))); |
9838 | if (!check_for_binary_op_overflow (subcode, type, op0, op1, &ovf) | |
9839 | || (is_ubsan && ovf)) | |
9840 | return false; | |
97286431 | 9841 | |
355fe088 | 9842 | gimple *g; |
1304953e JJ |
9843 | location_t loc = gimple_location (stmt); |
9844 | if (is_ubsan) | |
0d0e4a03 | 9845 | g = gimple_build_assign (gimple_call_lhs (stmt), subcode, op0, op1); |
368b454d JJ |
9846 | else |
9847 | { | |
1304953e JJ |
9848 | int prec = TYPE_PRECISION (type); |
9849 | tree utype = type; | |
9850 | if (ovf | |
9851 | || !useless_type_conversion_p (type, TREE_TYPE (op0)) | |
9852 | || !useless_type_conversion_p (type, TREE_TYPE (op1))) | |
9853 | utype = build_nonstandard_integer_type (prec, 1); | |
9854 | if (TREE_CODE (op0) == INTEGER_CST) | |
9855 | op0 = fold_convert (utype, op0); | |
9856 | else if (!useless_type_conversion_p (utype, TREE_TYPE (op0))) | |
9857 | { | |
0d0e4a03 | 9858 | g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op0); |
1304953e JJ |
9859 | gimple_set_location (g, loc); |
9860 | gsi_insert_before (gsi, g, GSI_SAME_STMT); | |
9861 | op0 = gimple_assign_lhs (g); | |
368b454d | 9862 | } |
1304953e JJ |
9863 | if (TREE_CODE (op1) == INTEGER_CST) |
9864 | op1 = fold_convert (utype, op1); | |
9865 | else if (!useless_type_conversion_p (utype, TREE_TYPE (op1))) | |
9866 | { | |
0d0e4a03 | 9867 | g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op1); |
1304953e JJ |
9868 | gimple_set_location (g, loc); |
9869 | gsi_insert_before (gsi, g, GSI_SAME_STMT); | |
9870 | op1 = gimple_assign_lhs (g); | |
9871 | } | |
0d0e4a03 | 9872 | g = gimple_build_assign (make_ssa_name (utype), subcode, op0, op1); |
1304953e JJ |
9873 | gimple_set_location (g, loc); |
9874 | gsi_insert_before (gsi, g, GSI_SAME_STMT); | |
9875 | if (utype != type) | |
9876 | { | |
0d0e4a03 JJ |
9877 | g = gimple_build_assign (make_ssa_name (type), NOP_EXPR, |
9878 | gimple_assign_lhs (g)); | |
1304953e JJ |
9879 | gimple_set_location (g, loc); |
9880 | gsi_insert_before (gsi, g, GSI_SAME_STMT); | |
9881 | } | |
0d0e4a03 JJ |
9882 | g = gimple_build_assign (gimple_call_lhs (stmt), COMPLEX_EXPR, |
9883 | gimple_assign_lhs (g), | |
9884 | build_int_cst (type, ovf)); | |
1304953e JJ |
9885 | } |
9886 | gimple_set_location (g, loc); | |
97286431 JJ |
9887 | gsi_replace (gsi, g, false); |
9888 | return true; | |
9889 | } | |
9890 | ||
1a557723 JL |
9891 | /* Simplify STMT using ranges if possible. */ |
9892 | ||
ff7ffb8f | 9893 | static bool |
30821654 | 9894 | simplify_stmt_using_ranges (gimple_stmt_iterator *gsi) |
1a557723 | 9895 | { |
355fe088 | 9896 | gimple *stmt = gsi_stmt (*gsi); |
726a989a | 9897 | if (is_gimple_assign (stmt)) |
1a557723 | 9898 | { |
726a989a | 9899 | enum tree_code rhs_code = gimple_assign_rhs_code (stmt); |
29c5134a | 9900 | tree rhs1 = gimple_assign_rhs1 (stmt); |
1a557723 | 9901 | |
30821654 PB |
9902 | switch (rhs_code) |
9903 | { | |
9904 | case EQ_EXPR: | |
9905 | case NE_EXPR: | |
98958241 KT |
9906 | /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity |
9907 | if the RHS is zero or one, and the LHS are known to be boolean | |
9908 | values. */ | |
29c5134a | 9909 | if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) |
30821654 PB |
9910 | return simplify_truth_ops_using_ranges (gsi, stmt); |
9911 | break; | |
9912 | ||
1a557723 JL |
9913 | /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR |
9914 | and BIT_AND_EXPR respectively if the first operand is greater | |
f51286f2 JJ |
9915 | than zero and the second operand is an exact power of two. |
9916 | Also optimize TRUNC_MOD_EXPR away if the second operand is | |
9917 | constant and the first operand already has the right value | |
9918 | range. */ | |
30821654 PB |
9919 | case TRUNC_DIV_EXPR: |
9920 | case TRUNC_MOD_EXPR: | |
f51286f2 JJ |
9921 | if (TREE_CODE (rhs1) == SSA_NAME |
9922 | && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) | |
20b8d734 | 9923 | return simplify_div_or_mod_using_ranges (gsi, stmt); |
30821654 | 9924 | break; |
1a557723 JL |
9925 | |
9926 | /* Transform ABS (X) into X or -X as appropriate. */ | |
30821654 | 9927 | case ABS_EXPR: |
29c5134a RG |
9928 | if (TREE_CODE (rhs1) == SSA_NAME |
9929 | && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) | |
30821654 PB |
9930 | return simplify_abs_using_ranges (stmt); |
9931 | break; | |
9932 | ||
8556f58f JJ |
9933 | case BIT_AND_EXPR: |
9934 | case BIT_IOR_EXPR: | |
9935 | /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR | |
9936 | if all the bits being cleared are already cleared or | |
9937 | all the bits being set are already set. */ | |
29c5134a | 9938 | if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) |
8556f58f JJ |
9939 | return simplify_bit_ops_using_ranges (gsi, stmt); |
9940 | break; | |
9941 | ||
29c5134a RG |
9942 | CASE_CONVERT: |
9943 | if (TREE_CODE (rhs1) == SSA_NAME | |
9944 | && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) | |
9945 | return simplify_conversion_using_ranges (stmt); | |
9946 | break; | |
9947 | ||
ebeadd91 RG |
9948 | case FLOAT_EXPR: |
9949 | if (TREE_CODE (rhs1) == SSA_NAME | |
9950 | && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) | |
9951 | return simplify_float_conversion_using_ranges (gsi, stmt); | |
9952 | break; | |
9953 | ||
da7db2ce NS |
9954 | case MIN_EXPR: |
9955 | case MAX_EXPR: | |
9956 | return simplify_min_or_max_using_ranges (stmt); | |
9957 | break; | |
9958 | ||
30821654 PB |
9959 | default: |
9960 | break; | |
9961 | } | |
1a557723 | 9962 | } |
726a989a | 9963 | else if (gimple_code (stmt) == GIMPLE_COND) |
538dd0b7 | 9964 | return simplify_cond_using_ranges (as_a <gcond *> (stmt)); |
726a989a | 9965 | else if (gimple_code (stmt) == GIMPLE_SWITCH) |
538dd0b7 | 9966 | return simplify_switch_using_ranges (as_a <gswitch *> (stmt)); |
97286431 JJ |
9967 | else if (is_gimple_call (stmt) |
9968 | && gimple_call_internal_p (stmt)) | |
9969 | return simplify_internal_call_using_ranges (gsi, stmt); | |
30821654 PB |
9970 | |
9971 | return false; | |
1a557723 JL |
9972 | } |
9973 | ||
ff7ffb8f RG |
9974 | /* If the statement pointed by SI has a predicate whose value can be |
9975 | computed using the value range information computed by VRP, compute | |
9976 | its value and return true. Otherwise, return false. */ | |
9977 | ||
9978 | static bool | |
9979 | fold_predicate_in (gimple_stmt_iterator *si) | |
9980 | { | |
9981 | bool assignment_p = false; | |
9982 | tree val; | |
355fe088 | 9983 | gimple *stmt = gsi_stmt (*si); |
ff7ffb8f RG |
9984 | |
9985 | if (is_gimple_assign (stmt) | |
9986 | && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison) | |
9987 | { | |
9988 | assignment_p = true; | |
9989 | val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt), | |
9990 | gimple_assign_rhs1 (stmt), | |
9991 | gimple_assign_rhs2 (stmt), | |
9992 | stmt); | |
9993 | } | |
538dd0b7 DM |
9994 | else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt)) |
9995 | val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt), | |
9996 | gimple_cond_lhs (cond_stmt), | |
9997 | gimple_cond_rhs (cond_stmt), | |
ff7ffb8f RG |
9998 | stmt); |
9999 | else | |
10000 | return false; | |
10001 | ||
10002 | if (val) | |
10003 | { | |
10004 | if (assignment_p) | |
10005 | val = fold_convert (gimple_expr_type (stmt), val); | |
b8698a0f | 10006 | |
ff7ffb8f RG |
10007 | if (dump_file) |
10008 | { | |
10009 | fprintf (dump_file, "Folding predicate "); | |
10010 | print_gimple_expr (dump_file, stmt, 0, 0); | |
10011 | fprintf (dump_file, " to "); | |
10012 | print_generic_expr (dump_file, val, 0); | |
10013 | fprintf (dump_file, "\n"); | |
10014 | } | |
10015 | ||
10016 | if (is_gimple_assign (stmt)) | |
10017 | gimple_assign_set_rhs_from_tree (si, val); | |
10018 | else | |
10019 | { | |
10020 | gcc_assert (gimple_code (stmt) == GIMPLE_COND); | |
538dd0b7 | 10021 | gcond *cond_stmt = as_a <gcond *> (stmt); |
ff7ffb8f | 10022 | if (integer_zerop (val)) |
538dd0b7 | 10023 | gimple_cond_make_false (cond_stmt); |
ff7ffb8f | 10024 | else if (integer_onep (val)) |
538dd0b7 | 10025 | gimple_cond_make_true (cond_stmt); |
ff7ffb8f RG |
10026 | else |
10027 | gcc_unreachable (); | |
10028 | } | |
10029 | ||
10030 | return true; | |
10031 | } | |
10032 | ||
10033 | return false; | |
10034 | } | |
10035 | ||
10036 | /* Callback for substitute_and_fold folding the stmt at *SI. */ | |
10037 | ||
10038 | static bool | |
10039 | vrp_fold_stmt (gimple_stmt_iterator *si) | |
10040 | { | |
10041 | if (fold_predicate_in (si)) | |
10042 | return true; | |
10043 | ||
10044 | return simplify_stmt_using_ranges (si); | |
10045 | } | |
10046 | ||
f6c72af4 JL |
10047 | /* Unwindable const/copy equivalences. */ |
10048 | const_and_copies *equiv_stack; | |
2090d6a0 | 10049 | |
0c948c27 ILT |
10050 | /* A trivial wrapper so that we can present the generic jump threading |
10051 | code with a simple API for simplifying statements. STMT is the | |
10052 | statement we want to simplify, WITHIN_STMT provides the location | |
10053 | for any overflow warnings. */ | |
10054 | ||
2090d6a0 | 10055 | static tree |
355fe088 | 10056 | simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt, |
8e33db8f | 10057 | class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED) |
2090d6a0 | 10058 | { |
538dd0b7 DM |
10059 | if (gcond *cond_stmt = dyn_cast <gcond *> (stmt)) |
10060 | return vrp_evaluate_conditional (gimple_cond_code (cond_stmt), | |
10061 | gimple_cond_lhs (cond_stmt), | |
10062 | gimple_cond_rhs (cond_stmt), | |
10063 | within_stmt); | |
5562e26e | 10064 | |
538dd0b7 | 10065 | if (gassign *assign_stmt = dyn_cast <gassign *> (stmt)) |
5562e26e | 10066 | { |
526ceb68 | 10067 | value_range new_vr = VR_INITIALIZER; |
538dd0b7 | 10068 | tree lhs = gimple_assign_lhs (assign_stmt); |
5562e26e JL |
10069 | |
10070 | if (TREE_CODE (lhs) == SSA_NAME | |
10071 | && (INTEGRAL_TYPE_P (TREE_TYPE (lhs)) | |
10072 | || POINTER_TYPE_P (TREE_TYPE (lhs)))) | |
10073 | { | |
538dd0b7 | 10074 | extract_range_from_assignment (&new_vr, assign_stmt); |
5562e26e JL |
10075 | if (range_int_cst_singleton_p (&new_vr)) |
10076 | return new_vr.min; | |
10077 | } | |
10078 | } | |
2090d6a0 | 10079 | |
5562e26e | 10080 | return NULL_TREE; |
2090d6a0 JL |
10081 | } |
10082 | ||
10083 | /* Blocks which have more than one predecessor and more than | |
fa10beec | 10084 | one successor present jump threading opportunities, i.e., |
2090d6a0 JL |
10085 | when the block is reached from a specific predecessor, we |
10086 | may be able to determine which of the outgoing edges will | |
10087 | be traversed. When this optimization applies, we are able | |
10088 | to avoid conditionals at runtime and we may expose secondary | |
10089 | optimization opportunities. | |
10090 | ||
10091 | This routine is effectively a driver for the generic jump | |
10092 | threading code. It basically just presents the generic code | |
10093 | with edges that may be suitable for jump threading. | |
10094 | ||
10095 | Unlike DOM, we do not iterate VRP if jump threading was successful. | |
10096 | While iterating may expose new opportunities for VRP, it is expected | |
10097 | those opportunities would be very limited and the compile time cost | |
b8698a0f | 10098 | to expose those opportunities would be significant. |
2090d6a0 JL |
10099 | |
10100 | As jump threading opportunities are discovered, they are registered | |
10101 | for later realization. */ | |
10102 | ||
10103 | static void | |
10104 | identify_jump_threads (void) | |
10105 | { | |
10106 | basic_block bb; | |
538dd0b7 | 10107 | gcond *dummy; |
b7814a18 RG |
10108 | int i; |
10109 | edge e; | |
2090d6a0 JL |
10110 | |
10111 | /* Ugh. When substituting values earlier in this pass we can | |
10112 | wipe the dominance information. So rebuild the dominator | |
10113 | information as we need it within the jump threading code. */ | |
10114 | calculate_dominance_info (CDI_DOMINATORS); | |
10115 | ||
10116 | /* We do not allow VRP information to be used for jump threading | |
10117 | across a back edge in the CFG. Otherwise it becomes too | |
10118 | difficult to avoid eliminating loop exit tests. Of course | |
10119 | EDGE_DFS_BACK is not accurate at this time so we have to | |
10120 | recompute it. */ | |
10121 | mark_dfs_back_edges (); | |
10122 | ||
b7814a18 | 10123 | /* Do not thread across edges we are about to remove. Just marking |
b9e59e4f | 10124 | them as EDGE_IGNORE will do. */ |
9771b263 | 10125 | FOR_EACH_VEC_ELT (to_remove_edges, i, e) |
b9e59e4f | 10126 | e->flags |= EDGE_IGNORE; |
b7814a18 | 10127 | |
2090d6a0 JL |
10128 | /* Allocate our unwinder stack to unwind any temporary equivalences |
10129 | that might be recorded. */ | |
a12cbc57 | 10130 | equiv_stack = new const_and_copies (); |
2090d6a0 JL |
10131 | |
10132 | /* To avoid lots of silly node creation, we create a single | |
10133 | conditional and just modify it in-place when attempting to | |
10134 | thread jumps. */ | |
726a989a RB |
10135 | dummy = gimple_build_cond (EQ_EXPR, |
10136 | integer_zero_node, integer_zero_node, | |
10137 | NULL, NULL); | |
2090d6a0 JL |
10138 | |
10139 | /* Walk through all the blocks finding those which present a | |
10140 | potential jump threading opportunity. We could set this up | |
10141 | as a dominator walker and record data during the walk, but | |
10142 | I doubt it's worth the effort for the classes of jump | |
10143 | threading opportunities we are trying to identify at this | |
10144 | point in compilation. */ | |
11cd3bed | 10145 | FOR_EACH_BB_FN (bb, cfun) |
2090d6a0 | 10146 | { |
355fe088 | 10147 | gimple *last; |
2090d6a0 JL |
10148 | |
10149 | /* If the generic jump threading code does not find this block | |
10150 | interesting, then there is nothing to do. */ | |
10151 | if (! potentially_threadable_block (bb)) | |
10152 | continue; | |
10153 | ||
1d93fa5c | 10154 | last = last_stmt (bb); |
2090d6a0 | 10155 | |
1f3fcdc3 | 10156 | /* We're basically looking for a switch or any kind of conditional with |
6261ab0e JL |
10157 | integral or pointer type arguments. Note the type of the second |
10158 | argument will be the same as the first argument, so no need to | |
215f8d9e JL |
10159 | check it explicitly. |
10160 | ||
10161 | We also handle the case where there are no statements in the | |
10162 | block. This come up with forwarder blocks that are not | |
10163 | optimized away because they lead to a loop header. But we do | |
10164 | want to thread through them as we can sometimes thread to the | |
10165 | loop exit which is obviously profitable. */ | |
10166 | if (!last | |
10167 | || gimple_code (last) == GIMPLE_SWITCH | |
1f3fcdc3 JL |
10168 | || (gimple_code (last) == GIMPLE_COND |
10169 | && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME | |
10170 | && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))) | |
10171 | || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))) | |
10172 | && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME | |
10173 | || is_gimple_min_invariant (gimple_cond_rhs (last))))) | |
2090d6a0 JL |
10174 | { |
10175 | edge_iterator ei; | |
2090d6a0 JL |
10176 | |
10177 | /* We've got a block with multiple predecessors and multiple | |
1f3fcdc3 JL |
10178 | successors which also ends in a suitable conditional or |
10179 | switch statement. For each predecessor, see if we can thread | |
10180 | it to a specific successor. */ | |
2090d6a0 JL |
10181 | FOR_EACH_EDGE (e, ei, bb->preds) |
10182 | { | |
b9e59e4f JL |
10183 | /* Do not thread across edges marked to ignoreor abnormal |
10184 | edges in the CFG. */ | |
10185 | if (e->flags & (EDGE_IGNORE | EDGE_COMPLEX)) | |
2090d6a0 JL |
10186 | continue; |
10187 | ||
e8ae63bb | 10188 | thread_across_edge (dummy, e, true, equiv_stack, NULL, |
2090d6a0 JL |
10189 | simplify_stmt_for_jump_threading); |
10190 | } | |
10191 | } | |
10192 | } | |
10193 | ||
b9e59e4f JL |
10194 | /* Clear EDGE_IGNORE. */ |
10195 | FOR_EACH_VEC_ELT (to_remove_edges, i, e) | |
10196 | e->flags &= ~EDGE_IGNORE; | |
10197 | ||
2090d6a0 JL |
10198 | /* We do not actually update the CFG or SSA graphs at this point as |
10199 | ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet | |
10200 | handle ASSERT_EXPRs gracefully. */ | |
10201 | } | |
10202 | ||
10203 | /* We identified all the jump threading opportunities earlier, but could | |
10204 | not transform the CFG at that time. This routine transforms the | |
10205 | CFG and arranges for the dominator tree to be rebuilt if necessary. | |
10206 | ||
10207 | Note the SSA graph update will occur during the normal TODO | |
10208 | processing by the pass manager. */ | |
10209 | static void | |
10210 | finalize_jump_threads (void) | |
10211 | { | |
b02b9b53 | 10212 | thread_through_all_blocks (false); |
f6c72af4 | 10213 | delete equiv_stack; |
2090d6a0 | 10214 | } |
1a557723 | 10215 | |
0bca51f0 DN |
10216 | |
10217 | /* Traverse all the blocks folding conditionals with known ranges. */ | |
10218 | ||
10219 | static void | |
b0c77505 | 10220 | vrp_finalize (bool warn_array_bounds_p) |
0bca51f0 | 10221 | { |
227858d1 | 10222 | size_t i; |
d9256277 RG |
10223 | |
10224 | values_propagated = true; | |
0bca51f0 DN |
10225 | |
10226 | if (dump_file) | |
10227 | { | |
10228 | fprintf (dump_file, "\nValue ranges after VRP:\n\n"); | |
10229 | dump_all_value_ranges (dump_file); | |
10230 | fprintf (dump_file, "\n"); | |
10231 | } | |
10232 | ||
a895a2b8 KV |
10233 | /* Set value range to non pointer SSA_NAMEs. */ |
10234 | for (i = 0; i < num_vr_values; i++) | |
0498471b CL |
10235 | if (vr_value[i]) |
10236 | { | |
10237 | tree name = ssa_name (i); | |
a895a2b8 | 10238 | |
583e8bf5 RB |
10239 | if (!name |
10240 | || POINTER_TYPE_P (TREE_TYPE (name)) | |
0498471b CL |
10241 | || (vr_value[i]->type == VR_VARYING) |
10242 | || (vr_value[i]->type == VR_UNDEFINED)) | |
10243 | continue; | |
a895a2b8 | 10244 | |
21c0a521 DM |
10245 | if ((TREE_CODE (vr_value[i]->min) == INTEGER_CST) |
10246 | && (TREE_CODE (vr_value[i]->max) == INTEGER_CST) | |
10247 | && (vr_value[i]->type == VR_RANGE | |
10248 | || vr_value[i]->type == VR_ANTI_RANGE)) | |
10249 | set_range_info (name, vr_value[i]->type, vr_value[i]->min, | |
10250 | vr_value[i]->max); | |
0498471b | 10251 | } |
a895a2b8 | 10252 | |
20b8d734 JJ |
10253 | substitute_and_fold (op_with_constant_singleton_value_range, |
10254 | vrp_fold_stmt, false); | |
10255 | ||
10256 | if (warn_array_bounds && warn_array_bounds_p) | |
10257 | check_all_array_refs (); | |
10258 | ||
10259 | /* We must identify jump threading opportunities before we release | |
10260 | the datastructures built by VRP. */ | |
10261 | identify_jump_threads (); | |
10262 | ||
227858d1 | 10263 | /* Free allocated memory. */ |
d9256277 | 10264 | for (i = 0; i < num_vr_values; i++) |
227858d1 DN |
10265 | if (vr_value[i]) |
10266 | { | |
10267 | BITMAP_FREE (vr_value[i]->equiv); | |
10268 | free (vr_value[i]); | |
10269 | } | |
10270 | ||
227858d1 | 10271 | free (vr_value); |
fc6827fe | 10272 | free (vr_phi_edge_counts); |
b16caf72 JL |
10273 | |
10274 | /* So that we can distinguish between VRP data being available | |
10275 | and not available. */ | |
10276 | vr_value = NULL; | |
fc6827fe | 10277 | vr_phi_edge_counts = NULL; |
0bca51f0 DN |
10278 | } |
10279 | ||
10280 | ||
10281 | /* Main entry point to VRP (Value Range Propagation). This pass is | |
10282 | loosely based on J. R. C. Patterson, ``Accurate Static Branch | |
10283 | Prediction by Value Range Propagation,'' in SIGPLAN Conference on | |
10284 | Programming Language Design and Implementation, pp. 67-78, 1995. | |
10285 | Also available at http://citeseer.ist.psu.edu/patterson95accurate.html | |
10286 | ||
10287 | This is essentially an SSA-CCP pass modified to deal with ranges | |
10288 | instead of constants. | |
10289 | ||
227858d1 DN |
10290 | While propagating ranges, we may find that two or more SSA names |
10291 | have equivalent, though distinct ranges. For instance, | |
10292 | ||
10293 | 1 x_9 = p_3->a; | |
10294 | 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0> | |
10295 | 3 if (p_4 == q_2) | |
10296 | 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>; | |
10297 | 5 endif | |
10298 | 6 if (q_2) | |
b8698a0f | 10299 | |
227858d1 DN |
10300 | In the code above, pointer p_5 has range [q_2, q_2], but from the |
10301 | code we can also determine that p_5 cannot be NULL and, if q_2 had | |
10302 | a non-varying range, p_5's range should also be compatible with it. | |
10303 | ||
8ab5f5c9 | 10304 | These equivalences are created by two expressions: ASSERT_EXPR and |
227858d1 DN |
10305 | copy operations. Since p_5 is an assertion on p_4, and p_4 was the |
10306 | result of another assertion, then we can use the fact that p_5 and | |
10307 | p_4 are equivalent when evaluating p_5's range. | |
10308 | ||
8ab5f5c9 | 10309 | Together with value ranges, we also propagate these equivalences |
227858d1 DN |
10310 | between names so that we can take advantage of information from |
10311 | multiple ranges when doing final replacement. Note that this | |
10312 | equivalency relation is transitive but not symmetric. | |
b8698a0f | 10313 | |
227858d1 DN |
10314 | In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we |
10315 | cannot assert that q_2 is equivalent to p_5 because q_2 may be used | |
10316 | in contexts where that assertion does not hold (e.g., in line 6). | |
10317 | ||
0bca51f0 DN |
10318 | TODO, the main difference between this pass and Patterson's is that |
10319 | we do not propagate edge probabilities. We only compute whether | |
10320 | edges can be taken or not. That is, instead of having a spectrum | |
10321 | of jump probabilities between 0 and 1, we only deal with 0, 1 and | |
10322 | DON'T KNOW. In the future, it may be worthwhile to propagate | |
10323 | probabilities to aid branch prediction. */ | |
10324 | ||
/* Run VRP on the current function: set up loop and SCEV information,
   insert ASSERT_EXPRs, drive the generic SSA propagator with the VRP
   callbacks, then fold with the computed ranges, thread jumps, patch
   optimized switches, and tear all the data structures down again.
   WARN_ARRAY_BOUNDS_P enables array-bound warnings during
   finalization.  Always returns 0 (no extra TODO flags).  */

static unsigned int
execute_vrp (bool warn_array_bounds_p)
{
  int i;
  edge e;
  switch_update *su;

  /* VRP relies on loop structure and scalar-evolution analysis
     (e.g. for numbers-of-iterations estimates, freed below).  */
  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();

  /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
     Inserting assertions may split edges which will invalidate
     EDGE_DFS_BACK.  */
  insert_range_assertions ();

  /* Vectors filled during propagation with switch-related CFG edits
     that are applied after the ranges are final.  */
  to_remove_edges.create (10);
  to_update_switch_stmts.create (5);
  threadedge_initialize_values ();

  /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
  mark_dfs_back_edges ();

  /* The core propagation: vrp_visit_stmt / vrp_visit_phi_node are the
     per-statement and per-PHI transfer functions; vrp_finalize folds
     statements using the computed ranges.  */
  vrp_initialize ();
  ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
  vrp_finalize (warn_array_bounds_p);

  free_numbers_of_iterations_estimates (cfun);

  /* ASSERT_EXPRs must be removed before finalizing jump threads
     as finalizing jump threads calls the CFG cleanup code which
     does not properly handle ASSERT_EXPRs.  */
  remove_range_assertions ();

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  finalize_jump_threads ();

  /* Remove dead edges from SWITCH_EXPR optimization.  This leaves the
     CFG in a broken state and requires a cfg_cleanup run.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    remove_edge (e);
  /* Update SWITCH_EXPR case label vector.  */
  FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
    {
      size_t j;
      size_t n = TREE_VEC_LENGTH (su->vec);
      tree label;
      gimple_switch_set_num_labels (su->stmt, n);
      for (j = 0; j < n; j++)
	gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
      /* As we may have replaced the default label with a regular one
	 make sure to make it a real default label again.  This ensures
	 optimal expansion.  */
      label = gimple_switch_label (su->stmt, 0);
      CASE_LOW (label) = NULL_TREE;
      CASE_HIGH (label) = NULL_TREE;
    }

  /* Removing edges invalidates dominators and may leave loops in need
     of fixing up.  */
  if (to_remove_edges.length () > 0)
    {
      free_dominance_info (CDI_DOMINATORS);
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  to_remove_edges.release ();
  to_update_switch_stmts.release ();
  threadedge_finalize_values ();

  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}
10403 | ||
namespace {

/* Pass descriptor for VRP.  Field order is fixed by the pass_data
   layout; the trailing TODO flags request CFG cleanup and SSA update
   after the pass runs.  */
const pass_data pass_data_vrp =
{
  GIMPLE_PASS, /* type */
  "vrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

/* Pass-manager wrapper around execute_vrp.  The single boolean pass
   parameter controls whether array-bound warnings are emitted
   (see warn_array_bounds_p use in vrp_finalize).  */
class pass_vrp : public gimple_opt_pass
{
public:
  pass_vrp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_vrp (m_ctxt); }
  /* Only parameter 0 is defined; it toggles array-bound warnings.  */
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      warn_array_bounds_p = param;
    }
  /* Run only when -ftree-vrp is in effect.  */
  virtual bool gate (function *) { return flag_tree_vrp != 0; }
  virtual unsigned int execute (function *)
    { return execute_vrp (warn_array_bounds_p); }

private:
  /* Whether this instance of the pass should warn about
     out-of-bounds array accesses.  */
  bool warn_array_bounds_p;
}; // class pass_vrp

} // anon namespace
10442 | ||
10443 | gimple_opt_pass * | |
10444 | make_pass_vrp (gcc::context *ctxt) | |
10445 | { | |
10446 | return new pass_vrp (ctxt); | |
10447 | } |