/* Support routines for Value Range Propagation (VRP).
   Copyright (C) 2005-2016 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "cfganal.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "intl.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-propagate.h"
#include "tree-chrec.h"
#include "tree-ssa-threadupdate.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "omp-general.h"
#include "target.h"
#include "case-cfn-macros.h"
#include "params.h"
#include "alloc-pool.h"
#include "domwalk.h"
#include "tree-cfgcleanup.h"

#define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }

/* Allocation pools for tree-vrp allocations.  */
static object_allocator<value_range> vrp_value_range_pool ("Tree VRP value ranges");
static bitmap_obstack vrp_equiv_obstack;

/* Set of SSA names found live during the RPO traversal of the function
   for still active basic-blocks.  */
static sbitmap *live;

/* Return true if the SSA name NAME is live on the edge E.  */

static bool
live_on_edge (edge e, tree name)
{
  return (live[e->dest->index]
	  && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
}

/* Local functions.  */
static int compare_values (tree val1, tree val2);
static int compare_values_warnv (tree val1, tree val2, bool *);
static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
						     tree, tree, bool, bool *,
						     bool *);

/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */
struct assert_locus
{
  /* Basic block where the assertion would be inserted.  */
  basic_block bb;

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
  edge e;

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */
  tree val;

  /* Expression to compare.  */
  tree expr;

  /* Next node in the linked list.  */
  assert_locus *next;
};

/* If bit I is present, it means that SSA name N_i has a list of
   assertions that should be inserted in the IL.  */
static bitmap need_assert_for;

/* Array of location lists where to insert assertions.  ASSERTS_FOR[I]
   holds a list of ASSERT_LOCUS_T nodes that describe where
   ASSERT_EXPRs for SSA name N_I should be inserted.  */
static assert_locus **asserts_for;

/* Value range array.  After propagation, VR_VALUE[I] holds the range
   of values that SSA name N_I may take.  */
static unsigned num_vr_values;
static value_range **vr_value;
static bool values_propagated;

/* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
   number of executable edges we saw the last time we visited the
   node.  */
static int *vr_phi_edge_counts;

struct switch_update {
  gswitch *stmt;
  tree vec;
};

static vec<edge> to_remove_edges;
static vec<switch_update> to_update_switch_stmts;


/* Return the maximum value for TYPE.  */

static inline tree
vrp_val_max (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MAX_VALUE (type);
}

/* Return the minimum value for TYPE.  */

static inline tree
vrp_val_min (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MIN_VALUE (type);
}

/* Return whether VAL is equal to the maximum value of its type.  This
   will be true for a positive overflow infinity.  We can't do a
   simple equality comparison with TYPE_MAX_VALUE because C typedefs
   and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
   to the integer constant with the same value in the type.  */

static inline bool
vrp_val_is_max (const_tree val)
{
  tree type_max = vrp_val_max (TREE_TYPE (val));
  return (val == type_max
	  || (type_max != NULL_TREE
	      && operand_equal_p (val, type_max, 0)));
}

/* Return whether VAL is equal to the minimum value of its type.  This
   will be true for a negative overflow infinity.  */

static inline bool
vrp_val_is_min (const_tree val)
{
  tree type_min = vrp_val_min (TREE_TYPE (val));
  return (val == type_min
	  || (type_min != NULL_TREE
	      && operand_equal_p (val, type_min, 0)));
}


/* Return whether TYPE should use an overflow infinity distinct from
   TYPE_{MIN,MAX}_VALUE.  We use an overflow infinity value to
   represent a signed overflow during VRP computations.  An infinity
   is distinct from a half-range, which will go from some number to
   TYPE_{MIN,MAX}_VALUE.  */

static inline bool
needs_overflow_infinity (const_tree type)
{
  return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
}

/* Return whether TYPE can support our overflow infinity
   representation: we use the TREE_OVERFLOW flag, which only exists
   for constants.  If TYPE doesn't support this, we don't optimize
   cases which would require signed overflow--we drop them to
   VARYING.  */

static inline bool
supports_overflow_infinity (const_tree type)
{
  tree min = vrp_val_min (type), max = vrp_val_max (type);
  gcc_checking_assert (needs_overflow_infinity (type));
  return (min != NULL_TREE
	  && CONSTANT_CLASS_P (min)
	  && max != NULL_TREE
	  && CONSTANT_CLASS_P (max));
}

/* VAL is the maximum or minimum value of a type.  Return a
   corresponding overflow infinity.  */

static inline tree
make_overflow_infinity (tree val)
{
  gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
  val = copy_node (val);
  TREE_OVERFLOW (val) = 1;
  return val;
}

/* Return a negative overflow infinity for TYPE.  */

static inline tree
negative_overflow_infinity (tree type)
{
  gcc_checking_assert (supports_overflow_infinity (type));
  return make_overflow_infinity (vrp_val_min (type));
}

/* Return a positive overflow infinity for TYPE.  */

static inline tree
positive_overflow_infinity (tree type)
{
  gcc_checking_assert (supports_overflow_infinity (type));
  return make_overflow_infinity (vrp_val_max (type));
}

/* Return whether VAL is a negative overflow infinity.  */

static inline bool
is_negative_overflow_infinity (const_tree val)
{
  return (TREE_OVERFLOW_P (val)
	  && needs_overflow_infinity (TREE_TYPE (val))
	  && vrp_val_is_min (val));
}

/* Return whether VAL is a positive overflow infinity.  */

static inline bool
is_positive_overflow_infinity (const_tree val)
{
  return (TREE_OVERFLOW_P (val)
	  && needs_overflow_infinity (TREE_TYPE (val))
	  && vrp_val_is_max (val));
}

/* Return whether VAL is a positive or negative overflow infinity.  */

static inline bool
is_overflow_infinity (const_tree val)
{
  return (TREE_OVERFLOW_P (val)
	  && needs_overflow_infinity (TREE_TYPE (val))
	  && (vrp_val_is_min (val) || vrp_val_is_max (val)));
}

/* Return whether STMT has a constant rhs that is_overflow_infinity.  */

static inline bool
stmt_overflow_infinity (gimple *stmt)
{
  if (is_gimple_assign (stmt)
      && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
      GIMPLE_SINGLE_RHS)
    return is_overflow_infinity (gimple_assign_rhs1 (stmt));
  return false;
}

/* If VAL is an overflow infinity, return the same value with
   TREE_OVERFLOW clear; otherwise return VAL unchanged.  This can be
   used to avoid confusing a regular value with an overflow value.  */

static inline tree
avoid_overflow_infinity (tree val)
{
  if (!is_overflow_infinity (val))
    return val;

  if (vrp_val_is_max (val))
    return vrp_val_max (TREE_TYPE (val));
  else
    {
      gcc_checking_assert (vrp_val_is_min (val));
      return vrp_val_min (TREE_TYPE (val));
    }
}

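/* For example, for signed char the positive overflow infinity is the
   INTEGER_CST 127 with TREE_OVERFLOW set, and avoid_overflow_infinity
   applied to it returns the plain constant 127.  */
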
/* Set value range VR to VR_UNDEFINED.  */

static inline void
set_value_range_to_undefined (value_range *vr)
{
  vr->type = VR_UNDEFINED;
  vr->min = vr->max = NULL_TREE;
  if (vr->equiv)
    bitmap_clear (vr->equiv);
}


/* Set value range VR to VR_VARYING.  */

static inline void
set_value_range_to_varying (value_range *vr)
{
  vr->type = VR_VARYING;
  vr->min = vr->max = NULL_TREE;
  if (vr->equiv)
    bitmap_clear (vr->equiv);
}


/* Set value range VR to {T, MIN, MAX, EQUIV}.  */

static void
set_value_range (value_range *vr, enum value_range_type t, tree min,
		 tree max, bitmap equiv)
{
  /* Check the validity of the range.  */
  if (flag_checking
      && (t == VR_RANGE || t == VR_ANTI_RANGE))
    {
      int cmp;

      gcc_assert (min && max);

      gcc_assert ((!TREE_OVERFLOW_P (min) || is_overflow_infinity (min))
		  && (!TREE_OVERFLOW_P (max) || is_overflow_infinity (max)));

      if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
	gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));

      cmp = compare_values (min, max);
      gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
    }

  if (flag_checking
      && (t == VR_UNDEFINED || t == VR_VARYING))
    {
      gcc_assert (min == NULL_TREE && max == NULL_TREE);
      gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
    }

  vr->type = t;
  vr->min = min;
  vr->max = max;

  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.  */
  if (vr->equiv == NULL
      && equiv != NULL)
    vr->equiv = BITMAP_ALLOC (&vrp_equiv_obstack);

  if (equiv != vr->equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
	bitmap_copy (vr->equiv, equiv);
      else
	bitmap_clear (vr->equiv);
    }
}


/* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
   This means adjusting T, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */

static void
set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
				  tree min, tree max, bitmap equiv)
{
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (t == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  else if (t == VR_VARYING)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      set_value_range (vr, t, min, max, equiv);
      return;
    }

  /* Wrong order for min and max, to swap them and the VR type we need
     to adjust them.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one, tmp;

      /* For one bit precision if max < min, then the swapped
	 range covers all values, so for VR_RANGE it is varying and
	 for VR_ANTI_RANGE empty range, so drop to varying as well.  */
      if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      one = build_int_cst (TREE_TYPE (min), 1);
      tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
	 that again.  But this represents an empty value range, so drop
	 to varying in this case.  */
      if (tree_int_cst_lt (max, min))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (t == VR_ANTI_RANGE)
    {
      bool is_min = vrp_val_is_min (min);
      bool is_max = vrp_val_is_max (max);

      if (is_min && is_max)
	{
	  /* We cannot deal with empty ranges, drop to varying.
	     ??? This could be VR_UNDEFINED instead.  */
	  set_value_range_to_varying (vr);
	  return;
	}
      else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
	       && (is_min || is_max))
	{
	  /* Non-empty boolean ranges can always be represented
	     as a singleton range.  */
	  if (is_min)
	    min = max = vrp_val_max (TREE_TYPE (min));
	  else
	    min = max = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
	}
      else if (is_min
	       /* As a special exception preserve non-null ranges.  */
	       && !(TYPE_UNSIGNED (TREE_TYPE (min))
		    && integer_zerop (max)))
	{
	  tree one = build_int_cst (TREE_TYPE (max), 1);
	  min = int_const_binop (PLUS_EXPR, max, one);
	  max = vrp_val_max (TREE_TYPE (max));
	  t = VR_RANGE;
	}
      else if (is_max)
	{
	  tree one = build_int_cst (TREE_TYPE (min), 1);
	  max = int_const_binop (MINUS_EXPR, min, one);
	  min = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
	}
    }

  /* Do not drop [-INF(OVF), +INF(OVF)] to varying.  (OVF) has to be sticky
     to make sure VRP iteration terminates, otherwise we can get into
     oscillations.  */

  set_value_range (vr, t, min, max, equiv);
}

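/* For example, for unsigned char the wrapping range [250, 5] (MAX < MIN)
   is canonicalized above to the anti-range ~[6, 249], while the
   anti-range ~[0, 5] becomes the plain range [6, 255].  */
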
/* Copy value range FROM into value range TO.  */

static inline void
copy_value_range (value_range *to, value_range *from)
{
  set_value_range (to, from->type, from->min, from->max, from->equiv);
}

/* Set value range VR to a single value.  This function is only called
   with values we get from statements, and exists to clear the
   TREE_OVERFLOW flag so that we don't think we have an overflow
   infinity when we shouldn't.  */

static inline void
set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
{
  gcc_assert (is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set_value_range (vr, VR_RANGE, val, val, equiv);
}

/* Set value range VR to a non-negative range of type TYPE.
   OVERFLOW_INFINITY indicates whether to use an overflow infinity
   rather than TYPE_MAX_VALUE; this should be true if we determine
   that the range is nonnegative based on the assumption that signed
   overflow does not occur.  */

static inline void
set_value_range_to_nonnegative (value_range *vr, tree type,
				bool overflow_infinity)
{
  tree zero;

  if (overflow_infinity && !supports_overflow_infinity (type))
    {
      set_value_range_to_varying (vr);
      return;
    }

  zero = build_int_cst (type, 0);
  set_value_range (vr, VR_RANGE, zero,
		   (overflow_infinity
		    ? positive_overflow_infinity (type)
		    : TYPE_MAX_VALUE (type)),
		   vr->equiv);
}

/* Set value range VR to a non-NULL range of type TYPE.  */

static inline void
set_value_range_to_nonnull (value_range *vr, tree type)
{
  tree zero = build_int_cst (type, 0);
  set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
}


/* Set value range VR to a NULL range of type TYPE.  */

static inline void
set_value_range_to_null (value_range *vr, tree type)
{
  set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
}


/* Set value range VR to a range of a truthvalue of type TYPE.  */

static inline void
set_value_range_to_truthvalue (value_range *vr, tree type)
{
  if (TYPE_PRECISION (type) == 1)
    set_value_range_to_varying (vr);
  else
    set_value_range (vr, VR_RANGE,
		     build_int_cst (type, 0), build_int_cst (type, 1),
		     vr->equiv);
}


/* If abs (min) < abs (max), set VR to [-max, max], if
   abs (min) >= abs (max), set VR to [-min, min].  */

static void
abs_extent_range (value_range *vr, tree min, tree max)
{
  int cmp;

  gcc_assert (TREE_CODE (min) == INTEGER_CST);
  gcc_assert (TREE_CODE (max) == INTEGER_CST);
  gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
  gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
  min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
  max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
  if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
    {
      set_value_range_to_varying (vr);
      return;
    }
  cmp = compare_values (min, max);
  if (cmp == -1)
    min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
  else if (cmp == 0 || cmp == 1)
    {
      max = min;
      min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
    }
  else
    {
      set_value_range_to_varying (vr);
      return;
    }
  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
}

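/* For example, abs_extent_range (vr, -3, 5) sets VR to [-5, 5], while
   abs_extent_range (vr, -7, 5) sets it to [-7, 7].  */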

/* Return value range information for VAR.

   If we have no value ranges recorded (i.e., VRP is not running), then
   return NULL.  Otherwise create an empty range if none existed for VAR.  */

static value_range *
get_value_range (const_tree var)
{
  static const value_range vr_const_varying
    = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
  value_range *vr;
  tree sym;
  unsigned ver = SSA_NAME_VERSION (var);

  /* If we have no recorded ranges, then return NULL.  */
  if (! vr_value)
    return NULL;

  /* If we query the range for a new SSA name return an unmodifiable VARYING.
     We should get here at most from the substitute-and-fold stage which
     will never try to change values.  */
  if (ver >= num_vr_values)
    return CONST_CAST (value_range *, &vr_const_varying);

  vr = vr_value[ver];
  if (vr)
    return vr;

  /* After propagation finished do not allocate new value-ranges.  */
  if (values_propagated)
    return CONST_CAST (value_range *, &vr_const_varying);

  /* Create a default value range.  */
  vr_value[ver] = vr = vrp_value_range_pool.allocate ();
  memset (vr, 0, sizeof (*vr));

  /* Defer allocating the equivalence set.  */
  vr->equiv = NULL;

  /* If VAR is a default definition of a parameter, the variable can
     take any value in VAR's type.  */
  if (SSA_NAME_IS_DEFAULT_DEF (var))
    {
      sym = SSA_NAME_VAR (var);
      if (TREE_CODE (sym) == PARM_DECL)
	{
	  /* Try to use the "nonnull" attribute to create ~[0, 0]
	     anti-ranges for pointers.  Note that this is only valid with
	     default definitions of PARM_DECLs.  */
	  if (POINTER_TYPE_P (TREE_TYPE (sym))
	      && (nonnull_arg_p (sym)
		  || get_ptr_nonnull (var)))
	    set_value_range_to_nonnull (vr, TREE_TYPE (sym));
	  else if (INTEGRAL_TYPE_P (TREE_TYPE (sym)))
	    {
	      wide_int min, max;
	      value_range_type rtype = get_range_info (var, &min, &max);
	      if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
		set_value_range (vr, rtype,
				 wide_int_to_tree (TREE_TYPE (var), min),
				 wide_int_to_tree (TREE_TYPE (var), max),
				 NULL);
	      else
		set_value_range_to_varying (vr);
	    }
	  else
	    set_value_range_to_varying (vr);
	}
      else if (TREE_CODE (sym) == RESULT_DECL
	       && DECL_BY_REFERENCE (sym))
	set_value_range_to_nonnull (vr, TREE_TYPE (sym));
    }

  return vr;
}

/* Set value-ranges of all SSA names defined by STMT to varying.  */

static void
set_defs_to_varying (gimple *stmt)
{
  ssa_op_iter i;
  tree def;
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
    {
      value_range *vr = get_value_range (def);
      /* Avoid writing to vr_const_varying get_value_range may return.  */
      if (vr->type != VR_VARYING)
	set_value_range_to_varying (vr);
    }
}


/* Return true, if VAL1 and VAL2 are equal values for VRP purposes.  */

static inline bool
vrp_operand_equal_p (const_tree val1, const_tree val2)
{
  if (val1 == val2)
    return true;
  if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
    return false;
  return is_overflow_infinity (val1) == is_overflow_infinity (val2);
}

/* Return true, if the bitmaps B1 and B2 are equal.  */

static inline bool
vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
{
  return (b1 == b2
	  || ((!b1 || bitmap_empty_p (b1))
	      && (!b2 || bitmap_empty_p (b2)))
	  || (b1 && b2
	      && bitmap_equal_p (b1, b2)));
}

/* Update the value range and equivalence set for variable VAR to
   NEW_VR.  Return true if NEW_VR is different from VAR's previous
   value.

   NOTE: This function assumes that NEW_VR is a temporary value range
   object created for the sole purpose of updating VAR's range.  The
   storage used by the equivalence set from NEW_VR will be freed by
   this function.  Do not call update_value_range when NEW_VR
   is the range object associated with another SSA name.  */

static inline bool
update_value_range (const_tree var, value_range *new_vr)
{
  value_range *old_vr;
  bool is_new;

  /* If there is a value-range on the SSA name from earlier analysis
     factor that in.  */
  if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
    {
      wide_int min, max;
      value_range_type rtype = get_range_info (var, &min, &max);
      if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
	{
	  tree nr_min, nr_max;
	  /* Range info on SSA names doesn't carry overflow information
	     so make sure to preserve the overflow bit on the lattice.  */
	  if (rtype == VR_RANGE
	      && needs_overflow_infinity (TREE_TYPE (var))
	      && (new_vr->type == VR_VARYING
		  || (new_vr->type == VR_RANGE
		      && is_negative_overflow_infinity (new_vr->min)))
	      && wi::eq_p (vrp_val_min (TREE_TYPE (var)), min))
	    nr_min = negative_overflow_infinity (TREE_TYPE (var));
	  else
	    nr_min = wide_int_to_tree (TREE_TYPE (var), min);
	  if (rtype == VR_RANGE
	      && needs_overflow_infinity (TREE_TYPE (var))
	      && (new_vr->type == VR_VARYING
		  || (new_vr->type == VR_RANGE
		      && is_positive_overflow_infinity (new_vr->max)))
	      && wi::eq_p (vrp_val_max (TREE_TYPE (var)), max))
	    nr_max = positive_overflow_infinity (TREE_TYPE (var));
	  else
	    nr_max = wide_int_to_tree (TREE_TYPE (var), max);
	  value_range nr = VR_INITIALIZER;
	  set_and_canonicalize_value_range (&nr, rtype, nr_min, nr_max, NULL);
	  vrp_intersect_ranges (new_vr, &nr);
	}
    }

  /* Update the value range, if necessary.  */
  old_vr = get_value_range (var);
  is_new = old_vr->type != new_vr->type
	   || !vrp_operand_equal_p (old_vr->min, new_vr->min)
	   || !vrp_operand_equal_p (old_vr->max, new_vr->max)
	   || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);

  if (is_new)
    {
      /* Do not allow transitions up the lattice.  The following
	 is slightly more awkward than just new_vr->type < old_vr->type
	 because VR_RANGE and VR_ANTI_RANGE need to be considered
	 the same.  We may not have is_new when transitioning to
	 UNDEFINED.  If old_vr->type is VARYING, we shouldn't be
	 called.  */
      if (new_vr->type == VR_UNDEFINED)
	{
	  BITMAP_FREE (new_vr->equiv);
	  set_value_range_to_varying (old_vr);
	  set_value_range_to_varying (new_vr);
	  return true;
	}
      else
	set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
			 new_vr->equiv);
    }

  BITMAP_FREE (new_vr->equiv);

  return is_new;
}


/* Add VAR and VAR's equivalence set to EQUIV.  This is the central
   point where equivalence processing can be turned on/off.  */

static void
add_equivalence (bitmap *equiv, const_tree var)
{
  unsigned ver = SSA_NAME_VERSION (var);
  value_range *vr = get_value_range (var);

  if (*equiv == NULL)
    *equiv = BITMAP_ALLOC (&vrp_equiv_obstack);
  bitmap_set_bit (*equiv, ver);
  if (vr && vr->equiv)
    bitmap_ior_into (*equiv, vr->equiv);
}


/* Return true if VR is ~[0, 0].  */

static inline bool
range_is_nonnull (value_range *vr)
{
  return vr->type == VR_ANTI_RANGE
	 && integer_zerop (vr->min)
	 && integer_zerop (vr->max);
}


/* Return true if VR is [0, 0].  */

static inline bool
range_is_null (value_range *vr)
{
  return vr->type == VR_RANGE
	 && integer_zerop (vr->min)
	 && integer_zerop (vr->max);
}

/* Return true if max and min of VR are INTEGER_CST.  It's not necessarily
   a singleton.  */

static inline bool
range_int_cst_p (value_range *vr)
{
  return (vr->type == VR_RANGE
	  && TREE_CODE (vr->max) == INTEGER_CST
	  && TREE_CODE (vr->min) == INTEGER_CST);
}

/* Return true if VR is an INTEGER_CST singleton.  */

static inline bool
range_int_cst_singleton_p (value_range *vr)
{
  return (range_int_cst_p (vr)
	  && !is_overflow_infinity (vr->min)
	  && !is_overflow_infinity (vr->max)
	  && tree_int_cst_equal (vr->min, vr->max));
}

/* Return true if value range VR involves at least one symbol.  */

static inline bool
symbolic_range_p (value_range *vr)
{
  return (!is_gimple_min_invariant (vr->min)
	  || !is_gimple_min_invariant (vr->max));
}

/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
   otherwise.  We only handle additive operations and set NEG to true if the
   symbol is negated and INV to the invariant part, if any.  */

static tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
  bool neg_;
  tree inv_;

  *inv = NULL_TREE;
  *neg = false;

  if (TREE_CODE (t) == PLUS_EXPR
      || TREE_CODE (t) == POINTER_PLUS_EXPR
      || TREE_CODE (t) == MINUS_EXPR)
    {
      if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
	{
	  neg_ = (TREE_CODE (t) == MINUS_EXPR);
	  inv_ = TREE_OPERAND (t, 0);
	  t = TREE_OPERAND (t, 1);
	}
      else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	{
	  neg_ = false;
	  inv_ = TREE_OPERAND (t, 1);
	  t = TREE_OPERAND (t, 0);
	}
      else
	return NULL_TREE;
    }
  else
    {
      neg_ = false;
      inv_ = NULL_TREE;
    }

  if (TREE_CODE (t) == NEGATE_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      neg_ = !neg_;
    }

  if (TREE_CODE (t) != SSA_NAME)
    return NULL_TREE;

  *neg = neg_;
  *inv = inv_;
  return t;
}

/* The reverse operation: build a symbolic expression with TYPE
   from symbol SYM, negated according to NEG, and invariant INV.  */

static tree
build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
{
  const bool pointer_p = POINTER_TYPE_P (type);
  tree t = sym;

  if (neg)
    t = build1 (NEGATE_EXPR, type, t);

  if (integer_zerop (inv))
    return t;

  return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
}

/* Return true if value range VR involves exactly one symbol SYM.  */

static bool
symbolic_range_based_on_p (value_range *vr, const_tree sym)
{
  bool neg, min_has_symbol, max_has_symbol;
  tree inv;

  if (is_gimple_min_invariant (vr->min))
    min_has_symbol = false;
  else if (get_single_symbol (vr->min, &neg, &inv) == sym)
    min_has_symbol = true;
  else
    return false;

  if (is_gimple_min_invariant (vr->max))
    max_has_symbol = false;
  else if (get_single_symbol (vr->max, &neg, &inv) == sym)
    max_has_symbol = true;
  else
    return false;

  return (min_has_symbol || max_has_symbol);
}

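/* For example, get_single_symbol (x_1 + 16, &neg, &inv) returns x_1 with
   *NEG == false and *INV == 16, while for 10 - y_2 it returns y_2 with
   *NEG == true and *INV == 10.  */
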
/* Return true if value range VR uses an overflow infinity.  */

static inline bool
overflow_infinity_range_p (value_range *vr)
{
  return (vr->type == VR_RANGE
	  && (is_overflow_infinity (vr->min)
	      || is_overflow_infinity (vr->max)));
}

/* Return false if we cannot make a valid comparison based on VR;
   this will be the case if it uses an overflow infinity and overflow
   is not undefined (i.e., -fno-strict-overflow is in effect).
   Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
   uses an overflow infinity.  */

static bool
usable_range_p (value_range *vr, bool *strict_overflow_p)
{
  gcc_assert (vr->type == VR_RANGE);
  if (is_overflow_infinity (vr->min))
    {
      *strict_overflow_p = true;
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
	return false;
    }
  if (is_overflow_infinity (vr->max))
    {
      *strict_overflow_p = true;
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
	return false;
    }
  return true;
}

/* Return true if the result of assignment STMT is known to be non-zero.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_assign_nonzero_warnv_p (gimple *stmt, bool *strict_overflow_p)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_UNARY_RHS:
      return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
					 gimple_expr_type (stmt),
					 gimple_assign_rhs1 (stmt),
					 strict_overflow_p);
    case GIMPLE_BINARY_RHS:
      return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
					  gimple_expr_type (stmt),
					  gimple_assign_rhs1 (stmt),
					  gimple_assign_rhs2 (stmt),
					  strict_overflow_p);
    case GIMPLE_TERNARY_RHS:
      return false;
    case GIMPLE_SINGLE_RHS:
      return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
					  strict_overflow_p);
    case GIMPLE_INVALID_RHS:
      gcc_unreachable ();
    default:
      gcc_unreachable ();
    }
}

/* Return true if STMT is known to compute a non-zero value.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_stmt_nonzero_warnv_p (gimple *stmt, bool *strict_overflow_p)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
    case GIMPLE_CALL:
      {
	tree fndecl = gimple_call_fndecl (stmt);
	if (!fndecl) return false;
	if (flag_delete_null_pointer_checks && !flag_check_new
	    && DECL_IS_OPERATOR_NEW (fndecl)
	    && !TREE_NOTHROW (fndecl))
	  return true;
	/* References are always non-NULL.  */
	if (flag_delete_null_pointer_checks
	    && TREE_CODE (TREE_TYPE (fndecl)) == REFERENCE_TYPE)
	  return true;
	if (flag_delete_null_pointer_checks
	    && lookup_attribute ("returns_nonnull",
				 TYPE_ATTRIBUTES (gimple_call_fntype (stmt))))
	  return true;

	gcall *call_stmt = as_a<gcall *> (stmt);
	unsigned rf = gimple_call_return_flags (call_stmt);
	if (rf & ERF_RETURNS_ARG)
	  {
	    unsigned argnum = rf & ERF_RETURN_ARG_MASK;
	    if (argnum < gimple_call_num_args (call_stmt))
	      {
		tree arg = gimple_call_arg (call_stmt, argnum);
		if (SSA_VAR_P (arg)
		    && infer_nonnull_range_by_attribute (stmt, arg))
		  return true;
	      }
	  }
	return gimple_alloca_call_p (stmt);
      }
    default:
      gcc_unreachable ();
    }
}

/* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
   obtained so far.  */

static bool
vrp_stmt_computes_nonzero (gimple *stmt, bool *strict_overflow_p)
{
  if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
    return true;

  /* If we have an expression of the form &X->a, then the expression
     is nonnull if X is nonnull.  */
  if (is_gimple_assign (stmt)
      && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
    {
      tree expr = gimple_assign_rhs1 (stmt);
      tree base = get_base_address (TREE_OPERAND (expr, 0));

      if (base != NULL_TREE
	  && TREE_CODE (base) == MEM_REF
	  && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
	{
	  value_range *vr = get_value_range (TREE_OPERAND (base, 0));
	  if (range_is_nonnull (vr))
	    return true;
	}
    }

  return false;
}

/* Returns true if EXPR is a valid value (as expected by compare_values) --
   a gimple invariant, or SSA_NAME +- CST.  */

static bool
valid_value_p (tree expr)
{
  if (TREE_CODE (expr) == SSA_NAME)
    return true;

  if (TREE_CODE (expr) == PLUS_EXPR
      || TREE_CODE (expr) == MINUS_EXPR)
    return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
	    && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);

  return is_gimple_min_invariant (expr);
}

/* Return
   1 if VAL < VAL2
   0 if !(VAL < VAL2)
   -2 if those are incomparable.  */
static inline int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    {
      if (! is_positive_overflow_infinity (val2))
	return tree_int_cst_lt (val, val2);
    }
  else
    {
      tree tcmp;

      fold_defer_overflow_warnings ();

      tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

      fold_undefer_and_ignore_overflow_warnings ();

      if (!tcmp
	  || TREE_CODE (tcmp) != INTEGER_CST)
	return -2;

      if (!integer_zerop (tcmp))
	return 1;
    }

  /* val >= val2, not considering overflow infinity.  */
  if (is_negative_overflow_infinity (val))
    return is_negative_overflow_infinity (val2) ? 0 : 1;
  else if (is_positive_overflow_infinity (val2))
    return is_positive_overflow_infinity (val) ? 0 : 1;

  return 0;
}

/* Compare two values VAL1 and VAL2.  Return

   -2 if VAL1 and VAL2 cannot be compared at compile-time,
   -1 if VAL1 < VAL2,
    0 if VAL1 == VAL2,
   +1 if VAL1 > VAL2, and
   +2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

static int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
	      == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  const bool overflow_undefined
    = INTEGRAL_TYPE_P (TREE_TYPE (val1))
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
  tree inv1, inv2;
  bool neg1, neg2;
  tree sym1 = get_single_symbol (val1, &neg1, &inv1);
  tree sym2 = get_single_symbol (val2, &neg2, &inv2);

  /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
     accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
  if (sym1 && sym2)
    {
      /* Both values must use the same name with the same sign.  */
      if (sym1 != sym2 || neg1 != neg2)
	return -2;

      /* [-]NAME + CST == [-]NAME + CST.  */
      if (inv1 == inv2)
	return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  && (!inv1 || !TREE_NO_WARNING (val1))
	  && (!inv2 || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      if (!inv1)
	inv1 = build_int_cst (TREE_TYPE (val1), 0);
      if (!inv2)
	inv2 = build_int_cst (TREE_TYPE (val2), 0);

      return compare_values_warnv (inv1, inv2, strict_overflow_p);
    }

  const bool cst1 = is_gimple_min_invariant (val1);
  const bool cst2 = is_gimple_min_invariant (val2);

  /* If one is of the form '[-]NAME + CST' and the other is constant, then
     it might be possible to say something depending on the constants.  */
  if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
    {
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  && (!sym1 || !TREE_NO_WARNING (val1))
	  && (!sym2 || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
      tree cst = cst1 ? val1 : val2;
      tree inv = cst1 ? inv2 : inv1;

      /* Compute the difference between the constants.  If it overflows or
	 underflows, this means that we can trivially compare the NAME with
	 it and, consequently, the two values with each other.  */
      wide_int diff = wi::sub (cst, inv);
      if (wi::cmp (0, inv, sgn) != wi::cmp (diff, cst, sgn))
	{
	  const int res = wi::cmp (cst, inv, sgn);
	  return cst1 ? res : -res;
	}

      return -2;
    }

  /* We cannot say anything more for non-constants.  */
  if (!cst1 || !cst2)
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values, except for overflow
	 infinities.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	{
	  if (strict_overflow_p != NULL)
	    *strict_overflow_p = true;
	  if (is_negative_overflow_infinity (val1))
	    return is_negative_overflow_infinity (val2) ? 0 : -1;
	  else if (is_negative_overflow_infinity (val2))
	    return 1;
	  else if (is_positive_overflow_infinity (val1))
	    return is_positive_overflow_infinity (val2) ? 0 : 1;
	  else if (is_positive_overflow_infinity (val2))
	    return -1;
	  return -2;
	}

      return tree_int_cst_compare (val1, val2);
    }
  else
    {
      tree t;

      /* First see if VAL1 and VAL2 are not the same.  */
      if (val1 == val2 || operand_equal_p (val1, val2, 0))
	return 0;

      /* If VAL1 is a lower address than VAL2, return -1.  */
      if (operand_less_p (val1, val2) == 1)
	return -1;

      /* If VAL1 is a higher address than VAL2, return +1.  */
      if (operand_less_p (val2, val1) == 1)
	return 1;

      /* If VAL1 is different than VAL2, return +2.
	 For integer constants we either have already returned -1 or 1
	 or they are equivalent.  We still might succeed in proving
	 something about non-trivial operands.  */
      if (TREE_CODE (val1) != INTEGER_CST
	  || TREE_CODE (val2) != INTEGER_CST)
	{
	  t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
	  if (t && integer_onep (t))
	    return 2;
	}

      return -2;
    }
}

/* Compare values like compare_values_warnv, but treat comparisons of
   nonconstants which rely on undefined overflow as incomparable.  */

static int
compare_values (tree val1, tree val2)
{
  bool sop;
  int ret;

  sop = false;
  ret = compare_values_warnv (val1, val2, &sop);
  if (sop
      && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
    ret = -2;
  return ret;
}


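/* For example, compare_values (3, 7) returns -1 and compare_values (7, 7)
   returns 0.  Comparing x_1 + 1 with x_1 + 2 yields -1 from
   compare_values_warnv (which also sets *STRICT_OVERFLOW_P), but -2 from
   compare_values, since that result relies on signed overflow being
   undefined.  */
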
/* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
   0 if VAL is not inside [MIN, MAX],
   -2 if we cannot tell either way.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

static inline int
value_inside_range (tree val, tree min, tree max)
{
  int cmp1, cmp2;

  cmp1 = operand_less_p (val, min);
  if (cmp1 == -2)
    return -2;
  if (cmp1 == 1)
    return 0;

  cmp2 = operand_less_p (max, val);
  if (cmp2 == -2)
    return -2;

  return !cmp2;
}


/* Return true if value ranges VR0 and VR1 have a non-empty
   intersection.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

static inline bool
value_ranges_intersect_p (value_range *vr0, value_range *vr1)
{
  /* The value ranges do not intersect if the maximum of the first range is
     less than the minimum of the second range or vice versa.
     When those relations are unknown, we can't do any better.  */
  if (operand_less_p (vr0->max, vr1->min) != 0)
    return false;
  if (operand_less_p (vr1->max, vr0->min) != 0)
    return false;
  return true;
}


/* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
   include the value zero, -2 if we cannot tell.  */

static inline int
range_includes_zero_p (tree min, tree max)
{
  tree zero = build_int_cst (TREE_TYPE (min), 0);
  return value_inside_range (zero, min, max);
}

/* Return true if *VR is known to only contain nonnegative values.  */

static inline bool
value_range_nonnegative_p (value_range *vr)
{
  /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
     which would return a useful value should be encoded as a
     VR_RANGE.  */
  if (vr->type == VR_RANGE)
    {
      int result = compare_values (vr->min, integer_zero_node);
      return (result == 0 || result == 1);
    }

  return false;
}

/* If *VR has a value range that is a single constant value return that,
   otherwise return NULL_TREE.  */

static tree
value_range_constant_singleton (value_range *vr)
{
  if (vr->type == VR_RANGE
      && vrp_operand_equal_p (vr->min, vr->max)
      && is_gimple_min_invariant (vr->min))
    return vr->min;

  return NULL_TREE;
}

/* If OP has a value range with a single constant value return that,
   otherwise return NULL_TREE.  This returns OP itself if OP is a
   constant.  */

static tree
op_with_constant_singleton_value_range (tree op)
{
  if (is_gimple_min_invariant (op))
    return op;

  if (TREE_CODE (op) != SSA_NAME)
    return NULL_TREE;

  return value_range_constant_singleton (get_value_range (op));
}

/* Return true if OP is in a boolean [0, 1] value-range.  */

static bool
op_with_boolean_value_range_p (tree op)
{
  value_range *vr;

  if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
    return true;

  if (integer_zerop (op)
      || integer_onep (op))
    return true;

  if (TREE_CODE (op) != SSA_NAME)
    return false;

  vr = get_value_range (op);
  return (vr->type == VR_RANGE
	  && integer_zerop (vr->min)
	  && integer_onep (vr->max));
}

973625a0
KV
1501/* Extract value range information for VAR when (OP COND_CODE LIMIT) is
1502 true and store it in *VR_P. */
0bca51f0
DN
1503
1504static void
973625a0
KV
1505extract_range_for_var_from_comparison_expr (tree var, enum tree_code cond_code,
1506 tree op, tree limit,
1507 value_range *vr_p)
0bca51f0 1508{
973625a0 1509 tree min, max, type;
526ceb68 1510 value_range *limit_vr;
b80cca7b 1511 limit = avoid_overflow_infinity (limit);
ebbcd0c6 1512 type = TREE_TYPE (var);
0bca51f0
DN
1513 gcc_assert (limit != var);
1514
227858d1
DN
1515 /* For pointer arithmetic, we only keep track of pointer equality
1516 and inequality. */
1517 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
0bca51f0 1518 {
b565d777 1519 set_value_range_to_varying (vr_p);
0bca51f0
DN
1520 return;
1521 }
1522
227858d1
DN
1523 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1524 try to use LIMIT's range to avoid creating symbolic ranges
1525 unnecessarily. */
1526 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1527
1528 /* LIMIT's range is only interesting if it has any useful information. */
959cef74
RB
1529 if (! limit_vr
1530 || limit_vr->type == VR_UNDEFINED
1531 || limit_vr->type == VR_VARYING
1532 || (symbolic_range_p (limit_vr)
1533 && ! (limit_vr->type == VR_RANGE
1534 && (limit_vr->min == limit_vr->max
1535 || operand_equal_p (limit_vr->min, limit_vr->max, 0)))))
227858d1
DN
1536 limit_vr = NULL;
1537
db3d5328
DN
1538 /* Initially, the new range has the same set of equivalences of
1539 VAR's range. This will be revised before returning the final
1540 value. Since assertions may be chained via mutually exclusive
1541 predicates, we will need to trim the set of equivalences before
1542 we are done. */
227858d1 1543 gcc_assert (vr_p->equiv == NULL);
f5052e29 1544 add_equivalence (&vr_p->equiv, var);
227858d1
DN
1545
1546 /* Extract a new range based on the asserted comparison for VAR and
1547 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1548 will only use it for equality comparisons (EQ_EXPR). For any
1549 other kind of assertion, we cannot derive a range from LIMIT's
1550 anti-range that can be used to describe the new range. For
1551 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1552 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1553 no single range for x_2 that could describe LE_EXPR, so we might
2ab8dbf4
RG
1554 as well build the range [b_4, +INF] for it.
1555 One special case we handle is extracting a range from a
1556 range test encoded as (unsigned)var + CST <= limit. */
973625a0
KV
1557 if (TREE_CODE (op) == NOP_EXPR
1558 || TREE_CODE (op) == PLUS_EXPR)
2ab8dbf4 1559 {
973625a0 1560 if (TREE_CODE (op) == PLUS_EXPR)
2ab8dbf4 1561 {
973625a0
KV
1562 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (op, 1)),
1563 TREE_OPERAND (op, 1));
d35936ab 1564 max = int_const_binop (PLUS_EXPR, limit, min);
973625a0 1565 op = TREE_OPERAND (op, 0);
2ab8dbf4
RG
1566 }
1567 else
70b7b037
RG
1568 {
1569 min = build_int_cst (TREE_TYPE (var), 0);
1570 max = limit;
1571 }
2ab8dbf4 1572
70b7b037
RG
1573 /* Make sure to not set TREE_OVERFLOW on the final type
1574 conversion. We are willingly interpreting large positive
ed986827 1575 unsigned values as negative signed values here. */
807e902e
KZ
1576 min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false);
1577 max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false);
2ab8dbf4
RG
1578
1579 /* We can transform a max, min range to an anti-range or
1580 vice-versa. Use set_and_canonicalize_value_range which does
1581 this for us. */
1582 if (cond_code == LE_EXPR)
1583 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1584 min, max, vr_p->equiv);
1585 else if (cond_code == GT_EXPR)
1586 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1587 min, max, vr_p->equiv);
1588 else
1589 gcc_unreachable ();
1590 }
1591 else if (cond_code == EQ_EXPR)
227858d1
DN
1592 {
1593 enum value_range_type range_type;
1594
1595 if (limit_vr)
1596 {
1597 range_type = limit_vr->type;
1598 min = limit_vr->min;
1599 max = limit_vr->max;
1600 }
1601 else
1602 {
1603 range_type = VR_RANGE;
1604 min = limit;
1605 max = limit;
1606 }
1607
1608 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1609
1610 /* When asserting the equality VAR == LIMIT and LIMIT is another
1611 SSA name, the new range will also inherit the equivalence set
1612 from LIMIT. */
1613 if (TREE_CODE (limit) == SSA_NAME)
f5052e29 1614 add_equivalence (&vr_p->equiv, limit);
227858d1
DN
1615 }
1616 else if (cond_code == NE_EXPR)
1617 {
1618 /* As described above, when LIMIT's range is an anti-range and
1619 this assertion is an inequality (NE_EXPR), then we cannot
1620 derive anything from the anti-range. For instance, if
1621 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1622 not imply that VAR's range is [0, 0]. So, in the case of
1623 anti-ranges, we just assert the inequality using LIMIT and
fde5c44c
JM
1624 not its anti-range.
1625
1626 If LIMIT_VR is a range, we can only use it to build a new
1627 anti-range if LIMIT_VR is a single-valued range. For
1628 instance, if LIMIT_VR is [0, 1], the predicate
1629 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1630 Rather, it means that for value 0 VAR should be ~[0, 0]
1631 and for value 1, VAR should be ~[1, 1]. We cannot
1632 represent these ranges.
1633
1634 The only situation in which we can build a valid
1635 anti-range is when LIMIT_VR is a single-valued range
b8698a0f 1636 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
fde5c44c
JM
1637 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1638 if (limit_vr
1639 && limit_vr->type == VR_RANGE
1640 && compare_values (limit_vr->min, limit_vr->max) == 0)
227858d1 1641 {
fde5c44c
JM
1642 min = limit_vr->min;
1643 max = limit_vr->max;
227858d1
DN
1644 }
1645 else
1646 {
fde5c44c
JM
1647 /* In any other case, we cannot use LIMIT's range to build a
1648 valid anti-range. */
1649 min = max = limit;
227858d1
DN
1650 }
1651
1652 /* If MIN and MAX cover the whole range for their type, then
1653 just use the original LIMIT. */
1654 if (INTEGRAL_TYPE_P (type)
e1f28918
ILT
1655 && vrp_val_is_min (min)
1656 && vrp_val_is_max (max))
227858d1
DN
1657 min = max = limit;
1658
1001fb60
RB
1659 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1660 min, max, vr_p->equiv);
227858d1
DN
1661 }
1662 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
0bca51f0 1663 {
227858d1
DN
1664 min = TYPE_MIN_VALUE (type);
1665
1666 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1667 max = limit;
1668 else
1669 {
1670 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1671 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1672 LT_EXPR. */
1673 max = limit_vr->max;
1674 }
1675
9d6eefd5
EB
1676 /* If the maximum value forces us to be out of bounds, simply punt.
1677 It would be pointless to try and do anything more since this
1678 all should be optimized away above us. */
7343ff45
ILT
1679 if ((cond_code == LT_EXPR
1680 && compare_values (max, min) == 0)
3f5c390d 1681 || is_overflow_infinity (max))
9d6eefd5
EB
1682 set_value_range_to_varying (vr_p);
1683 else
227858d1 1684 {
9d6eefd5 1685 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
7343ff45 1686 if (cond_code == LT_EXPR)
9d6eefd5 1687 {
c360c0fb
JJ
1688 if (TYPE_PRECISION (TREE_TYPE (max)) == 1
1689 && !TYPE_UNSIGNED (TREE_TYPE (max)))
1690 max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
1691 build_int_cst (TREE_TYPE (max), -1));
1692 else
1693 max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
1694 build_int_cst (TREE_TYPE (max), 1));
3fe5bcaf
ILT
1695 if (EXPR_P (max))
1696 TREE_NO_WARNING (max) = 1;
9d6eefd5 1697 }
227858d1 1698
9d6eefd5
EB
1699 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1700 }
0bca51f0 1701 }
227858d1 1702 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
0bca51f0 1703 {
227858d1
DN
1704 max = TYPE_MAX_VALUE (type);
1705
1706 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1707 min = limit;
1708 else
1709 {
1710 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1711 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1712 GT_EXPR. */
1713 min = limit_vr->min;
1714 }
1715
9d6eefd5
EB
1716 /* If the minimum value forces us to be out of bounds, simply punt.
1717 It would be pointless to try and do anything more since this
1718 all should be optimized away above us. */
7343ff45
ILT
1719 if ((cond_code == GT_EXPR
1720 && compare_values (min, max) == 0)
3f5c390d 1721 || is_overflow_infinity (min))
9d6eefd5
EB
1722 set_value_range_to_varying (vr_p);
1723 else
227858d1 1724 {
9d6eefd5 1725 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
7343ff45 1726 if (cond_code == GT_EXPR)
9d6eefd5 1727 {
c360c0fb
JJ
1728 if (TYPE_PRECISION (TREE_TYPE (min)) == 1
1729 && !TYPE_UNSIGNED (TREE_TYPE (min)))
1730 min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
1731 build_int_cst (TREE_TYPE (min), -1));
1732 else
1733 min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
1734 build_int_cst (TREE_TYPE (min), 1));
3fe5bcaf
ILT
1735 if (EXPR_P (min))
1736 TREE_NO_WARNING (min) = 1;
9d6eefd5 1737 }
227858d1 1738
9d6eefd5
EB
1739 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1740 }
0bca51f0
DN
1741 }
1742 else
1743 gcc_unreachable ();
1744
3928c098
RG
1745 /* Finally intersect the new range with what we already know about var. */
1746 vrp_intersect_ranges (vr_p, get_value_range (var));
0bca51f0
DN
1747}
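/* Editorial example (not part of the original source): for the assertion
   x_1 <= 10 on a 32-bit signed int, the code above first builds the range
   [INT_MIN, 10]; the final intersection with an already-known range such
   as [0, 100] then narrows it to [0, 10].  */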
1748
973625a0
KV
1749/* Extract value range information from an ASSERT_EXPR EXPR and store
1750 it in *VR_P. */
1751
1752static void
1753extract_range_from_assert (value_range *vr_p, tree expr)
1754{
1755 tree var = ASSERT_EXPR_VAR (expr);
1756 tree cond = ASSERT_EXPR_COND (expr);
1757 tree limit, op;
1758 enum tree_code cond_code;
1759 gcc_assert (COMPARISON_CLASS_P (cond));
1760
1761 /* Find VAR in the ASSERT_EXPR conditional. */
1762 if (var == TREE_OPERAND (cond, 0)
1763 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1764 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1765 {
1766 /* If the predicate is of the form VAR COMP LIMIT, then we just
1767 take LIMIT from the RHS and use the same comparison code. */
1768 cond_code = TREE_CODE (cond);
1769 limit = TREE_OPERAND (cond, 1);
1770 op = TREE_OPERAND (cond, 0);
1771 }
1772 else
1773 {
1774 /* If the predicate is of the form LIMIT COMP VAR, then we need
1775 to flip around the comparison code to create the proper range
1776 for VAR. */
1777 cond_code = swap_tree_comparison (TREE_CODE (cond));
1778 limit = TREE_OPERAND (cond, 0);
1779 op = TREE_OPERAND (cond, 1);
1780 }
1781 extract_range_for_var_from_comparison_expr (var, cond_code, op,
1782 limit, vr_p);
1783}
0bca51f0
DN
1784
1785/* Extract range information from SSA name VAR and store it in VR. If
1786 VAR has an interesting range, use it. Otherwise, create the
1787 range [VAR, VAR] and return it. This is useful in situations where
1788 we may have conditionals testing values of VARYING names. For
1789 instance,
1790
1791 x_3 = y_5;
1792 if (x_3 > y_5)
1793 ...
1794
1795 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1796 always false. */
1797
1798static void
526ceb68 1799extract_range_from_ssa_name (value_range *vr, tree var)
0bca51f0 1800{
526ceb68 1801 value_range *var_vr = get_value_range (var);
0bca51f0 1802
6e5799b9 1803 if (var_vr->type != VR_VARYING)
227858d1 1804 copy_value_range (vr, var_vr);
0bca51f0 1805 else
227858d1
DN
1806 set_value_range (vr, VR_RANGE, var, var, NULL);
1807
f5052e29 1808 add_equivalence (&vr->equiv, var);
0bca51f0
DN
1809}
1810
1811
9983270b
DN
1812/* Wrapper around int_const_binop. If the operation overflows and we
1813 are not using wrapping arithmetic, then adjust the result to be
12df8a7e
ILT
1814 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
1815 NULL_TREE if we need to use an overflow infinity representation but
1816 the type does not support it. */
9983270b 1817
12df8a7e 1818static tree
9983270b
DN
1819vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
1820{
1821 tree res;
1822
d35936ab 1823 res = int_const_binop (code, val1, val2);
9983270b 1824
9605a606
RG
1825 /* If we are using unsigned arithmetic, operate symbolically
1826 on -INF and +INF as int_const_binop only handles signed overflow. */
1827 if (TYPE_UNSIGNED (TREE_TYPE (val1)))
b17775ab
JM
1828 {
1829 int checkz = compare_values (res, val1);
26ef4301 1830 bool overflow = false;
b17775ab 1831
7dc32197 1832 /* Ensure that res = val1 [+*] val2 >= val1
b17775ab 1833 or that res = val1 - val2 <= val1. */
26ef4301 1834 if ((code == PLUS_EXPR
7dc32197
DN
1835 && !(checkz == 1 || checkz == 0))
1836 || (code == MINUS_EXPR
1837 && !(checkz == 0 || checkz == -1)))
26ef4301
JL
1838 {
1839 overflow = true;
1840 }
1841 /* Checking for multiplication overflow is done by dividing the
1842 output of the multiplication by the first input of the
1843 multiplication. If the result of that division operation is
1844 not equal to the second input of the multiplication, then the
1845 multiplication overflowed. */
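/* Editorial example (not part of the original source): with 8-bit
   unsigned values, 100 * 3 wraps to 44, and 44 / 100 == 0 != 3, so the
   division check below flags the overflow.  */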
1846 else if (code == MULT_EXPR && !integer_zerop (val1))
1847 {
1848 tree tmp = int_const_binop (TRUNC_DIV_EXPR,
3ea0e1e4 1849 res,
d35936ab 1850 val1);
26ef4301
JL
1851 int check = compare_values (tmp, val2);
1852
1853 if (check != 0)
1854 overflow = true;
1855 }
1856
1857 if (overflow)
b17775ab
JM
1858 {
1859 res = copy_node (res);
1860 TREE_OVERFLOW (res) = 1;
1861 }
26ef4301 1862
b17775ab 1863 }
5418fe84
RG
1864 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
1865 /* If the signed operation wraps then int_const_binop has done
1866 everything we want. */
1867 ;
807e902e
KZ
1868 /* Signed division of -1/0 overflows and by the time it gets here
1869 returns NULL_TREE. */
1870 else if (!res)
1871 return NULL_TREE;
12df8a7e
ILT
1872 else if ((TREE_OVERFLOW (res)
1873 && !TREE_OVERFLOW (val1)
1874 && !TREE_OVERFLOW (val2))
1875 || is_overflow_infinity (val1)
1876 || is_overflow_infinity (val2))
9983270b 1877 {
7dc32197
DN
1878 /* If the operation overflowed but neither VAL1 nor VAL2 are
1879 overflown, return -INF or +INF depending on the operation
1880 and the combination of signs of the operands. */
9983270b
DN
1881 int sgn1 = tree_int_cst_sgn (val1);
1882 int sgn2 = tree_int_cst_sgn (val2);
1883
12df8a7e
ILT
1884 if (needs_overflow_infinity (TREE_TYPE (res))
1885 && !supports_overflow_infinity (TREE_TYPE (res)))
1886 return NULL_TREE;
1887
d7419dec
ILT
1888 /* We have to punt on adding infinities of different signs,
1889 since we can't tell what the sign of the result should be.
1890 Likewise for subtracting infinities of the same sign. */
1891 if (((code == PLUS_EXPR && sgn1 != sgn2)
1892 || (code == MINUS_EXPR && sgn1 == sgn2))
12df8a7e
ILT
1893 && is_overflow_infinity (val1)
1894 && is_overflow_infinity (val2))
1895 return NULL_TREE;
1896
d7419dec
ILT
1897 /* Don't try to handle division or shifting of infinities. */
1898 if ((code == TRUNC_DIV_EXPR
1899 || code == FLOOR_DIV_EXPR
1900 || code == CEIL_DIV_EXPR
1901 || code == EXACT_DIV_EXPR
1902 || code == ROUND_DIV_EXPR
1903 || code == RSHIFT_EXPR)
1904 && (is_overflow_infinity (val1)
1905 || is_overflow_infinity (val2)))
1906 return NULL_TREE;
1907
0d22e81f
EB
1908 /* Notice that we only need to handle the restricted set of
1909 operations handled by extract_range_from_binary_expr.
1910 Among them, only multiplication, addition and subtraction
1911 can yield overflow without overflown operands because we
1912 are working with integral types only... except in the
1913 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
1914 for division too. */
1915
1916 /* For multiplication, the sign of the overflow is given
1917 by the comparison of the signs of the operands. */
1918 if ((code == MULT_EXPR && sgn1 == sgn2)
1919 /* For addition, the operands must be of the same sign
1920 to yield an overflow. Its sign is therefore that
d7419dec
ILT
1921 of one of the operands, for example the first. For
1922 infinite operands X + -INF is negative, not positive. */
1923 || (code == PLUS_EXPR
1924 && (sgn1 >= 0
1925 ? !is_negative_overflow_infinity (val2)
1926 : is_positive_overflow_infinity (val2)))
12df8a7e
ILT
1927 /* For subtraction, non-infinite operands must be of
1928 different signs to yield an overflow. Its sign is
1929 therefore that of the first operand or the opposite of
1930 that of the second operand. A first operand of 0 counts
1931 as positive here, for the corner case 0 - (-INF), which
1932 overflows, but must yield +INF. For infinite operands 0
1933 - INF is negative, not positive. */
1934 || (code == MINUS_EXPR
1935 && (sgn1 >= 0
1936 ? !is_positive_overflow_infinity (val2)
1937 : is_negative_overflow_infinity (val2)))
13338552
RG
1938 /* We only get in here with positive shift count, so the
1939 overflow direction is the same as the sign of val1.
1940 Actually rshift does not overflow at all, but we only
1941 handle the case of shifting overflowed -INF and +INF. */
1942 || (code == RSHIFT_EXPR
1943 && sgn1 >= 0)
0d22e81f
EB
1944 /* For division, the only case is -INF / -1 = +INF. */
1945 || code == TRUNC_DIV_EXPR
1946 || code == FLOOR_DIV_EXPR
1947 || code == CEIL_DIV_EXPR
1948 || code == EXACT_DIV_EXPR
1949 || code == ROUND_DIV_EXPR)
12df8a7e
ILT
1950 return (needs_overflow_infinity (TREE_TYPE (res))
1951 ? positive_overflow_infinity (TREE_TYPE (res))
1952 : TYPE_MAX_VALUE (TREE_TYPE (res)));
9983270b 1953 else
12df8a7e
ILT
1954 return (needs_overflow_infinity (TREE_TYPE (res))
1955 ? negative_overflow_infinity (TREE_TYPE (res))
1956 : TYPE_MIN_VALUE (TREE_TYPE (res)));
9983270b
DN
1957 }
1958
1959 return res;
1960}
1961
1962
807e902e 1963/* For range VR compute two wide_int bitmasks. In the *MAY_BE_NONZERO
85e693aa
JJ
1964 bitmask, if some bit is unset, it means that for all numbers in the range
1965 the bit is 0; otherwise it might be 0 or 1. In the *MUST_BE_NONZERO
1966 bitmask, if some bit is set, it means that for all numbers in the range
1967 the bit is 1; otherwise it might be 0 or 1. */
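/* Editorial example (not part of the original source): for VR = [4, 7]
   the values are 0b100 through 0b111, so *MUST_BE_NONZERO becomes 0b100
   (bit 2 is set in every value) and *MAY_BE_NONZERO becomes 0b111
   (bits 0 and 1 may or may not be set).  */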
1968
1969static bool
807e902e 1970zero_nonzero_bits_from_vr (const tree expr_type,
526ceb68 1971 value_range *vr,
807e902e
KZ
1972 wide_int *may_be_nonzero,
1973 wide_int *must_be_nonzero)
85e693aa 1974{
807e902e
KZ
1975 *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
1976 *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
a75f5017 1977 if (!range_int_cst_p (vr)
3f5c390d
RB
1978 || is_overflow_infinity (vr->min)
1979 || is_overflow_infinity (vr->max))
4001900f
RG
1980 return false;
1981
1982 if (range_int_cst_singleton_p (vr))
1983 {
807e902e 1984 *may_be_nonzero = vr->min;
4001900f
RG
1985 *must_be_nonzero = *may_be_nonzero;
1986 }
1987 else if (tree_int_cst_sgn (vr->min) >= 0
1988 || tree_int_cst_sgn (vr->max) < 0)
85e693aa 1989 {
807e902e
KZ
1990 wide_int xor_mask = wi::bit_xor (vr->min, vr->max);
1991 *may_be_nonzero = wi::bit_or (vr->min, vr->max);
1992 *must_be_nonzero = wi::bit_and (vr->min, vr->max);
1993 if (xor_mask != 0)
85e693aa 1994 {
807e902e
KZ
1995 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
1996 may_be_nonzero->get_precision ());
1997 *may_be_nonzero = *may_be_nonzero | mask;
1998 *must_be_nonzero = must_be_nonzero->and_not (mask);
85e693aa
JJ
1999 }
2000 }
4001900f
RG
2001
2002 return true;
85e693aa
JJ
2003}
2004
3c9c79e8
RG
2005/* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
2006 so that *VR0 U *VR1 == *AR. Returns true if that is possible,
2007 false otherwise. If *AR can be represented with a single range
2008 *VR1 will be VR_UNDEFINED. */
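/* Editorial example (not part of the original source): for a 32-bit int,
   ~[3, 5] splits into [INT_MIN, 2] and [6, INT_MAX]; an anti-range that
   already touches a type bound, e.g. ~[INT_MIN, 5], yields the single
   range [6, INT_MAX] and *VR1 stays VR_UNDEFINED.  */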
2009
2010static bool
526ceb68
TS
2011ranges_from_anti_range (value_range *ar,
2012 value_range *vr0, value_range *vr1)
3c9c79e8
RG
2013{
2014 tree type = TREE_TYPE (ar->min);
2015
2016 vr0->type = VR_UNDEFINED;
2017 vr1->type = VR_UNDEFINED;
2018
2019 if (ar->type != VR_ANTI_RANGE
2020 || TREE_CODE (ar->min) != INTEGER_CST
2021 || TREE_CODE (ar->max) != INTEGER_CST
2022 || !vrp_val_min (type)
2023 || !vrp_val_max (type))
2024 return false;
2025
2026 if (!vrp_val_is_min (ar->min))
2027 {
2028 vr0->type = VR_RANGE;
2029 vr0->min = vrp_val_min (type);
807e902e 2030 vr0->max = wide_int_to_tree (type, wi::sub (ar->min, 1));
3c9c79e8
RG
2031 }
2032 if (!vrp_val_is_max (ar->max))
2033 {
2034 vr1->type = VR_RANGE;
807e902e 2035 vr1->min = wide_int_to_tree (type, wi::add (ar->max, 1));
3c9c79e8
RG
2036 vr1->max = vrp_val_max (type);
2037 }
2038 if (vr0->type == VR_UNDEFINED)
2039 {
2040 *vr0 = *vr1;
2041 vr1->type = VR_UNDEFINED;
2042 }
2043
2044 return vr0->type != VR_UNDEFINED;
2045}
2046
a1bc7628
RG
2047/* Helper to extract a value-range *VR for a multiplicative operation
2048 *VR0 CODE *VR1. */
2049
2050static void
526ceb68 2051extract_range_from_multiplicative_op_1 (value_range *vr,
a1bc7628 2052 enum tree_code code,
526ceb68 2053 value_range *vr0, value_range *vr1)
a1bc7628
RG
2054{
2055 enum value_range_type type;
2056 tree val[4];
2057 size_t i;
2058 tree min, max;
2059 bool sop;
2060 int cmp;
2061
2062 /* Multiplications, divisions and shifts are a bit tricky to handle,
2063 depending on the mix of signs we have in the two ranges, we
2064 need to operate on different values to get the minimum and
2065 maximum values for the new range. One approach is to figure
2066 out all the variations of range combinations and do the
2067 operations.
2068
2069 However, this involves several calls to compare_values and it
2070 is pretty convoluted. It's simpler to do the 4 operations
2071 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP
2072 MAX1) and then figure the smallest and largest values to form
2073 the new range. */
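/* Editorial example (not part of the original source): for
   [2, 3] * [10, 20] the four cross products are 20, 40, 30 and 60,
   so the resulting range is [20, 60].  */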
2074 gcc_assert (code == MULT_EXPR
2075 || code == TRUNC_DIV_EXPR
2076 || code == FLOOR_DIV_EXPR
2077 || code == CEIL_DIV_EXPR
2078 || code == EXACT_DIV_EXPR
2079 || code == ROUND_DIV_EXPR
25722436
TV
2080 || code == RSHIFT_EXPR
2081 || code == LSHIFT_EXPR);
a1bc7628
RG
2082 gcc_assert ((vr0->type == VR_RANGE
2083 || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
2084 && vr0->type == vr1->type);
2085
2086 type = vr0->type;
2087
2088 /* Compute the 4 cross operations. */
2089 sop = false;
2090 val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
2091 if (val[0] == NULL_TREE)
2092 sop = true;
2093
2094 if (vr1->max == vr1->min)
2095 val[1] = NULL_TREE;
2096 else
2097 {
2098 val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
2099 if (val[1] == NULL_TREE)
2100 sop = true;
2101 }
2102
2103 if (vr0->max == vr0->min)
2104 val[2] = NULL_TREE;
2105 else
2106 {
2107 val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
2108 if (val[2] == NULL_TREE)
2109 sop = true;
2110 }
2111
2112 if (vr0->min == vr0->max || vr1->min == vr1->max)
2113 val[3] = NULL_TREE;
2114 else
2115 {
2116 val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
2117 if (val[3] == NULL_TREE)
2118 sop = true;
2119 }
2120
2121 if (sop)
2122 {
2123 set_value_range_to_varying (vr);
2124 return;
2125 }
2126
2127 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2128 of VAL[i]. */
2129 min = val[0];
2130 max = val[0];
2131 for (i = 1; i < 4; i++)
2132 {
2133 if (!is_gimple_min_invariant (min)
2134 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2135 || !is_gimple_min_invariant (max)
2136 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2137 break;
2138
2139 if (val[i])
2140 {
2141 if (!is_gimple_min_invariant (val[i])
2142 || (TREE_OVERFLOW (val[i])
2143 && !is_overflow_infinity (val[i])))
2144 {
2145 /* If we found an overflowed value, set MIN and MAX
2146 to it so that we set the resulting range to
2147 VARYING. */
2148 min = max = val[i];
2149 break;
2150 }
2151
2152 if (compare_values (val[i], min) == -1)
2153 min = val[i];
2154
2155 if (compare_values (val[i], max) == 1)
2156 max = val[i];
2157 }
2158 }
2159
2160 /* If either MIN or MAX overflowed, then set the resulting range to
2161 VARYING. But we do accept an overflow infinity
2162 representation. */
2163 if (min == NULL_TREE
2164 || !is_gimple_min_invariant (min)
2165 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2166 || max == NULL_TREE
2167 || !is_gimple_min_invariant (max)
2168 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2169 {
2170 set_value_range_to_varying (vr);
2171 return;
2172 }
2173
2174 /* We punt if:
2175 1) [-INF, +INF]
2176 2) [-INF, +-INF(OVF)]
2177 3) [+-INF(OVF), +INF]
2178 4) [+-INF(OVF), +-INF(OVF)]
2179 We learn nothing when we have INF and INF(OVF) on both sides.
2180 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2181 overflow. */
2182 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2183 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2184 {
2185 set_value_range_to_varying (vr);
2186 return;
2187 }
2188
2189 cmp = compare_values (min, max);
2190 if (cmp == -2 || cmp == 1)
2191 {
2192 /* If the new range has its limits swapped around (MIN > MAX),
2193 then the operation caused one of them to wrap around, mark
2194 the new range VARYING. */
2195 set_value_range_to_varying (vr);
2196 }
2197 else
2198 set_value_range (vr, type, min, max, NULL);
2199}
85e693aa 2200
4d320da4 2201/* Extract range information from a binary operation CODE based on
e76340be 2202 the ranges of each of its operands *VR0 and *VR1 with resulting
4d320da4 2203 type EXPR_TYPE. The resulting range is stored in *VR. */
0bca51f0
DN
2204
2205static void
526ceb68 2206extract_range_from_binary_expr_1 (value_range *vr,
4d320da4 2207 enum tree_code code, tree expr_type,
526ceb68 2208 value_range *vr0_, value_range *vr1_)
0bca51f0 2209{
526ceb68
TS
2210 value_range vr0 = *vr0_, vr1 = *vr1_;
2211 value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
4e2d94a9 2212 enum value_range_type type;
a1bc7628 2213 tree min = NULL_TREE, max = NULL_TREE;
0bca51f0
DN
2214 int cmp;
2215
a1bc7628
RG
2216 if (!INTEGRAL_TYPE_P (expr_type)
2217 && !POINTER_TYPE_P (expr_type))
2218 {
2219 set_value_range_to_varying (vr);
2220 return;
2221 }
2222
0bca51f0
DN
2223 /* Not all binary expressions can be applied to ranges in a
2224 meaningful way. Handle only arithmetic operations. */
2225 if (code != PLUS_EXPR
2226 && code != MINUS_EXPR
5be014d5 2227 && code != POINTER_PLUS_EXPR
0bca51f0
DN
2228 && code != MULT_EXPR
2229 && code != TRUNC_DIV_EXPR
2230 && code != FLOOR_DIV_EXPR
2231 && code != CEIL_DIV_EXPR
2232 && code != EXACT_DIV_EXPR
2233 && code != ROUND_DIV_EXPR
bab4d587 2234 && code != TRUNC_MOD_EXPR
6569e716 2235 && code != RSHIFT_EXPR
8c1f1d42 2236 && code != LSHIFT_EXPR
0bca51f0 2237 && code != MIN_EXPR
227858d1 2238 && code != MAX_EXPR
29c8f8c2 2239 && code != BIT_AND_EXPR
0f36b2da
RG
2240 && code != BIT_IOR_EXPR
2241 && code != BIT_XOR_EXPR)
0bca51f0 2242 {
b565d777 2243 set_value_range_to_varying (vr);
0bca51f0
DN
2244 return;
2245 }
2246
a9b332d4
RG
2247 /* If both ranges are UNDEFINED, so is the result. */
2248 if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
0bca51f0 2249 {
227858d1 2250 set_value_range_to_undefined (vr);
0bca51f0
DN
2251 return;
2252 }
a9b332d4
RG
2253 /* If one of the ranges is UNDEFINED drop it to VARYING for the following
2254 code. At some point we may want to special-case operations that
2255 have UNDEFINED result for all or some value-ranges of the not UNDEFINED
2256 operand. */
2257 else if (vr0.type == VR_UNDEFINED)
2258 set_value_range_to_varying (&vr0);
2259 else if (vr1.type == VR_UNDEFINED)
2260 set_value_range_to_varying (&vr1);
0bca51f0 2261
3c9c79e8
RG
2262 /* Now canonicalize anti-ranges to ranges when they are not symbolic
2263 and express ~[] op X as ([]' op X) U ([]'' op X). */
2264 if (vr0.type == VR_ANTI_RANGE
2265 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
2266 {
2267 extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
2268 if (vrtem1.type != VR_UNDEFINED)
2269 {
526ceb68 2270 value_range vrres = VR_INITIALIZER;
3c9c79e8
RG
2271 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2272 &vrtem1, vr1_);
2273 vrp_meet (vr, &vrres);
2274 }
2275 return;
2276 }
2277 /* Likewise for X op ~[]. */
2278 if (vr1.type == VR_ANTI_RANGE
2279 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
2280 {
2281 extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
2282 if (vrtem1.type != VR_UNDEFINED)
2283 {
526ceb68 2284 value_range vrres = VR_INITIALIZER;
3c9c79e8
RG
2285 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2286 vr0_, &vrtem1);
2287 vrp_meet (vr, &vrres);
2288 }
2289 return;
2290 }
2291
4e2d94a9
KH
2292 /* The type of the resulting value range defaults to VR0.TYPE. */
2293 type = vr0.type;
2294
227858d1 2295 /* Refuse to operate on VARYING ranges, ranges of different kinds
e76340be 2296 and symbolic ranges. As an exception, we allow BIT_{AND,IOR}
29c8f8c2 2297 because we may be able to derive a useful range even if one of
193a3681 2298 the operands is VR_VARYING or symbolic range. Similarly for
e76340be
EB
2299 divisions, MIN/MAX and PLUS/MINUS.
2300
2301 TODO, we may be able to derive anti-ranges in some cases. */
29c8f8c2 2302 if (code != BIT_AND_EXPR
aebf4828 2303 && code != BIT_IOR_EXPR
193a3681
JJ
2304 && code != TRUNC_DIV_EXPR
2305 && code != FLOOR_DIV_EXPR
2306 && code != CEIL_DIV_EXPR
2307 && code != EXACT_DIV_EXPR
2308 && code != ROUND_DIV_EXPR
bab4d587 2309 && code != TRUNC_MOD_EXPR
83ede847
RB
2310 && code != MIN_EXPR
2311 && code != MAX_EXPR
e76340be
EB
2312 && code != PLUS_EXPR
2313 && code != MINUS_EXPR
4c57980f 2314 && code != RSHIFT_EXPR
29c8f8c2
KH
2315 && (vr0.type == VR_VARYING
2316 || vr1.type == VR_VARYING
2317 || vr0.type != vr1.type
2318 || symbolic_range_p (&vr0)
2319 || symbolic_range_p (&vr1)))
0bca51f0 2320 {
b565d777 2321 set_value_range_to_varying (vr);
0bca51f0
DN
2322 return;
2323 }
2324
2325 /* Now evaluate the expression to determine the new range. */
4d320da4 2326 if (POINTER_TYPE_P (expr_type))
0bca51f0 2327 {
4d320da4 2328 if (code == MIN_EXPR || code == MAX_EXPR)
e57f2b41 2329 {
5be014d5
AP
2330 /* For MIN/MAX expressions with pointers, we only care about
2331 nullness; if both are nonnull, then the result is nonnull.
2332 If both are null, then the result is null. Otherwise they
2333 are varying. */
2334 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2d3cd5d5 2335 set_value_range_to_nonnull (vr, expr_type);
e57f2b41 2336 else if (range_is_null (&vr0) && range_is_null (&vr1))
2d3cd5d5 2337 set_value_range_to_null (vr, expr_type);
e57f2b41
KH
2338 else
2339 set_value_range_to_varying (vr);
2340 }
4d320da4 2341 else if (code == POINTER_PLUS_EXPR)
fca821b5
RG
2342 {
2343 /* For pointer types, we are really only interested in asserting
2344 whether the expression evaluates to non-NULL. */
2345 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2346 set_value_range_to_nonnull (vr, expr_type);
2347 else if (range_is_null (&vr0) && range_is_null (&vr1))
2348 set_value_range_to_null (vr, expr_type);
2349 else
2350 set_value_range_to_varying (vr);
2351 }
2352 else if (code == BIT_AND_EXPR)
2353 {
2354 /* For pointer types, we are really only interested in asserting
2355 whether the expression evaluates to non-NULL. */
2356 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2357 set_value_range_to_nonnull (vr, expr_type);
2358 else if (range_is_null (&vr0) || range_is_null (&vr1))
2359 set_value_range_to_null (vr, expr_type);
2360 else
2361 set_value_range_to_varying (vr);
2362 }
0bca51f0 2363 else
4d320da4 2364 set_value_range_to_varying (vr);
0bca51f0
DN
2365
2366 return;
2367 }
2368
2369 /* For integer ranges, apply the operation to each end of the
2370 range and see what we end up with. */
933a2c39 2371 if (code == PLUS_EXPR || code == MINUS_EXPR)
0bca51f0 2372 {
e76340be
EB
2373 const bool minus_p = (code == MINUS_EXPR);
2374 tree min_op0 = vr0.min;
2375 tree min_op1 = minus_p ? vr1.max : vr1.min;
2376 tree max_op0 = vr0.max;
2377 tree max_op1 = minus_p ? vr1.min : vr1.max;
2378 tree sym_min_op0 = NULL_TREE;
2379 tree sym_min_op1 = NULL_TREE;
2380 tree sym_max_op0 = NULL_TREE;
2381 tree sym_max_op1 = NULL_TREE;
2382 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
2383
2384 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
2385 single-symbolic ranges, try to compute the precise resulting range,
2386 but only if we know that this resulting range will also be constant
2387 or single-symbolic. */
2388 if (vr0.type == VR_RANGE && vr1.type == VR_RANGE
2389 && (TREE_CODE (min_op0) == INTEGER_CST
2390 || (sym_min_op0
2391 = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
2392 && (TREE_CODE (min_op1) == INTEGER_CST
2393 || (sym_min_op1
2394 = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
2395 && (!(sym_min_op0 && sym_min_op1)
2396 || (sym_min_op0 == sym_min_op1
2397 && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
2398 && (TREE_CODE (max_op0) == INTEGER_CST
2399 || (sym_max_op0
2400 = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
2401 && (TREE_CODE (max_op1) == INTEGER_CST
2402 || (sym_max_op1
2403 = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
2404 && (!(sym_max_op0 && sym_max_op1)
2405 || (sym_max_op0 == sym_max_op1
2406 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
807e902e 2407 {
e76340be
EB
2408 const signop sgn = TYPE_SIGN (expr_type);
2409 const unsigned int prec = TYPE_PRECISION (expr_type);
2410 wide_int type_min, type_max, wmin, wmax;
933a2c39
MG
2411 int min_ovf = 0;
2412 int max_ovf = 0;
a75f5017 2413
e76340be
EB
2414 /* Get the lower and upper bounds of the type. */
2415 if (TYPE_OVERFLOW_WRAPS (expr_type))
2416 {
2417 type_min = wi::min_value (prec, sgn);
2418 type_max = wi::max_value (prec, sgn);
2419 }
2420 else
933a2c39 2421 {
e76340be
EB
2422 type_min = vrp_val_min (expr_type);
2423 type_max = vrp_val_max (expr_type);
933a2c39 2424 }
e76340be
EB
2425
2426 /* Combine the lower bounds, if any. */
2427 if (min_op0 && min_op1)
933a2c39 2428 {
e76340be
EB
2429 if (minus_p)
2430 {
2431 wmin = wi::sub (min_op0, min_op1);
27bcd47c 2432
e76340be
EB
2433 /* Check for overflow. */
2434 if (wi::cmp (0, min_op1, sgn)
2435 != wi::cmp (wmin, min_op0, sgn))
2436 min_ovf = wi::cmp (min_op0, min_op1, sgn);
2437 }
2438 else
2439 {
2440 wmin = wi::add (min_op0, min_op1);
2441
2442 /* Check for overflow. */
2443 if (wi::cmp (min_op1, 0, sgn)
2444 != wi::cmp (wmin, min_op0, sgn))
2445 min_ovf = wi::cmp (min_op0, wmin, sgn);
2446 }
933a2c39 2447 }
e76340be
EB
2448 else if (min_op0)
2449 wmin = min_op0;
2450 else if (min_op1)
2451 wmin = minus_p ? wi::neg (min_op1) : min_op1;
2452 else
2453 wmin = wi::shwi (0, prec);
933a2c39 2454
e76340be
EB
2455 /* Combine the upper bounds, if any. */
2456 if (max_op0 && max_op1)
933a2c39 2457 {
e76340be
EB
2458 if (minus_p)
2459 {
2460 wmax = wi::sub (max_op0, max_op1);
2461
2462 /* Check for overflow. */
2463 if (wi::cmp (0, max_op1, sgn)
2464 != wi::cmp (wmax, max_op0, sgn))
2465 max_ovf = wi::cmp (max_op0, max_op1, sgn);
2466 }
2467 else
2468 {
2469 wmax = wi::add (max_op0, max_op1);
2470
2471 if (wi::cmp (max_op1, 0, sgn)
2472 != wi::cmp (wmax, max_op0, sgn))
2473 max_ovf = wi::cmp (max_op0, wmax, sgn);
2474 }
933a2c39 2475 }
e76340be
EB
2476 else if (max_op0)
2477 wmax = max_op0;
2478 else if (max_op1)
2479 wmax = minus_p ? wi::neg (max_op1) : max_op1;
2480 else
2481 wmax = wi::shwi (0, prec);
933a2c39
MG
2482
2483 /* Check for type overflow. */
2484 if (min_ovf == 0)
2485 {
807e902e 2486 if (wi::cmp (wmin, type_min, sgn) == -1)
933a2c39 2487 min_ovf = -1;
807e902e 2488 else if (wi::cmp (wmin, type_max, sgn) == 1)
933a2c39
MG
2489 min_ovf = 1;
2490 }
2491 if (max_ovf == 0)
2492 {
807e902e 2493 if (wi::cmp (wmax, type_min, sgn) == -1)
933a2c39 2494 max_ovf = -1;
807e902e 2495 else if (wi::cmp (wmax, type_max, sgn) == 1)
933a2c39
MG
2496 max_ovf = 1;
2497 }
a75f5017 2498
e76340be
EB
2499 /* If we have overflow for the constant part and the resulting
2500 range will be symbolic, drop to VR_VARYING. */
2501 if ((min_ovf && sym_min_op0 != sym_min_op1)
2502 || (max_ovf && sym_max_op0 != sym_max_op1))
2503 {
2504 set_value_range_to_varying (vr);
2505 return;
2506 }
2507
a75f5017
RG
2508 if (TYPE_OVERFLOW_WRAPS (expr_type))
2509 {
2510 /* If overflow wraps, truncate the values and adjust the
2511 range kind and bounds appropriately. */
807e902e
KZ
2512 wide_int tmin = wide_int::from (wmin, prec, sgn);
2513 wide_int tmax = wide_int::from (wmax, prec, sgn);
933a2c39 2514 if (min_ovf == max_ovf)
a75f5017
RG
2515 {
2516 /* No overflow or both overflow or underflow. The
2517 range kind stays VR_RANGE. */
807e902e
KZ
2518 min = wide_int_to_tree (expr_type, tmin);
2519 max = wide_int_to_tree (expr_type, tmax);
a75f5017 2520 }
4177437e
MP
2521 else if ((min_ovf == -1 && max_ovf == 0)
2522 || (max_ovf == 1 && min_ovf == 0))
a75f5017
RG
2523 {
2524 /* Min underflow or max overflow. The range kind
2525 changes to VR_ANTI_RANGE. */
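/* Editorial example (not part of the original source): for an 8-bit
   unsigned type, [250, 255] + [0, 10] overflows only in the upper bound
   (min_ovf == 0, max_ovf == 1); the reachable results are
   [250, 255] U [0, 9], which is exactly the anti-range ~[10, 249]
   built below.  */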
d9c6ca85 2526 bool covers = false;
807e902e 2527 wide_int tem = tmin;
a75f5017 2528 type = VR_ANTI_RANGE;
807e902e
KZ
2529 tmin = tmax + 1;
2530 if (wi::cmp (tmin, tmax, sgn) < 0)
d9c6ca85 2531 covers = true;
807e902e
KZ
2532 tmax = tem - 1;
2533 if (wi::cmp (tmax, tem, sgn) > 0)
d9c6ca85 2534 covers = true;
a75f5017
RG
2535 /* If the anti-range would cover nothing, drop to varying.
2536 Likewise if the anti-range bounds are outside of the
2537 type's values. */
807e902e 2538 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
a75f5017
RG
2539 {
2540 set_value_range_to_varying (vr);
2541 return;
2542 }
807e902e
KZ
2543 min = wide_int_to_tree (expr_type, tmin);
2544 max = wide_int_to_tree (expr_type, tmax);
a75f5017 2545 }
4177437e
MP
2546 else
2547 {
2548 /* Other underflow and/or overflow, drop to VR_VARYING. */
2549 set_value_range_to_varying (vr);
2550 return;
2551 }
a75f5017
RG
2552 }
2553 else
2554 {
a75f5017
RG
2555 /* If overflow does not wrap, saturate to the type's min/max
2556 value. */
933a2c39 2557 if (min_ovf == -1)
a75f5017
RG
2558 {
2559 if (needs_overflow_infinity (expr_type)
2560 && supports_overflow_infinity (expr_type))
2561 min = negative_overflow_infinity (expr_type);
2562 else
807e902e 2563 min = wide_int_to_tree (expr_type, type_min);
a75f5017 2564 }
933a2c39 2565 else if (min_ovf == 1)
a75f5017
RG
2566 {
2567 if (needs_overflow_infinity (expr_type)
2568 && supports_overflow_infinity (expr_type))
2569 min = positive_overflow_infinity (expr_type);
2570 else
807e902e 2571 min = wide_int_to_tree (expr_type, type_max);
a75f5017
RG
2572 }
2573 else
807e902e 2574 min = wide_int_to_tree (expr_type, wmin);
a75f5017 2575
933a2c39 2576 if (max_ovf == -1)
a75f5017
RG
2577 {
2578 if (needs_overflow_infinity (expr_type)
2579 && supports_overflow_infinity (expr_type))
2580 max = negative_overflow_infinity (expr_type);
2581 else
807e902e 2582 max = wide_int_to_tree (expr_type, type_min);
a75f5017 2583 }
933a2c39 2584 else if (max_ovf == 1)
a75f5017
RG
2585 {
2586 if (needs_overflow_infinity (expr_type)
2587 && supports_overflow_infinity (expr_type))
2588 max = positive_overflow_infinity (expr_type);
2589 else
807e902e 2590 max = wide_int_to_tree (expr_type, type_max);
a75f5017
RG
2591 }
2592 else
807e902e 2593 max = wide_int_to_tree (expr_type, wmax);
a75f5017 2594 }
e76340be 2595
a75f5017
RG
2596 if (needs_overflow_infinity (expr_type)
2597 && supports_overflow_infinity (expr_type))
2598 {
e76340be
EB
2599 if ((min_op0 && is_negative_overflow_infinity (min_op0))
2600 || (min_op1
2601 && (minus_p
2602 ? is_positive_overflow_infinity (min_op1)
2603 : is_negative_overflow_infinity (min_op1))))
a75f5017 2604 min = negative_overflow_infinity (expr_type);
e76340be
EB
2605 if ((max_op0 && is_positive_overflow_infinity (max_op0))
2606 || (max_op1
2607 && (minus_p
2608 ? is_negative_overflow_infinity (max_op1)
2609 : is_positive_overflow_infinity (max_op1))))
a75f5017
RG
2610 max = positive_overflow_infinity (expr_type);
2611 }
e76340be
EB
2612
2613 /* If the result lower bound is constant, we're done;
2614 otherwise, build the symbolic lower bound. */
2615 if (sym_min_op0 == sym_min_op1)
2616 ;
2617 else if (sym_min_op0)
2618 min = build_symbolic_expr (expr_type, sym_min_op0,
2619 neg_min_op0, min);
2620 else if (sym_min_op1)
2621 min = build_symbolic_expr (expr_type, sym_min_op1,
2622 neg_min_op1 ^ minus_p, min);
2623
2624 /* Likewise for the upper bound. */
2625 if (sym_max_op0 == sym_max_op1)
2626 ;
2627 else if (sym_max_op0)
2628 max = build_symbolic_expr (expr_type, sym_max_op0,
2629 neg_max_op0, max);
2630 else if (sym_max_op1)
2631 max = build_symbolic_expr (expr_type, sym_max_op1,
2632 neg_max_op1 ^ minus_p, max);
a75f5017
RG
2633 }
2634 else
567fb660 2635 {
a75f5017
RG
2636 /* For other cases, for example if we have a PLUS_EXPR with two
2637 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
2638 to compute a precise range for such a case.
2639 ??? General even mixed range kind operations can be expressed
2640 by for example transforming ~[3, 5] + [1, 2] to range-only
2641 operations and a union primitive:
2642 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
2643 [-INF+1, 4] U [6, +INF(OVF)]
2644 though usually the union is not exactly representable with
2645 a single range or anti-range as the above is
2646 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
2647 but one could use a scheme similar to equivalences for this. */
a1bc7628
RG
2648 set_value_range_to_varying (vr);
2649 return;
567fb660 2650 }
0bca51f0 2651 }
a1bc7628
RG
2652 else if (code == MIN_EXPR
2653 || code == MAX_EXPR)
2654 {
83ede847
RB
2655 if (vr0.type == VR_RANGE
2656 && !symbolic_range_p (&vr0))
2657 {
2658 type = VR_RANGE;
2659 if (vr1.type == VR_RANGE
2660 && !symbolic_range_p (&vr1))
2661 {
2662 /* For operations that make the resulting range directly
2663 proportional to the original ranges, apply the operation to
2664 the same end of each range. */
2665 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2666 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2667 }
2668 else if (code == MIN_EXPR)
2669 {
2670 min = vrp_val_min (expr_type);
2671 max = vr0.max;
2672 }
2673 else if (code == MAX_EXPR)
2674 {
2675 min = vr0.min;
2676 max = vrp_val_max (expr_type);
2677 }
2678 }
2679 else if (vr1.type == VR_RANGE
2680 && !symbolic_range_p (&vr1))
a1bc7628 2681 {
83ede847
RB
2682 type = VR_RANGE;
2683 if (code == MIN_EXPR)
2684 {
2685 min = vrp_val_min (expr_type);
2686 max = vr1.max;
2687 }
2688 else if (code == MAX_EXPR)
2689 {
2690 min = vr1.min;
2691 max = vrp_val_max (expr_type);
2692 }
a1bc7628
RG
2693 }
2694 else
2695 {
83ede847
RB
2696 set_value_range_to_varying (vr);
2697 return;
a1bc7628
RG
2698 }
2699 }
2700 else if (code == MULT_EXPR)
0bca51f0 2701 {
4e7c4b73 2702 /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
807e902e
KZ
2703 drop to varying. This test requires 2*prec bits if both
2704 operands are signed and 2*prec + 2 bits if either is not. */
2705
2706 signop sign = TYPE_SIGN (expr_type);
2707 unsigned int prec = TYPE_PRECISION (expr_type);
2708
4e7c4b73
MG
2709 if (range_int_cst_p (&vr0)
2710 && range_int_cst_p (&vr1)
2711 && TYPE_OVERFLOW_WRAPS (expr_type))
2712 {
807e902e
KZ
2713 typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int;
2714 typedef generic_wide_int
2715 <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst;
2716 vrp_int sizem1 = wi::mask <vrp_int> (prec, false);
2717 vrp_int size = sizem1 + 1;
2718
2719 /* Extend the values using the sign of the result to PREC2.
2720 From here on out, everything is just signed math no matter
2721 what the input types were. */
2722 vrp_int min0 = vrp_int_cst (vr0.min);
2723 vrp_int max0 = vrp_int_cst (vr0.max);
2724 vrp_int min1 = vrp_int_cst (vr1.min);
2725 vrp_int max1 = vrp_int_cst (vr1.max);
4e7c4b73 2726 /* Canonicalize the intervals. */
807e902e 2727 if (sign == UNSIGNED)
4e7c4b73 2728 {
807e902e 2729 if (wi::ltu_p (size, min0 + max0))
4e7c4b73 2730 {
807e902e 2731 min0 -= size;
27bcd47c 2732 max0 -= size;
4e7c4b73
MG
2733 }
2734
807e902e 2735 if (wi::ltu_p (size, min1 + max1))
4e7c4b73 2736 {
807e902e 2737 min1 -= size;
27bcd47c 2738 max1 -= size;
4e7c4b73
MG
2739 }
2740 }
4e7c4b73 2741
807e902e
KZ
2742 vrp_int prod0 = min0 * min1;
2743 vrp_int prod1 = min0 * max1;
2744 vrp_int prod2 = max0 * min1;
2745 vrp_int prod3 = max0 * max1;
2746
2747 /* Sort the 4 products so that min is in prod0 and max is in
2748 prod3. */
2749 /* min0min1 > max0max1 */
032c80e9 2750 if (prod0 > prod3)
6b4db501 2751 std::swap (prod0, prod3);
807e902e
KZ
2752
2753 /* min0max1 > max0min1 */
032c80e9 2754 if (prod1 > prod2)
6b4db501 2755 std::swap (prod1, prod2);
4e7c4b73 2756
032c80e9 2757 if (prod0 > prod1)
6b4db501 2758 std::swap (prod0, prod1);
807e902e 2759
032c80e9 2760 if (prod2 > prod3)
6b4db501 2761 std::swap (prod2, prod3);
807e902e
KZ
2762
2763 /* diff = max - min. */
2764 prod2 = prod3 - prod0;
2765 if (wi::geu_p (prod2, sizem1))
4e7c4b73
MG
2766 {
2767 /* the range covers all values. */
2768 set_value_range_to_varying (vr);
2769 return;
2770 }
2771
2772 /* The following should handle the wrapping and selecting
2773 VR_ANTI_RANGE for us. */
807e902e
KZ
2774 min = wide_int_to_tree (expr_type, prod0);
2775 max = wide_int_to_tree (expr_type, prod3);
4e7c4b73
MG
2776 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
2777 return;
2778 }
2779
567fb660
KH
2780 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2781 drop to VR_VARYING. It would take more effort to compute a
2782 precise range for such a case. For example, if we have
2783 op0 == 65536 and op1 == 65536 with their ranges both being
2784 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2785 we cannot claim that the product is in ~[0,0]. Note that we
2786 are guaranteed to have vr0.type == vr1.type at this
2787 point. */
a1bc7628 2788 if (vr0.type == VR_ANTI_RANGE
4d320da4 2789 && !TYPE_OVERFLOW_UNDEFINED (expr_type))
567fb660
KH
2790 {
2791 set_value_range_to_varying (vr);
2792 return;
2793 }
2794
a1bc7628
RG
2795 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2796 return;
2797 }
a2872983
RG
2798 else if (code == RSHIFT_EXPR
2799 || code == LSHIFT_EXPR)
a1bc7628 2800 {
af33044f
RH
2801 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2802 then drop to VR_VARYING. Outside of this range we get undefined
7fa7289d 2803 behavior from the shift operation. We cannot even trust
af33044f
RH
2804 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2805 shifts, and the operation at the tree level may be widened. */
a2872983
RG
2806 if (range_int_cst_p (&vr1)
2807 && compare_tree_int (vr1.min, 0) >= 0
2808 && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
13338552 2809 {
a2872983
RG
2810 if (code == RSHIFT_EXPR)
2811 {
4c57980f
JJ
2812 /* Even if vr0 is VARYING or otherwise not usable, we can derive
2813 useful ranges just from the shift count. E.g.
2814 x >> 63 for signed 64-bit x is always [-1, 0]. */
2815 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2816 {
2817 vr0.type = type = VR_RANGE;
2818 vr0.min = vrp_val_min (expr_type);
2819 vr0.max = vrp_val_max (expr_type);
2820 }
a2872983
RG
2821 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2822 return;
2823 }
2824 /* We can map lshifts by constants to MULT_EXPR handling. */
2825 else if (code == LSHIFT_EXPR
2826 && range_int_cst_singleton_p (&vr1))
2827 {
2828 bool saved_flag_wrapv;
526ceb68 2829 value_range vr1p = VR_INITIALIZER;
a2872983 2830 vr1p.type = VR_RANGE;
807e902e
KZ
2831 vr1p.min = (wide_int_to_tree
2832 (expr_type,
2833 wi::set_bit_in_zero (tree_to_shwi (vr1.min),
2834 TYPE_PRECISION (expr_type))));
a2872983
RG
2835 vr1p.max = vr1p.min;
2836 /* We have to use a wrapping multiply though as signed overflow
2837 on lshifts is implementation defined in C89. */
2838 saved_flag_wrapv = flag_wrapv;
2839 flag_wrapv = 1;
2840 extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
2841 &vr0, &vr1p);
2842 flag_wrapv = saved_flag_wrapv;
2843 return;
2844 }
25722436
TV
2845 else if (code == LSHIFT_EXPR
2846 && range_int_cst_p (&vr0))
2847 {
b25d9e22
TV
2848 int prec = TYPE_PRECISION (expr_type);
2849 int overflow_pos = prec;
25722436 2850 int bound_shift;
807e902e 2851 wide_int low_bound, high_bound;
b25d9e22
TV
2852 bool uns = TYPE_UNSIGNED (expr_type);
2853 bool in_bounds = false;
25722436 2854
b25d9e22 2855 if (!uns)
25722436
TV
2856 overflow_pos -= 1;
2857
807e902e
KZ
2858 bound_shift = overflow_pos - tree_to_shwi (vr1.max);
2859 /* If bound_shift == HOST_BITS_PER_WIDE_INT, the lshift can
b25d9e22
TV
2860 overflow. However, for that to happen, vr1.max needs to be
2861 zero, which means vr1 is a singleton range of zero, which
2862 means it should be handled by the previous LSHIFT_EXPR
2863 if-clause. */
807e902e
KZ
2864 wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
2865 wide_int complement = ~(bound - 1);
b25d9e22
TV
2866
2867 if (uns)
2868 {
807e902e
KZ
2869 low_bound = bound;
2870 high_bound = complement;
2871 if (wi::ltu_p (vr0.max, low_bound))
b25d9e22
TV
2872 {
2873 /* [5, 6] << [1, 2] == [10, 24]. */
2874 /* We're shifting out only zeroes, the value increases
2875 monotonically. */
2876 in_bounds = true;
2877 }
807e902e 2878 else if (wi::ltu_p (high_bound, vr0.min))
b25d9e22
TV
2879 {
2880 /* [0xffffff00, 0xffffffff] << [1, 2]
2881 == [0xfffffc00, 0xfffffffe]. */
2882 /* We're shifting out only ones, the value decreases
2883 monotonically. */
2884 in_bounds = true;
2885 }
2886 }
2887 else
2888 {
2889 /* [-1, 1] << [1, 2] == [-4, 4]. */
807e902e 2890 low_bound = complement;
b25d9e22 2891 high_bound = bound;
807e902e
KZ
2892 if (wi::lts_p (vr0.max, high_bound)
2893 && wi::lts_p (low_bound, vr0.min))
b25d9e22
TV
2894 {
2895 /* For non-negative numbers, we're shifting out only
2896 zeroes, the value increases monotonically.
2897 For negative numbers, we're shifting out only ones, the
2898 value decreases monotonically. */
2899 in_bounds = true;
2900 }
2901 }
2902
2903 if (in_bounds)
25722436 2904 {
25722436
TV
2905 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2906 return;
2907 }
2908 }
8c1f1d42 2909 }
8c1f1d42
RG
2910 set_value_range_to_varying (vr);
2911 return;
2912 }
a1bc7628
RG
2913 else if (code == TRUNC_DIV_EXPR
2914 || code == FLOOR_DIV_EXPR
2915 || code == CEIL_DIV_EXPR
2916 || code == EXACT_DIV_EXPR
2917 || code == ROUND_DIV_EXPR)
2918 {
2919 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
193a3681
JJ
2920 {
2921 /* For division, if op1 has VR_RANGE but op0 does not, something
2922 can be deduced just from that range. Say [min, max] / [4, max]
2923 gives [min / 4, max / 4] range. */
2924 if (vr1.type == VR_RANGE
2925 && !symbolic_range_p (&vr1)
e8f808b3 2926 && range_includes_zero_p (vr1.min, vr1.max) == 0)
193a3681
JJ
2927 {
2928 vr0.type = type = VR_RANGE;
4d320da4
RG
2929 vr0.min = vrp_val_min (expr_type);
2930 vr0.max = vrp_val_max (expr_type);
193a3681
JJ
2931 }
2932 else
2933 {
2934 set_value_range_to_varying (vr);
2935 return;
2936 }
2937 }
2938
0e1b8b10
ILT
2939 /* For divisions, if flag_non_call_exceptions is true, we must
2940 not eliminate a division by zero. */
a1bc7628 2941 if (cfun->can_throw_non_call_exceptions
0e1b8b10 2942 && (vr1.type != VR_RANGE
e8f808b3 2943 || range_includes_zero_p (vr1.min, vr1.max) != 0))
0e1b8b10
ILT
2944 {
2945 set_value_range_to_varying (vr);
2946 return;
2947 }
2948
193a3681
JJ
2949 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2950 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2951 include 0. */
a1bc7628 2952 if (vr0.type == VR_RANGE
193a3681 2953 && (vr1.type != VR_RANGE
e8f808b3 2954 || range_includes_zero_p (vr1.min, vr1.max) != 0))
193a3681
JJ
2955 {
2956 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2957 int cmp;
2958
193a3681
JJ
2959 min = NULL_TREE;
2960 max = NULL_TREE;
4d320da4
RG
2961 if (TYPE_UNSIGNED (expr_type)
2962 || value_range_nonnegative_p (&vr1))
193a3681
JJ
2963 {
2964 /* For unsigned division or when divisor is known
2965 to be non-negative, the range has to cover
2966 all numbers from 0 to max for positive max
2967 and all numbers from min to 0 for negative min. */
2968 cmp = compare_values (vr0.max, zero);
2969 if (cmp == -1)
c44b2a4f
KV
2970 {
2971 /* When vr0.max < 0, vr1.min != 0 and value
2972 ranges for dividend and divisor are available. */
2973 if (vr1.type == VR_RANGE
2974 && !symbolic_range_p (&vr0)
2975 && !symbolic_range_p (&vr1)
41bfbbb6 2976 && compare_values (vr1.min, zero) != 0)
c44b2a4f
KV
2977 max = int_const_binop (code, vr0.max, vr1.min);
2978 else
2979 max = zero;
2980 }
193a3681
JJ
2981 else if (cmp == 0 || cmp == 1)
2982 max = vr0.max;
2983 else
2984 type = VR_VARYING;
2985 cmp = compare_values (vr0.min, zero);
2986 if (cmp == 1)
c44b2a4f
KV
2987 {
2988 /* For unsigned division when value ranges for dividend
2989 and divisor are available. */
2990 if (vr1.type == VR_RANGE
2991 && !symbolic_range_p (&vr0)
90b72e84
RB
2992 && !symbolic_range_p (&vr1)
2993 && compare_values (vr1.max, zero) != 0)
c44b2a4f
KV
2994 min = int_const_binop (code, vr0.min, vr1.max);
2995 else
2996 min = zero;
2997 }
193a3681
JJ
2998 else if (cmp == 0 || cmp == -1)
2999 min = vr0.min;
3000 else
3001 type = VR_VARYING;
3002 }
3003 else
3004 {
3005 /* Otherwise the range is -max .. max or min .. -min
3006 depending on which bound is bigger in absolute value,
3007 as the division can change the sign. */
3008 abs_extent_range (vr, vr0.min, vr0.max);
3009 return;
3010 }
3011 if (type == VR_VARYING)
3012 {
3013 set_value_range_to_varying (vr);
3014 return;
3015 }
3016 }
78275c8b 3017 else if (!symbolic_range_p (&vr0) && !symbolic_range_p (&vr1))
12df8a7e 3018 {
a1bc7628
RG
3019 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
3020 return;
227858d1
DN
3021 }
3022 }
622d360e 3023 else if (code == TRUNC_MOD_EXPR)
bab4d587 3024 {
441898b2 3025 if (range_is_null (&vr1))
bab4d587 3026 {
441898b2 3027 set_value_range_to_undefined (vr);
bab4d587
RG
3028 return;
3029 }
441898b2
MG
3030 /* ABS (A % B) < ABS (B) and either
3031 0 <= A % B <= A or A <= A % B <= 0. */
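/* Editorial example (not part of the original source): for A in [-10, 30]
   and B in [4, 8] on a signed type, the bounds computed below give
   A % B a range of [-7, 7].  */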
bab4d587 3032 type = VR_RANGE;
441898b2
MG
3033 signop sgn = TYPE_SIGN (expr_type);
3034 unsigned int prec = TYPE_PRECISION (expr_type);
3035 wide_int wmin, wmax, tmp;
3036 wide_int zero = wi::zero (prec);
3037 wide_int one = wi::one (prec);
3038 if (vr1.type == VR_RANGE && !symbolic_range_p (&vr1))
3039 {
3040 wmax = wi::sub (vr1.max, one);
3041 if (sgn == SIGNED)
3042 {
3043 tmp = wi::sub (wi::minus_one (prec), vr1.min);
3044 wmax = wi::smax (wmax, tmp);
3045 }
3046 }
3047 else
3048 {
3049 wmax = wi::max_value (prec, sgn);
3050 /* X % INT_MIN may be INT_MAX. */
3051 if (sgn == UNSIGNED)
3052 wmax = wmax - one;
3053 }
3054
3055 if (sgn == UNSIGNED)
3056 wmin = zero;
bab4d587 3057 else
441898b2
MG
3058 {
3059 wmin = -wmax;
3060 if (vr0.type == VR_RANGE && TREE_CODE (vr0.min) == INTEGER_CST)
3061 {
3062 tmp = vr0.min;
3063 if (wi::gts_p (tmp, zero))
3064 tmp = zero;
3065 wmin = wi::smax (wmin, tmp);
3066 }
3067 }
3068
3069 if (vr0.type == VR_RANGE && TREE_CODE (vr0.max) == INTEGER_CST)
3070 {
3071 tmp = vr0.max;
3072 if (sgn == SIGNED && wi::neg_p (tmp))
3073 tmp = zero;
3074 wmax = wi::min (wmax, tmp, sgn);
3075 }
3076
3077 min = wide_int_to_tree (expr_type, wmin);
3078 max = wide_int_to_tree (expr_type, wmax);
bab4d587 3079 }
0f36b2da 3080 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
29c8f8c2 3081 {
85e693aa 3082 bool int_cst_range0, int_cst_range1;
807e902e
KZ
3083 wide_int may_be_nonzero0, may_be_nonzero1;
3084 wide_int must_be_nonzero0, must_be_nonzero1;
330af32c 3085
807e902e
KZ
3086 int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0,
3087 &may_be_nonzero0,
85e693aa 3088 &must_be_nonzero0);
807e902e
KZ
3089 int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1,
3090 &may_be_nonzero1,
85e693aa 3091 &must_be_nonzero1);
330af32c 3092
85e693aa 3093 type = VR_RANGE;
8b201bc5 3094 if (code == BIT_AND_EXPR)
ac285648 3095 {
807e902e
KZ
3096 min = wide_int_to_tree (expr_type,
3097 must_be_nonzero0 & must_be_nonzero1);
3098 wide_int wmax = may_be_nonzero0 & may_be_nonzero1;
4001900f
RG
3099 /* If both input ranges contain only negative values we can
3100 truncate the result range maximum to the minimum of the
3101 input range maxima. */
3102 if (int_cst_range0 && int_cst_range1
3103 && tree_int_cst_sgn (vr0.max) < 0
3104 && tree_int_cst_sgn (vr1.max) < 0)
ac285648 3105 {
807e902e
KZ
3106 wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
3107 wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
ac285648 3108 }
4001900f
RG
3109 /* If either input range contains only non-negative values
3110 we can truncate the result range maximum to the respective
3111 maximum of the input range. */
3112 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
807e902e 3113 wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
85e693aa 3114 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
807e902e
KZ
3115 wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
3116 max = wide_int_to_tree (expr_type, wmax);
883b6d4e
KV
3117 cmp = compare_values (min, max);
3118 /* PR68217: In the case of signed & sign-bit-CST, the result should
3119 be [-INF, 0] instead of [-INF, INF]. */
3120 if (cmp == -2 || cmp == 1)
3121 {
3122 wide_int sign_bit
3123 = wi::set_bit_in_zero (TYPE_PRECISION (expr_type) - 1,
3124 TYPE_PRECISION (expr_type));
3125 if (!TYPE_UNSIGNED (expr_type)
3126 && ((value_range_constant_singleton (&vr0)
3127 && !wi::cmps (vr0.min, sign_bit))
3128 || (value_range_constant_singleton (&vr1)
3129 && !wi::cmps (vr1.min, sign_bit))))
3130 {
3131 min = TYPE_MIN_VALUE (expr_type);
3132 max = build_int_cst (expr_type, 0);
3133 }
3134 }
29c8f8c2 3135 }
8b201bc5 3136 else if (code == BIT_IOR_EXPR)
30821654 3137 {
807e902e
KZ
3138 max = wide_int_to_tree (expr_type,
3139 may_be_nonzero0 | may_be_nonzero1);
3140 wide_int wmin = must_be_nonzero0 | must_be_nonzero1;
4001900f
RG
3141 /* If the input ranges contain only positive values we can
3142 truncate the minimum of the result range to the maximum
3143 of the input range minima. */
3144 if (int_cst_range0 && int_cst_range1
3145 && tree_int_cst_sgn (vr0.min) >= 0
3146 && tree_int_cst_sgn (vr1.min) >= 0)
8b201bc5 3147 {
807e902e
KZ
3148 wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
3149 wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
8b201bc5 3150 }
4001900f
RG
3151 /* If either input range contains only negative values
3152 we can truncate the minimum of the result range to the
3153 respective minimum range. */
3154 if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
807e902e 3155 wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
4001900f 3156 if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
807e902e
KZ
3157 wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
3158 min = wide_int_to_tree (expr_type, wmin);
8b201bc5 3159 }
0f36b2da
RG
3160 else if (code == BIT_XOR_EXPR)
3161 {
807e902e
KZ
3162 wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1)
3163 | ~(may_be_nonzero0 | may_be_nonzero1));
3164 wide_int result_one_bits
3165 = (must_be_nonzero0.and_not (may_be_nonzero1)
3166 | must_be_nonzero1.and_not (may_be_nonzero0));
3167 max = wide_int_to_tree (expr_type, ~result_zero_bits);
3168 min = wide_int_to_tree (expr_type, result_one_bits);
4001900f
RG
3169 /* If the range has all positive or all negative values the
3170 result is better than VARYING. */
3171 if (tree_int_cst_sgn (min) < 0
3172 || tree_int_cst_sgn (max) >= 0)
3173 ;
0f36b2da 3174 else
0f36b2da
RG
3175 max = min = NULL_TREE;
3176 }
30821654 3177 }
227858d1
DN
3178 else
3179 gcc_unreachable ();
fda05890 3180
9983270b 3181 /* If either MIN or MAX overflowed, then set the resulting range to
e76340be 3182 VARYING. But we do accept an overflow infinity representation. */
12df8a7e 3183 if (min == NULL_TREE
e76340be 3184 || (TREE_OVERFLOW_P (min) && !is_overflow_infinity (min))
12df8a7e 3185 || max == NULL_TREE
e76340be 3186 || (TREE_OVERFLOW_P (max) && !is_overflow_infinity (max)))
12df8a7e
ILT
3187 {
3188 set_value_range_to_varying (vr);
3189 return;
3190 }
3191
fa633851
ILT
3192 /* We punt if:
3193 1) [-INF, +INF]
3194 2) [-INF, +-INF(OVF)]
3195 3) [+-INF(OVF), +INF]
3196 4) [+-INF(OVF), +-INF(OVF)]
3197 We learn nothing when we have INF and INF(OVF) on both sides.
3198 Note that we do accept [-INF, -INF] and [+INF, +INF] without
3199 overflow. */
e1f28918
ILT
3200 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
3201 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
227858d1 3202 {
9983270b
DN
3203 set_value_range_to_varying (vr);
3204 return;
fda05890
KH
3205 }
3206
227858d1
DN
3207 cmp = compare_values (min, max);
3208 if (cmp == -2 || cmp == 1)
3209 {
3210 /* If the new range has its limits swapped around (MIN > MAX),
3211 then the operation caused one of them to wrap around, mark
3212 the new range VARYING. */
3213 set_value_range_to_varying (vr);
3214 }
3215 else
4e2d94a9 3216 set_value_range (vr, type, min, max, NULL);
fda05890
KH
3217}
3218
4d320da4
RG
3219/* Extract range information from a binary expression OP0 CODE OP1 based on
3220 the ranges of each of its operands with resulting type EXPR_TYPE.
3221 The resulting range is stored in *VR. */
3222
3223static void
526ceb68 3224extract_range_from_binary_expr (value_range *vr,
4d320da4
RG
3225 enum tree_code code,
3226 tree expr_type, tree op0, tree op1)
3227{
526ceb68
TS
3228 value_range vr0 = VR_INITIALIZER;
3229 value_range vr1 = VR_INITIALIZER;
4d320da4
RG
3230
3231 /* Get value ranges for each operand. For constant operands, create
3232 a new value range with the operand to simplify processing. */
3233 if (TREE_CODE (op0) == SSA_NAME)
3234 vr0 = *(get_value_range (op0));
3235 else if (is_gimple_min_invariant (op0))
3236 set_value_range_to_value (&vr0, op0, NULL);
3237 else
3238 set_value_range_to_varying (&vr0);
3239
3240 if (TREE_CODE (op1) == SSA_NAME)
3241 vr1 = *(get_value_range (op1));
3242 else if (is_gimple_min_invariant (op1))
3243 set_value_range_to_value (&vr1, op1, NULL);
3244 else
3245 set_value_range_to_varying (&vr1);
3246
3247 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
e76340be
EB
3248
3249 /* Try harder for PLUS and MINUS if the range of one operand is symbolic
3250 and based on the other operand, for example if it was deduced from a
3251 symbolic comparison. When a bound of the range of the first operand
3252 is invariant, we set the corresponding bound of the new range to INF
3253 in order to avoid recursing on the range of the second operand. */
3254 if (vr->type == VR_VARYING
3255 && (code == PLUS_EXPR || code == MINUS_EXPR)
3256 && TREE_CODE (op1) == SSA_NAME
3257 && vr0.type == VR_RANGE
3258 && symbolic_range_based_on_p (&vr0, op1))
3259 {
3260 const bool minus_p = (code == MINUS_EXPR);
526ceb68 3261 value_range n_vr1 = VR_INITIALIZER;
e76340be
EB
3262
3263 /* Try with VR0 and [-INF, OP1]. */
3264 if (is_gimple_min_invariant (minus_p ? vr0.max : vr0.min))
3265 set_value_range (&n_vr1, VR_RANGE, vrp_val_min (expr_type), op1, NULL);
3266
3267 /* Try with VR0 and [OP1, +INF]. */
3268 else if (is_gimple_min_invariant (minus_p ? vr0.min : vr0.max))
3269 set_value_range (&n_vr1, VR_RANGE, op1, vrp_val_max (expr_type), NULL);
3270
3271 /* Try with VR0 and [OP1, OP1]. */
3272 else
3273 set_value_range (&n_vr1, VR_RANGE, op1, op1, NULL);
3274
3275 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &n_vr1);
3276 }
3277
3278 if (vr->type == VR_VARYING
3279 && (code == PLUS_EXPR || code == MINUS_EXPR)
3280 && TREE_CODE (op0) == SSA_NAME
3281 && vr1.type == VR_RANGE
3282 && symbolic_range_based_on_p (&vr1, op0))
3283 {
3284 const bool minus_p = (code == MINUS_EXPR);
526ceb68 3285 value_range n_vr0 = VR_INITIALIZER;
e76340be
EB
3286
3287 /* Try with [-INF, OP0] and VR1. */
3288 if (is_gimple_min_invariant (minus_p ? vr1.max : vr1.min))
3289 set_value_range (&n_vr0, VR_RANGE, vrp_val_min (expr_type), op0, NULL);
3290
3291 /* Try with [OP0, +INF] and VR1. */
3292 else if (is_gimple_min_invariant (minus_p ? vr1.min : vr1.max))
3293 set_value_range (&n_vr0, VR_RANGE, op0, vrp_val_max (expr_type), NULL);
3294
3295 /* Try with [OP0, OP0] and VR1. */
3296 else
3297 set_value_range (&n_vr0, VR_RANGE, op0, op0, NULL);
3298
3299 extract_range_from_binary_expr_1 (vr, code, expr_type, &n_vr0, &vr1);
3300 }
4d320da4 3301}
fda05890 3302
ce6bfa50
RG
3303/* Extract range information from a unary operation CODE based on
 3304 the range of its operand *VR0, which has type OP0_TYPE, with resulting type TYPE.
026c3cfd 3305 The resulting range is stored in *VR. */
0bca51f0 3306
3a4228ba
KV
3307void
3308extract_range_from_unary_expr (value_range *vr,
3309 enum tree_code code, tree type,
3310 value_range *vr0_, tree op0_type)
0bca51f0 3311{
526ceb68 3312 value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
227858d1 3313
1a0fcfa9
RG
3314 /* VRP only operates on integral and pointer types. */
3315 if (!(INTEGRAL_TYPE_P (op0_type)
3316 || POINTER_TYPE_P (op0_type))
3317 || !(INTEGRAL_TYPE_P (type)
3318 || POINTER_TYPE_P (type)))
227858d1
DN
3319 {
3320 set_value_range_to_varying (vr);
3321 return;
3322 }
0bca51f0 3323
1a0fcfa9
RG
3324 /* If VR0 is UNDEFINED, so is the result. */
3325 if (vr0.type == VR_UNDEFINED)
0bca51f0 3326 {
1a0fcfa9 3327 set_value_range_to_undefined (vr);
0bca51f0
DN
3328 return;
3329 }
3330
3c9c79e8 3331 /* Handle operations that we express in terms of others. */
a4fff37a 3332 if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
3c9c79e8 3333 {
a4fff37a 3334 /* PAREN_EXPR and OBJ_TYPE_REF are simple copies. */
3c9c79e8
RG
3335 copy_value_range (vr, &vr0);
3336 return;
3337 }
3338 else if (code == NEGATE_EXPR)
3339 {
3340 /* -X is simply 0 - X, so re-use existing code that also handles
3341 anti-ranges fine. */
526ceb68 3342 value_range zero = VR_INITIALIZER;
3c9c79e8
RG
3343 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
3344 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
3345 return;
3346 }
3347 else if (code == BIT_NOT_EXPR)
3348 {
3349 /* ~X is simply -1 - X, so re-use existing code that also handles
3350 anti-ranges fine. */
526ceb68 3351 value_range minusone = VR_INITIALIZER;
3c9c79e8
RG
3352 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
3353 extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
3354 type, &minusone, &vr0);
3355 return;
3356 }
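  /* Worked examples of the two rewrites above, for X in [2, 5]:
     NEGATE_EXPR computes [0, 0] - [2, 5] = [-5, -2], and
     BIT_NOT_EXPR computes [-1, -1] - [2, 5] = [-6, -3].  */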
3357
3358 /* Now canonicalize anti-ranges to ranges when they are not symbolic
3359 and express op ~[] as (op []') U (op []''). */
3360 if (vr0.type == VR_ANTI_RANGE
3361 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
3362 {
3a4228ba 3363 extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type);
3c9c79e8
RG
3364 if (vrtem1.type != VR_UNDEFINED)
3365 {
526ceb68 3366 value_range vrres = VR_INITIALIZER;
3a4228ba
KV
3367 extract_range_from_unary_expr (&vrres, code, type,
3368 &vrtem1, op0_type);
3c9c79e8
RG
3369 vrp_meet (vr, &vrres);
3370 }
3371 return;
3372 }
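  /* For instance, with a 32-bit unsigned operand, ~[5, 10] is split into
     [0, 4] and [11, 4294967295]; the unary operation is applied to each
     piece and the two results are joined with vrp_meet.  */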
3373
1a0fcfa9 3374 if (CONVERT_EXPR_CODE_P (code))
0bca51f0 3375 {
ce6bfa50 3376 tree inner_type = op0_type;
2d3cd5d5 3377 tree outer_type = type;
441e96b5 3378
1a0fcfa9
RG
3379 /* If the expression evaluates to a pointer, we are only interested in
3380 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
3381 if (POINTER_TYPE_P (type))
3382 {
7d5a0f1b
RG
3383 if (range_is_nonnull (&vr0))
3384 set_value_range_to_nonnull (vr, type);
3385 else if (range_is_null (&vr0))
3386 set_value_range_to_null (vr, type);
1a0fcfa9
RG
3387 else
3388 set_value_range_to_varying (vr);
3389 return;
3390 }
3391
b47ee386
RG
3392 /* If VR0 is varying and we increase the type precision, assume
3393 a full range for the following transformation. */
3394 if (vr0.type == VR_VARYING
1a0fcfa9 3395 && INTEGRAL_TYPE_P (inner_type)
b47ee386 3396 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
2735e93e 3397 {
b47ee386
RG
3398 vr0.type = VR_RANGE;
3399 vr0.min = TYPE_MIN_VALUE (inner_type);
3400 vr0.max = TYPE_MAX_VALUE (inner_type);
2735e93e
JL
3401 }
3402
b47ee386
RG
3403 /* If VR0 is a constant range or anti-range and the conversion is
 3404 not truncating, we can convert the min and max values and
3405 canonicalize the resulting range. Otherwise we can do the
3406 conversion if the size of the range is less than what the
3407 precision of the target type can represent and the range is
3408 not an anti-range. */
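      /* For example, a widening conversion of [10, 20] is not truncating, so
	 the bounds are simply re-fitted to OUTER_TYPE and the range stays
	 [10, 20]; a truncating conversion is only attempted when MAX - MIN,
	 shifted right by the precision of OUTER_TYPE, is zero and VR0 is not
	 an anti-range.  */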
3409 if ((vr0.type == VR_RANGE
3410 || vr0.type == VR_ANTI_RANGE)
3411 && TREE_CODE (vr0.min) == INTEGER_CST
3412 && TREE_CODE (vr0.max) == INTEGER_CST
56186ac2
RG
3413 && (!is_overflow_infinity (vr0.min)
3414 || (vr0.type == VR_RANGE
3415 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3416 && needs_overflow_infinity (outer_type)
3417 && supports_overflow_infinity (outer_type)))
3418 && (!is_overflow_infinity (vr0.max)
3419 || (vr0.type == VR_RANGE
3420 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3421 && needs_overflow_infinity (outer_type)
3422 && supports_overflow_infinity (outer_type)))
b47ee386
RG
3423 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
3424 || (vr0.type == VR_RANGE
3425 && integer_zerop (int_const_binop (RSHIFT_EXPR,
d35936ab
RG
3426 int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
3427 size_int (TYPE_PRECISION (outer_type)))))))
441e96b5 3428 {
b47ee386 3429 tree new_min, new_max;
56186ac2
RG
3430 if (is_overflow_infinity (vr0.min))
3431 new_min = negative_overflow_infinity (outer_type);
629c2cca 3432 else
807e902e
KZ
3433 new_min = force_fit_type (outer_type, wi::to_widest (vr0.min),
3434 0, false);
56186ac2
RG
3435 if (is_overflow_infinity (vr0.max))
3436 new_max = positive_overflow_infinity (outer_type);
629c2cca 3437 else
807e902e
KZ
3438 new_max = force_fit_type (outer_type, wi::to_widest (vr0.max),
3439 0, false);
b47ee386
RG
3440 set_and_canonicalize_value_range (vr, vr0.type,
3441 new_min, new_max, NULL);
441e96b5
DN
3442 return;
3443 }
b47ee386
RG
3444
3445 set_value_range_to_varying (vr);
3446 return;
0bca51f0 3447 }
1a0fcfa9 3448 else if (code == ABS_EXPR)
227858d1 3449 {
1a0fcfa9
RG
3450 tree min, max;
3451 int cmp;
3452
3453 /* Pass through vr0 in the easy cases. */
3454 if (TYPE_UNSIGNED (type)
3455 || value_range_nonnegative_p (&vr0))
3456 {
3457 copy_value_range (vr, &vr0);
3458 return;
3459 }
3460
3461 /* For the remaining varying or symbolic ranges we can't do anything
3462 useful. */
3463 if (vr0.type == VR_VARYING
3464 || symbolic_range_p (&vr0))
3465 {
3466 set_value_range_to_varying (vr);
3467 return;
3468 }
3469
ff08cbee
JM
3470 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3471 useful range. */
2d3cd5d5 3472 if (!TYPE_OVERFLOW_UNDEFINED (type)
ff08cbee 3473 && ((vr0.type == VR_RANGE
e1f28918 3474 && vrp_val_is_min (vr0.min))
ff08cbee 3475 || (vr0.type == VR_ANTI_RANGE
1a0fcfa9 3476 && !vrp_val_is_min (vr0.min))))
ff08cbee
JM
3477 {
3478 set_value_range_to_varying (vr);
3479 return;
3480 }
b8698a0f 3481
227858d1
DN
3482 /* ABS_EXPR may flip the range around, if the original range
3483 included negative values. */
12df8a7e 3484 if (is_overflow_infinity (vr0.min))
2d3cd5d5 3485 min = positive_overflow_infinity (type);
e1f28918 3486 else if (!vrp_val_is_min (vr0.min))
2d3cd5d5
RAE
3487 min = fold_unary_to_constant (code, type, vr0.min);
3488 else if (!needs_overflow_infinity (type))
3489 min = TYPE_MAX_VALUE (type);
3490 else if (supports_overflow_infinity (type))
3491 min = positive_overflow_infinity (type);
12df8a7e
ILT
3492 else
3493 {
3494 set_value_range_to_varying (vr);
3495 return;
3496 }
227858d1 3497
12df8a7e 3498 if (is_overflow_infinity (vr0.max))
2d3cd5d5 3499 max = positive_overflow_infinity (type);
e1f28918 3500 else if (!vrp_val_is_min (vr0.max))
2d3cd5d5
RAE
3501 max = fold_unary_to_constant (code, type, vr0.max);
3502 else if (!needs_overflow_infinity (type))
3503 max = TYPE_MAX_VALUE (type);
d3cbd7de
RG
3504 else if (supports_overflow_infinity (type)
3505 /* We shouldn't generate [+INF, +INF] as set_value_range
3506 doesn't like this and ICEs. */
3507 && !is_positive_overflow_infinity (min))
2d3cd5d5 3508 max = positive_overflow_infinity (type);
12df8a7e
ILT
3509 else
3510 {
3511 set_value_range_to_varying (vr);
3512 return;
3513 }
227858d1 3514
ff08cbee
JM
3515 cmp = compare_values (min, max);
3516
 3517 /* If a VR_ANTI_RANGE contains zero, then we have
3518 ~[-INF, min(MIN, MAX)]. */
3519 if (vr0.type == VR_ANTI_RANGE)
b8698a0f 3520 {
e8f808b3 3521 if (range_includes_zero_p (vr0.min, vr0.max) == 1)
ff08cbee 3522 {
ff08cbee
JM
3523 /* Take the lower of the two values. */
3524 if (cmp != 1)
3525 max = min;
3526
3527 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3528 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3529 flag_wrapv is set and the original anti-range doesn't include
3530 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
2d3cd5d5 3531 if (TYPE_OVERFLOW_WRAPS (type))
12df8a7e 3532 {
2d3cd5d5 3533 tree type_min_value = TYPE_MIN_VALUE (type);
12df8a7e
ILT
3534
3535 min = (vr0.min != type_min_value
3536 ? int_const_binop (PLUS_EXPR, type_min_value,
807e902e 3537 build_int_cst (TREE_TYPE (type_min_value), 1))
12df8a7e
ILT
3538 : type_min_value);
3539 }
3540 else
3541 {
3542 if (overflow_infinity_range_p (&vr0))
2d3cd5d5 3543 min = negative_overflow_infinity (type);
12df8a7e 3544 else
2d3cd5d5 3545 min = TYPE_MIN_VALUE (type);
12df8a7e 3546 }
ff08cbee
JM
3547 }
3548 else
3549 {
3550 /* All else has failed, so create the range [0, INF], even for
3551 flag_wrapv since TYPE_MIN_VALUE is in the original
3552 anti-range. */
3553 vr0.type = VR_RANGE;
2d3cd5d5
RAE
3554 min = build_int_cst (type, 0);
3555 if (needs_overflow_infinity (type))
12df8a7e 3556 {
2d3cd5d5
RAE
3557 if (supports_overflow_infinity (type))
3558 max = positive_overflow_infinity (type);
12df8a7e
ILT
3559 else
3560 {
3561 set_value_range_to_varying (vr);
3562 return;
3563 }
3564 }
3565 else
2d3cd5d5 3566 max = TYPE_MAX_VALUE (type);
ff08cbee
JM
3567 }
3568 }
3569
3570 /* If the range contains zero then we know that the minimum value in the
3571 range will be zero. */
e8f808b3 3572 else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
ff08cbee
JM
3573 {
3574 if (cmp == 1)
3575 max = min;
2d3cd5d5 3576 min = build_int_cst (type, 0);
ff08cbee
JM
3577 }
3578 else
227858d1 3579 {
ff08cbee
JM
3580 /* If the range was reversed, swap MIN and MAX. */
3581 if (cmp == 1)
6b4db501 3582 std::swap (min, max);
227858d1 3583 }
1a0fcfa9
RG
3584
3585 cmp = compare_values (min, max);
3586 if (cmp == -2 || cmp == 1)
3587 {
3588 /* If the new range has its limits swapped around (MIN > MAX),
3589 then the operation caused one of them to wrap around, mark
3590 the new range VARYING. */
3591 set_value_range_to_varying (vr);
3592 }
3593 else
3594 set_value_range (vr, vr0.type, min, max, NULL);
3595 return;
227858d1 3596 }
0bca51f0 3597
1a0fcfa9
RG
3598 /* For unhandled operations fall back to varying. */
3599 set_value_range_to_varying (vr);
3600 return;
227858d1
DN
3601}
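/* Two illustrative ABS_EXPR cases handled above, assuming a signed type with
   undefined overflow: for VR0 = [-3, 7], which contains zero, the result is
   [0, 7]; for the anti-range VR0 = ~[-2, 2] the result is ~[TYPE_MIN, 2],
   i.e. the absolute value is known to be at least 3.  */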
3602
3603
ce6bfa50
RG
3604/* Extract range information from a unary expression CODE OP0 based on
3605 the range of its operand with resulting type TYPE.
3606 The resulting range is stored in *VR. */
3607
3608static void
526ceb68 3609extract_range_from_unary_expr (value_range *vr, enum tree_code code,
ce6bfa50
RG
3610 tree type, tree op0)
3611{
526ceb68 3612 value_range vr0 = VR_INITIALIZER;
ce6bfa50
RG
3613
3614 /* Get value ranges for the operand. For constant operands, create
3615 a new value range with the operand to simplify processing. */
3616 if (TREE_CODE (op0) == SSA_NAME)
3617 vr0 = *(get_value_range (op0));
3618 else if (is_gimple_min_invariant (op0))
3619 set_value_range_to_value (&vr0, op0, NULL);
3620 else
3621 set_value_range_to_varying (&vr0);
3622
3a4228ba 3623 extract_range_from_unary_expr (vr, code, type, &vr0, TREE_TYPE (op0));
ce6bfa50
RG
3624}
3625
3626
4e71066d 3627/* Extract range information from a conditional expression STMT based on
f255541f
RC
3628 the ranges of each of its operands and the expression code. */
3629
3630static void
526ceb68 3631extract_range_from_cond_expr (value_range *vr, gassign *stmt)
f255541f
RC
3632{
3633 tree op0, op1;
526ceb68
TS
3634 value_range vr0 = VR_INITIALIZER;
3635 value_range vr1 = VR_INITIALIZER;
f255541f
RC
3636
3637 /* Get value ranges for each operand. For constant operands, create
3638 a new value range with the operand to simplify processing. */
4e71066d 3639 op0 = gimple_assign_rhs2 (stmt);
f255541f
RC
3640 if (TREE_CODE (op0) == SSA_NAME)
3641 vr0 = *(get_value_range (op0));
3642 else if (is_gimple_min_invariant (op0))
b60b4711 3643 set_value_range_to_value (&vr0, op0, NULL);
f255541f
RC
3644 else
3645 set_value_range_to_varying (&vr0);
3646
4e71066d 3647 op1 = gimple_assign_rhs3 (stmt);
f255541f
RC
3648 if (TREE_CODE (op1) == SSA_NAME)
3649 vr1 = *(get_value_range (op1));
3650 else if (is_gimple_min_invariant (op1))
b60b4711 3651 set_value_range_to_value (&vr1, op1, NULL);
f255541f
RC
3652 else
3653 set_value_range_to_varying (&vr1);
3654
 3655 /* The resulting value range is the union of the operand ranges. */
f255541f 3656 copy_value_range (vr, &vr0);
0d5a9e78 3657 vrp_meet (vr, &vr1);
f255541f
RC
3658}
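/* E.g. for x_3 = c_2 ? a_4 : b_5 with a_4 in [1, 5] and b_5 in [10, 20],
   the meet of the operand ranges gives x_3 in [1, 20]; the union of two
   disjoint ranges cannot be represented more precisely here.  */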
3659
3660
227858d1
DN
3661/* Extract range information from a comparison expression EXPR based
 3662 on the ranges of its operands and the expression code. */
3663
3664static void
526ceb68 3665extract_range_from_comparison (value_range *vr, enum tree_code code,
2d3cd5d5 3666 tree type, tree op0, tree op1)
227858d1 3667{
12df8a7e 3668 bool sop = false;
726a989a 3669 tree val;
b8698a0f 3670
6b99f156
JH
3671 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3672 NULL);
12df8a7e
ILT
3673
3674 /* A disadvantage of using a special infinity as an overflow
3675 representation is that we lose the ability to record overflow
3676 when we don't have an infinity. So we have to ignore a result
3677 which relies on overflow. */
3678
3679 if (val && !is_overflow_infinity (val) && !sop)
227858d1
DN
3680 {
3681 /* Since this expression was found on the RHS of an assignment,
3682 its type may be different from _Bool. Convert VAL to EXPR's
3683 type. */
2d3cd5d5 3684 val = fold_convert (type, val);
b60b4711
ILT
3685 if (is_gimple_min_invariant (val))
3686 set_value_range_to_value (vr, val, vr->equiv);
3687 else
3688 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
227858d1
DN
3689 }
3690 else
31ab1cc9 3691 /* The result of a comparison is always true or false. */
2d3cd5d5 3692 set_value_range_to_truthvalue (vr, type);
0bca51f0
DN
3693}
3694
1304953e
JJ
3695/* Helper function for simplify_internal_call_using_ranges and
3696 extract_range_basic. Return true if OP0 SUBCODE OP1 for
3697 SUBCODE {PLUS,MINUS,MULT}_EXPR is known to never overflow or
3698 always overflow. Set *OVF to true if it is known to always
3699 overflow. */
3700
3701static bool
3702check_for_binary_op_overflow (enum tree_code subcode, tree type,
3703 tree op0, tree op1, bool *ovf)
3704{
526ceb68
TS
3705 value_range vr0 = VR_INITIALIZER;
3706 value_range vr1 = VR_INITIALIZER;
1304953e
JJ
3707 if (TREE_CODE (op0) == SSA_NAME)
3708 vr0 = *get_value_range (op0);
3709 else if (TREE_CODE (op0) == INTEGER_CST)
3710 set_value_range_to_value (&vr0, op0, NULL);
3711 else
3712 set_value_range_to_varying (&vr0);
3713
3714 if (TREE_CODE (op1) == SSA_NAME)
3715 vr1 = *get_value_range (op1);
3716 else if (TREE_CODE (op1) == INTEGER_CST)
3717 set_value_range_to_value (&vr1, op1, NULL);
3718 else
3719 set_value_range_to_varying (&vr1);
3720
3721 if (!range_int_cst_p (&vr0)
3722 || TREE_OVERFLOW (vr0.min)
3723 || TREE_OVERFLOW (vr0.max))
3724 {
3725 vr0.min = vrp_val_min (TREE_TYPE (op0));
3726 vr0.max = vrp_val_max (TREE_TYPE (op0));
3727 }
3728 if (!range_int_cst_p (&vr1)
3729 || TREE_OVERFLOW (vr1.min)
3730 || TREE_OVERFLOW (vr1.max))
3731 {
3732 vr1.min = vrp_val_min (TREE_TYPE (op1));
3733 vr1.max = vrp_val_max (TREE_TYPE (op1));
3734 }
3735 *ovf = arith_overflowed_p (subcode, type, vr0.min,
3736 subcode == MINUS_EXPR ? vr1.max : vr1.min);
3737 if (arith_overflowed_p (subcode, type, vr0.max,
3738 subcode == MINUS_EXPR ? vr1.min : vr1.max) != *ovf)
3739 return false;
3740 if (subcode == MULT_EXPR)
3741 {
3742 if (arith_overflowed_p (subcode, type, vr0.min, vr1.max) != *ovf
3743 || arith_overflowed_p (subcode, type, vr0.max, vr1.min) != *ovf)
3744 return false;
3745 }
3746 if (*ovf)
3747 {
 3748 /* So far we have found that the boundaries overflow.
 3749 That does not prove that there is an overflow for all values
 3750 in between the boundaries. For that, compute the widest_int
 3751 range of the result and check whether it lies entirely outside
 3752 the range of TYPE. */
3753 widest_int wmin, wmax;
3754 widest_int w[4];
3755 int i;
3756 w[0] = wi::to_widest (vr0.min);
3757 w[1] = wi::to_widest (vr0.max);
3758 w[2] = wi::to_widest (vr1.min);
3759 w[3] = wi::to_widest (vr1.max);
3760 for (i = 0; i < 4; i++)
3761 {
3762 widest_int wt;
3763 switch (subcode)
3764 {
3765 case PLUS_EXPR:
3766 wt = wi::add (w[i & 1], w[2 + (i & 2) / 2]);
3767 break;
3768 case MINUS_EXPR:
3769 wt = wi::sub (w[i & 1], w[2 + (i & 2) / 2]);
3770 break;
3771 case MULT_EXPR:
3772 wt = wi::mul (w[i & 1], w[2 + (i & 2) / 2]);
3773 break;
3774 default:
3775 gcc_unreachable ();
3776 }
3777 if (i == 0)
3778 {
3779 wmin = wt;
3780 wmax = wt;
3781 }
3782 else
3783 {
3784 wmin = wi::smin (wmin, wt);
3785 wmax = wi::smax (wmax, wt);
3786 }
3787 }
3788 /* The result of op0 CODE op1 is known to be in range
3789 [wmin, wmax]. */
3790 widest_int wtmin = wi::to_widest (vrp_val_min (type));
3791 widest_int wtmax = wi::to_widest (vrp_val_max (type));
3792 /* If all values in [wmin, wmax] are smaller than
3793 [wtmin, wtmax] or all are larger than [wtmin, wtmax],
3794 the arithmetic operation will always overflow. */
032c80e9 3795 if (wmax < wtmin || wmin > wtmax)
1304953e
JJ
3796 return true;
3797 return false;
3798 }
3799 return true;
3800}
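/* Worked example of the boundary test above: for SUBCODE == PLUS_EXPR in
   signed char ([-128, 127]) with op0 in [120, 125] and op1 in [10, 20], both
   boundary sums overflow, and the widest_int range of the result, [130, 145],
   lies entirely above 127, so the addition is reported as always
   overflowing.  */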
3801
726a989a
RB
3802/* Try to derive a nonnegative or nonzero range out of STMT relying
3803 primarily on generic routines in fold in conjunction with range data.
 3804 Store the result in *VR. */
0bca51f0 3805
726a989a 3806static void
526ceb68 3807extract_range_basic (value_range *vr, gimple *stmt)
726a989a
RB
3808{
3809 bool sop = false;
3810 tree type = gimple_expr_type (stmt);
3811
9c0a9e12 3812 if (is_gimple_call (stmt))
1f6eac90 3813 {
9c0a9e12 3814 tree arg;
1f6eac90 3815 int mini, maxi, zerov = 0, prec;
9c0a9e12
RS
3816 enum tree_code subcode = ERROR_MARK;
3817 combined_fn cfn = gimple_call_combined_fn (stmt);
1f6eac90 3818
9c0a9e12 3819 switch (cfn)
1f6eac90 3820 {
9c0a9e12 3821 case CFN_BUILT_IN_CONSTANT_P:
1f6eac90
JJ
3822 /* If the call is __builtin_constant_p and the argument is a
3823 function parameter resolve it to false. This avoids bogus
3824 array bound warnings.
3825 ??? We could do this as early as inlining is finished. */
3826 arg = gimple_call_arg (stmt, 0);
3827 if (TREE_CODE (arg) == SSA_NAME
3828 && SSA_NAME_IS_DEFAULT_DEF (arg)
343092cf
KV
3829 && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL
3830 && cfun->after_inlining)
1f6eac90
JJ
3831 {
3832 set_value_range_to_null (vr, type);
3833 return;
3834 }
3835 break;
3836 /* Both __builtin_ffs* and __builtin_popcount return
3837 [0, prec]. */
9c0a9e12
RS
3838 CASE_CFN_FFS:
3839 CASE_CFN_POPCOUNT:
1f6eac90
JJ
3840 arg = gimple_call_arg (stmt, 0);
3841 prec = TYPE_PRECISION (TREE_TYPE (arg));
3842 mini = 0;
3843 maxi = prec;
3844 if (TREE_CODE (arg) == SSA_NAME)
3845 {
526ceb68 3846 value_range *vr0 = get_value_range (arg);
1f6eac90
JJ
3847 /* If arg is non-zero, then ffs or popcount
3848 are non-zero. */
3849 if (((vr0->type == VR_RANGE
b48e3948 3850 && range_includes_zero_p (vr0->min, vr0->max) == 0)
1f6eac90 3851 || (vr0->type == VR_ANTI_RANGE
b48e3948
JJ
3852 && range_includes_zero_p (vr0->min, vr0->max) == 1))
3853 && !is_overflow_infinity (vr0->min)
3854 && !is_overflow_infinity (vr0->max))
1f6eac90
JJ
3855 mini = 1;
3856 /* If some high bits are known to be zero,
3857 we can decrease the maximum. */
3858 if (vr0->type == VR_RANGE
3859 && TREE_CODE (vr0->max) == INTEGER_CST
b48e3948
JJ
3860 && !operand_less_p (vr0->min,
3861 build_zero_cst (TREE_TYPE (vr0->min)))
3f5c390d 3862 && !is_overflow_infinity (vr0->max))
1f6eac90
JJ
3863 maxi = tree_floor_log2 (vr0->max) + 1;
3864 }
3865 goto bitop_builtin;
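	  /* E.g. a 32-bit popcount argument known to be non-zero with a
	     maximum of 255 gets the range [1, 8]: MINI is raised to 1 and
	     MAXI is lowered to tree_floor_log2 (255) + 1 = 8.  */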
3866 /* __builtin_parity* returns [0, 1]. */
9c0a9e12 3867 CASE_CFN_PARITY:
1f6eac90
JJ
3868 mini = 0;
3869 maxi = 1;
3870 goto bitop_builtin;
3871 /* __builtin_c[lt]z* return [0, prec-1], except for
3872 when the argument is 0, but that is undefined behavior.
3873 On many targets where the CLZ RTL or optab value is defined
3874 for 0 the value is prec, so include that in the range
3875 by default. */
9c0a9e12 3876 CASE_CFN_CLZ:
1f6eac90
JJ
3877 arg = gimple_call_arg (stmt, 0);
3878 prec = TYPE_PRECISION (TREE_TYPE (arg));
3879 mini = 0;
3880 maxi = prec;
3881 if (optab_handler (clz_optab, TYPE_MODE (TREE_TYPE (arg)))
3882 != CODE_FOR_nothing
3883 && CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3884 zerov)
3885 /* Handle only the single common value. */
3886 && zerov != prec)
3887 /* Magic value to give up, unless vr0 proves
3888 arg is non-zero. */
3889 mini = -2;
3890 if (TREE_CODE (arg) == SSA_NAME)
3891 {
526ceb68 3892 value_range *vr0 = get_value_range (arg);
1f6eac90
JJ
3893 /* From clz of VR_RANGE minimum we can compute
3894 result maximum. */
3895 if (vr0->type == VR_RANGE
3896 && TREE_CODE (vr0->min) == INTEGER_CST
3f5c390d 3897 && !is_overflow_infinity (vr0->min))
1f6eac90
JJ
3898 {
3899 maxi = prec - 1 - tree_floor_log2 (vr0->min);
3900 if (maxi != prec)
3901 mini = 0;
3902 }
3903 else if (vr0->type == VR_ANTI_RANGE
3904 && integer_zerop (vr0->min)
3f5c390d 3905 && !is_overflow_infinity (vr0->min))
1f6eac90
JJ
3906 {
3907 maxi = prec - 1;
3908 mini = 0;
3909 }
3910 if (mini == -2)
3911 break;
3912 /* From clz of VR_RANGE maximum we can compute
3913 result minimum. */
3914 if (vr0->type == VR_RANGE
3915 && TREE_CODE (vr0->max) == INTEGER_CST
3f5c390d 3916 && !is_overflow_infinity (vr0->max))
1f6eac90
JJ
3917 {
3918 mini = prec - 1 - tree_floor_log2 (vr0->max);
3919 if (mini == prec)
3920 break;
3921 }
3922 }
3923 if (mini == -2)
3924 break;
3925 goto bitop_builtin;
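	  /* E.g. for a 32-bit clz argument known to lie in [16, 255], the
	     minimum 16 bounds the result maximum at 31 - 4 = 27 and the
	     maximum 255 bounds the result minimum at 31 - 7 = 24, giving
	     the range [24, 27].  */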
3926 /* __builtin_ctz* return [0, prec-1], except for
3927 when the argument is 0, but that is undefined behavior.
3928 If there is a ctz optab for this mode and
3929 CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
3930 otherwise just assume 0 won't be seen. */
9c0a9e12 3931 CASE_CFN_CTZ:
1f6eac90
JJ
3932 arg = gimple_call_arg (stmt, 0);
3933 prec = TYPE_PRECISION (TREE_TYPE (arg));
3934 mini = 0;
3935 maxi = prec - 1;
3936 if (optab_handler (ctz_optab, TYPE_MODE (TREE_TYPE (arg)))
3937 != CODE_FOR_nothing
3938 && CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3939 zerov))
3940 {
3941 /* Handle only the two common values. */
3942 if (zerov == -1)
3943 mini = -1;
3944 else if (zerov == prec)
3945 maxi = prec;
3946 else
3947 /* Magic value to give up, unless vr0 proves
3948 arg is non-zero. */
3949 mini = -2;
3950 }
3951 if (TREE_CODE (arg) == SSA_NAME)
3952 {
526ceb68 3953 value_range *vr0 = get_value_range (arg);
1f6eac90
JJ
3954 /* If arg is non-zero, then use [0, prec - 1]. */
3955 if (((vr0->type == VR_RANGE
3956 && integer_nonzerop (vr0->min))
3957 || (vr0->type == VR_ANTI_RANGE
3958 && integer_zerop (vr0->min)))
3f5c390d 3959 && !is_overflow_infinity (vr0->min))
1f6eac90
JJ
3960 {
3961 mini = 0;
3962 maxi = prec - 1;
3963 }
3964 /* If some high bits are known to be zero,
3965 we can decrease the result maximum. */
3966 if (vr0->type == VR_RANGE
3967 && TREE_CODE (vr0->max) == INTEGER_CST
3f5c390d 3968 && !is_overflow_infinity (vr0->max))
1f6eac90
JJ
3969 {
3970 maxi = tree_floor_log2 (vr0->max);
3971 /* For vr0 [0, 0] give up. */
3972 if (maxi == -1)
3973 break;
3974 }
3975 }
3976 if (mini == -2)
3977 break;
3978 goto bitop_builtin;
3979 /* __builtin_clrsb* returns [0, prec-1]. */
9c0a9e12 3980 CASE_CFN_CLRSB:
1f6eac90
JJ
3981 arg = gimple_call_arg (stmt, 0);
3982 prec = TYPE_PRECISION (TREE_TYPE (arg));
3983 mini = 0;
3984 maxi = prec - 1;
3985 goto bitop_builtin;
3986 bitop_builtin:
3987 set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
3988 build_int_cst (type, maxi), NULL);
3989 return;
9c0a9e12 3990 case CFN_UBSAN_CHECK_ADD:
31e071ae
MP
3991 subcode = PLUS_EXPR;
3992 break;
9c0a9e12 3993 case CFN_UBSAN_CHECK_SUB:
31e071ae
MP
3994 subcode = MINUS_EXPR;
3995 break;
9c0a9e12 3996 case CFN_UBSAN_CHECK_MUL:
31e071ae
MP
3997 subcode = MULT_EXPR;
3998 break;
9c0a9e12
RS
3999 case CFN_GOACC_DIM_SIZE:
4000 case CFN_GOACC_DIM_POS:
bd751975
NS
4001 /* Optimizing these two internal functions helps the loop
4002 optimizer eliminate outer comparisons. Size is [1,N]
4003 and pos is [0,N-1]. */
4004 {
9c0a9e12 4005 bool is_pos = cfn == CFN_GOACC_DIM_POS;
629b3d75
MJ
4006 int axis = oacc_get_ifn_dim_arg (stmt);
4007 int size = oacc_get_fn_dim_size (current_function_decl, axis);
bd751975
NS
4008
4009 if (!size)
4010 /* If it's dynamic, the backend might know a hardware
4011 limitation. */
4012 size = targetm.goacc.dim_limit (axis);
4013
4014 tree type = TREE_TYPE (gimple_call_lhs (stmt));
4015 set_value_range (vr, VR_RANGE,
4016 build_int_cst (type, is_pos ? 0 : 1),
4017 size ? build_int_cst (type, size - is_pos)
4018 : vrp_val_max (type), NULL);
4019 }
4020 return;
cfe3d653 4021 case CFN_BUILT_IN_STRLEN:
2131e489
JJ
4022 if (tree lhs = gimple_call_lhs (stmt))
4023 if (ptrdiff_type_node
4024 && (TYPE_PRECISION (ptrdiff_type_node)
4025 == TYPE_PRECISION (TREE_TYPE (lhs))))
4026 {
4027 tree type = TREE_TYPE (lhs);
4028 tree max = vrp_val_max (ptrdiff_type_node);
4029 wide_int wmax = wi::to_wide (max, TYPE_PRECISION (TREE_TYPE (max)));
4030 tree range_min = build_zero_cst (type);
4031 tree range_max = wide_int_to_tree (type, wmax - 1);
4032 set_value_range (vr, VR_RANGE, range_min, range_max, NULL);
4033 return;
4034 }
4035 break;
31e071ae
MP
4036 default:
4037 break;
4038 }
4039 if (subcode != ERROR_MARK)
4040 {
4041 bool saved_flag_wrapv = flag_wrapv;
 4042 /* Pretend the arithmetic is wrapping. If there is
 4043 any overflow, we'll complain, but will actually do a
 4044 wrapping operation. */
4045 flag_wrapv = 1;
4046 extract_range_from_binary_expr (vr, subcode, type,
4047 gimple_call_arg (stmt, 0),
4048 gimple_call_arg (stmt, 1));
4049 flag_wrapv = saved_flag_wrapv;
4050
4051 /* If for both arguments vrp_valueize returned non-NULL,
4052 this should have been already folded and if not, it
4053 wasn't folded because of overflow. Avoid removing the
4054 UBSAN_CHECK_* calls in that case. */
4055 if (vr->type == VR_RANGE
4056 && (vr->min == vr->max
4057 || operand_equal_p (vr->min, vr->max, 0)))
4058 set_value_range_to_varying (vr);
4059 return;
4060 }
4061 }
1304953e
JJ
 4062 /* Handle extraction of the two results (the result of the arithmetic
 4063 and a flag indicating whether it overflowed) from an {ADD,SUB,MUL}_OVERFLOW
 4064 internal function. */
4065 else if (is_gimple_assign (stmt)
4066 && (gimple_assign_rhs_code (stmt) == REALPART_EXPR
4067 || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR)
4068 && INTEGRAL_TYPE_P (type))
4069 {
4070 enum tree_code code = gimple_assign_rhs_code (stmt);
4071 tree op = gimple_assign_rhs1 (stmt);
4072 if (TREE_CODE (op) == code && TREE_CODE (TREE_OPERAND (op, 0)) == SSA_NAME)
4073 {
355fe088 4074 gimple *g = SSA_NAME_DEF_STMT (TREE_OPERAND (op, 0));
1304953e
JJ
4075 if (is_gimple_call (g) && gimple_call_internal_p (g))
4076 {
4077 enum tree_code subcode = ERROR_MARK;
4078 switch (gimple_call_internal_fn (g))
4079 {
4080 case IFN_ADD_OVERFLOW:
4081 subcode = PLUS_EXPR;
4082 break;
4083 case IFN_SUB_OVERFLOW:
4084 subcode = MINUS_EXPR;
4085 break;
4086 case IFN_MUL_OVERFLOW:
4087 subcode = MULT_EXPR;
4088 break;
4089 default:
4090 break;
4091 }
4092 if (subcode != ERROR_MARK)
4093 {
4094 tree op0 = gimple_call_arg (g, 0);
4095 tree op1 = gimple_call_arg (g, 1);
4096 if (code == IMAGPART_EXPR)
4097 {
4098 bool ovf = false;
4099 if (check_for_binary_op_overflow (subcode, type,
4100 op0, op1, &ovf))
4101 set_value_range_to_value (vr,
4102 build_int_cst (type, ovf),
4103 NULL);
a86451b9
JJ
4104 else if (TYPE_PRECISION (type) == 1
4105 && !TYPE_UNSIGNED (type))
4106 set_value_range_to_varying (vr);
1304953e
JJ
4107 else
4108 set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
4109 build_int_cst (type, 1), NULL);
4110 }
4111 else if (types_compatible_p (type, TREE_TYPE (op0))
4112 && types_compatible_p (type, TREE_TYPE (op1)))
4113 {
4114 bool saved_flag_wrapv = flag_wrapv;
 4115 /* Pretend the arithmetic is wrapping. If there is
4116 any overflow, IMAGPART_EXPR will be set. */
4117 flag_wrapv = 1;
4118 extract_range_from_binary_expr (vr, subcode, type,
4119 op0, op1);
4120 flag_wrapv = saved_flag_wrapv;
4121 }
4122 else
4123 {
526ceb68
TS
4124 value_range vr0 = VR_INITIALIZER;
4125 value_range vr1 = VR_INITIALIZER;
1304953e
JJ
4126 bool saved_flag_wrapv = flag_wrapv;
 4127 /* Pretend the arithmetic is wrapping. If there is
4128 any overflow, IMAGPART_EXPR will be set. */
4129 flag_wrapv = 1;
4130 extract_range_from_unary_expr (&vr0, NOP_EXPR,
4131 type, op0);
4132 extract_range_from_unary_expr (&vr1, NOP_EXPR,
4133 type, op1);
4134 extract_range_from_binary_expr_1 (vr, subcode, type,
4135 &vr0, &vr1);
4136 flag_wrapv = saved_flag_wrapv;
4137 }
4138 return;
4139 }
4140 }
4141 }
4142 }
1f6eac90
JJ
4143 if (INTEGRAL_TYPE_P (type)
4144 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
726a989a
RB
4145 set_value_range_to_nonnegative (vr, type,
4146 sop || stmt_overflow_infinity (stmt));
4147 else if (vrp_stmt_computes_nonzero (stmt, &sop)
4148 && !sop)
4149 set_value_range_to_nonnull (vr, type);
4150 else
4151 set_value_range_to_varying (vr);
4152}
4153
4154
4155/* Try to compute a useful range out of assignment STMT and store it
227858d1 4156 in *VR. */
0bca51f0
DN
4157
4158static void
526ceb68 4159extract_range_from_assignment (value_range *vr, gassign *stmt)
0bca51f0 4160{
726a989a 4161 enum tree_code code = gimple_assign_rhs_code (stmt);
0bca51f0
DN
4162
4163 if (code == ASSERT_EXPR)
726a989a 4164 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
0bca51f0 4165 else if (code == SSA_NAME)
726a989a 4166 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
aebf4828 4167 else if (TREE_CODE_CLASS (code) == tcc_binary)
726a989a
RB
4168 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
4169 gimple_expr_type (stmt),
4170 gimple_assign_rhs1 (stmt),
4171 gimple_assign_rhs2 (stmt));
0bca51f0 4172 else if (TREE_CODE_CLASS (code) == tcc_unary)
726a989a
RB
4173 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
4174 gimple_expr_type (stmt),
4175 gimple_assign_rhs1 (stmt));
f255541f 4176 else if (code == COND_EXPR)
4e71066d 4177 extract_range_from_cond_expr (vr, stmt);
227858d1 4178 else if (TREE_CODE_CLASS (code) == tcc_comparison)
726a989a
RB
4179 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
4180 gimple_expr_type (stmt),
4181 gimple_assign_rhs1 (stmt),
4182 gimple_assign_rhs2 (stmt));
4183 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
4184 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
4185 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
0bca51f0 4186 else
b565d777 4187 set_value_range_to_varying (vr);
b16caf72 4188
b16caf72 4189 if (vr->type == VR_VARYING)
726a989a 4190 extract_range_basic (vr, stmt);
0bca51f0
DN
4191}
4192
1e8552eb 4193/* Given a range VR, a LOOP and a variable VAR, determine whether it
0bca51f0
DN
4194 would be profitable to adjust VR using scalar evolution information
4195 for VAR. If so, update VR with the new limits. */
4196
4197static void
526ceb68 4198adjust_range_with_scev (value_range *vr, struct loop *loop,
355fe088 4199 gimple *stmt, tree var)
0bca51f0 4200{
1936a7d4 4201 tree init, step, chrec, tmin, tmax, min, max, type, tem;
d7f5de76 4202 enum ev_direction dir;
0bca51f0
DN
4203
4204 /* TODO. Don't adjust anti-ranges. An anti-range may provide
4205 better opportunities than a regular range, but I'm not sure. */
4206 if (vr->type == VR_ANTI_RANGE)
4207 return;
4208
d7770457 4209 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
6f1c9cd0
SP
4210
4211 /* Like in PR19590, scev can return a constant function. */
4212 if (is_gimple_min_invariant (chrec))
4213 {
cdc64612 4214 set_value_range_to_value (vr, chrec, vr->equiv);
6f1c9cd0
SP
4215 return;
4216 }
4217
0bca51f0
DN
4218 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
4219 return;
4220
d7770457 4221 init = initial_condition_in_loop_num (chrec, loop->num);
1936a7d4
RG
4222 tem = op_with_constant_singleton_value_range (init);
4223 if (tem)
4224 init = tem;
d7770457 4225 step = evolution_part_in_loop_num (chrec, loop->num);
1936a7d4
RG
4226 tem = op_with_constant_singleton_value_range (step);
4227 if (tem)
4228 step = tem;
0bca51f0
DN
4229
4230 /* If STEP is symbolic, we can't know whether INIT will be the
04dce5a4
ZD
4231 minimum or maximum value in the range. Also, unless INIT is
4232 a simple expression, compare_values and possibly other functions
4233 in tree-vrp won't be able to handle it. */
d7770457 4234 if (step == NULL_TREE
04dce5a4
ZD
4235 || !is_gimple_min_invariant (step)
4236 || !valid_value_p (init))
0bca51f0
DN
4237 return;
4238
d7f5de76
ZD
4239 dir = scev_direction (chrec);
4240 if (/* Do not adjust ranges if we do not know whether the iv increases
4241 or decreases, ... */
4242 dir == EV_DIR_UNKNOWN
4243 /* ... or if it may wrap. */
b24d9420
BC
4244 || scev_probably_wraps_p (NULL_TREE, init, step, stmt,
4245 get_chrec_loop (chrec), true))
227858d1
DN
4246 return;
4247
12df8a7e
ILT
4248 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
4249 negative_overflow_infinity and positive_overflow_infinity,
4250 because we have concluded that the loop probably does not
4251 wrap. */
4252
20527215
ZD
4253 type = TREE_TYPE (var);
4254 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
4255 tmin = lower_bound_in_type (type, type);
4256 else
4257 tmin = TYPE_MIN_VALUE (type);
4258 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
4259 tmax = upper_bound_in_type (type, type);
4260 else
4261 tmax = TYPE_MAX_VALUE (type);
4262
e3488283 4263 /* Try to use estimated number of iterations for the loop to constrain the
b4a9343c 4264 final value in the evolution. */
e3488283 4265 if (TREE_CODE (step) == INTEGER_CST
e3488283
RG
4266 && is_gimple_val (init)
4267 && (TREE_CODE (init) != SSA_NAME
4268 || get_value_range (init)->type == VR_RANGE))
4269 {
807e902e 4270 widest_int nit;
b4a9343c 4271
7c98ec60
RG
4272 /* We are only entering here for loop header PHI nodes, so using
4273 the number of latch executions is the correct thing to use. */
4274 if (max_loop_iterations (loop, &nit))
b4a9343c 4275 {
526ceb68 4276 value_range maxvr = VR_INITIALIZER;
807e902e
KZ
4277 signop sgn = TYPE_SIGN (TREE_TYPE (step));
4278 bool overflow;
b4a9343c 4279
807e902e
KZ
4280 widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
4281 &overflow);
b4a9343c
ZD
4282 /* If the multiplication overflowed we can't do a meaningful
4283 adjustment. Likewise if the result doesn't fit in the type
4284 of the induction variable. For a signed type we have to
4285 check whether the result has the expected signedness which
4286 is that of the step as number of iterations is unsigned. */
4287 if (!overflow
807e902e
KZ
4288 && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
4289 && (sgn == UNSIGNED
4290 || wi::gts_p (wtmp, 0) == wi::gts_p (step, 0)))
e3488283 4291 {
807e902e 4292 tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
b4a9343c
ZD
4293 extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
4294 TREE_TYPE (init), init, tem);
4295 /* Likewise if the addition did. */
4296 if (maxvr.type == VR_RANGE)
4297 {
c446cf07
BC
4298 value_range initvr = VR_INITIALIZER;
4299
4300 if (TREE_CODE (init) == SSA_NAME)
4301 initvr = *(get_value_range (init));
4302 else if (is_gimple_min_invariant (init))
4303 set_value_range_to_value (&initvr, init, NULL);
4304 else
4305 return;
4306
4307 /* Check if init + nit * step overflows. Though we checked
4308 scev {init, step}_loop doesn't wrap, it is not enough
4309 because the loop may exit immediately. Overflow could
4310 happen in the plus expression in this case. */
4311 if ((dir == EV_DIR_DECREASES
4312 && (is_negative_overflow_infinity (maxvr.min)
4313 || compare_values (maxvr.min, initvr.min) != -1))
4314 || (dir == EV_DIR_GROWS
4315 && (is_positive_overflow_infinity (maxvr.max)
4316 || compare_values (maxvr.max, initvr.max) != 1)))
4317 return;
4318
b4a9343c
ZD
4319 tmin = maxvr.min;
4320 tmax = maxvr.max;
4321 }
e3488283
RG
4322 }
4323 }
4324 }
4325
20527215 4326 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
0bca51f0 4327 {
20527215
ZD
4328 min = tmin;
4329 max = tmax;
4330
0bca51f0
DN
4331 /* For VARYING or UNDEFINED ranges, just about anything we get
4332 from scalar evolutions should be better. */
4f67dfcf 4333
d7f5de76 4334 if (dir == EV_DIR_DECREASES)
4f67dfcf 4335 max = init;
0bca51f0 4336 else
4f67dfcf 4337 min = init;
0bca51f0
DN
4338 }
4339 else if (vr->type == VR_RANGE)
4340 {
20527215
ZD
4341 min = vr->min;
4342 max = vr->max;
d5448566 4343
d7f5de76 4344 if (dir == EV_DIR_DECREASES)
0bca51f0 4345 {
d5448566
KH
4346 /* INIT is the maximum value. If INIT is lower than VR->MAX
4347 but no smaller than VR->MIN, set VR->MAX to INIT. */
4348 if (compare_values (init, max) == -1)
e3488283 4349 max = init;
9a46cc16
ILT
4350
4351 /* According to the loop information, the variable does not
4352 overflow. If we think it does, probably because of an
4353 overflow due to arithmetic on a different INF value,
4354 reset now. */
e3488283
RG
4355 if (is_negative_overflow_infinity (min)
4356 || compare_values (min, tmin) == -1)
9a46cc16 4357 min = tmin;
e3488283 4358
0bca51f0
DN
4359 }
4360 else
4361 {
4362 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
d5448566 4363 if (compare_values (init, min) == 1)
e3488283 4364 min = init;
9a46cc16 4365
e3488283
RG
4366 if (is_positive_overflow_infinity (max)
4367 || compare_values (tmax, max) == -1)
9a46cc16 4368 max = tmax;
0bca51f0 4369 }
9e9f6bf0
RB
4370 }
4371 else
4372 return;
d5448566 4373
9e9f6bf0
RB
4374 /* If we just created an invalid range with the minimum
4375 greater than the maximum, we fail conservatively.
4376 This should happen only in unreachable
4377 parts of code, or for invalid programs. */
4378 if (compare_values (min, max) == 1
4379 || (is_negative_overflow_infinity (min)
4380 && is_positive_overflow_infinity (max)))
4381 return;
e3488283 4382
f7b492ea
JW
 4383 /* Even for valid range info, the overflow flag can sometimes leak in.
 4384 As the GIMPLE IL should have no constants with TREE_OVERFLOW set, we
 4385 drop them, except for +-overflow_infinity, which still needs special
 4386 handling in the vrp pass. */
4387 if (TREE_OVERFLOW_P (min)
4388 && ! is_negative_overflow_infinity (min))
4389 min = drop_tree_overflow (min);
4390 if (TREE_OVERFLOW_P (max)
4391 && ! is_positive_overflow_infinity (max))
4392 max = drop_tree_overflow (max);
4393
9e9f6bf0 4394 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
0bca51f0
DN
4395}
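/* As an illustration, for an induction variable with evolution {0, +, 1} in
   a loop whose latch is known to execute at most 9 times (and which does not
   wrap), the code above computes INIT + 9 * STEP = 9 as the final value, so a
   previously VARYING range for the variable is narrowed to [0, 9].  */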
4396
4397
4398/* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
b8698a0f 4399
227858d1
DN
4400 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
4401 all the values in the ranges.
0bca51f0
DN
4402
4403 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
4404
227858d1 4405 - Return NULL_TREE if it is not always possible to determine the
12df8a7e
ILT
4406 value of the comparison.
4407
4408 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
4409 overflow infinity was used in the test. */
227858d1 4410
0bca51f0
DN
4411
4412static tree
526ceb68 4413compare_ranges (enum tree_code comp, value_range *vr0, value_range *vr1,
12df8a7e 4414 bool *strict_overflow_p)
0bca51f0
DN
4415{
4416 /* VARYING or UNDEFINED ranges cannot be compared. */
4417 if (vr0->type == VR_VARYING
4418 || vr0->type == VR_UNDEFINED
4419 || vr1->type == VR_VARYING
4420 || vr1->type == VR_UNDEFINED)
4421 return NULL_TREE;
4422
4423 /* Anti-ranges need to be handled separately. */
4424 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
4425 {
4426 /* If both are anti-ranges, then we cannot compute any
4427 comparison. */
4428 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
4429 return NULL_TREE;
4430
4431 /* These comparisons are never statically computable. */
4432 if (comp == GT_EXPR
4433 || comp == GE_EXPR
4434 || comp == LT_EXPR
4435 || comp == LE_EXPR)
4436 return NULL_TREE;
4437
4438 /* Equality can be computed only between a range and an
4439 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
4440 if (vr0->type == VR_RANGE)
4441 {
4442 /* To simplify processing, make VR0 the anti-range. */
526ceb68 4443 value_range *tmp = vr0;
0bca51f0
DN
4444 vr0 = vr1;
4445 vr1 = tmp;
4446 }
4447
4448 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
4449
12df8a7e
ILT
4450 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
4451 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
0bca51f0
DN
4452 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
4453
4454 return NULL_TREE;
4455 }
4456
0c948c27
ILT
4457 if (!usable_range_p (vr0, strict_overflow_p)
4458 || !usable_range_p (vr1, strict_overflow_p))
4459 return NULL_TREE;
4460
0bca51f0
DN
4461 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
4462 operands around and change the comparison code. */
4463 if (comp == GT_EXPR || comp == GE_EXPR)
4464 {
0bca51f0 4465 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
fab27f52 4466 std::swap (vr0, vr1);
0bca51f0
DN
4467 }
4468
4469 if (comp == EQ_EXPR)
4470 {
4471 /* Equality may only be computed if both ranges represent
4472 exactly one value. */
12df8a7e
ILT
4473 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
4474 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
0bca51f0 4475 {
12df8a7e
ILT
4476 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
4477 strict_overflow_p);
4478 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
4479 strict_overflow_p);
0bca51f0
DN
4480 if (cmp_min == 0 && cmp_max == 0)
4481 return boolean_true_node;
4482 else if (cmp_min != -2 && cmp_max != -2)
4483 return boolean_false_node;
4484 }
7ab1122a 4485 /* If the ranges [V0_MIN, V0_MAX] and [V1_MIN, V1_MAX] do not intersect, then V0 != V1. */
12df8a7e
ILT
4486 else if (compare_values_warnv (vr0->min, vr1->max,
4487 strict_overflow_p) == 1
4488 || compare_values_warnv (vr1->min, vr0->max,
4489 strict_overflow_p) == 1)
7ab1122a 4490 return boolean_false_node;
0bca51f0
DN
4491
4492 return NULL_TREE;
4493 }
4494 else if (comp == NE_EXPR)
4495 {
4496 int cmp1, cmp2;
4497
4498 /* If VR0 is completely to the left or completely to the right
4499 of VR1, they are always different. Notice that we need to
4500 make sure that both comparisons yield similar results to
4501 avoid comparing values that cannot be compared at
4502 compile-time. */
12df8a7e
ILT
4503 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
4504 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
0bca51f0
DN
4505 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
4506 return boolean_true_node;
4507
4508 /* If VR0 and VR1 represent a single value and are identical,
4509 return false. */
12df8a7e
ILT
4510 else if (compare_values_warnv (vr0->min, vr0->max,
4511 strict_overflow_p) == 0
4512 && compare_values_warnv (vr1->min, vr1->max,
4513 strict_overflow_p) == 0
4514 && compare_values_warnv (vr0->min, vr1->min,
4515 strict_overflow_p) == 0
4516 && compare_values_warnv (vr0->max, vr1->max,
4517 strict_overflow_p) == 0)
0bca51f0
DN
4518 return boolean_false_node;
4519
4520 /* Otherwise, they may or may not be different. */
4521 else
4522 return NULL_TREE;
4523 }
4524 else if (comp == LT_EXPR || comp == LE_EXPR)
4525 {
4526 int tst;
4527
4528 /* If VR0 is to the left of VR1, return true. */
12df8a7e 4529 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
0bca51f0
DN
4530 if ((comp == LT_EXPR && tst == -1)
4531 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
12df8a7e
ILT
4532 {
4533 if (overflow_infinity_range_p (vr0)
4534 || overflow_infinity_range_p (vr1))
4535 *strict_overflow_p = true;
4536 return boolean_true_node;
4537 }
0bca51f0
DN
4538
4539 /* If VR0 is to the right of VR1, return false. */
12df8a7e 4540 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
0bca51f0
DN
4541 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
4542 || (comp == LE_EXPR && tst == 1))
12df8a7e
ILT
4543 {
4544 if (overflow_infinity_range_p (vr0)
4545 || overflow_infinity_range_p (vr1))
4546 *strict_overflow_p = true;
4547 return boolean_false_node;
4548 }
0bca51f0
DN
4549
4550 /* Otherwise, we don't know. */
4551 return NULL_TREE;
4552 }
b8698a0f 4553
0bca51f0
DN
4554 gcc_unreachable ();
4555}
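/* For example, compare_ranges (LT_EXPR, [1, 5], [10, 20], ...) returns
   boolean_true_node because VR0's maximum is strictly below VR1's minimum,
   while overlapping ranges such as [1, 15] and [10, 20] yield NULL_TREE.  */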
4556
4557
4558/* Given a value range VR, a value VAL and a comparison code COMP, return
227858d1 4559 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
0bca51f0
DN
4560 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
4561 always returns false. Return NULL_TREE if it is not always
12df8a7e
ILT
4562 possible to determine the value of the comparison. Also set
4563 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
4564 infinity was used in the test. */
0bca51f0
DN
4565
4566static tree
526ceb68 4567compare_range_with_value (enum tree_code comp, value_range *vr, tree val,
12df8a7e 4568 bool *strict_overflow_p)
0bca51f0
DN
4569{
4570 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
4571 return NULL_TREE;
4572
4573 /* Anti-ranges need to be handled separately. */
4574 if (vr->type == VR_ANTI_RANGE)
4575 {
4576 /* For anti-ranges, the only predicates that we can compute at
4577 compile time are equality and inequality. */
4578 if (comp == GT_EXPR
4579 || comp == GE_EXPR
4580 || comp == LT_EXPR
4581 || comp == LE_EXPR)
4582 return NULL_TREE;
4583
d2f3ffba 4584 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
e8f808b3 4585 if (value_inside_range (val, vr->min, vr->max) == 1)
0bca51f0
DN
4586 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
4587
4588 return NULL_TREE;
4589 }
4590
0c948c27
ILT
4591 if (!usable_range_p (vr, strict_overflow_p))
4592 return NULL_TREE;
4593
0bca51f0
DN
4594 if (comp == EQ_EXPR)
4595 {
4596 /* EQ_EXPR may only be computed if VR represents exactly
4597 one value. */
12df8a7e 4598 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
0bca51f0 4599 {
12df8a7e 4600 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
0bca51f0
DN
4601 if (cmp == 0)
4602 return boolean_true_node;
4603 else if (cmp == -1 || cmp == 1 || cmp == 2)
4604 return boolean_false_node;
4605 }
12df8a7e
ILT
4606 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
4607 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
5de2df7b 4608 return boolean_false_node;
0bca51f0
DN
4609
4610 return NULL_TREE;
4611 }
4612 else if (comp == NE_EXPR)
4613 {
4614 /* If VAL is not inside VR, then they are always different. */
12df8a7e
ILT
4615 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
4616 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
0bca51f0
DN
4617 return boolean_true_node;
4618
4619 /* If VR represents exactly one value equal to VAL, then return
4620 false. */
12df8a7e
ILT
4621 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
4622 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
0bca51f0
DN
4623 return boolean_false_node;
4624
4625 /* Otherwise, they may or may not be different. */
4626 return NULL_TREE;
4627 }
4628 else if (comp == LT_EXPR || comp == LE_EXPR)
4629 {
4630 int tst;
4631
4632 /* If VR is to the left of VAL, return true. */
12df8a7e 4633 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
0bca51f0
DN
4634 if ((comp == LT_EXPR && tst == -1)
4635 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
12df8a7e
ILT
4636 {
4637 if (overflow_infinity_range_p (vr))
4638 *strict_overflow_p = true;
4639 return boolean_true_node;
4640 }
0bca51f0
DN
4641
4642 /* If VR is to the right of VAL, return false. */
12df8a7e 4643 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
0bca51f0
DN
4644 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
4645 || (comp == LE_EXPR && tst == 1))
12df8a7e
ILT
4646 {
4647 if (overflow_infinity_range_p (vr))
4648 *strict_overflow_p = true;
4649 return boolean_false_node;
4650 }
0bca51f0
DN
4651
4652 /* Otherwise, we don't know. */
4653 return NULL_TREE;
4654 }
4655 else if (comp == GT_EXPR || comp == GE_EXPR)
4656 {
4657 int tst;
4658
4659 /* If VR is to the right of VAL, return true. */
12df8a7e 4660 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
0bca51f0
DN
4661 if ((comp == GT_EXPR && tst == 1)
4662 || (comp == GE_EXPR && (tst == 0 || tst == 1)))
12df8a7e
ILT
4663 {
4664 if (overflow_infinity_range_p (vr))
4665 *strict_overflow_p = true;
4666 return boolean_true_node;
4667 }
0bca51f0
DN
4668
4669 /* If VR is to the left of VAL, return false. */
12df8a7e 4670 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
0bca51f0
DN
4671 if ((comp == GT_EXPR && (tst == -1 || tst == 0))
4672 || (comp == GE_EXPR && tst == -1))
12df8a7e
ILT
4673 {
4674 if (overflow_infinity_range_p (vr))
4675 *strict_overflow_p = true;
4676 return boolean_false_node;
4677 }
0bca51f0
DN
4678
4679 /* Otherwise, we don't know. */
4680 return NULL_TREE;
4681 }
4682
4683 gcc_unreachable ();
4684}
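/* E.g. compare_range_with_value (NE_EXPR, [1, 5], 7, ...) returns
   boolean_true_node because 7 lies outside the range, whereas
   (LT_EXPR, [1, 5], 3, ...) returns NULL_TREE since the outcome depends on
   the actual value.  */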
4685
4686
4687/* Debugging dumps. */
4688
f90aa46c 4689void dump_value_range (FILE *, const value_range *);
526ceb68 4690void debug_value_range (value_range *);
227858d1
DN
4691void dump_all_value_ranges (FILE *);
4692void debug_all_value_ranges (void);
4693void dump_vr_equiv (FILE *, bitmap);
4694void debug_vr_equiv (bitmap);
4695
4696
4697/* Dump value range VR to FILE. */
4698
0bca51f0 4699void
f90aa46c 4700dump_value_range (FILE *file, const value_range *vr)
0bca51f0
DN
4701{
4702 if (vr == NULL)
4703 fprintf (file, "[]");
4704 else if (vr->type == VR_UNDEFINED)
4705 fprintf (file, "UNDEFINED");
4706 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4707 {
227858d1
DN
4708 tree type = TREE_TYPE (vr->min);
4709
0bca51f0 4710 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
227858d1 4711
e1f28918 4712 if (is_negative_overflow_infinity (vr->min))
12df8a7e 4713 fprintf (file, "-INF(OVF)");
e1f28918
ILT
4714 else if (INTEGRAL_TYPE_P (type)
4715 && !TYPE_UNSIGNED (type)
4716 && vrp_val_is_min (vr->min))
4717 fprintf (file, "-INF");
227858d1
DN
4718 else
4719 print_generic_expr (file, vr->min, 0);
4720
0bca51f0 4721 fprintf (file, ", ");
227858d1 4722
e1f28918 4723 if (is_positive_overflow_infinity (vr->max))
12df8a7e 4724 fprintf (file, "+INF(OVF)");
e1f28918
ILT
4725 else if (INTEGRAL_TYPE_P (type)
4726 && vrp_val_is_max (vr->max))
4727 fprintf (file, "+INF");
227858d1
DN
4728 else
4729 print_generic_expr (file, vr->max, 0);
4730
0bca51f0 4731 fprintf (file, "]");
227858d1
DN
4732
4733 if (vr->equiv)
4734 {
4735 bitmap_iterator bi;
4736 unsigned i, c = 0;
4737
4738 fprintf (file, " EQUIVALENCES: { ");
4739
4740 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
4741 {
4742 print_generic_expr (file, ssa_name (i), 0);
4743 fprintf (file, " ");
4744 c++;
4745 }
4746
4747 fprintf (file, "} (%u elements)", c);
4748 }
0bca51f0
DN
4749 }
4750 else if (vr->type == VR_VARYING)
4751 fprintf (file, "VARYING");
4752 else
4753 fprintf (file, "INVALID RANGE");
4754}
4755
4756
4757/* Dump value range VR to stderr. */
4758
24e47c76 4759DEBUG_FUNCTION void
526ceb68 4760debug_value_range (value_range *vr)
0bca51f0
DN
4761{
4762 dump_value_range (stderr, vr);
96644aba 4763 fprintf (stderr, "\n");
0bca51f0
DN
4764}
4765
4766
4767/* Dump value ranges of all SSA_NAMEs to FILE. */
4768
4769void
4770dump_all_value_ranges (FILE *file)
4771{
4772 size_t i;
4773
d9256277 4774 for (i = 0; i < num_vr_values; i++)
0bca51f0 4775 {
227858d1 4776 if (vr_value[i])
0bca51f0 4777 {
227858d1 4778 print_generic_expr (file, ssa_name (i), 0);
0bca51f0 4779 fprintf (file, ": ");
227858d1 4780 dump_value_range (file, vr_value[i]);
0bca51f0
DN
4781 fprintf (file, "\n");
4782 }
4783 }
4784
4785 fprintf (file, "\n");
4786}
4787
4788
4789/* Dump all value ranges to stderr. */
4790
24e47c76 4791DEBUG_FUNCTION void
0bca51f0
DN
4792debug_all_value_ranges (void)
4793{
4794 dump_all_value_ranges (stderr);
4795}
4796
4797
0bca51f0
DN
4798/* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
4799 create a new SSA name N and return the assertion assignment
36f291f7 4800 'N = ASSERT_EXPR <V, V OP W>'. */
0bca51f0 4801
355fe088 4802static gimple *
0bca51f0
DN
4803build_assert_expr_for (tree cond, tree v)
4804{
45db3141 4805 tree a;
538dd0b7 4806 gassign *assertion;
0bca51f0 4807
45db3141
RG
4808 gcc_assert (TREE_CODE (v) == SSA_NAME
4809 && COMPARISON_CLASS_P (cond));
0bca51f0 4810
45db3141
RG
4811 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
4812 assertion = gimple_build_assign (NULL_TREE, a);
0bca51f0
DN
4813
 4814 /* The new ASSERT_EXPR creates a new SSA name that replaces the
45db3141
RG
4815 operand of the ASSERT_EXPR. Create it so the new name and the old one
4816 are registered in the replacement table so that we can fix the SSA web
4817 after adding all the ASSERT_EXPRs. */
4818 create_new_def_for (v, assertion, NULL);
0bca51f0
DN
4819
4820 return assertion;
4821}
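/* For instance, for the condition 'x_1 > 10' this builds
   'x_2 = ASSERT_EXPR <x_1, x_1 > 10>' and registers x_2 as a new definition
   for x_1, so that uses dominated by the assertion can later be rewritten to
   the more constrained name when the SSA web is fixed up.  */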
4822
4823
4824/* Return true if the comparison in GIMPLE_COND STMT involves floating
 4825 point values. */
4826
4827static inline bool
355fe088 4828fp_predicate (gimple *stmt)
0bca51f0 4829{
726a989a
RB
4830 GIMPLE_CHECK (stmt, GIMPLE_COND);
4831
4832 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
0bca51f0
DN
4833}
4834
227858d1
DN
4835/* If the range of values taken by OP can be inferred after STMT executes,
4836 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4837 describes the inferred range. Return true if a range could be
4838 inferred. */
0bca51f0 4839
227858d1 4840static bool
355fe088 4841infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
0bca51f0 4842{
227858d1
DN
4843 *val_p = NULL_TREE;
4844 *comp_code_p = ERROR_MARK;
4845
9fabf0d4
DN
4846 /* Do not attempt to infer anything in names that flow through
4847 abnormal edges. */
4848 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
227858d1
DN
4849 return false;
4850
3d750496 4851 /* If STMT is the last statement of a basic block with no normal
8c5285e1
DN
4852 successors, there is no point inferring anything about any of its
4853 operands. We would not be able to find a proper insertion point
4854 for the assertion, anyway. */
3d750496
JL
4855 if (stmt_ends_bb_p (stmt))
4856 {
4857 edge_iterator ei;
4858 edge e;
4859
4860 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
1fd9f058 4861 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
3d750496
JL
4862 break;
4863 if (e == NULL)
4864 return false;
4865 }
8c5285e1 4866
76787f70 4867 if (infer_nonnull_range (stmt, op))
0bca51f0 4868 {
0e6a0e48
MG
4869 *val_p = build_int_cst (TREE_TYPE (op), 0);
4870 *comp_code_p = NE_EXPR;
4871 return true;
0bca51f0
DN
4872 }
4873
227858d1 4874 return false;
0bca51f0
DN
4875}
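/* As an example, a dereference such as '*p_1 = 5' lets infer_nonnull_range
   conclude that p_1 cannot be null, so the inferred range is returned as the
   assertion p_1 != 0.  */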
4876
4877
227858d1
DN
4878void dump_asserts_for (FILE *, tree);
4879void debug_asserts_for (tree);
4880void dump_all_asserts (FILE *);
4881void debug_all_asserts (void);
4882
4883/* Dump all the registered assertions for NAME to FILE. */
4884
4885void
4886dump_asserts_for (FILE *file, tree name)
4887{
ff507401 4888 assert_locus *loc;
227858d1
DN
4889
4890 fprintf (file, "Assertions to be inserted for ");
4891 print_generic_expr (file, name, 0);
4892 fprintf (file, "\n");
4893
4894 loc = asserts_for[SSA_NAME_VERSION (name)];
4895 while (loc)
4896 {
4897 fprintf (file, "\t");
726a989a 4898 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
227858d1
DN
4899 fprintf (file, "\n\tBB #%d", loc->bb->index);
4900 if (loc->e)
4901 {
4902 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4903 loc->e->dest->index);
a315c44c 4904 dump_edge_info (file, loc->e, dump_flags, 0);
227858d1
DN
4905 }
4906 fprintf (file, "\n\tPREDICATE: ");
e04de667 4907 print_generic_expr (file, loc->expr, 0);
5806f481 4908 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
227858d1
DN
4909 print_generic_expr (file, loc->val, 0);
4910 fprintf (file, "\n\n");
4911 loc = loc->next;
4912 }
4913
4914 fprintf (file, "\n");
4915}
4916
4917
4918/* Dump all the registered assertions for NAME to stderr. */
4919
24e47c76 4920DEBUG_FUNCTION void
227858d1
DN
4921debug_asserts_for (tree name)
4922{
4923 dump_asserts_for (stderr, name);
4924}
4925
4926
4927/* Dump all the registered assertions for all the names to FILE. */
4928
4929void
4930dump_all_asserts (FILE *file)
4931{
4932 unsigned i;
4933 bitmap_iterator bi;
4934
4935 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4936 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4937 dump_asserts_for (file, ssa_name (i));
4938 fprintf (file, "\n");
4939}
4940
4941
4942/* Dump all the registered assertions for all the names to stderr. */
4943
24e47c76 4944DEBUG_FUNCTION void
227858d1
DN
4945debug_all_asserts (void)
4946{
4947 dump_all_asserts (stderr);
4948}
4949
4950
4951/* If NAME doesn't have an ASSERT_EXPR registered for asserting
2ab8dbf4 4952 'EXPR COMP_CODE VAL' at a location that dominates block BB or
227858d1 4953 E->DEST, then register this location as a possible insertion point
2ab8dbf4 4954 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
227858d1
DN
4955
4956 BB, E and SI provide the exact insertion point for the new
4957 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
4958 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4959 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4960 must not be NULL. */
4961
4962static void
2ab8dbf4 4963register_new_assert_for (tree name, tree expr,
227858d1
DN
4964 enum tree_code comp_code,
4965 tree val,
4966 basic_block bb,
4967 edge e,
726a989a 4968 gimple_stmt_iterator si)
227858d1 4969{
ff507401 4970 assert_locus *n, *loc, *last_loc;
227858d1
DN
4971 basic_block dest_bb;
4972
77a74ed7 4973 gcc_checking_assert (bb == NULL || e == NULL);
227858d1
DN
4974
4975 if (e == NULL)
77a74ed7
NF
4976 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
4977 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
227858d1 4978
a1b969a0
RG
4979 /* Never build an assert comparing against an integer constant with
4980 TREE_OVERFLOW set. This confuses our undefined overflow warning
4981 machinery. */
3f5c390d
RB
4982 if (TREE_OVERFLOW_P (val))
4983 val = drop_tree_overflow (val);
a1b969a0 4984
227858d1
DN
4985 /* The new assertion A will be inserted at BB or E. We need to
4986 determine if the new location is dominated by a previously
4987 registered location for A. If we are doing an edge insertion,
4988 assume that A will be inserted at E->DEST. Note that this is not
4989 necessarily true.
b8698a0f 4990
227858d1
DN
4991 If E is a critical edge, it will be split. But even if E is
4992 split, the new block will dominate the same set of blocks that
4993 E->DEST dominates.
b8698a0f 4994
227858d1
DN
4995 The reverse, however, is not true: blocks dominated by E->DEST
4996 will not be dominated by the new block created to split E. So,
4997 if the insertion location is on a critical edge, we will not use
4998 the new location to move another assertion previously registered
4999 at a block dominated by E->DEST. */
5000 dest_bb = (bb) ? bb : e->dest;
5001
5002 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
5003 VAL at a block dominating DEST_BB, then we don't need to insert a new
5004 one. Similarly, if the same assertion already exists at a block
5005 dominated by DEST_BB and the new location is not on a critical
5006 edge, then update the existing location for the assertion (i.e.,
5007 move the assertion up in the dominance tree).
5008
5009 Note, this is implemented as a simple linked list because there
5010 should not be more than a handful of assertions registered per
5011 name. If this becomes a performance problem, a table hashed by
5012 COMP_CODE and VAL could be implemented. */
5013 loc = asserts_for[SSA_NAME_VERSION (name)];
5014 last_loc = loc;
227858d1
DN
5015 while (loc)
5016 {
5017 if (loc->comp_code == comp_code
5018 && (loc->val == val
2ab8dbf4
RG
5019 || operand_equal_p (loc->val, val, 0))
5020 && (loc->expr == expr
5021 || operand_equal_p (loc->expr, expr, 0)))
227858d1 5022 {
f7a39c55 5023 /* If E is not a critical edge and DEST_BB
227858d1
DN
5024 dominates the existing location for the assertion, move
5025 the assertion up in the dominance tree by updating its
5026 location information. */
5027 if ((e == NULL || !EDGE_CRITICAL_P (e))
5028 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
5029 {
5030 loc->bb = dest_bb;
5031 loc->e = e;
5032 loc->si = si;
5033 return;
5034 }
5035 }
5036
5037 /* Update the last node of the list and move to the next one. */
5038 last_loc = loc;
5039 loc = loc->next;
5040 }
5041
5042 /* If we didn't find an assertion already registered for
5043 NAME COMP_CODE VAL, add a new one at the end of the list of
5044 assertions associated with NAME. */
ff507401 5045 n = XNEW (struct assert_locus);
227858d1
DN
5046 n->bb = dest_bb;
5047 n->e = e;
5048 n->si = si;
5049 n->comp_code = comp_code;
5050 n->val = val;
2ab8dbf4 5051 n->expr = expr;
227858d1
DN
5052 n->next = NULL;
5053
5054 if (last_loc)
5055 last_loc->next = n;
5056 else
5057 asserts_for[SSA_NAME_VERSION (name)] = n;
5058
5059 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
5060}
5061
a26a02d7
RAE
5062/* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
5063 Extract a suitable test code and value and store them into *CODE_P and
5064 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
5065
5066 If no extraction was possible, return FALSE, otherwise return TRUE.
5067
5068 If INVERT is true, then we invert the result stored into *CODE_P. */
764a79ed
RAE
5069
5070static bool
5071extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
5072 tree cond_op0, tree cond_op1,
5073 bool invert, enum tree_code *code_p,
5074 tree *val_p)
5075{
5076 enum tree_code comp_code;
5077 tree val;
5078
5079 /* We have a comparison of the form NAME COMP VAL
5080 or VAL COMP NAME. */
5081 if (name == cond_op1)
5082 {
5083 /* If the predicate is of the form VAL COMP NAME, flip
5084 COMP around because we need to register NAME as the
5085 first operand in the predicate. */
5086 comp_code = swap_tree_comparison (cond_code);
5087 val = cond_op0;
5088 }
e04de667 5089 else if (name == cond_op0)
764a79ed
RAE
5090 {
5091 /* The comparison is of the form NAME COMP VAL, so the
5092 comparison code remains unchanged. */
5093 comp_code = cond_code;
5094 val = cond_op1;
5095 }
e04de667
PP
5096 else
5097 gcc_unreachable ();
764a79ed
RAE
5098
5099 /* Invert the comparison code as necessary. */
5100 if (invert)
5101 comp_code = invert_tree_comparison (comp_code, 0);
5102
305708ce
YR
5103 /* VRP only handles integral and pointer types. */
5104 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
5105 && ! POINTER_TYPE_P (TREE_TYPE (val)))
764a79ed
RAE
5106 return false;
5107
5108 /* Do not register always-false predicates.
5109 FIXME: this works around a limitation in fold() when dealing with
5110 enumerations. Given 'enum { N1, N2 } x;', fold will not
5111 fold 'if (x > N2)' to 'if (0)'. */
5112 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
5113 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
5114 {
5115 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
5116 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
5117
5118 if (comp_code == GT_EXPR
5119 && (!max
5120 || compare_values (val, max) == 0))
5121 return false;
5122
5123 if (comp_code == LT_EXPR
5124 && (!min
5125 || compare_values (val, min) == 0))
5126 return false;
5127 }
5128 *code_p = comp_code;
5129 *val_p = val;
5130 return true;
5131}
279f3eb5 5132
ad193f32
JJ
5133/* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
5134 (otherwise return VAL). VAL and MASK must be zero-extended for
5135 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
5136 (to transform signed values into unsigned) and at the end xor
5137 SGNBIT back. */
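/* A worked example, purely illustrative: with PREC == 8, SGNBIT == 0,
   VAL == 5 (binary 00000101) and MASK == 0x0c (binary 00001100), the
   smallest RES with RES > VAL and (RES & MASK) == RES is 8 (00001000).
   The loop below skips bits 1 and 2 (not set in MASK) and, at bit 4,
   computes ((5 + 4) & ~3) & 0x0c == 8, which is greater than VAL and is
   therefore returned.  */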
5138
807e902e
KZ
5139static wide_int
5140masked_increment (const wide_int &val_in, const wide_int &mask,
5141 const wide_int &sgnbit, unsigned int prec)
ad193f32 5142{
807e902e 5143 wide_int bit = wi::one (prec), res;
ad193f32
JJ
5144 unsigned int i;
5145
807e902e 5146 wide_int val = val_in ^ sgnbit;
27bcd47c 5147 for (i = 0; i < prec; i++, bit += bit)
ad193f32
JJ
5148 {
5149 res = mask;
807e902e 5150 if ((res & bit) == 0)
ad193f32 5151 continue;
807e902e 5152 res = bit - 1;
27bcd47c
LC
5153 res = (val + bit).and_not (res);
5154 res &= mask;
807e902e 5155 if (wi::gtu_p (res, val))
27bcd47c 5156 return res ^ sgnbit;
ad193f32 5157 }
27bcd47c 5158 return val ^ sgnbit;
ad193f32
JJ
5159}
5160
2ab8dbf4
RG
5161/* Try to register an edge assertion for SSA name NAME on edge E for
5162 the condition COND contributing to the conditional jump pointed to by BSI.
d476245d 5163 Invert the condition COND if INVERT is true. */
2ab8dbf4 5164
d476245d 5165static void
726a989a 5166register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
a26a02d7
RAE
5167 enum tree_code cond_code,
5168 tree cond_op0, tree cond_op1, bool invert)
2ab8dbf4
RG
5169{
5170 tree val;
5171 enum tree_code comp_code;
2ab8dbf4 5172
a26a02d7
RAE
5173 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
5174 cond_op0,
5175 cond_op1,
5176 invert, &comp_code, &val))
d476245d 5177 return;
2ab8dbf4
RG
5178
5179 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
5180 reachable from E. */
6e07f9aa 5181 if (live_on_edge (e, name))
d476245d 5182 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
2ab8dbf4
RG
5183
5184 /* In the case of NAME <= CST and NAME being defined as
5185 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
5186 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
5187 This catches range and anti-range tests. */
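  /* Illustrative sketch of the transformation described above (names and
     constants are made up): for

         unsigned char n2 = ...;
         unsigned char n = n2 + 10;
         if (n <= 20)
           ... use of n2 ...

     the true edge gets an assertion equivalent to
     (unsigned char)(n2 + 10) <= 20, from which VRP derives the anti-range
     n2 = ~[11, 245], i.e. n2 <= 10 (CST - CST2) or n2 >= 246
     (-CST2 modulo 256).  */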
5188 if ((comp_code == LE_EXPR
5189 || comp_code == GT_EXPR)
5190 && TREE_CODE (val) == INTEGER_CST
5191 && TYPE_UNSIGNED (TREE_TYPE (val)))
5192 {
355fe088 5193 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
70b7b037 5194 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
2ab8dbf4
RG
5195
5196 /* Extract CST2 from the (optional) addition. */
726a989a
RB
5197 if (is_gimple_assign (def_stmt)
5198 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
2ab8dbf4 5199 {
726a989a
RB
5200 name2 = gimple_assign_rhs1 (def_stmt);
5201 cst2 = gimple_assign_rhs2 (def_stmt);
2ab8dbf4
RG
5202 if (TREE_CODE (name2) == SSA_NAME
5203 && TREE_CODE (cst2) == INTEGER_CST)
5204 def_stmt = SSA_NAME_DEF_STMT (name2);
5205 }
5206
70b7b037 5207 /* Extract NAME2 from the (optional) sign-changing cast. */
726a989a 5208 if (gimple_assign_cast_p (def_stmt))
70b7b037 5209 {
1a87cf0c 5210 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
726a989a
RB
5211 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
5212 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
5213 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
5214 name3 = gimple_assign_rhs1 (def_stmt);
70b7b037 5215 }
2ab8dbf4 5216
70b7b037
RG
5217 /* If name3 is used later, create an ASSERT_EXPR for it. */
5218 if (name3 != NULL_TREE
5219 && TREE_CODE (name3) == SSA_NAME
2ab8dbf4
RG
5220 && (cst2 == NULL_TREE
5221 || TREE_CODE (cst2) == INTEGER_CST)
70b7b037 5222 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
6e07f9aa 5223 && live_on_edge (e, name3))
70b7b037
RG
5224 {
5225 tree tmp;
5226
5227 /* Build an expression for the range test. */
5228 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
5229 if (cst2 != NULL_TREE)
5230 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
5231
5232 if (dump_file)
5233 {
5234 fprintf (dump_file, "Adding assert for ");
5235 print_generic_expr (dump_file, name3, 0);
5236 fprintf (dump_file, " from ");
5237 print_generic_expr (dump_file, tmp, 0);
5238 fprintf (dump_file, "\n");
5239 }
5240
5241 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
70b7b037
RG
5242 }
5243
5244 /* If name2 is used later, create an ASSERT_EXPR for it. */
5245 if (name2 != NULL_TREE
5246 && TREE_CODE (name2) == SSA_NAME
5247 && TREE_CODE (cst2) == INTEGER_CST
5248 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
6e07f9aa 5249 && live_on_edge (e, name2))
2ab8dbf4
RG
5250 {
5251 tree tmp;
5252
5253 /* Build an expression for the range test. */
5254 tmp = name2;
5255 if (TREE_TYPE (name) != TREE_TYPE (name2))
5256 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
5257 if (cst2 != NULL_TREE)
5258 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
5259
5260 if (dump_file)
5261 {
5262 fprintf (dump_file, "Adding assert for ");
5263 print_generic_expr (dump_file, name2, 0);
5264 fprintf (dump_file, " from ");
5265 print_generic_expr (dump_file, tmp, 0);
5266 fprintf (dump_file, "\n");
5267 }
5268
5269 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
2ab8dbf4
RG
5270 }
5271 }
5272
83ede847
RB
5273 /* In the case of post-in/decrement tests like if (i++) ... and uses
5274 of the in/decremented value on the edge, the extra name we want to
5275 assert for is not on the def chain of the name compared. Instead
4fe65172
RB
5276 it is in the set of use stmts.
5277 Similar cases happen for conversions that were simplified through
5278 fold_{sign_changed,widened}_comparison. */
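  /* Illustrative sketch (made-up SSA names): for a post-increment test

         i_2 = i_1 + 1;
         if (i_1 != 0)
           ... use of i_2 ...

     the name compared is i_1 but the interesting name on the edge is i_2,
     which is found among i_1's immediate uses; the loop below would then
     register the assertion i_2 != 1 (the constant folded through the
     PLUS_EXPR).  */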
83ede847
RB
5279 if ((comp_code == NE_EXPR
5280 || comp_code == EQ_EXPR)
5281 && TREE_CODE (val) == INTEGER_CST)
5282 {
5283 imm_use_iterator ui;
355fe088 5284 gimple *use_stmt;
83ede847
RB
5285 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
5286 {
83ede847
RB
5287 if (!is_gimple_assign (use_stmt))
5288 continue;
5289
4fe65172
RB
5290 /* Cut off to use-stmts that are dominating the predecessor. */
5291 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
83ede847
RB
5292 continue;
5293
4fe65172
RB
5294 tree name2 = gimple_assign_lhs (use_stmt);
5295 if (TREE_CODE (name2) != SSA_NAME
5296 || !live_on_edge (e, name2))
83ede847
RB
5297 continue;
5298
4fe65172
RB
5299 enum tree_code code = gimple_assign_rhs_code (use_stmt);
5300 tree cst;
5301 if (code == PLUS_EXPR
5302 || code == MINUS_EXPR)
83ede847 5303 {
4fe65172
RB
5304 cst = gimple_assign_rhs2 (use_stmt);
5305 if (TREE_CODE (cst) != INTEGER_CST)
5306 continue;
83ede847 5307 cst = int_const_binop (code, val, cst);
83ede847 5308 }
4fe65172 5309 else if (CONVERT_EXPR_CODE_P (code))
fe9acb3a 5310 {
ef3b59ac 5311 /* For truncating conversions we cannot record
fe9acb3a
RB
5312 an inequality. */
5313 if (comp_code == NE_EXPR
5314 && (TYPE_PRECISION (TREE_TYPE (name2))
ef3b59ac 5315 < TYPE_PRECISION (TREE_TYPE (name))))
fe9acb3a
RB
5316 continue;
5317 cst = fold_convert (TREE_TYPE (name2), val);
5318 }
4fe65172
RB
5319 else
5320 continue;
5321
5322 if (TREE_OVERFLOW_P (cst))
5323 cst = drop_tree_overflow (cst);
5324 register_new_assert_for (name2, name2, comp_code, cst,
5325 NULL, e, bsi);
83ede847
RB
5326 }
5327 }
5328
3877a6a6
JJ
5329 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
5330 && TREE_CODE (val) == INTEGER_CST)
5331 {
355fe088 5332 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
ad193f32 5333 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
3877a6a6 5334 tree val2 = NULL_TREE;
01c1f20d 5335 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
807e902e 5336 wide_int mask = wi::zero (prec);
440d3472
JJ
5337 unsigned int nprec = prec;
5338 enum tree_code rhs_code = ERROR_MARK;
5339
5340 if (is_gimple_assign (def_stmt))
5341 rhs_code = gimple_assign_rhs_code (def_stmt);
3877a6a6 5342
d014a712
PP
5343 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
5344 assert that A != CST1 -+ CST2. */
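    /* Illustrative sketch (made-up names): given n_2 = a_1 + 3 and an edge
       taken when n_2 != 10, the code below registers a_1 != 7; for
       n_2 = a_1 - 3 and n_2 == 10 it would register a_1 == 13.  */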
5345 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
5346 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
5347 {
5348 tree op0 = gimple_assign_rhs1 (def_stmt);
5349 tree op1 = gimple_assign_rhs2 (def_stmt);
5350 if (TREE_CODE (op0) == SSA_NAME
5351 && TREE_CODE (op1) == INTEGER_CST
6e07f9aa 5352 && live_on_edge (e, op0))
d014a712
PP
5353 {
5354 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
5355 ? MINUS_EXPR : PLUS_EXPR);
5356 op1 = int_const_binop (reverse_op, val, op1);
5357 if (TREE_OVERFLOW (op1))
5358 op1 = drop_tree_overflow (op1);
5359 register_new_assert_for (op0, op0, comp_code, op1, NULL, e, bsi);
5360 }
5361 }
5362
7b5c5139
JJ
5363 /* Add asserts for NAME cmp CST and NAME being defined
5364 as NAME = (int) NAME2. */
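    /* A worked example, purely illustrative, with u a 32-bit unsigned int:

           int n = (int) u;
           if (n < 0)
             ...

       the block below registers on the true edge the assertion
       u + 0x80000000 <= 0x7fffffff, evaluated in the unsigned type, which
       by wraparound is equivalent to u >= 0x80000000 -- exactly the values
       of u for which (int) u is negative.  */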
5365 if (!TYPE_UNSIGNED (TREE_TYPE (val))
5366 && (comp_code == LE_EXPR || comp_code == LT_EXPR
5367 || comp_code == GT_EXPR || comp_code == GE_EXPR)
5368 && gimple_assign_cast_p (def_stmt))
5369 {
5370 name2 = gimple_assign_rhs1 (def_stmt);
440d3472 5371 if (CONVERT_EXPR_CODE_P (rhs_code)
7b5c5139
JJ
5372 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5373 && TYPE_UNSIGNED (TREE_TYPE (name2))
5374 && prec == TYPE_PRECISION (TREE_TYPE (name2))
5375 && (comp_code == LE_EXPR || comp_code == GT_EXPR
5376 || !tree_int_cst_equal (val,
5377 TYPE_MIN_VALUE (TREE_TYPE (val))))
6e07f9aa 5378 && live_on_edge (e, name2))
7b5c5139
JJ
5379 {
5380 tree tmp, cst;
5381 enum tree_code new_comp_code = comp_code;
5382
5383 cst = fold_convert (TREE_TYPE (name2),
5384 TYPE_MIN_VALUE (TREE_TYPE (val)));
5385 /* Build an expression for the range test. */
5386 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
5387 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
5388 fold_convert (TREE_TYPE (name2), val));
5389 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
5390 {
5391 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
5392 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
5393 build_int_cst (TREE_TYPE (name2), 1));
5394 }
5395
5396 if (dump_file)
5397 {
5398 fprintf (dump_file, "Adding assert for ");
5399 print_generic_expr (dump_file, name2, 0);
5400 fprintf (dump_file, " from ");
5401 print_generic_expr (dump_file, tmp, 0);
5402 fprintf (dump_file, "\n");
5403 }
5404
5405 register_new_assert_for (name2, tmp, new_comp_code, cst, NULL,
5406 e, bsi);
7b5c5139
JJ
5407 }
5408 }
5409
5410 /* Add asserts for NAME cmp CST and NAME being defined as
5411 NAME = NAME2 >> CST2.
5412
5413 Extract CST2 from the right shift. */
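    /* Illustrative sketch: for n = u >> 4 (u unsigned, 32 bits) and an edge
       taken when n == 3, the code below sets MASK == 0xf and VAL2 == 0x30
       and ends up registering u - 0x30 <= 0xf, i.e. u is in [0x30, 0x3f].  */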
440d3472 5414 if (rhs_code == RSHIFT_EXPR)
3877a6a6
JJ
5415 {
5416 name2 = gimple_assign_rhs1 (def_stmt);
5417 cst2 = gimple_assign_rhs2 (def_stmt);
5418 if (TREE_CODE (name2) == SSA_NAME
cc269bb6 5419 && tree_fits_uhwi_p (cst2)
3877a6a6 5420 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
ae7e9ddd 5421 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
0ea62d93 5422 && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val)))
6e07f9aa 5423 && live_on_edge (e, name2))
3877a6a6 5424 {
807e902e 5425 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
3877a6a6
JJ
5426 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
5427 }
5428 }
3877a6a6
JJ
5429 if (val2 != NULL_TREE
5430 && TREE_CODE (val2) == INTEGER_CST
5431 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
5432 TREE_TYPE (val),
5433 val2, cst2), val))
5434 {
5435 enum tree_code new_comp_code = comp_code;
5436 tree tmp, new_val;
5437
5438 tmp = name2;
5439 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
5440 {
5441 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
5442 {
3877a6a6
JJ
5443 tree type = build_nonstandard_integer_type (prec, 1);
5444 tmp = build1 (NOP_EXPR, type, name2);
5445 val2 = fold_convert (type, val2);
5446 }
5447 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
807e902e 5448 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
3877a6a6
JJ
5449 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
5450 }
5451 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
4c445590 5452 {
807e902e
KZ
5453 wide_int minval
5454 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
4c445590 5455 new_val = val2;
807e902e 5456 if (minval == new_val)
4c445590
JJ
5457 new_val = NULL_TREE;
5458 }
3877a6a6
JJ
5459 else
5460 {
807e902e
KZ
5461 wide_int maxval
5462 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
5463 mask |= val2;
27bcd47c 5464 if (mask == maxval)
01c1f20d
JJ
5465 new_val = NULL_TREE;
5466 else
807e902e 5467 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
3877a6a6
JJ
5468 }
5469
01c1f20d 5470 if (new_val)
3877a6a6 5471 {
01c1f20d
JJ
5472 if (dump_file)
5473 {
5474 fprintf (dump_file, "Adding assert for ");
5475 print_generic_expr (dump_file, name2, 0);
5476 fprintf (dump_file, " from ");
5477 print_generic_expr (dump_file, tmp, 0);
5478 fprintf (dump_file, "\n");
5479 }
3877a6a6 5480
01c1f20d
JJ
5481 register_new_assert_for (name2, tmp, new_comp_code, new_val,
5482 NULL, e, bsi);
01c1f20d 5483 }
3877a6a6 5484 }
ad193f32
JJ
5485
5486 /* Add asserts for NAME cmp CST and NAME being defined as
5487 NAME = NAME2 & CST2.
5488
440d3472
JJ
5489 Extract CST2 from the and.
5490
5491 Also handle
5492 NAME = (unsigned) NAME2;
5493 casts where NAME's type is unsigned and has smaller precision
5494 than NAME2's type as if it was NAME = NAME2 & MASK. */
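    /* Illustrative sketch of the net effect (made-up values): for
       n = x & 0xf0 and an edge taken when n == 0x30, the code below derives
       MINV == 0x30 and MAXV == 0x3f and registers x - 0x30 <= 0x0f, i.e.
       x is in [0x30, 0x3f].  A narrowing cast such as
       n = (unsigned char) x2 is treated the same way as n = x2 & 0xff.  */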
ad193f32
JJ
5495 names[0] = NULL_TREE;
5496 names[1] = NULL_TREE;
5497 cst2 = NULL_TREE;
440d3472
JJ
5498 if (rhs_code == BIT_AND_EXPR
5499 || (CONVERT_EXPR_CODE_P (rhs_code)
4cb5f5a3 5500 && INTEGRAL_TYPE_P (TREE_TYPE (val))
440d3472
JJ
5501 && TYPE_UNSIGNED (TREE_TYPE (val))
5502 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
d476245d 5503 > prec))
ad193f32
JJ
5504 {
5505 name2 = gimple_assign_rhs1 (def_stmt);
440d3472
JJ
5506 if (rhs_code == BIT_AND_EXPR)
5507 cst2 = gimple_assign_rhs2 (def_stmt);
5508 else
5509 {
5510 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
5511 nprec = TYPE_PRECISION (TREE_TYPE (name2));
5512 }
ad193f32
JJ
5513 if (TREE_CODE (name2) == SSA_NAME
5514 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5515 && TREE_CODE (cst2) == INTEGER_CST
5516 && !integer_zerop (cst2)
440d3472 5517 && (nprec > 1
ad193f32
JJ
5518 || TYPE_UNSIGNED (TREE_TYPE (val))))
5519 {
355fe088 5520 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
ad193f32
JJ
5521 if (gimple_assign_cast_p (def_stmt2))
5522 {
5523 names[1] = gimple_assign_rhs1 (def_stmt2);
5524 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
5525 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
5526 || (TYPE_PRECISION (TREE_TYPE (name2))
5527 != TYPE_PRECISION (TREE_TYPE (names[1])))
6e07f9aa 5528 || !live_on_edge (e, names[1]))
ad193f32
JJ
5529 names[1] = NULL_TREE;
5530 }
6e07f9aa 5531 if (live_on_edge (e, name2))
ad193f32
JJ
5532 names[0] = name2;
5533 }
5534 }
5535 if (names[0] || names[1])
5536 {
807e902e
KZ
5537 wide_int minv, maxv, valv, cst2v;
5538 wide_int tem, sgnbit;
5539 bool valid_p = false, valn, cst2n;
ad193f32
JJ
5540 enum tree_code ccode = comp_code;
5541
807e902e
KZ
5542 valv = wide_int::from (val, nprec, UNSIGNED);
5543 cst2v = wide_int::from (cst2, nprec, UNSIGNED);
5544 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
5545 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
ad193f32
JJ
5546 /* If CST2 doesn't have most significant bit set,
5547 but VAL is negative, we have comparison like
5548 if ((x & 0x123) > -4) (always true). Just give up. */
5549 if (!cst2n && valn)
5550 ccode = ERROR_MARK;
5551 if (cst2n)
807e902e 5552 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
ad193f32 5553 else
807e902e 5554 sgnbit = wi::zero (nprec);
27bcd47c 5555 minv = valv & cst2v;
ad193f32
JJ
5556 switch (ccode)
5557 {
5558 case EQ_EXPR:
5559 /* Minimum unsigned value for equality is VAL & CST2
5560 (should be equal to VAL, otherwise we probably should
5561 have folded the comparison into false) and
5562 maximum unsigned value is VAL | ~CST2. */
27bcd47c 5563 maxv = valv | ~cst2v;
ad193f32
JJ
5564 valid_p = true;
5565 break;
807e902e 5566
ad193f32 5567 case NE_EXPR:
27bcd47c 5568 tem = valv | ~cst2v;
ad193f32 5569 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
807e902e 5570 if (valv == 0)
ad193f32
JJ
5571 {
5572 cst2n = false;
807e902e 5573 sgnbit = wi::zero (nprec);
ad193f32
JJ
5574 goto gt_expr;
5575 }
5576 /* If (VAL | ~CST2) is all ones, handle it as
5577 (X & CST2) < VAL. */
807e902e 5578 if (tem == -1)
ad193f32
JJ
5579 {
5580 cst2n = false;
5581 valn = false;
807e902e 5582 sgnbit = wi::zero (nprec);
ad193f32
JJ
5583 goto lt_expr;
5584 }
807e902e
KZ
5585 if (!cst2n && wi::neg_p (cst2v))
5586 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
5587 if (sgnbit != 0)
ad193f32 5588 {
27bcd47c 5589 if (valv == sgnbit)
ad193f32
JJ
5590 {
5591 cst2n = true;
5592 valn = true;
5593 goto gt_expr;
5594 }
807e902e 5595 if (tem == wi::mask (nprec - 1, false, nprec))
ad193f32
JJ
5596 {
5597 cst2n = true;
5598 goto lt_expr;
5599 }
5600 if (!cst2n)
807e902e 5601 sgnbit = wi::zero (nprec);
ad193f32
JJ
5602 }
5603 break;
807e902e 5604
ad193f32
JJ
5605 case GE_EXPR:
5606 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
5607 is VAL and maximum unsigned value is ~0. For signed
5608 comparison, if CST2 doesn't have most significant bit
5609 set, handle it similarly. If CST2 has MSB set,
5610 the minimum is the same, and maximum is ~0U/2. */
27bcd47c 5611 if (minv != valv)
ad193f32
JJ
5612 {
5613 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
5614 VAL. */
440d3472 5615 minv = masked_increment (valv, cst2v, sgnbit, nprec);
27bcd47c 5616 if (minv == valv)
ad193f32
JJ
5617 break;
5618 }
807e902e 5619 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
ad193f32
JJ
5620 valid_p = true;
5621 break;
807e902e 5622
ad193f32
JJ
5623 case GT_EXPR:
5624 gt_expr:
5625 /* Find out smallest MINV where MINV > VAL
5626 && (MINV & CST2) == MINV, if any. If VAL is signed and
440d3472
JJ
5627 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
5628 minv = masked_increment (valv, cst2v, sgnbit, nprec);
27bcd47c 5629 if (minv == valv)
ad193f32 5630 break;
807e902e 5631 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
ad193f32
JJ
5632 valid_p = true;
5633 break;
807e902e 5634
ad193f32
JJ
5635 case LE_EXPR:
5636 /* Minimum unsigned value for <= is 0 and maximum
5637 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
5638 Otherwise, find smallest VAL2 where VAL2 > VAL
5639 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5640 as maximum.
5641 For signed comparison, if CST2 doesn't have most
5642 significant bit set, handle it similarly. If CST2 has
5643 MSB set, the maximum is the same and minimum is INT_MIN. */
27bcd47c 5644 if (minv == valv)
ad193f32
JJ
5645 maxv = valv;
5646 else
5647 {
440d3472 5648 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
27bcd47c 5649 if (maxv == valv)
ad193f32 5650 break;
807e902e 5651 maxv -= 1;
ad193f32 5652 }
27bcd47c 5653 maxv |= ~cst2v;
ad193f32
JJ
5654 minv = sgnbit;
5655 valid_p = true;
5656 break;
807e902e 5657
ad193f32
JJ
5658 case LT_EXPR:
5659 lt_expr:
5660 /* Minimum unsigned value for < is 0 and maximum
5661 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
5662 Otherwise, find smallest VAL2 where VAL2 > VAL
5663 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5664 as maximum.
5665 For signed comparison, if CST2 doesn't have most
5666 significant bit set, handle it similarly. If CST2 has
5667 MSB set, the maximum is the same and minimum is INT_MIN. */
27bcd47c 5668 if (minv == valv)
ad193f32 5669 {
27bcd47c 5670 if (valv == sgnbit)
ad193f32
JJ
5671 break;
5672 maxv = valv;
5673 }
5674 else
5675 {
440d3472 5676 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
27bcd47c 5677 if (maxv == valv)
ad193f32
JJ
5678 break;
5679 }
807e902e 5680 maxv -= 1;
27bcd47c 5681 maxv |= ~cst2v;
ad193f32
JJ
5682 minv = sgnbit;
5683 valid_p = true;
5684 break;
807e902e 5685
ad193f32
JJ
5686 default:
5687 break;
5688 }
5689 if (valid_p
807e902e 5690 && (maxv - minv) != -1)
ad193f32
JJ
5691 {
5692 tree tmp, new_val, type;
5693 int i;
5694
5695 for (i = 0; i < 2; i++)
5696 if (names[i])
5697 {
807e902e 5698 wide_int maxv2 = maxv;
ad193f32
JJ
5699 tmp = names[i];
5700 type = TREE_TYPE (names[i]);
5701 if (!TYPE_UNSIGNED (type))
5702 {
440d3472 5703 type = build_nonstandard_integer_type (nprec, 1);
ad193f32
JJ
5704 tmp = build1 (NOP_EXPR, type, names[i]);
5705 }
807e902e 5706 if (minv != 0)
ad193f32
JJ
5707 {
5708 tmp = build2 (PLUS_EXPR, type, tmp,
807e902e 5709 wide_int_to_tree (type, -minv));
27bcd47c 5710 maxv2 = maxv - minv;
ad193f32 5711 }
807e902e 5712 new_val = wide_int_to_tree (type, maxv2);
ad193f32
JJ
5713
5714 if (dump_file)
5715 {
5716 fprintf (dump_file, "Adding assert for ");
5717 print_generic_expr (dump_file, names[i], 0);
5718 fprintf (dump_file, " from ");
5719 print_generic_expr (dump_file, tmp, 0);
5720 fprintf (dump_file, "\n");
5721 }
5722
5723 register_new_assert_for (names[i], tmp, LE_EXPR,
5724 new_val, NULL, e, bsi);
ad193f32
JJ
5725 }
5726 }
5727 }
3877a6a6 5728 }
2ab8dbf4
RG
5729}
5730
279f3eb5
JL
5731/* OP is an operand of a truth value expression which is known to have
5732 a particular value. Register any asserts for OP and for any
b8698a0f 5733 operands in OP's defining statement.
279f3eb5
JL
5734
5735 If CODE is EQ_EXPR, then we want to register OP is zero (false),
5736 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
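/* Illustrative sketch (made-up SSA names): for the gimplified form of
   if (a_1 < b_2 && c_3 != 0)

       t_4 = a_1 < b_2;
       t_5 = c_3 != 0;
       t_6 = t_4 & t_5;
       if (t_6 != 0) ...

   calling this function on t_4 and t_5 with CODE == NE_EXPR on the true
   edge recurses into their defining comparisons and registers the
   assertions a_1 < b_2 and c_3 != 0 for that edge.  */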
5737
d476245d 5738static void
279f3eb5 5739register_edge_assert_for_1 (tree op, enum tree_code code,
726a989a 5740 edge e, gimple_stmt_iterator bsi)
279f3eb5 5741{
355fe088 5742 gimple *op_def;
726a989a 5743 tree val;
a26a02d7 5744 enum tree_code rhs_code;
227858d1 5745
279f3eb5
JL
5746 /* We only care about SSA_NAMEs. */
5747 if (TREE_CODE (op) != SSA_NAME)
d476245d 5748 return;
227858d1 5749
279f3eb5 5750 /* We know that OP will have a zero or nonzero value. If OP is live
b24ca895 5751 on this edge, go ahead and register an assert for OP. */
6e07f9aa 5752 if (live_on_edge (e, op))
279f3eb5
JL
5753 {
5754 val = build_int_cst (TREE_TYPE (op), 0);
2ab8dbf4 5755 register_new_assert_for (op, op, code, val, NULL, e, bsi);
279f3eb5
JL
5756 }
5757
5758 /* Now look at how OP is set. If it's set from a comparison,
5759 a truth operation or some bit operations, then we may be able
5760 to register information about the operands of that assignment. */
5761 op_def = SSA_NAME_DEF_STMT (op);
726a989a 5762 if (gimple_code (op_def) != GIMPLE_ASSIGN)
d476245d 5763 return;
279f3eb5 5764
726a989a 5765 rhs_code = gimple_assign_rhs_code (op_def);
279f3eb5 5766
726a989a 5767 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
227858d1 5768 {
34fc5065 5769 bool invert = (code == EQ_EXPR ? true : false);
726a989a
RB
5770 tree op0 = gimple_assign_rhs1 (op_def);
5771 tree op1 = gimple_assign_rhs2 (op_def);
227858d1 5772
2ab8dbf4 5773 if (TREE_CODE (op0) == SSA_NAME)
d476245d 5774 register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1, invert);
2ab8dbf4 5775 if (TREE_CODE (op1) == SSA_NAME)
d476245d 5776 register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1, invert);
279f3eb5
JL
5777 }
5778 else if ((code == NE_EXPR
aebf4828 5779 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
279f3eb5 5780 || (code == EQ_EXPR
aebf4828 5781 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
279f3eb5
JL
5782 {
5783 /* Recurse on each operand. */
6b1184ba
RB
5784 tree op0 = gimple_assign_rhs1 (op_def);
5785 tree op1 = gimple_assign_rhs2 (op_def);
5786 if (TREE_CODE (op0) == SSA_NAME
5787 && has_single_use (op0))
d476245d 5788 register_edge_assert_for_1 (op0, code, e, bsi);
6b1184ba
RB
5789 if (TREE_CODE (op1) == SSA_NAME
5790 && has_single_use (op1))
d476245d 5791 register_edge_assert_for_1 (op1, code, e, bsi);
279f3eb5 5792 }
98958241
KT
5793 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
5794 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
279f3eb5 5795 {
34fc5065
RG
5796 /* Recurse, flipping CODE. */
5797 code = invert_tree_comparison (code, false);
d476245d 5798 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, bsi);
279f3eb5 5799 }
726a989a 5800 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
279f3eb5 5801 {
34fc5065 5802 /* Recurse through the copy. */
d476245d 5803 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, bsi);
279f3eb5 5804 }
1a87cf0c 5805 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
b8698a0f 5806 {
b168a8df
JJ
5807 /* Recurse through the type conversion, unless it is a narrowing
5808 conversion or conversion from non-integral type. */
5809 tree rhs = gimple_assign_rhs1 (op_def);
5810 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
5811 && (TYPE_PRECISION (TREE_TYPE (rhs))
5812 <= TYPE_PRECISION (TREE_TYPE (op))))
d476245d 5813 register_edge_assert_for_1 (rhs, code, e, bsi);
279f3eb5 5814 }
279f3eb5 5815}
da11c5d2 5816
279f3eb5 5817/* Try to register an edge assertion for SSA name NAME on edge E for
d476245d
PP
5818 the condition COND contributing to the conditional jump pointed to by
5819 SI. */
da11c5d2 5820
d476245d 5821static void
726a989a 5822register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
a26a02d7
RAE
5823 enum tree_code cond_code, tree cond_op0,
5824 tree cond_op1)
279f3eb5
JL
5825{
5826 tree val;
5827 enum tree_code comp_code;
279f3eb5
JL
5828 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
5829
5830 /* Do not attempt to infer anything in names that flow through
5831 abnormal edges. */
5832 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
d476245d 5833 return;
279f3eb5 5834
a26a02d7
RAE
5835 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
5836 cond_op0, cond_op1,
5837 is_else_edge,
5838 &comp_code, &val))
d476245d 5839 return;
279f3eb5 5840
2ab8dbf4 5841 /* Register ASSERT_EXPRs for name. */
d476245d
PP
5842 register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
5843 cond_op1, is_else_edge);
2ab8dbf4 5844
279f3eb5
JL
5845
5846 /* If COND is effectively an equality test of an SSA_NAME against
5847 the value zero or one, then we may be able to assert values
5848 for SSA_NAMEs which flow into COND. */
5849
aebf4828
KT
5850 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
5851 statement of NAME we can assert both operands of the BIT_AND_EXPR
2f8e468b 5852 have nonzero value. */
279f3eb5
JL
5853 if (((comp_code == EQ_EXPR && integer_onep (val))
5854 || (comp_code == NE_EXPR && integer_zerop (val))))
5855 {
355fe088 5856 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
279f3eb5 5857
726a989a 5858 if (is_gimple_assign (def_stmt)
aebf4828 5859 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
279f3eb5 5860 {
726a989a
RB
5861 tree op0 = gimple_assign_rhs1 (def_stmt);
5862 tree op1 = gimple_assign_rhs2 (def_stmt);
d476245d
PP
5863 register_edge_assert_for_1 (op0, NE_EXPR, e, si);
5864 register_edge_assert_for_1 (op1, NE_EXPR, e, si);
227858d1
DN
5865 }
5866 }
279f3eb5 5867
aebf4828
KT
5868 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
5869 statement of NAME we can assert both operands of the BIT_IOR_EXPR
279f3eb5
JL
5870 have zero value. */
5871 if (((comp_code == EQ_EXPR && integer_zerop (val))
5872 || (comp_code == NE_EXPR && integer_onep (val))))
227858d1 5873 {
355fe088 5874 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
279f3eb5 5875
aebf4828
KT
5876 /* For a BIT_IOR_EXPR, both operands necessarily have zero
5877 value only if NAME == 0, or if the type precision is one. */
726a989a 5878 if (is_gimple_assign (def_stmt)
aebf4828
KT
5879 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
5880 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
5881 || comp_code == EQ_EXPR)))
279f3eb5 5882 {
726a989a
RB
5883 tree op0 = gimple_assign_rhs1 (def_stmt);
5884 tree op1 = gimple_assign_rhs2 (def_stmt);
d476245d
PP
5885 register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
5886 register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
279f3eb5 5887 }
227858d1 5888 }
227858d1
DN
5889}
5890
5891
227858d1 5892/* Determine whether the outgoing edges of BB should receive an
279f3eb5 5893 ASSERT_EXPR for each of the operands of BB's LAST statement.
9bb6aa43 5894 The last statement of BB must be a COND_EXPR.
227858d1
DN
5895
5896 If any of the sub-graphs rooted at BB have an interesting use of
5897 the predicate operands, an assert location node is added to the
5898 list of assertions for the corresponding operands. */
5899
d476245d 5900static void
538dd0b7 5901find_conditional_asserts (basic_block bb, gcond *last)
227858d1 5902{
726a989a 5903 gimple_stmt_iterator bsi;
279f3eb5 5904 tree op;
227858d1
DN
5905 edge_iterator ei;
5906 edge e;
5907 ssa_op_iter iter;
5908
726a989a 5909 bsi = gsi_for_stmt (last);
227858d1
DN
5910
5911 /* Look for uses of the operands in each of the sub-graphs
5912 rooted at BB. We need to check each of the outgoing edges
5913 separately, so that we know what kind of ASSERT_EXPR to
5914 insert. */
5915 FOR_EACH_EDGE (e, ei, bb->succs)
5916 {
5917 if (e->dest == bb)
5918 continue;
5919
227858d1
DN
5920 /* Register the necessary assertions for each operand in the
5921 conditional predicate. */
5922 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
d476245d
PP
5923 register_edge_assert_for (op, e, bsi,
5924 gimple_cond_code (last),
5925 gimple_cond_lhs (last),
5926 gimple_cond_rhs (last));
227858d1 5927 }
227858d1
DN
5928}
5929
1aa9438f
JJ
5930struct case_info
5931{
5932 tree expr;
5933 basic_block bb;
5934};
5935
5936/* Compare two case labels sorting first by the destination bb index
9bb6aa43
RG
5937 and then by the case value. */
5938
5939static int
5940compare_case_labels (const void *p1, const void *p2)
5941{
1aa9438f
JJ
5942 const struct case_info *ci1 = (const struct case_info *) p1;
5943 const struct case_info *ci2 = (const struct case_info *) p2;
5944 int idx1 = ci1->bb->index;
5945 int idx2 = ci2->bb->index;
9bb6aa43 5946
1aa9438f 5947 if (idx1 < idx2)
9bb6aa43 5948 return -1;
1aa9438f 5949 else if (idx1 == idx2)
9bb6aa43
RG
5950 {
5951 /* Make sure the default label is first in a group. */
1aa9438f 5952 if (!CASE_LOW (ci1->expr))
9bb6aa43 5953 return -1;
1aa9438f 5954 else if (!CASE_LOW (ci2->expr))
9bb6aa43
RG
5955 return 1;
5956 else
1aa9438f
JJ
5957 return tree_int_cst_compare (CASE_LOW (ci1->expr),
5958 CASE_LOW (ci2->expr));
9bb6aa43
RG
5959 }
5960 else
5961 return 1;
5962}
5963
5964/* Determine whether the outgoing edges of BB should receive an
5965 ASSERT_EXPR for each of the operands of BB's LAST statement.
5966 The last statement of BB must be a SWITCH_EXPR.
5967
5968 If any of the sub-graphs rooted at BB have an interesting use of
5969 the predicate operands, an assert location node is added to the
5970 list of assertions for the corresponding operands. */
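/* Illustrative sketch: for

       switch (i_1)
         {
         case 3 ... 7: ...
         default: ...
         }

   the edge to the "case 3 ... 7" block gets the assertions i_1 >= 3 and
   i_1 <= 7, and (if i_1 is live there) the default edge gets assertions
   describing the complement, e.g. the anti-range ~[3, 7].  */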
5971
d476245d 5972static void
538dd0b7 5973find_switch_asserts (basic_block bb, gswitch *last)
9bb6aa43 5974{
726a989a 5975 gimple_stmt_iterator bsi;
a26a02d7 5976 tree op;
9bb6aa43 5977 edge e;
1aa9438f
JJ
5978 struct case_info *ci;
5979 size_t n = gimple_switch_num_labels (last);
109e637b 5980#if GCC_VERSION >= 4000
9bb6aa43 5981 unsigned int idx;
109e637b
JM
5982#else
5983 /* Work around GCC 3.4 bug (PR 37086). */
5984 volatile unsigned int idx;
5985#endif
9bb6aa43 5986
726a989a
RB
5987 bsi = gsi_for_stmt (last);
5988 op = gimple_switch_index (last);
9bb6aa43 5989 if (TREE_CODE (op) != SSA_NAME)
d476245d 5990 return;
9bb6aa43
RG
5991
5992 /* Build a vector of case labels sorted by destination label. */
1aa9438f 5993 ci = XNEWVEC (struct case_info, n);
9bb6aa43 5994 for (idx = 0; idx < n; ++idx)
1aa9438f
JJ
5995 {
5996 ci[idx].expr = gimple_switch_label (last, idx);
5997 ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
5998 }
524cf1e4 5999 edge default_edge = find_edge (bb, ci[0].bb);
1aa9438f 6000 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
9bb6aa43
RG
6001
6002 for (idx = 0; idx < n; ++idx)
6003 {
6004 tree min, max;
1aa9438f
JJ
6005 tree cl = ci[idx].expr;
6006 basic_block cbb = ci[idx].bb;
9bb6aa43
RG
6007
6008 min = CASE_LOW (cl);
6009 max = CASE_HIGH (cl);
6010
6011 /* If there are multiple case labels with the same destination
6012 we need to combine them to a single value range for the edge. */
1aa9438f 6013 if (idx + 1 < n && cbb == ci[idx + 1].bb)
9bb6aa43
RG
6014 {
6015 /* Skip labels until the last of the group. */
6016 do {
6017 ++idx;
1aa9438f 6018 } while (idx < n && cbb == ci[idx].bb);
9bb6aa43
RG
6019 --idx;
6020
6021 /* Pick up the maximum of the case label range. */
1aa9438f
JJ
6022 if (CASE_HIGH (ci[idx].expr))
6023 max = CASE_HIGH (ci[idx].expr);
9bb6aa43 6024 else
1aa9438f 6025 max = CASE_LOW (ci[idx].expr);
9bb6aa43
RG
6026 }
6027
524cf1e4
PP
6028 /* Can't extract a useful assertion out of a range that includes the
6029 default label. */
9bb6aa43
RG
6030 if (min == NULL_TREE)
6031 continue;
6032
6033 /* Find the edge to register the assert expr on. */
1aa9438f 6034 e = find_edge (bb, cbb);
9bb6aa43 6035
9bb6aa43
RG
6036 /* Register the necessary assertions for the operand in the
6037 SWITCH_EXPR. */
d476245d
PP
6038 register_edge_assert_for (op, e, bsi,
6039 max ? GE_EXPR : EQ_EXPR,
6040 op, fold_convert (TREE_TYPE (op), min));
9bb6aa43 6041 if (max)
d476245d
PP
6042 register_edge_assert_for (op, e, bsi, LE_EXPR, op,
6043 fold_convert (TREE_TYPE (op), max));
9bb6aa43
RG
6044 }
6045
1aa9438f 6046 XDELETEVEC (ci);
524cf1e4
PP
6047
6048 if (!live_on_edge (default_edge, op))
6049 return;
6050
6051 /* Now register along the default label assertions that correspond to the
6052 anti-range of each label. */
6053 int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
6054 for (idx = 1; idx < n; idx++)
6055 {
6056 tree min, max;
6057 tree cl = gimple_switch_label (last, idx);
6058
6059 min = CASE_LOW (cl);
6060 max = CASE_HIGH (cl);
6061
6062 /* Combine contiguous case ranges to reduce the number of assertions
6063 to insert. */
6064 for (idx = idx + 1; idx < n; idx++)
6065 {
6066 tree next_min, next_max;
6067 tree next_cl = gimple_switch_label (last, idx);
6068
6069 next_min = CASE_LOW (next_cl);
6070 next_max = CASE_HIGH (next_cl);
6071
6072 wide_int difference = wi::sub (next_min, max ? max : min);
6073 if (wi::eq_p (difference, 1))
6074 max = next_max ? next_max : next_min;
6075 else
6076 break;
6077 }
6078 idx--;
6079
6080 if (max == NULL_TREE)
6081 {
6082 /* Register the assertion OP != MIN. */
6083 min = fold_convert (TREE_TYPE (op), min);
6084 register_edge_assert_for (op, default_edge, bsi, NE_EXPR, op, min);
6085 }
6086 else
6087 {
6088 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
6089 which will give OP the anti-range ~[MIN,MAX]. */
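	  /* A worked example, purely illustrative: for a label covering [3, 7],
	     MIN == 3 and MAX == 7, so the assertion below is
	     (unsigned) i_1 - 3 > 4.  Because the subtraction wraps, this holds
	     exactly when i_1 < 3 or i_1 > 7, which VRP records as the
	     anti-range ~[3, 7] for i_1 on the default edge.  */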
6090 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
6091 min = fold_convert (TREE_TYPE (uop), min);
6092 max = fold_convert (TREE_TYPE (uop), max);
6093
6094 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
6095 tree rhs = int_const_binop (MINUS_EXPR, max, min);
6096 register_new_assert_for (op, lhs, GT_EXPR, rhs,
6097 NULL, default_edge, bsi);
6098 }
6099
6100 if (--insertion_limit == 0)
6101 break;
6102 }
9bb6aa43
RG
6103}
6104
227858d1
DN
6105
6106/* Traverse all the statements in block BB looking for statements that
6107 may generate useful assertions for the SSA names in their operand.
6108 If a statement produces a useful assertion A for name N_i, then the
6109 list of assertions already generated for N_i is scanned to
6110 determine if A is actually needed.
b8698a0f 6111
227858d1
DN
6112 If N_i already had the assertion A at a location dominating the
6113 current location, then nothing needs to be done. Otherwise, the
6114 new location for A is recorded instead.
6115
6116 1- For every statement S in BB, all the variables used by S are
6117 added to bitmap FOUND_IN_SUBGRAPH.
6118
6119 2- If statement S uses an operand N in a way that exposes a known
6120 value range for N, then if N was not already generated by an
6121 ASSERT_EXPR, create a new assert location for N. For instance,
6122 if N is a pointer and the statement dereferences it, we can
6123 assume that N is not NULL.
6124
6125 3- COND_EXPRs are a special case of #2. We can derive range
6126 information from the predicate but need to insert different
6127 ASSERT_EXPRs for each of the sub-graphs rooted at the
6128 conditional block. If the last statement of BB is a conditional
6129 expression of the form 'X op Y', then
6130
6131 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
6132
6133 b) If the conditional is the only entry point to the sub-graph
6134 corresponding to the THEN_CLAUSE, recurse into it. On
6135 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
6136 an ASSERT_EXPR is added for the corresponding variable.
6137
6138 c) Repeat step (b) on the ELSE_CLAUSE.
6139
6140 d) Mark X and Y in FOUND_IN_SUBGRAPH.
6141
6142 For instance,
6143
6144 if (a == 9)
6145 b = a;
6146 else
6147 b = c + 1;
6148
6149 In this case, an assertion on the THEN clause is useful to
6150 determine that 'a' is always 9 on that edge. However, an assertion
6151 on the ELSE clause would be unnecessary.
6152
6153 4- If BB does not end in a conditional expression, then we recurse
6154 into BB's dominator children.
b8698a0f 6155
227858d1
DN
6156 At the end of the recursive traversal, every SSA name will have a
6157 list of locations where ASSERT_EXPRs should be added. When a new
6158 location for name N is found, it is registered by calling
6159 register_new_assert_for. That function keeps track of all the
6160 registered assertions to prevent adding unnecessary assertions.
6161 For instance, if a pointer P_4 is dereferenced more than once in a
6162 dominator tree, only the location dominating all the dereference of
d476245d 6163 P_4 will receive an ASSERT_EXPR. */
227858d1 6164
d476245d 6165static void
c4ab2baa 6166find_assert_locations_1 (basic_block bb, sbitmap live)
227858d1 6167{
355fe088 6168 gimple *last;
227858d1 6169
c4ab2baa 6170 last = last_stmt (bb);
227858d1 6171
c4ab2baa
RG
6172 /* If BB's last statement is a conditional statement involving integer
6173 operands, determine if we need to add ASSERT_EXPRs. */
6174 if (last
6175 && gimple_code (last) == GIMPLE_COND
6176 && !fp_predicate (last)
6177 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
538dd0b7 6178 find_conditional_asserts (bb, as_a <gcond *> (last));
227858d1 6179
c4ab2baa
RG
6180 /* If BB's last statement is a switch statement involving integer
6181 operands, determine if we need to add ASSERT_EXPRs. */
6182 if (last
6183 && gimple_code (last) == GIMPLE_SWITCH
6184 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
538dd0b7 6185 find_switch_asserts (bb, as_a <gswitch *> (last));
227858d1
DN
6186
6187 /* Traverse all the statements in BB marking used names and looking
6188 for statements that may infer assertions for their used operands. */
538dd0b7
DM
6189 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
6190 gsi_prev (&si))
227858d1 6191 {
355fe088 6192 gimple *stmt;
726a989a 6193 tree op;
227858d1
DN
6194 ssa_op_iter i;
6195
726a989a 6196 stmt = gsi_stmt (si);
227858d1 6197
b5b8b0ac
AO
6198 if (is_gimple_debug (stmt))
6199 continue;
6200
227858d1
DN
6201 /* See if we can derive an assertion for any of STMT's operands. */
6202 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6203 {
6204 tree value;
6205 enum tree_code comp_code;
6206
f7a39c55
RG
6207 /* If op is not live beyond this stmt, do not bother to insert
6208 asserts for it. */
d7c028c0 6209 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
f7a39c55 6210 continue;
227858d1 6211
227858d1
DN
6212 /* If OP is used in such a way that we can infer a value
6213 range for it, and we don't find a previous assertion for
6214 it, create a new assertion location node for OP. */
6215 if (infer_value_range (stmt, op, &comp_code, &value))
6216 {
917f1b7e 6217 /* If we are able to infer a nonzero value range for OP,
60c9ad46
JL
6218 then walk backwards through the use-def chain to see if OP
6219 was set via a typecast.
6220
6221 If so, then we can also infer a nonzero value range
6222 for the operand of the NOP_EXPR. */
6223 if (comp_code == NE_EXPR && integer_zerop (value))
6224 {
6225 tree t = op;
355fe088 6226 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
b8698a0f 6227
726a989a 6228 while (is_gimple_assign (def_stmt)
625a9766
RB
6229 && CONVERT_EXPR_CODE_P
6230 (gimple_assign_rhs_code (def_stmt))
07beea0d 6231 && TREE_CODE
726a989a 6232 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
07beea0d 6233 && POINTER_TYPE_P
726a989a 6234 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
60c9ad46 6235 {
726a989a 6236 t = gimple_assign_rhs1 (def_stmt);
60c9ad46
JL
6237 def_stmt = SSA_NAME_DEF_STMT (t);
6238
6239 /* Note we want to register the assert for the
6240 operand of the NOP_EXPR after SI, not after the
6241 conversion. */
6e07f9aa 6242 if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
d476245d
PP
6243 register_new_assert_for (t, t, comp_code, value,
6244 bb, NULL, si);
60c9ad46
JL
6245 }
6246 }
6247
f7a39c55 6248 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
0bca51f0
DN
6249 }
6250 }
f7a39c55
RG
6251
6252 /* Update live. */
6253 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
d7c028c0 6254 bitmap_set_bit (live, SSA_NAME_VERSION (op));
f7a39c55 6255 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
d7c028c0 6256 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
0bca51f0
DN
6257 }
6258
f7a39c55 6259 /* Traverse all PHI nodes in BB, updating live. */
538dd0b7
DM
6260 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
6261 gsi_next (&si))
c4ab2baa
RG
6262 {
6263 use_operand_p arg_p;
6264 ssa_op_iter i;
538dd0b7 6265 gphi *phi = si.phi ();
f7a39c55
RG
6266 tree res = gimple_phi_result (phi);
6267
6268 if (virtual_operand_p (res))
6269 continue;
9bb6aa43 6270
c4ab2baa
RG
6271 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
6272 {
6273 tree arg = USE_FROM_PTR (arg_p);
6274 if (TREE_CODE (arg) == SSA_NAME)
d7c028c0 6275 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
c4ab2baa 6276 }
f7a39c55 6277
d7c028c0 6278 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
c4ab2baa 6279 }
227858d1
DN
6280}
6281
c4ab2baa 6282/* Do an RPO walk over the function computing SSA name liveness
d476245d 6283 on-the-fly and deciding on assert expressions to insert. */
c4ab2baa 6284
d476245d 6285static void
c4ab2baa
RG
6286find_assert_locations (void)
6287{
8b1c6fd7
DM
6288 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
6289 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
6290 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
c4ab2baa 6291 int rpo_cnt, i;
c4ab2baa 6292
8b1c6fd7 6293 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
c4ab2baa
RG
6294 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
6295 for (i = 0; i < rpo_cnt; ++i)
6296 bb_rpo[rpo[i]] = i;
6297
d23c0a32
JJ
6298 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
6299 the order we compute liveness and insert asserts we otherwise
6300 fail to insert asserts into the loop latch. */
6301 loop_p loop;
f0bd40b1 6302 FOR_EACH_LOOP (loop, 0)
d23c0a32
JJ
6303 {
6304 i = loop->latch->index;
6305 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
538dd0b7 6306 for (gphi_iterator gsi = gsi_start_phis (loop->header);
d23c0a32
JJ
6307 !gsi_end_p (gsi); gsi_next (&gsi))
6308 {
538dd0b7 6309 gphi *phi = gsi.phi ();
d23c0a32
JJ
6310 if (virtual_operand_p (gimple_phi_result (phi)))
6311 continue;
6312 tree arg = gimple_phi_arg_def (phi, j);
6313 if (TREE_CODE (arg) == SSA_NAME)
6314 {
6315 if (live[i] == NULL)
6316 {
6317 live[i] = sbitmap_alloc (num_ssa_names);
6318 bitmap_clear (live[i]);
6319 }
6320 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
6321 }
6322 }
6323 }
6324
c302207e 6325 for (i = rpo_cnt - 1; i >= 0; --i)
c4ab2baa 6326 {
06e28de2 6327 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
c4ab2baa
RG
6328 edge e;
6329 edge_iterator ei;
6330
6331 if (!live[rpo[i]])
6332 {
6333 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
f61e445a 6334 bitmap_clear (live[rpo[i]]);
c4ab2baa
RG
6335 }
6336
6337 /* Process BB and update the live information with uses in
6338 this block. */
d476245d 6339 find_assert_locations_1 (bb, live[rpo[i]]);
c4ab2baa
RG
6340
6341 /* Merge liveness into the predecessor blocks and free it. */
f61e445a 6342 if (!bitmap_empty_p (live[rpo[i]]))
c4ab2baa
RG
6343 {
6344 int pred_rpo = i;
6345 FOR_EACH_EDGE (e, ei, bb->preds)
6346 {
6347 int pred = e->src->index;
6f723d33 6348 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
c4ab2baa
RG
6349 continue;
6350
6351 if (!live[pred])
6352 {
6353 live[pred] = sbitmap_alloc (num_ssa_names);
f61e445a 6354 bitmap_clear (live[pred]);
c4ab2baa 6355 }
f61e445a 6356 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
c4ab2baa
RG
6357
6358 if (bb_rpo[pred] < pred_rpo)
6359 pred_rpo = bb_rpo[pred];
6360 }
6361
6362 /* Record the RPO number of the last visited block that needs
6363 live information from this block. */
6364 last_rpo[rpo[i]] = pred_rpo;
6365 }
6366 else
6367 {
6368 sbitmap_free (live[rpo[i]]);
6369 live[rpo[i]] = NULL;
6370 }
6371
6372 /* We can free all successors live bitmaps if all their
6373 predecessors have been visited already. */
6374 FOR_EACH_EDGE (e, ei, bb->succs)
6375 if (last_rpo[e->dest->index] == i
6376 && live[e->dest->index])
6377 {
6378 sbitmap_free (live[e->dest->index]);
6379 live[e->dest->index] = NULL;
6380 }
6381 }
6382
6383 XDELETEVEC (rpo);
6384 XDELETEVEC (bb_rpo);
6385 XDELETEVEC (last_rpo);
8b1c6fd7 6386 for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
c4ab2baa
RG
6387 if (live[i])
6388 sbitmap_free (live[i]);
6389 XDELETEVEC (live);
c4ab2baa 6390}
227858d1
DN
6391
6392/* Create an ASSERT_EXPR for NAME and insert it in the location
6393 indicated by LOC. Return true if we made any edge insertions. */
6394
6395static bool
ff507401 6396process_assert_insertions_for (tree name, assert_locus *loc)
227858d1
DN
6397{
6398 /* Build the comparison expression NAME_i COMP_CODE VAL. */
355fe088 6399 gimple *stmt;
726a989a 6400 tree cond;
355fe088 6401 gimple *assert_stmt;
227858d1
DN
6402 edge_iterator ei;
6403 edge e;
6404
ff0a0c1d
RG
6405 /* If we have X <=> X do not insert an assert expr for that. */
6406 if (loc->expr == loc->val)
6407 return false;
6408
2ab8dbf4 6409 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
726a989a 6410 assert_stmt = build_assert_expr_for (cond, name);
227858d1 6411 if (loc->e)
0bca51f0 6412 {
227858d1
DN
6413 /* We have been asked to insert the assertion on an edge. This
6414 is used only by COND_EXPR and SWITCH_EXPR assertions. */
77a74ed7
NF
6415 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
6416 || (gimple_code (gsi_stmt (loc->si))
6417 == GIMPLE_SWITCH));
0bca51f0 6418
726a989a 6419 gsi_insert_on_edge (loc->e, assert_stmt);
227858d1
DN
6420 return true;
6421 }
9fabf0d4 6422
227858d1
DN
6423 /* Otherwise, we can insert right after LOC->SI iff the
6424 statement must not be the last statement in the block. */
726a989a 6425 stmt = gsi_stmt (loc->si);
227858d1
DN
6426 if (!stmt_ends_bb_p (stmt))
6427 {
726a989a 6428 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
227858d1
DN
6429 return false;
6430 }
9fabf0d4 6431
227858d1
DN
6432 /* If STMT must be the last statement in BB, we can only insert new
6433 assertions on the non-abnormal edge out of BB. Note that since
1fd9f058 6434 STMT is not control flow, there may only be one non-abnormal/eh edge
227858d1
DN
6435 out of BB. */
6436 FOR_EACH_EDGE (e, ei, loc->bb->succs)
1fd9f058 6437 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
227858d1 6438 {
726a989a 6439 gsi_insert_on_edge (e, assert_stmt);
227858d1
DN
6440 return true;
6441 }
0bca51f0 6442
227858d1
DN
6443 gcc_unreachable ();
6444}
0bca51f0 6445
0bca51f0 6446
227858d1
DN
6447/* Process all the insertions registered for every name N_i registered
6448 in NEED_ASSERT_FOR. The list of assertions to be inserted are
6449 found in ASSERTS_FOR[i]. */
0bca51f0 6450
227858d1
DN
6451static void
6452process_assert_insertions (void)
6453{
6454 unsigned i;
6455 bitmap_iterator bi;
6456 bool update_edges_p = false;
6457 int num_asserts = 0;
0bca51f0 6458
227858d1
DN
6459 if (dump_file && (dump_flags & TDF_DETAILS))
6460 dump_all_asserts (dump_file);
60b4ccde 6461
227858d1
DN
6462 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
6463 {
ff507401 6464 assert_locus *loc = asserts_for[i];
227858d1
DN
6465 gcc_assert (loc);
6466
6467 while (loc)
60b4ccde 6468 {
ff507401 6469 assert_locus *next = loc->next;
227858d1
DN
6470 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
6471 free (loc);
6472 loc = next;
6473 num_asserts++;
60b4ccde 6474 }
0bca51f0 6475 }
0bca51f0 6476
227858d1 6477 if (update_edges_p)
726a989a 6478 gsi_commit_edge_inserts ();
0bca51f0 6479
01902653
RG
6480 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
6481 num_asserts);
0bca51f0
DN
6482}
6483
6484
6485/* Traverse the flowgraph looking for conditional jumps to insert range
6486 expressions. These range expressions are meant to provide information
6487 to optimizations that need to reason in terms of value ranges. They
6488 will not be expanded into RTL. For instance, given:
6489
6490 x = ...
6491 y = ...
6492 if (x < y)
6493 y = x - 2;
6494 else
6495 x = y + 3;
6496
6497 this pass will transform the code into:
6498
6499 x = ...
6500 y = ...
6501 if (x < y)
6502 {
6503 x = ASSERT_EXPR <x, x < y>
6504 y = x - 2
6505 }
6506 else
6507 {
36f291f7 6508 y = ASSERT_EXPR <y, x >= y>
0bca51f0
DN
6509 x = y + 3
6510 }
6511
6512 The idea is that once copy and constant propagation have run, other
6513 optimizations will be able to determine what ranges of values can 'x'
6514 take in different paths of the code, simply by checking the reaching
6515 definition of 'x'. */
6516
6517static void
6518insert_range_assertions (void)
6519{
227858d1 6520 need_assert_for = BITMAP_ALLOC (NULL);
ff507401 6521 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
0bca51f0
DN
6522
6523 calculate_dominance_info (CDI_DOMINATORS);
6524
d476245d
PP
6525 find_assert_locations ();
6526 if (!bitmap_empty_p (need_assert_for))
0bca51f0 6527 {
227858d1 6528 process_assert_insertions ();
0bca51f0
DN
6529 update_ssa (TODO_update_ssa_no_phi);
6530 }
6531
6532 if (dump_file && (dump_flags & TDF_DETAILS))
6533 {
6534 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
6535 dump_function_to_file (current_function_decl, dump_file, dump_flags);
6536 }
6537
227858d1
DN
6538 free (asserts_for);
6539 BITMAP_FREE (need_assert_for);
0bca51f0
DN
6540}
6541
590b1f2d
DM
6542/* Checks one ARRAY_REF in REF, located at LOCATION. Ignores flexible arrays
6543 and "struct" hacks. If VRP can determine that the
9f5ed61a 6544 array subscript is a constant, check if it is outside valid
590b1f2d
DM
6545 range. If the array subscript is a RANGE, warn if it is
6546 non-overlapping with valid range.
6547 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
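/* For instance, given the hypothetical declaration

     int a[10];

   a read from a[i_2] where i_2 has the value range [10, 12] is
   diagnosed as "above array bounds", since even the smallest value
   in the range is past the upper bound 9.  Taking the address
   &a[10] is not diagnosed, because IGNORE_OFF_BY_ONE accepts the
   one-past-the-end subscript.  */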
6548
6549static void
c2255bc4 6550check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
590b1f2d 6551{
526ceb68 6552 value_range *vr = NULL;
590b1f2d 6553 tree low_sub, up_sub;
12bd5a1e 6554 tree low_bound, up_bound, up_bound_p1;
12bd5a1e
RG
6555
6556 if (TREE_NO_WARNING (ref))
6557 return;
590b1f2d
DM
6558
6559 low_sub = up_sub = TREE_OPERAND (ref, 1);
12bd5a1e 6560 up_bound = array_ref_up_bound (ref);
590b1f2d 6561
db8800bc 6562 /* Cannot check flexible arrays. */
12bd5a1e 6563 if (!up_bound
db8800bc 6564 || TREE_CODE (up_bound) != INTEGER_CST)
590b1f2d
DM
6565 return;
6566
12bd5a1e
RG
6567 /* Accesses to trailing arrays via pointers may access storage
6568 beyond the type's array bounds. */
798e2a8e
RB
6569 if (warn_array_bounds < 2
6570 && array_at_struct_end_p (ref))
6571 return;
12bd5a1e 6572
590b1f2d 6573 low_bound = array_ref_low_bound (ref);
807e902e
KZ
6574 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
6575 build_int_cst (TREE_TYPE (up_bound), 1));
590b1f2d 6576
f8269ad4
RB
6577 /* Empty array. */
6578 if (tree_int_cst_equal (low_bound, up_bound_p1))
6579 {
6580 warning_at (location, OPT_Warray_bounds,
6581 "array subscript is above array bounds");
6582 TREE_NO_WARNING (ref) = 1;
6583 }
6584
590b1f2d
DM
6585 if (TREE_CODE (low_sub) == SSA_NAME)
6586 {
6587 vr = get_value_range (low_sub);
6588 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
6589 {
6590 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
6591 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
6592 }
6593 }
6594
6595 if (vr && vr->type == VR_ANTI_RANGE)
6596 {
6597 if (TREE_CODE (up_sub) == INTEGER_CST
f8269ad4
RB
6598 && (ignore_off_by_one
6599 ? tree_int_cst_lt (up_bound, up_sub)
6600 : tree_int_cst_le (up_bound, up_sub))
590b1f2d 6601 && TREE_CODE (low_sub) == INTEGER_CST
f8269ad4 6602 && tree_int_cst_le (low_sub, low_bound))
590b1f2d 6603 {
92ef7fb1
MLI
6604 warning_at (location, OPT_Warray_bounds,
6605 "array subscript is outside array bounds");
590b1f2d
DM
6606 TREE_NO_WARNING (ref) = 1;
6607 }
6608 }
6609 else if (TREE_CODE (up_sub) == INTEGER_CST
12bd5a1e 6610 && (ignore_off_by_one
f8269ad4
RB
6611 ? !tree_int_cst_le (up_sub, up_bound_p1)
6612 : !tree_int_cst_le (up_sub, up_bound)))
590b1f2d 6613 {
83ede847
RB
6614 if (dump_file && (dump_flags & TDF_DETAILS))
6615 {
6616 fprintf (dump_file, "Array bound warning for ");
6617 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
b4a4b56d 6618 fprintf (dump_file, "\n");
83ede847 6619 }
92ef7fb1
MLI
6620 warning_at (location, OPT_Warray_bounds,
6621 "array subscript is above array bounds");
590b1f2d
DM
6622 TREE_NO_WARNING (ref) = 1;
6623 }
6624 else if (TREE_CODE (low_sub) == INTEGER_CST
6625 && tree_int_cst_lt (low_sub, low_bound))
6626 {
83ede847
RB
6627 if (dump_file && (dump_flags & TDF_DETAILS))
6628 {
6629 fprintf (dump_file, "Array bound warning for ");
6630 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
b4a4b56d 6631 fprintf (dump_file, "\n");
83ede847 6632 }
92ef7fb1
MLI
6633 warning_at (location, OPT_Warray_bounds,
6634 "array subscript is below array bounds");
590b1f2d
DM
6635 TREE_NO_WARNING (ref) = 1;
6636 }
6637}
6638
05fb69e4
DM
6639/* Search the expression T, located at LOCATION, for computations of the
6640 address of an ARRAY_REF, and call check_array_ref on each one found. */
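/* As a hypothetical illustration: for a chain such as &p->x[i][j] each
   ARRAY_REF is checked with the one-past-the-end allowance, and for a
   MEM_REF based directly on the address of an array, e.g. MEM[&a + 44B]
   with int a[10] and a 4-byte int, the constant offset corresponds to
   index 11 and is reported as above the array bounds, while a negative
   offset such as MEM[&a + -4B] maps to index -1 and is reported as
   below them.  */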
6641
6642static void
92ef7fb1 6643search_for_addr_array (tree t, location_t location)
05fb69e4 6644{
05fb69e4 6645 /* Check each ARRAY_REFs in the reference chain. */
b8698a0f 6646 do
05fb69e4
DM
6647 {
6648 if (TREE_CODE (t) == ARRAY_REF)
c2255bc4 6649 check_array_ref (location, t, true /*ignore_off_by_one*/);
05fb69e4 6650
9968d233 6651 t = TREE_OPERAND (t, 0);
05fb69e4
DM
6652 }
6653 while (handled_component_p (t));
70f34814
RG
6654
6655 if (TREE_CODE (t) == MEM_REF
6656 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
6657 && !TREE_NO_WARNING (t))
6658 {
6659 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
6660 tree low_bound, up_bound, el_sz;
807e902e 6661 offset_int idx;
70f34814
RG
6662 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
6663 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
6664 || !TYPE_DOMAIN (TREE_TYPE (tem)))
6665 return;
6666
6667 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6668 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6669 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
6670 if (!low_bound
6671 || TREE_CODE (low_bound) != INTEGER_CST
6672 || !up_bound
6673 || TREE_CODE (up_bound) != INTEGER_CST
6674 || !el_sz
6675 || TREE_CODE (el_sz) != INTEGER_CST)
6676 return;
6677
6678 idx = mem_ref_offset (t);
807e902e 6679 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
032c80e9 6680 if (idx < 0)
70f34814 6681 {
83ede847
RB
6682 if (dump_file && (dump_flags & TDF_DETAILS))
6683 {
6684 fprintf (dump_file, "Array bound warning for ");
6685 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
b4a4b56d 6686 fprintf (dump_file, "\n");
83ede847 6687 }
70f34814
RG
6688 warning_at (location, OPT_Warray_bounds,
6689 "array subscript is below array bounds");
6690 TREE_NO_WARNING (t) = 1;
6691 }
032c80e9
RS
6692 else if (idx > (wi::to_offset (up_bound)
6693 - wi::to_offset (low_bound) + 1))
70f34814 6694 {
83ede847
RB
6695 if (dump_file && (dump_flags & TDF_DETAILS))
6696 {
6697 fprintf (dump_file, "Array bound warning for ");
6698 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
b4a4b56d 6699 fprintf (dump_file, "\n");
83ede847 6700 }
70f34814
RG
6701 warning_at (location, OPT_Warray_bounds,
6702 "array subscript is above array bounds");
6703 TREE_NO_WARNING (t) = 1;
6704 }
6705 }
05fb69e4
DM
6706}
6707
590b1f2d
DM
6708/* walk_tree() callback that checks if *TP is
6709 an ARRAY_REF inside an ADDR_EXPR (in which an array
6710 subscript one past the end of the array is allowed). Call
b8698a0f 6711 check_array_ref for each ARRAY_REF found. The location is
590b1f2d
DM
6712 passed in DATA. */
6713
6714static tree
6715check_array_bounds (tree *tp, int *walk_subtree, void *data)
6716{
6717 tree t = *tp;
726a989a 6718 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
c2255bc4
AH
6719 location_t location;
6720
6721 if (EXPR_HAS_LOCATION (t))
6722 location = EXPR_LOCATION (t);
6723 else
6724 {
6725 location_t *locp = (location_t *) wi->info;
6726 location = *locp;
6727 }
88df9da1 6728
590b1f2d
DM
6729 *walk_subtree = TRUE;
6730
6731 if (TREE_CODE (t) == ARRAY_REF)
c2255bc4 6732 check_array_ref (location, t, false /*ignore_off_by_one*/);
1eb7b049 6733
f8269ad4
RB
6734 else if (TREE_CODE (t) == ADDR_EXPR)
6735 {
6736 search_for_addr_array (t, location);
6737 *walk_subtree = FALSE;
6738 }
05fb69e4 6739
590b1f2d
DM
6740 return NULL_TREE;
6741}
6742
6743/* Walk over all statements of all reachable BBs and call check_array_bounds
6744 on them. */
6745
6746static void
6747check_all_array_refs (void)
6748{
6749 basic_block bb;
726a989a 6750 gimple_stmt_iterator si;
590b1f2d 6751
11cd3bed 6752 FOR_EACH_BB_FN (bb, cfun)
590b1f2d 6753 {
1d86f5e9
RG
6754 edge_iterator ei;
6755 edge e;
6756 bool executable = false;
92ef7fb1 6757
1d86f5e9
RG
6758 /* Skip blocks that were found to be unreachable. */
6759 FOR_EACH_EDGE (e, ei, bb->preds)
6760 executable |= !!(e->flags & EDGE_EXECUTABLE);
6761 if (!executable)
6762 continue;
590b1f2d 6763
726a989a
RB
6764 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
6765 {
355fe088 6766 gimple *stmt = gsi_stmt (si);
726a989a 6767 struct walk_stmt_info wi;
f8269ad4
RB
6768 if (!gimple_has_location (stmt)
6769 || is_gimple_debug (stmt))
726a989a
RB
6770 continue;
6771
f8269ad4 6772 memset (&wi, 0, sizeof (wi));
b2b91e85
TS
6773
6774 location_t loc = gimple_location (stmt);
6775 wi.info = &loc;
726a989a 6776
f8269ad4
RB
6777 walk_gimple_op (gsi_stmt (si),
6778 check_array_bounds,
6779 &wi);
726a989a 6780 }
590b1f2d
DM
6781 }
6782}
0bca51f0 6783
d8202b84
JJ
6784/* Return true if all imm uses of VAR are either in STMT, or
6785 feed (optionally through a chain of single imm uses) GIMPLE_COND
6786 in basic block COND_BB. */
6787
6788static bool
355fe088 6789all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
d8202b84
JJ
6790{
6791 use_operand_p use_p, use2_p;
6792 imm_use_iterator iter;
6793
6794 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
6795 if (USE_STMT (use_p) != stmt)
6796 {
355fe088 6797 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
d8202b84
JJ
6798 if (is_gimple_debug (use_stmt))
6799 continue;
6800 while (is_gimple_assign (use_stmt)
7e8c8abc 6801 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
d8202b84 6802 && single_imm_use (gimple_assign_lhs (use_stmt),
7e8c8abc
JJ
6803 &use2_p, &use_stmt2))
6804 use_stmt = use_stmt2;
d8202b84
JJ
6805 if (gimple_code (use_stmt) != GIMPLE_COND
6806 || gimple_bb (use_stmt) != cond_bb)
6807 return false;
6808 }
6809 return true;
6810}
6811
1e99c6e0
JJ
6812/* Handle
6813 _4 = x_3 & 31;
6814 if (_4 != 0)
6815 goto <bb 6>;
6816 else
6817 goto <bb 7>;
6818 <bb 6>:
6819 __builtin_unreachable ();
6820 <bb 7>:
6821 x_5 = ASSERT_EXPR <x_3, ...>;
6822 If x_3 has no other immediate uses (checked by the caller) and
6823 VAR is the x_3 var from the ASSERT_EXPR, we can clear the low 5 bits
6824 from the non-zero bitmask. */
6825
6826static void
6827maybe_set_nonzero_bits (basic_block bb, tree var)
6828{
6829 edge e = single_pred_edge (bb);
6830 basic_block cond_bb = e->src;
355fe088 6831 gimple *stmt = last_stmt (cond_bb);
1e99c6e0
JJ
6832 tree cst;
6833
6834 if (stmt == NULL
6835 || gimple_code (stmt) != GIMPLE_COND
6836 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
6837 ? EQ_EXPR : NE_EXPR)
6838 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
6839 || !integer_zerop (gimple_cond_rhs (stmt)))
6840 return;
6841
6842 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
6843 if (!is_gimple_assign (stmt)
6844 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
6845 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
6846 return;
6847 if (gimple_assign_rhs1 (stmt) != var)
6848 {
355fe088 6849 gimple *stmt2;
1e99c6e0
JJ
6850
6851 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
6852 return;
6853 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
6854 if (!gimple_assign_cast_p (stmt2)
6855 || gimple_assign_rhs1 (stmt2) != var
6856 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
6857 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
6858 != TYPE_PRECISION (TREE_TYPE (var))))
6859 return;
6860 }
6861 cst = gimple_assign_rhs2 (stmt);
807e902e 6862 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var), cst));
1e99c6e0
JJ
6863}
6864
94908762
JL
6865/* Convert range assertion expressions into the implied copies and
6866 copy propagate away the copies. Doing the trivial copy propagation
6867 here avoids the need to run the full copy propagation pass after
b8698a0f
L
6868 VRP.
6869
227858d1
DN
6870 FIXME, this will eventually lead to copy propagation removing the
6871 names that had useful range information attached to them. For
6872 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
6873 then N_i will have the range [3, +INF].
b8698a0f 6874
227858d1
DN
6875 However, by converting the assertion into the implied copy
6876 operation N_i = N_j, we will then copy-propagate N_j into the uses
6877 of N_i and lose the range information. We may want to hold on to
6878 ASSERT_EXPRs a little while longer as the ranges could be used in
6879 things like jump threading.
b8698a0f 6880
227858d1 6881 The problem with keeping ASSERT_EXPRs around is that passes after
b8698a0f 6882 VRP need to handle them appropriately.
94908762
JL
6883
6884 Another approach would be to make the range information a first
6885 class property of the SSA_NAME so that it can be queried from
6886 any pass. This is made somewhat more complex by the need for
6887 multiple ranges to be associated with one SSA_NAME. */
0bca51f0
DN
6888
6889static void
6890remove_range_assertions (void)
6891{
6892 basic_block bb;
726a989a 6893 gimple_stmt_iterator si;
d8202b84
JJ
6894 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
6895 a basic block preceded by a GIMPLE_COND branching to it and
6896 __builtin_trap, -1 if not yet checked, 0 otherwise. */
6897 int is_unreachable;
0bca51f0 6898
94908762
JL
6899 /* Note that the BSI iterator bump happens at the bottom of the
6900 loop and no bump is necessary if we're removing the statement
6901 referenced by the current BSI. */
11cd3bed 6902 FOR_EACH_BB_FN (bb, cfun)
d8202b84 6903 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
0bca51f0 6904 {
355fe088 6905 gimple *stmt = gsi_stmt (si);
0bca51f0 6906
726a989a
RB
6907 if (is_gimple_assign (stmt)
6908 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
0bca51f0 6909 {
d8202b84 6910 tree lhs = gimple_assign_lhs (stmt);
726a989a
RB
6911 tree rhs = gimple_assign_rhs1 (stmt);
6912 tree var;
94908762 6913
701b8964 6914 var = ASSERT_EXPR_VAR (rhs);
d8202b84 6915
8a7c91cd
RB
6916 if (TREE_CODE (var) == SSA_NAME
6917 && !POINTER_TYPE_P (TREE_TYPE (lhs))
d8202b84
JJ
6918 && SSA_NAME_RANGE_INFO (lhs))
6919 {
6920 if (is_unreachable == -1)
6921 {
6922 is_unreachable = 0;
6923 if (single_pred_p (bb)
6924 && assert_unreachable_fallthru_edge_p
6925 (single_pred_edge (bb)))
6926 is_unreachable = 1;
6927 }
6928 /* Handle
6929 if (x_7 >= 10 && x_7 < 20)
6930 __builtin_unreachable ();
6931 x_8 = ASSERT_EXPR <x_7, ...>;
6932 if the only uses of x_7 are in the ASSERT_EXPR and
6933 in the condition. In that case, we can copy the
6934 range info from x_8 computed in this pass also
6935 for x_7. */
6936 if (is_unreachable
6937 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
6938 single_pred (bb)))
1e99c6e0 6939 {
f5c8b24c 6940 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
807e902e
KZ
6941 SSA_NAME_RANGE_INFO (lhs)->get_min (),
6942 SSA_NAME_RANGE_INFO (lhs)->get_max ());
1e99c6e0
JJ
6943 maybe_set_nonzero_bits (bb, var);
6944 }
d8202b84
JJ
6945 }
6946
6947 /* Propagate the RHS into every use of the LHS. */
44b00dbc 6948 replace_uses_by (lhs, var);
94908762
JL
6949
6950 /* And finally, remove the copy, it is not needed. */
726a989a 6951 gsi_remove (&si, true);
b8698a0f 6952 release_defs (stmt);
0bca51f0 6953 }
94908762 6954 else
d8202b84 6955 {
82bb9245
AM
6956 if (!is_gimple_debug (gsi_stmt (si)))
6957 is_unreachable = 0;
d8202b84 6958 gsi_next (&si);
d8202b84 6959 }
0bca51f0
DN
6960 }
6961}
6962
6963
6964/* Return true if STMT is interesting for VRP. */
6965
6966static bool
355fe088 6967stmt_interesting_for_vrp (gimple *stmt)
0bca51f0 6968{
ea057359
RG
6969 if (gimple_code (stmt) == GIMPLE_PHI)
6970 {
6971 tree res = gimple_phi_result (stmt);
6972 return (!virtual_operand_p (res)
6973 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
6974 || POINTER_TYPE_P (TREE_TYPE (res))));
6975 }
726a989a 6976 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
0bca51f0 6977 {
726a989a 6978 tree lhs = gimple_get_lhs (stmt);
0bca51f0 6979
2bbec6d9
JL
6980 /* In general, assignments with virtual operands are not useful
6981 for deriving ranges, with the obvious exception of calls to
6982 builtin functions. */
726a989a 6983 if (lhs && TREE_CODE (lhs) == SSA_NAME
0bca51f0
DN
6984 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6985 || POINTER_TYPE_P (TREE_TYPE (lhs)))
826cacfe 6986 && (is_gimple_call (stmt)
5006671f 6987 || !gimple_vuse (stmt)))
0bca51f0 6988 return true;
09877e13
JJ
6989 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
6990 switch (gimple_call_internal_fn (stmt))
6991 {
6992 case IFN_ADD_OVERFLOW:
6993 case IFN_SUB_OVERFLOW:
6994 case IFN_MUL_OVERFLOW:
6995 /* These internal calls return _Complex integer type,
6996 but are interesting to VRP nevertheless. */
6997 if (lhs && TREE_CODE (lhs) == SSA_NAME)
6998 return true;
6999 break;
7000 default:
7001 break;
7002 }
0bca51f0 7003 }
726a989a
RB
7004 else if (gimple_code (stmt) == GIMPLE_COND
7005 || gimple_code (stmt) == GIMPLE_SWITCH)
0bca51f0
DN
7006 return true;
7007
7008 return false;
7009}
7010
973625a0 7011/* Initialize VRP lattice. */
0bca51f0 7012
227858d1 7013static void
973625a0 7014vrp_initialize_lattice ()
0bca51f0 7015{
d9256277
RG
7016 values_propagated = false;
7017 num_vr_values = num_ssa_names;
526ceb68 7018 vr_value = XCNEWVEC (value_range *, num_vr_values);
fc6827fe 7019 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
b29fcf3b 7020 bitmap_obstack_initialize (&vrp_equiv_obstack);
973625a0
KV
7021}
7022
7023/* Initialization required by ssa_propagate engine. */
7024
7025static void
7026vrp_initialize ()
7027{
7028 basic_block bb;
0bca51f0 7029
11cd3bed 7030 FOR_EACH_BB_FN (bb, cfun)
0bca51f0 7031 {
538dd0b7
DM
7032 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
7033 gsi_next (&si))
0bca51f0 7034 {
538dd0b7 7035 gphi *phi = si.phi ();
0bca51f0
DN
7036 if (!stmt_interesting_for_vrp (phi))
7037 {
7038 tree lhs = PHI_RESULT (phi);
b565d777 7039 set_value_range_to_varying (get_value_range (lhs));
726a989a 7040 prop_set_simulate_again (phi, false);
0bca51f0
DN
7041 }
7042 else
726a989a 7043 prop_set_simulate_again (phi, true);
0bca51f0
DN
7044 }
7045
538dd0b7
DM
7046 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
7047 gsi_next (&si))
0bca51f0 7048 {
355fe088 7049 gimple *stmt = gsi_stmt (si);
0bca51f0 7050
cd6ea7a2
RH
7051 /* If the statement is a control insn, then we do not
7052 want to avoid simulating the statement once. Failure
7053 to do so means that those edges will never get added. */
7054 if (stmt_ends_bb_p (stmt))
7055 prop_set_simulate_again (stmt, true);
7056 else if (!stmt_interesting_for_vrp (stmt))
0bca51f0 7057 {
4d6484dd 7058 set_defs_to_varying (stmt);
726a989a 7059 prop_set_simulate_again (stmt, false);
0bca51f0
DN
7060 }
7061 else
cd6ea7a2 7062 prop_set_simulate_again (stmt, true);
0bca51f0
DN
7063 }
7064 }
0bca51f0
DN
7065}
7066
cfef45c8
RG
7067/* Return the singleton value of NAME's value-range if any, otherwise NAME. */
7068
7069static inline tree
7070vrp_valueize (tree name)
7071{
7072 if (TREE_CODE (name) == SSA_NAME)
7073 {
526ceb68 7074 value_range *vr = get_value_range (name);
cfef45c8 7075 if (vr->type == VR_RANGE
973625a0
KV
7076 && (TREE_CODE (vr->min) == SSA_NAME
7077 || is_gimple_min_invariant (vr->min))
661d6efd 7078 && vrp_operand_equal_p (vr->min, vr->max))
cfef45c8
RG
7079 return vr->min;
7080 }
7081 return name;
7082}
0bca51f0 7083
d2a85801
RB
7084/* Return the singleton value-range for NAME if that is a constant
7085 but signal to not follow SSA edges. */
7086
7087static inline tree
7088vrp_valueize_1 (tree name)
7089{
7090 if (TREE_CODE (name) == SSA_NAME)
7091 {
d2a85801
RB
7092 /* If the definition may be simulated again we cannot follow
7093 this SSA edge as the SSA propagator does not necessarily
7094 re-visit the use. */
355fe088 7095 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
7dd1f7ac
RB
7096 if (!gimple_nop_p (def_stmt)
7097 && prop_simulate_again_p (def_stmt))
d2a85801 7098 return NULL_TREE;
526ceb68 7099 value_range *vr = get_value_range (name);
d94e3e75
RB
7100 if (range_int_cst_singleton_p (vr))
7101 return vr->min;
d2a85801
RB
7102 }
7103 return name;
7104}
7105
0bca51f0 7106/* Visit assignment STMT. If it produces an interesting range, record
bb9d2f4d 7107 the range in VR and set *OUTPUT_P to the LHS. */
0bca51f0 7108
bb9d2f4d
KV
7109static void
7110vrp_visit_assignment_or_call (gimple *stmt, tree *output_p, value_range *vr)
0bca51f0 7111{
bb9d2f4d 7112 tree lhs;
726a989a
RB
7113 enum gimple_code code = gimple_code (stmt);
7114 lhs = gimple_get_lhs (stmt);
bb9d2f4d 7115 *output_p = NULL_TREE;
0bca51f0
DN
7116
7117 /* We only keep track of ranges in integral and pointer types. */
7118 if (TREE_CODE (lhs) == SSA_NAME
e260a614
JL
7119 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
7120 /* It is valid to have NULL MIN/MAX values on a type. See
7121 build_range_type. */
7122 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
7123 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
0bca51f0
DN
7124 || POINTER_TYPE_P (TREE_TYPE (lhs))))
7125 {
1c3099cc
RB
7126 *output_p = lhs;
7127
cfef45c8 7128 /* Try folding the statement to a constant first. */
d2a85801
RB
7129 tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize,
7130 vrp_valueize_1);
1c3099cc
RB
7131 if (tem)
7132 {
7133 if (TREE_CODE (tem) == SSA_NAME
7134 && (SSA_NAME_IS_DEFAULT_DEF (tem)
7135 || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (tem))))
7136 {
7137 extract_range_from_ssa_name (vr, tem);
7138 return;
7139 }
7140 else if (is_gimple_min_invariant (tem))
7141 {
7142 set_value_range_to_value (vr, tem, NULL);
7143 return;
7144 }
7145 }
cfef45c8 7146 /* Then dispatch to value-range extracting functions. */
1c3099cc 7147 if (code == GIMPLE_CALL)
bb9d2f4d 7148 extract_range_basic (vr, stmt);
726a989a 7149 else
bb9d2f4d 7150 extract_range_from_assignment (vr, as_a <gassign *> (stmt));
0bca51f0 7151 }
0bca51f0
DN
7152}
7153
f5052e29 7154/* Helper that gets the value range of the SSA_NAME with version I
c80b4100 7155 or a symbolic range containing the SSA_NAME only if the value range
f5052e29
RG
7156 is varying or undefined. */
7157
526ceb68 7158static inline value_range
f5052e29
RG
7159get_vr_for_comparison (int i)
7160{
526ceb68 7161 value_range vr = *get_value_range (ssa_name (i));
f5052e29
RG
7162
7163 /* If name N_i does not have a valid range, use N_i as its own
7164 range. This allows us to compare against names that may
7165 have N_i in their ranges. */
7166 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
7167 {
7168 vr.type = VR_RANGE;
7169 vr.min = ssa_name (i);
7170 vr.max = ssa_name (i);
7171 }
7172
7173 return vr;
7174}
0bca51f0 7175
227858d1
DN
7176/* Compare all the value ranges for names equivalent to VAR with VAL
7177 using comparison code COMP. Return the same value returned by
12df8a7e
ILT
7178 compare_range_with_value, including the setting of
7179 *STRICT_OVERFLOW_P. */
227858d1
DN
7180
7181static tree
12df8a7e 7182compare_name_with_value (enum tree_code comp, tree var, tree val,
a75f5e30 7183 bool *strict_overflow_p, bool use_equiv_p)
227858d1
DN
7184{
7185 bitmap_iterator bi;
7186 unsigned i;
7187 bitmap e;
7188 tree retval, t;
12df8a7e 7189 int used_strict_overflow;
f5052e29 7190 bool sop;
526ceb68 7191 value_range equiv_vr;
227858d1
DN
7192
7193 /* Get the set of equivalences for VAR. */
7194 e = get_value_range (var)->equiv;
7195
12df8a7e
ILT
7196 /* Start at -1. Set it to 0 if we do a comparison without relying
7197 on overflow, or 1 if all comparisons rely on overflow. */
7198 used_strict_overflow = -1;
7199
f5052e29
RG
7200 /* Compare vars' value range with val. */
7201 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
7202 sop = false;
7203 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
e07e405d
ILT
7204 if (retval)
7205 used_strict_overflow = sop ? 1 : 0;
227858d1 7206
f5052e29
RG
7207 /* If the equiv set is empty we have done all work we need to do. */
7208 if (e == NULL)
7209 {
7210 if (retval
7211 && used_strict_overflow > 0)
7212 *strict_overflow_p = true;
7213 return retval;
7214 }
227858d1 7215
f5052e29
RG
7216 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
7217 {
6d59425d
RB
7218 tree name = ssa_name (i);
7219 if (! name)
7220 continue;
7221
a75f5e30 7222 if (! use_equiv_p
6d59425d
RB
7223 && ! SSA_NAME_IS_DEFAULT_DEF (name)
7224 && prop_simulate_again_p (SSA_NAME_DEF_STMT (name)))
a75f5e30
RB
7225 continue;
7226
f5052e29 7227 equiv_vr = get_vr_for_comparison (i);
12df8a7e
ILT
7228 sop = false;
7229 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
227858d1
DN
7230 if (t)
7231 {
96644aba
RG
7232 /* If we get different answers from different members
7233 of the equivalence set this check must be in a dead
7234 code region. Folding it to a trap representation
7235 would be correct here. For now just return don't-know. */
7236 if (retval != NULL
7237 && t != retval)
7238 {
7239 retval = NULL_TREE;
7240 break;
7241 }
227858d1 7242 retval = t;
12df8a7e
ILT
7243
7244 if (!sop)
7245 used_strict_overflow = 0;
7246 else if (used_strict_overflow < 0)
7247 used_strict_overflow = 1;
227858d1
DN
7248 }
7249 }
7250
f5052e29
RG
7251 if (retval
7252 && used_strict_overflow > 0)
7253 *strict_overflow_p = true;
227858d1 7254
f5052e29 7255 return retval;
227858d1
DN
7256}
7257
7258
7259/* Given a comparison code COMP and names N1 and N2, compare all the
8ab5f5c9 7260 ranges equivalent to N1 against all the ranges equivalent to N2
227858d1 7261 to determine the value of N1 COMP N2. Return the same value
12df8a7e
ILT
7262 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
7263 whether we relied on an overflow infinity in the comparison. */
7264
0bca51f0
DN
7265
7266static tree
12df8a7e
ILT
7267compare_names (enum tree_code comp, tree n1, tree n2,
7268 bool *strict_overflow_p)
227858d1
DN
7269{
7270 tree t, retval;
7271 bitmap e1, e2;
7272 bitmap_iterator bi1, bi2;
7273 unsigned i1, i2;
12df8a7e 7274 int used_strict_overflow;
f5052e29
RG
7275 static bitmap_obstack *s_obstack = NULL;
7276 static bitmap s_e1 = NULL, s_e2 = NULL;
227858d1
DN
7277
7278 /* Compare the ranges of every name equivalent to N1 against the
7279 ranges of every name equivalent to N2. */
7280 e1 = get_value_range (n1)->equiv;
7281 e2 = get_value_range (n2)->equiv;
7282
f5052e29
RG
7283 /* Use the fake bitmaps if e1 or e2 are not available. */
7284 if (s_obstack == NULL)
7285 {
7286 s_obstack = XNEW (bitmap_obstack);
7287 bitmap_obstack_initialize (s_obstack);
7288 s_e1 = BITMAP_ALLOC (s_obstack);
7289 s_e2 = BITMAP_ALLOC (s_obstack);
7290 }
7291 if (e1 == NULL)
7292 e1 = s_e1;
7293 if (e2 == NULL)
7294 e2 = s_e2;
7295
227858d1
DN
7296 /* Add N1 and N2 to their own set of equivalences to avoid
7297 duplicating the body of the loop just to check N1 and N2
7298 ranges. */
7299 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
7300 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
7301
7302 /* If the equivalence sets have a common intersection, then the two
7303 names can be compared without checking their ranges. */
7304 if (bitmap_intersect_p (e1, e2))
7305 {
7306 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7307 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7308
7309 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
7310 ? boolean_true_node
7311 : boolean_false_node;
7312 }
7313
12df8a7e
ILT
7314 /* Start at -1. Set it to 0 if we do a comparison without relying
7315 on overflow, or 1 if all comparisons rely on overflow. */
7316 used_strict_overflow = -1;
7317
227858d1
DN
7318 /* Otherwise, compare all the equivalent ranges. First, add N1 and
7319 N2 to their own set of equivalences to avoid duplicating the body
7320 of the loop just to check N1 and N2 ranges. */
7321 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
7322 {
6d59425d
RB
7323 if (! ssa_name (i1))
7324 continue;
7325
526ceb68 7326 value_range vr1 = get_vr_for_comparison (i1);
227858d1
DN
7327
7328 t = retval = NULL_TREE;
7329 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
7330 {
6d59425d
RB
7331 if (! ssa_name (i2))
7332 continue;
7333
3b7bab4d 7334 bool sop = false;
12df8a7e 7335
526ceb68 7336 value_range vr2 = get_vr_for_comparison (i2);
227858d1 7337
12df8a7e 7338 t = compare_ranges (comp, &vr1, &vr2, &sop);
227858d1
DN
7339 if (t)
7340 {
96644aba
RG
7341 /* If we get different answers from different members
7342 of the equivalence set this check must be in a dead
7343 code region. Folding it to a trap representation
7344 would be correct here. For now just return don't-know. */
7345 if (retval != NULL
7346 && t != retval)
7347 {
7348 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7349 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7350 return NULL_TREE;
7351 }
227858d1 7352 retval = t;
12df8a7e
ILT
7353
7354 if (!sop)
7355 used_strict_overflow = 0;
7356 else if (used_strict_overflow < 0)
7357 used_strict_overflow = 1;
227858d1
DN
7358 }
7359 }
7360
7361 if (retval)
7362 {
7363 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7364 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
12df8a7e
ILT
7365 if (used_strict_overflow > 0)
7366 *strict_overflow_p = true;
227858d1
DN
7367 return retval;
7368 }
7369 }
7370
7371 /* None of the equivalent ranges are useful in computing this
7372 comparison. */
7373 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7374 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7375 return NULL_TREE;
7376}
7377
da7db2ce
NS
7378/* Helper function for vrp_evaluate_conditional_warnv & other
7379 optimizers. */
6b99f156
JH
7380
7381static tree
7382vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
7383 tree op0, tree op1,
7384 bool * strict_overflow_p)
7385{
526ceb68 7386 value_range *vr0, *vr1;
6b99f156
JH
7387
7388 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
7389 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
7390
597f5997 7391 tree res = NULL_TREE;
6b99f156 7392 if (vr0 && vr1)
597f5997
RB
7393 res = compare_ranges (code, vr0, vr1, strict_overflow_p);
7394 if (!res && vr0)
7395 res = compare_range_with_value (code, vr0, op1, strict_overflow_p);
7396 if (!res && vr1)
7397 res = (compare_range_with_value
6b99f156 7398 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
597f5997 7399 return res;
6b99f156
JH
7400}
7401
2d3cd5d5
RAE
7402/* Helper function for vrp_evaluate_conditional_warnv. */
7403
7404static tree
7405vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
7406 tree op1, bool use_equiv_p,
6b99f156 7407 bool *strict_overflow_p, bool *only_ranges)
2d3cd5d5 7408{
6b99f156
JH
7409 tree ret;
7410 if (only_ranges)
7411 *only_ranges = true;
7412
2d3cd5d5
RAE
7413 /* We only deal with integral and pointer types. */
7414 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
7415 && !POINTER_TYPE_P (TREE_TYPE (op0)))
7416 return NULL_TREE;
7417
a75f5e30
RB
7418 if ((ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
7419 (code, op0, op1, strict_overflow_p)))
7420 return ret;
7421 if (only_ranges)
7422 *only_ranges = false;
7423 /* Do not use compare_names during propagation, it's quadratic. */
7424 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME
7425 && use_equiv_p)
7426 return compare_names (code, op0, op1, strict_overflow_p);
7427 else if (TREE_CODE (op0) == SSA_NAME)
7428 return compare_name_with_value (code, op0, op1,
7429 strict_overflow_p, use_equiv_p);
7430 else if (TREE_CODE (op1) == SSA_NAME)
7431 return compare_name_with_value (swap_tree_comparison (code), op1, op0,
7432 strict_overflow_p, use_equiv_p);
2d3cd5d5
RAE
7433 return NULL_TREE;
7434}
227858d1 7435
e80d7580 7436/* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
0c948c27
ILT
7437 information. Return NULL if the conditional can not be evaluated.
7438 The ranges of all the names equivalent with the operands in COND
7439 will be used when trying to compute the value. If the result is
7440 based on undefined signed overflow, issue a warning if
7441 appropriate. */
7442
ff7ffb8f 7443static tree
355fe088 7444vrp_evaluate_conditional (tree_code code, tree op0, tree op1, gimple *stmt)
0c948c27
ILT
7445{
7446 bool sop;
7447 tree ret;
6b99f156 7448 bool only_ranges;
0c948c27 7449
09a782eb
RG
7450 /* Some passes and foldings leak constants with overflow flag set
7451 into the IL. Avoid doing wrong things with these and bail out. */
7452 if ((TREE_CODE (op0) == INTEGER_CST
7453 && TREE_OVERFLOW (op0))
7454 || (TREE_CODE (op1) == INTEGER_CST
7455 && TREE_OVERFLOW (op1)))
7456 return NULL_TREE;
7457
0c948c27 7458 sop = false;
6b99f156
JH
7459 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
7460 &only_ranges);
0c948c27
ILT
7461
7462 if (ret && sop)
7463 {
7464 enum warn_strict_overflow_code wc;
7465 const char* warnmsg;
7466
7467 if (is_gimple_min_invariant (ret))
7468 {
7469 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
7470 warnmsg = G_("assuming signed overflow does not occur when "
7471 "simplifying conditional to constant");
7472 }
7473 else
7474 {
7475 wc = WARN_STRICT_OVERFLOW_COMPARISON;
7476 warnmsg = G_("assuming signed overflow does not occur when "
7477 "simplifying conditional");
7478 }
7479
7480 if (issue_strict_overflow_warning (wc))
7481 {
726a989a 7482 location_t location;
0c948c27 7483
726a989a
RB
7484 if (!gimple_has_location (stmt))
7485 location = input_location;
0c948c27 7486 else
726a989a 7487 location = gimple_location (stmt);
fab922b1 7488 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
0c948c27
ILT
7489 }
7490 }
7491
faebccf9 7492 if (warn_type_limits
6b99f156 7493 && ret && only_ranges
e80d7580
RAE
7494 && TREE_CODE_CLASS (code) == tcc_comparison
7495 && TREE_CODE (op0) == SSA_NAME)
faebccf9
DN
7496 {
7497 /* If the comparison is being folded and the operand on the LHS
7498 is being compared against a constant value that is outside of
7499 the natural range of OP0's type, then the predicate will
7500 always fold regardless of the value of OP0. If -Wtype-limits
7501 was specified, emit a warning. */
faebccf9 7502 tree type = TREE_TYPE (op0);
526ceb68 7503 value_range *vr0 = get_value_range (op0);
faebccf9 7504
737fde1d 7505 if (vr0->type == VR_RANGE
faebccf9
DN
7506 && INTEGRAL_TYPE_P (type)
7507 && vrp_val_is_min (vr0->min)
7508 && vrp_val_is_max (vr0->max)
7509 && is_gimple_min_invariant (op1))
faebccf9 7510 {
726a989a 7511 location_t location;
faebccf9 7512
726a989a
RB
7513 if (!gimple_has_location (stmt))
7514 location = input_location;
faebccf9 7515 else
726a989a 7516 location = gimple_location (stmt);
faebccf9 7517
b8698a0f 7518 warning_at (location, OPT_Wtype_limits,
fab922b1 7519 integer_zerop (ret)
2349c14b
MLI
7520 ? G_("comparison always false "
7521 "due to limited range of data type")
7522 : G_("comparison always true "
7523 "due to limited range of data type"));
faebccf9
DN
7524 }
7525 }
7526
0c948c27
ILT
7527 return ret;
7528}
7529
0bca51f0
DN
7530
7531/* Visit conditional statement STMT. If we can determine which edge
7532 will be taken out of STMT's basic block, record it in
bb9d2f4d 7533 *TAKEN_EDGE_P. Otherwise, set *TAKEN_EDGE_P to NULL. */
0bca51f0 7534
bb9d2f4d 7535static void
538dd0b7 7536vrp_visit_cond_stmt (gcond *stmt, edge *taken_edge_p)
0bca51f0 7537{
726a989a 7538 tree val;
12df8a7e 7539 bool sop;
0bca51f0
DN
7540
7541 *taken_edge_p = NULL;
0bca51f0
DN
7542
7543 if (dump_file && (dump_flags & TDF_DETAILS))
7544 {
7545 tree use;
7546 ssa_op_iter i;
7547
7548 fprintf (dump_file, "\nVisiting conditional with predicate: ");
726a989a 7549 print_gimple_stmt (dump_file, stmt, 0, 0);
0bca51f0 7550 fprintf (dump_file, "\nWith known ranges\n");
b8698a0f 7551
0bca51f0
DN
7552 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
7553 {
7554 fprintf (dump_file, "\t");
7555 print_generic_expr (dump_file, use, 0);
7556 fprintf (dump_file, ": ");
227858d1 7557 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
0bca51f0
DN
7558 }
7559
7560 fprintf (dump_file, "\n");
7561 }
7562
7563 /* Compute the value of the predicate COND by checking the known
227858d1 7564 ranges of each of its operands.
b8698a0f 7565
227858d1
DN
7566 Note that we cannot evaluate all the equivalent ranges here
7567 because those ranges may not yet be final and with the current
7568 propagation strategy, we cannot determine when the value ranges
7569 of the names in the equivalence set have changed.
7570
7571 For instance, given the following code fragment
7572
7573 i_5 = PHI <8, i_13>
7574 ...
7575 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
7576 if (i_14 == 1)
7577 ...
7578
7579 Assume that on the first visit to i_14, i_5 has the temporary
7580 range [8, 8] because the second argument to the PHI function is
7581 not yet executable. We derive the range ~[0, 0] for i_14 and the
7582 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
7583 the first time, since i_14 is equivalent to the range [8, 8], we
7584 determine that the predicate is always false.
7585
7586 On the next round of propagation, i_13 is determined to be
7587 VARYING, which causes i_5 to drop down to VARYING. So, another
7588 visit to i_14 is scheduled. In this second visit, we compute the
7589 exact same range and equivalence set for i_14, namely ~[0, 0] and
7590 { i_5 }. But we did not have the previous range for i_5
7591 registered, so vrp_visit_assignment thinks that the range for
7592 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
7593 is not visited again, which stops propagation from visiting
7594 statements in the THEN clause of that if().
7595
7596 To properly fix this we would need to keep the previous range
7597 value for the names in the equivalence set. This way we would've
7598 discovered that from one visit to the other i_5 changed from
7599 range [8, 8] to VR_VARYING.
7600
7601 However, fixing this apparent limitation may not be worth the
7602 additional checking. Testing on several code bases (GCC, DLV,
7603 MICO, TRAMP3D and SPEC2000) showed that doing this results in
7604 4 more predicates folded in SPEC. */
12df8a7e 7605 sop = false;
e80d7580 7606
726a989a
RB
7607 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
7608 gimple_cond_lhs (stmt),
7609 gimple_cond_rhs (stmt),
6b99f156 7610 false, &sop, NULL);
0bca51f0 7611 if (val)
12df8a7e
ILT
7612 {
7613 if (!sop)
726a989a 7614 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
12df8a7e
ILT
7615 else
7616 {
7617 if (dump_file && (dump_flags & TDF_DETAILS))
7618 fprintf (dump_file,
7619 "\nIgnoring predicate evaluation because "
7620 "it assumes that signed overflow is undefined");
7621 val = NULL_TREE;
7622 }
7623 }
0bca51f0
DN
7624
7625 if (dump_file && (dump_flags & TDF_DETAILS))
7626 {
7627 fprintf (dump_file, "\nPredicate evaluates to: ");
7628 if (val == NULL_TREE)
7629 fprintf (dump_file, "DON'T KNOW\n");
7630 else
7631 print_generic_stmt (dump_file, val, 0);
7632 }
0bca51f0
DN
7633}
7634
b7d8d447
RAE
7635/* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
7636 that includes the value VAL. The search is restricted to the range
726a989a 7637 [START_IDX, n - 1] where n is the size of VEC.
0bca51f0 7638
b7d8d447
RAE
7639 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
7640 returned.
7641
92ef7fb1 7642 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
b7d8d447
RAE
7643 it is placed in IDX and false is returned.
7644
726a989a 7645 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
b7d8d447 7646 returned. */
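/* For instance, given the hypothetical (sorted) case labels

     case 1:  case 2:  case 5 ... 7:  case 10:    (indices 1 to 4,
                                                    default at index 0)

   searching for VAL == 6 returns true with *IDX == 3; searching for
   VAL == 4 returns false with *IDX == 3 (the first larger label); and
   searching for VAL == 11 returns false with *IDX == 5 == n.  */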
8aea0bf0
RG
7647
7648static bool
538dd0b7 7649find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
8aea0bf0 7650{
726a989a 7651 size_t n = gimple_switch_num_labels (stmt);
b7d8d447
RAE
7652 size_t low, high;
7653
7654 /* Find case label for minimum of the value range or the next one.
7655 At each iteration we are searching in [low, high - 1]. */
8aea0bf0 7656
726a989a 7657 for (low = start_idx, high = n; high != low; )
8aea0bf0
RG
7658 {
7659 tree t;
7660 int cmp;
726a989a 7661 /* Note that i != high, so we never ask for n. */
b7d8d447 7662 size_t i = (high + low) / 2;
726a989a 7663 t = gimple_switch_label (stmt, i);
8aea0bf0
RG
7664
7665 /* Cache the result of comparing CASE_LOW and val. */
7666 cmp = tree_int_cst_compare (CASE_LOW (t), val);
7667
b7d8d447
RAE
7668 if (cmp == 0)
7669 {
7670 /* Ranges cannot be empty. */
7671 *idx = i;
7672 return true;
7673 }
7674 else if (cmp > 0)
8aea0bf0
RG
7675 high = i;
7676 else
b7d8d447
RAE
7677 {
7678 low = i + 1;
7679 if (CASE_HIGH (t) != NULL
7680 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
8aea0bf0
RG
7681 {
7682 *idx = i;
7683 return true;
7684 }
7685 }
7686 }
7687
b7d8d447 7688 *idx = high;
8aea0bf0
RG
7689 return false;
7690}
7691
b7d8d447
RAE
7692/* Searches the case label vector VEC for the range of CASE_LABELs that is used
7693 for values between MIN and MAX. The first index is placed in MIN_IDX. The
7694 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
7695 then MAX_IDX < MIN_IDX.
7696 Returns true if the default label is not needed. */
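/* For instance, with the hypothetical case labels 1, 2, 5 ... 7 and 10,
   the values between MIN == 1 and MAX == 2 use labels 1 .. 2 and the
   default label is not needed; the values between 1 and 6 use labels
   1 .. 3 but still need the default label, because of the gap between
   2 and 5.  */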
7697
7698static bool
538dd0b7 7699find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
726a989a 7700 size_t *max_idx)
b7d8d447
RAE
7701{
7702 size_t i, j;
726a989a
RB
7703 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
7704 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
b7d8d447
RAE
7705
7706 if (i == j
7707 && min_take_default
7708 && max_take_default)
7709 {
b8698a0f 7710 /* Only the default case label reached.
b7d8d447
RAE
7711 Return an empty range. */
7712 *min_idx = 1;
7713 *max_idx = 0;
7714 return false;
7715 }
7716 else
7717 {
7718 bool take_default = min_take_default || max_take_default;
7719 tree low, high;
7720 size_t k;
7721
7722 if (max_take_default)
7723 j--;
7724
7725 /* If the case label range is continuous, we do not need
7726 the default case label. Verify that. */
726a989a
RB
7727 high = CASE_LOW (gimple_switch_label (stmt, i));
7728 if (CASE_HIGH (gimple_switch_label (stmt, i)))
7729 high = CASE_HIGH (gimple_switch_label (stmt, i));
b7d8d447
RAE
7730 for (k = i + 1; k <= j; ++k)
7731 {
726a989a 7732 low = CASE_LOW (gimple_switch_label (stmt, k));
d35936ab 7733 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
b7d8d447
RAE
7734 {
7735 take_default = true;
7736 break;
7737 }
7738 high = low;
726a989a
RB
7739 if (CASE_HIGH (gimple_switch_label (stmt, k)))
7740 high = CASE_HIGH (gimple_switch_label (stmt, k));
b7d8d447
RAE
7741 }
7742
7743 *min_idx = i;
7744 *max_idx = j;
7745 return !take_default;
7746 }
7747}
7748
8bb37e9a
TV
7749/* Searches the case label vector VEC for the ranges of CASE_LABELs that are
7750 used in range VR. The indices are placed in MIN_IDX1, MAX_IDX1, MIN_IDX2 and
7751 MAX_IDX2. If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
7752 Returns true if the default label is not needed. */
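/* For instance, with the hypothetical case labels 1, 2, 5 and 7, the
   range [2, 5] selects labels 2 .. 3 in [MIN_IDX1, MAX_IDX1] with an
   empty second pair, whereas the anti-range ~[2, 5] selects labels
   1 .. 1 and 4 .. 4 in the two pairs; in both cases the default label
   is still needed.  */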
7753
7754static bool
526ceb68 7755find_case_label_ranges (gswitch *stmt, value_range *vr, size_t *min_idx1,
8bb37e9a
TV
7756 size_t *max_idx1, size_t *min_idx2,
7757 size_t *max_idx2)
7758{
7759 size_t i, j, k, l;
7760 unsigned int n = gimple_switch_num_labels (stmt);
7761 bool take_default;
7762 tree case_low, case_high;
7763 tree min = vr->min, max = vr->max;
7764
7765 gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);
7766
7767 take_default = !find_case_label_range (stmt, min, max, &i, &j);
7768
7769 /* Set the second range to empty. */
7770 *min_idx2 = 1;
7771 *max_idx2 = 0;
7772
7773 if (vr->type == VR_RANGE)
7774 {
7775 *min_idx1 = i;
7776 *max_idx1 = j;
7777 return !take_default;
7778 }
7779
7780 /* Set first range to all case labels. */
7781 *min_idx1 = 1;
7782 *max_idx1 = n - 1;
7783
7784 if (i > j)
7785 return false;
7786
7787 /* Make sure all the values of case labels [i , j] are contained in
7788 range [MIN, MAX]. */
7789 case_low = CASE_LOW (gimple_switch_label (stmt, i));
7790 case_high = CASE_HIGH (gimple_switch_label (stmt, j));
7791 if (tree_int_cst_compare (case_low, min) < 0)
7792 i += 1;
7793 if (case_high != NULL_TREE
7794 && tree_int_cst_compare (max, case_high) < 0)
7795 j -= 1;
7796
7797 if (i > j)
7798 return false;
7799
7800 /* If the range spans case labels [i, j], the corresponding anti-range spans
7801 the labels [1, i - 1] and [j + 1, n - 1]. */
7802 k = j + 1;
7803 l = n - 1;
7804 if (k > l)
7805 {
7806 k = 1;
7807 l = 0;
7808 }
7809
7810 j = i - 1;
7811 i = 1;
7812 if (i > j)
7813 {
7814 i = k;
7815 j = l;
7816 k = 1;
7817 l = 0;
7818 }
7819
7820 *min_idx1 = i;
7821 *max_idx1 = j;
7822 *min_idx2 = k;
7823 *max_idx2 = l;
7824 return false;
7825}
7826
8aea0bf0
RG
7827/* Visit switch statement STMT. If we can determine which edge
7828 will be taken out of STMT's basic block, record it in
bb9d2f4d 7829 *TAKEN_EDGE_P. Otherwise, *TAKEN_EDGE_P is set to NULL. */
8aea0bf0 7830
bb9d2f4d 7831static void
538dd0b7 7832vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p)
8aea0bf0
RG
7833{
7834 tree op, val;
526ceb68 7835 value_range *vr;
8bb37e9a 7836 size_t i = 0, j = 0, k, l;
b7d8d447 7837 bool take_default;
8aea0bf0
RG
7838
7839 *taken_edge_p = NULL;
726a989a 7840 op = gimple_switch_index (stmt);
8aea0bf0 7841 if (TREE_CODE (op) != SSA_NAME)
bb9d2f4d 7842 return;
8aea0bf0
RG
7843
7844 vr = get_value_range (op);
7845 if (dump_file && (dump_flags & TDF_DETAILS))
7846 {
7847 fprintf (dump_file, "\nVisiting switch expression with operand ");
7848 print_generic_expr (dump_file, op, 0);
7849 fprintf (dump_file, " with known range ");
7850 dump_value_range (dump_file, vr);
7851 fprintf (dump_file, "\n");
7852 }
7853
8bb37e9a
TV
7854 if ((vr->type != VR_RANGE
7855 && vr->type != VR_ANTI_RANGE)
8aea0bf0 7856 || symbolic_range_p (vr))
bb9d2f4d 7857 return;
8aea0bf0
RG
7858
7859 /* Find the single edge that is taken from the switch expression. */
8bb37e9a 7860 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
8aea0bf0 7861
b7d8d447
RAE
7862 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
7863 label */
8aea0bf0 7864 if (j < i)
b7d8d447
RAE
7865 {
7866 gcc_assert (take_default);
726a989a 7867 val = gimple_switch_default_label (stmt);
b7d8d447 7868 }
8aea0bf0
RG
7869 else
7870 {
b7d8d447
RAE
7871 /* Check if labels with index i to j and maybe the default label
7872 are all reaching the same label. */
7873
726a989a 7874 val = gimple_switch_label (stmt, i);
b7d8d447 7875 if (take_default
726a989a
RB
7876 && CASE_LABEL (gimple_switch_default_label (stmt))
7877 != CASE_LABEL (val))
8aea0bf0
RG
7878 {
7879 if (dump_file && (dump_flags & TDF_DETAILS))
7880 fprintf (dump_file, " not a single destination for this "
7881 "range\n");
bb9d2f4d 7882 return;
8aea0bf0
RG
7883 }
7884 for (++i; i <= j; ++i)
7885 {
726a989a 7886 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
8aea0bf0
RG
7887 {
7888 if (dump_file && (dump_flags & TDF_DETAILS))
7889 fprintf (dump_file, " not a single destination for this "
7890 "range\n");
bb9d2f4d 7891 return;
8aea0bf0
RG
7892 }
7893 }
8bb37e9a
TV
7894 for (; k <= l; ++k)
7895 {
7896 if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
7897 {
7898 if (dump_file && (dump_flags & TDF_DETAILS))
7899 fprintf (dump_file, " not a single destination for this "
7900 "range\n");
bb9d2f4d 7901 return;
8bb37e9a
TV
7902 }
7903 }
8aea0bf0
RG
7904 }
7905
726a989a 7906 *taken_edge_p = find_edge (gimple_bb (stmt),
8aea0bf0
RG
7907 label_to_block (CASE_LABEL (val)));
7908
7909 if (dump_file && (dump_flags & TDF_DETAILS))
7910 {
7911 fprintf (dump_file, " will take edge to ");
7912 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
7913 }
8aea0bf0
RG
7914}
7915
7916
bb9d2f4d
KV
7917/* Evaluate statement STMT. If the statement produces a useful range,
7918 set VR and the corresponding OUTPUT_P.
7919
7920 If STMT is a conditional branch and we can determine its truth
7921 value, the taken edge is recorded in *TAKEN_EDGE_P. */
7922
7923static void
7924extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
7925 tree *output_p, value_range *vr)
7926{
7927
7928 if (dump_file && (dump_flags & TDF_DETAILS))
7929 {
7930 fprintf (dump_file, "\nVisiting statement:\n");
7931 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
7932 }
7933
7934 if (!stmt_interesting_for_vrp (stmt))
7935 gcc_assert (stmt_ends_bb_p (stmt));
7936 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
7937 vrp_visit_assignment_or_call (stmt, output_p, vr);
7938 else if (gimple_code (stmt) == GIMPLE_COND)
7939 vrp_visit_cond_stmt (as_a <gcond *> (stmt), taken_edge_p);
7940 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7941 vrp_visit_switch_stmt (as_a <gswitch *> (stmt), taken_edge_p);
7942}
7943
0bca51f0
DN
7944/* Evaluate statement STMT. If the statement produces a useful range,
7945 return SSA_PROP_INTERESTING and record the SSA name with the
7946 interesting range into *OUTPUT_P.
7947
7948 If STMT is a conditional branch and we can determine its truth
7949 value, the taken edge is recorded in *TAKEN_EDGE_P.
7950
7951 If STMT produces a varying value, return SSA_PROP_VARYING. */
7952
7953static enum ssa_prop_result
355fe088 7954vrp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
0bca51f0 7955{
bb9d2f4d
KV
7956 value_range vr = VR_INITIALIZER;
7957 tree lhs = gimple_get_lhs (stmt);
bb9d2f4d 7958 extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
0bca51f0 7959
bb9d2f4d 7960 if (*output_p)
0bca51f0 7961 {
bb9d2f4d
KV
7962 if (update_value_range (*output_p, &vr))
7963 {
7964 if (dump_file && (dump_flags & TDF_DETAILS))
7965 {
7966 fprintf (dump_file, "Found new range for ");
7967 print_generic_expr (dump_file, *output_p, 0);
7968 fprintf (dump_file, ": ");
7969 dump_value_range (dump_file, &vr);
7970 fprintf (dump_file, "\n");
7971 }
7972
7973 if (vr.type == VR_VARYING)
7974 return SSA_PROP_VARYING;
7975
7976 return SSA_PROP_INTERESTING;
7977 }
7978 return SSA_PROP_NOT_INTERESTING;
0bca51f0
DN
7979 }
7980
bb9d2f4d
KV
7981 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
7982 switch (gimple_call_internal_fn (stmt))
7983 {
7984 case IFN_ADD_OVERFLOW:
7985 case IFN_SUB_OVERFLOW:
7986 case IFN_MUL_OVERFLOW:
7987 /* These internal calls return _Complex integer type,
7988 which VRP does not track, but the immediate uses
7989 thereof might be interesting. */
7990 if (lhs && TREE_CODE (lhs) == SSA_NAME)
7991 {
7992 imm_use_iterator iter;
7993 use_operand_p use_p;
7994 enum ssa_prop_result res = SSA_PROP_VARYING;
7995
7996 set_value_range_to_varying (get_value_range (lhs));
7997
7998 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
7999 {
8000 gimple *use_stmt = USE_STMT (use_p);
8001 if (!is_gimple_assign (use_stmt))
8002 continue;
8003 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
8004 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
8005 continue;
8006 tree rhs1 = gimple_assign_rhs1 (use_stmt);
8007 tree use_lhs = gimple_assign_lhs (use_stmt);
8008 if (TREE_CODE (rhs1) != rhs_code
8009 || TREE_OPERAND (rhs1, 0) != lhs
8010 || TREE_CODE (use_lhs) != SSA_NAME
8011 || !stmt_interesting_for_vrp (use_stmt)
8012 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
8013 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
8014 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
8015 continue;
8016
8017 /* If there is a change in the value range for any of the
8018 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
8019 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
8020 or IMAGPART_EXPR immediate uses, but none of them have
8021 a change in their value ranges, return
8022 SSA_PROP_NOT_INTERESTING. If there are no
8023 {REAL,IMAG}PART_EXPR uses at all,
8024 return SSA_PROP_VARYING. */
8025 value_range new_vr = VR_INITIALIZER;
8026 extract_range_basic (&new_vr, use_stmt);
8027 value_range *old_vr = get_value_range (use_lhs);
8028 if (old_vr->type != new_vr.type
8029 || !vrp_operand_equal_p (old_vr->min, new_vr.min)
8030 || !vrp_operand_equal_p (old_vr->max, new_vr.max)
8031 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
8032 res = SSA_PROP_INTERESTING;
8033 else
8034 res = SSA_PROP_NOT_INTERESTING;
8035 BITMAP_FREE (new_vr.equiv);
8036 if (res == SSA_PROP_INTERESTING)
8037 {
8038 *output_p = lhs;
8039 return res;
8040 }
8041 }
8042
8043 return res;
8044 }
8045 break;
8046 default:
8047 break;
8048 }
0bca51f0
DN
8049
8050 /* All other statements produce nothing of interest for VRP, so mark
8051 their outputs varying and prevent further simulation. */
4d6484dd 8052 set_defs_to_varying (stmt);
0bca51f0 8053
bb9d2f4d 8054 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
0bca51f0
DN
8055}
8056
b54e19c2
RG
8057/* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
8058 { VR1TYPE, VR1MIN, VR1MAX } and store the result
8059 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
8060 possible such range. The resulting range is not canonicalized. */
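/* For instance, the union of the ranges [0, 5] and [3, 10] is their
   convex hull [0, 10]; the union of the disjoint ranges [0, 2] and
   [8, 10] is likewise widened to the hull [0, 10]; but the union of
   [TYPE_MIN, 2] and [8, TYPE_MAX] is represented exactly as the
   anti-range ~[3, 7].  */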
8061
8062static void
8063union_ranges (enum value_range_type *vr0type,
8064 tree *vr0min, tree *vr0max,
8065 enum value_range_type vr1type,
8066 tree vr1min, tree vr1max)
8067{
661d6efd
RB
8068 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
8069 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
b54e19c2
RG
8070
8071 /* [] is vr0, () is vr1 in the following classification comments. */
8072 if (mineq && maxeq)
8073 {
8074 /* [( )] */
8075 if (*vr0type == vr1type)
8076 /* Nothing to do for equal ranges. */
8077 ;
8078 else if ((*vr0type == VR_RANGE
8079 && vr1type == VR_ANTI_RANGE)
8080 || (*vr0type == VR_ANTI_RANGE
8081 && vr1type == VR_RANGE))
8082 {
8083 /* For anti-range with range union the result is varying. */
8084 goto give_up;
8085 }
8086 else
8087 gcc_unreachable ();
8088 }
8089 else if (operand_less_p (*vr0max, vr1min) == 1
8090 || operand_less_p (vr1max, *vr0min) == 1)
8091 {
8092 /* [ ] ( ) or ( ) [ ]
8093 If the ranges have an empty intersection, result of the union
8094 operation is the anti-range or if both are anti-ranges
8095 it covers all. */
8096 if (*vr0type == VR_ANTI_RANGE
8097 && vr1type == VR_ANTI_RANGE)
8098 goto give_up;
8099 else if (*vr0type == VR_ANTI_RANGE
8100 && vr1type == VR_RANGE)
8101 ;
8102 else if (*vr0type == VR_RANGE
8103 && vr1type == VR_ANTI_RANGE)
8104 {
8105 *vr0type = vr1type;
8106 *vr0min = vr1min;
8107 *vr0max = vr1max;
8108 }
8109 else if (*vr0type == VR_RANGE
8110 && vr1type == VR_RANGE)
8111 {
8112 /* The result is the convex hull of both ranges. */
8113 if (operand_less_p (*vr0max, vr1min) == 1)
8114 {
8115 /* If the result can be an anti-range, create one. */
8116 if (TREE_CODE (*vr0max) == INTEGER_CST
8117 && TREE_CODE (vr1min) == INTEGER_CST
8118 && vrp_val_is_min (*vr0min)
8119 && vrp_val_is_max (vr1max))
8120 {
8121 tree min = int_const_binop (PLUS_EXPR,
807e902e
KZ
8122 *vr0max,
8123 build_int_cst (TREE_TYPE (*vr0max), 1));
b54e19c2 8124 tree max = int_const_binop (MINUS_EXPR,
807e902e
KZ
8125 vr1min,
8126 build_int_cst (TREE_TYPE (vr1min), 1));
b54e19c2
RG
8127 if (!operand_less_p (max, min))
8128 {
8129 *vr0type = VR_ANTI_RANGE;
8130 *vr0min = min;
8131 *vr0max = max;
8132 }
8133 else
8134 *vr0max = vr1max;
8135 }
8136 else
8137 *vr0max = vr1max;
8138 }
8139 else
8140 {
8141 /* If the result can be an anti-range, create one. */
8142 if (TREE_CODE (vr1max) == INTEGER_CST
8143 && TREE_CODE (*vr0min) == INTEGER_CST
8144 && vrp_val_is_min (vr1min)
8145 && vrp_val_is_max (*vr0max))
8146 {
8147 tree min = int_const_binop (PLUS_EXPR,
807e902e
KZ
8148 vr1max,
8149 build_int_cst (TREE_TYPE (vr1max), 1));
b54e19c2 8150 tree max = int_const_binop (MINUS_EXPR,
807e902e
KZ
8151 *vr0min,
8152 build_int_cst (TREE_TYPE (*vr0min), 1));
b54e19c2
RG
8153 if (!operand_less_p (max, min))
8154 {
8155 *vr0type = VR_ANTI_RANGE;
8156 *vr0min = min;
8157 *vr0max = max;
8158 }
8159 else
8160 *vr0min = vr1min;
8161 }
8162 else
8163 *vr0min = vr1min;
8164 }
8165 }
8166 else
8167 gcc_unreachable ();
8168 }
8169 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
8170 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
8171 {
8172 /* [ ( ) ] or [( ) ] or [ ( )] */
8173 if (*vr0type == VR_RANGE
8174 && vr1type == VR_RANGE)
8175 ;
8176 else if (*vr0type == VR_ANTI_RANGE
8177 && vr1type == VR_ANTI_RANGE)
8178 {
8179 *vr0type = vr1type;
8180 *vr0min = vr1min;
8181 *vr0max = vr1max;
8182 }
8183 else if (*vr0type == VR_ANTI_RANGE
8184 && vr1type == VR_RANGE)
8185 {
8186 /* Arbitrarily choose the right or left gap. */
8187 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
807e902e
KZ
8188 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8189 build_int_cst (TREE_TYPE (vr1min), 1));
b54e19c2 8190 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
807e902e
KZ
8191 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8192 build_int_cst (TREE_TYPE (vr1max), 1));
b54e19c2
RG
8193 else
8194 goto give_up;
8195 }
8196 else if (*vr0type == VR_RANGE
8197 && vr1type == VR_ANTI_RANGE)
8198 /* The result covers everything. */
8199 goto give_up;
8200 else
8201 gcc_unreachable ();
8202 }
8203 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
8204 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
8205 {
8206 /* ( [ ] ) or ([ ] ) or ( [ ]) */
8207 if (*vr0type == VR_RANGE
8208 && vr1type == VR_RANGE)
8209 {
8210 *vr0type = vr1type;
8211 *vr0min = vr1min;
8212 *vr0max = vr1max;
8213 }
8214 else if (*vr0type == VR_ANTI_RANGE
8215 && vr1type == VR_ANTI_RANGE)
8216 ;
8217 else if (*vr0type == VR_RANGE
8218 && vr1type == VR_ANTI_RANGE)
8219 {
8220 *vr0type = VR_ANTI_RANGE;
8221 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
8222 {
807e902e
KZ
8223 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8224 build_int_cst (TREE_TYPE (*vr0min), 1));
b54e19c2
RG
8225 *vr0min = vr1min;
8226 }
8227 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
8228 {
807e902e
KZ
8229 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8230 build_int_cst (TREE_TYPE (*vr0max), 1));
b54e19c2
RG
8231 *vr0max = vr1max;
8232 }
8233 else
8234 goto give_up;
8235 }
8236 else if (*vr0type == VR_ANTI_RANGE
8237 && vr1type == VR_RANGE)
8238 /* The result covers everything. */
8239 goto give_up;
8240 else
8241 gcc_unreachable ();
8242 }
8243 else if ((operand_less_p (vr1min, *vr0max) == 1
8244 || operand_equal_p (vr1min, *vr0max, 0))
5ef0de9b
JJ
8245 && operand_less_p (*vr0min, vr1min) == 1
8246 && operand_less_p (*vr0max, vr1max) == 1)
b54e19c2
RG
8247 {
8248 /* [ ( ] ) or [ ]( ) */
8249 if (*vr0type == VR_RANGE
8250 && vr1type == VR_RANGE)
8251 *vr0max = vr1max;
8252 else if (*vr0type == VR_ANTI_RANGE
8253 && vr1type == VR_ANTI_RANGE)
8254 *vr0min = vr1min;
8255 else if (*vr0type == VR_ANTI_RANGE
8256 && vr1type == VR_RANGE)
8257 {
8258 if (TREE_CODE (vr1min) == INTEGER_CST)
807e902e
KZ
8259 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8260 build_int_cst (TREE_TYPE (vr1min), 1));
b54e19c2
RG
8261 else
8262 goto give_up;
8263 }
8264 else if (*vr0type == VR_RANGE
8265 && vr1type == VR_ANTI_RANGE)
8266 {
8267 if (TREE_CODE (*vr0max) == INTEGER_CST)
8268 {
8269 *vr0type = vr1type;
807e902e
KZ
8270 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8271 build_int_cst (TREE_TYPE (*vr0max), 1));
b54e19c2
RG
8272 *vr0max = vr1max;
8273 }
8274 else
8275 goto give_up;
8276 }
8277 else
8278 gcc_unreachable ();
8279 }
8280 else if ((operand_less_p (*vr0min, vr1max) == 1
8281 || operand_equal_p (*vr0min, vr1max, 0))
5ef0de9b
JJ
8282 && operand_less_p (vr1min, *vr0min) == 1
8283 && operand_less_p (vr1max, *vr0max) == 1)
b54e19c2
RG
8284 {
8285 /* ( [ ) ] or ( )[ ] */
8286 if (*vr0type == VR_RANGE
8287 && vr1type == VR_RANGE)
8288 *vr0min = vr1min;
8289 else if (*vr0type == VR_ANTI_RANGE
8290 && vr1type == VR_ANTI_RANGE)
8291 *vr0max = vr1max;
8292 else if (*vr0type == VR_ANTI_RANGE
8293 && vr1type == VR_RANGE)
8294 {
8295 if (TREE_CODE (vr1max) == INTEGER_CST)
807e902e
KZ
8296 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8297 build_int_cst (TREE_TYPE (vr1max), 1));
b54e19c2
RG
8298 else
8299 goto give_up;
8300 }
8301 else if (*vr0type == VR_RANGE
8302 && vr1type == VR_ANTI_RANGE)
8303 {
8304 if (TREE_CODE (*vr0min) == INTEGER_CST)
8305 {
8306 *vr0type = vr1type;
8307 *vr0min = vr1min;
807e902e
KZ
8308 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8309 build_int_cst (TREE_TYPE (*vr0min), 1));
b54e19c2
RG
8310 }
8311 else
8312 goto give_up;
8313 }
8314 else
8315 gcc_unreachable ();
8316 }
8317 else
8318 goto give_up;
8319
8320 return;
8321
8322give_up:
8323 *vr0type = VR_VARYING;
8324 *vr0min = NULL_TREE;
8325 *vr0max = NULL_TREE;
8326}
8327
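/* Illustrative sketch (not GCC code; names and the 8-bit bounds are
   invented for the example): the case above where the union of two
   disjoint constant ranges is representable as an anti-range of the
   uncovered gap, falling back to the convex hull otherwise.  */

struct toy_range { int is_anti; long lo, hi; }; /* is_anti: ~[lo, hi] */

static struct toy_range
toy_union_disjoint (long min0, long max0, long min1, long max1)
{
  struct toy_range r;
  /* Assumes max0 < min1 and a signed 8-bit type spanning [-128, 127].  */
  if (min0 == -128 && max1 == 127 && max0 + 1 <= min1 - 1)
    {
      /* Both type extremes are covered, so the union is everything
	 except the hole between the two ranges: ~[max0 + 1, min1 - 1].  */
      r.is_anti = 1;
      r.lo = max0 + 1;
      r.hi = min1 - 1;
    }
  else
    {
      /* Otherwise fall back to the convex hull [min0, max1].  */
      r.is_anti = 0;
      r.lo = min0;
      r.hi = max1;
    }
  return r;
}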
3928c098
RG
8328/* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
8329 { VR1TYPE, VR1MIN, VR1MAX } and store the result
8330 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
8331 possible such range. The resulting range is not canonicalized. */
8332
8333static void
8334intersect_ranges (enum value_range_type *vr0type,
8335 tree *vr0min, tree *vr0max,
8336 enum value_range_type vr1type,
8337 tree vr1min, tree vr1max)
8338{
661d6efd
RB
8339 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
8340 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
105b7208 8341
3928c098 8342 /* [] is vr0, () is vr1 in the following classification comments. */
105b7208
RG
8343 if (mineq && maxeq)
8344 {
8345 /* [( )] */
8346 if (*vr0type == vr1type)
8347 /* Nothing to do for equal ranges. */
8348 ;
8349 else if ((*vr0type == VR_RANGE
8350 && vr1type == VR_ANTI_RANGE)
8351 || (*vr0type == VR_ANTI_RANGE
8352 && vr1type == VR_RANGE))
8353 {
8354 /* For anti-range with range intersection the result is empty. */
8355 *vr0type = VR_UNDEFINED;
8356 *vr0min = NULL_TREE;
8357 *vr0max = NULL_TREE;
8358 }
8359 else
8360 gcc_unreachable ();
8361 }
8362 else if (operand_less_p (*vr0max, vr1min) == 1
8363 || operand_less_p (vr1max, *vr0min) == 1)
3928c098
RG
8364 {
8365 /* [ ] ( ) or ( ) [ ]
8366 If the ranges have an empty intersection, the result of the
8367 intersect operation is the range for intersecting an
a75f5017 8368 anti-range with a range or empty when intersecting two ranges. */
3928c098
RG
8369 if (*vr0type == VR_RANGE
8370 && vr1type == VR_ANTI_RANGE)
8371 ;
8372 else if (*vr0type == VR_ANTI_RANGE
8373 && vr1type == VR_RANGE)
8374 {
8375 *vr0type = vr1type;
8376 *vr0min = vr1min;
8377 *vr0max = vr1max;
8378 }
8379 else if (*vr0type == VR_RANGE
8380 && vr1type == VR_RANGE)
8381 {
8382 *vr0type = VR_UNDEFINED;
8383 *vr0min = NULL_TREE;
8384 *vr0max = NULL_TREE;
8385 }
8386 else if (*vr0type == VR_ANTI_RANGE
8387 && vr1type == VR_ANTI_RANGE)
8388 {
a75f5017
RG
8389 /* If the anti-ranges are adjacent to each other merge them. */
8390 if (TREE_CODE (*vr0max) == INTEGER_CST
8391 && TREE_CODE (vr1min) == INTEGER_CST
8392 && operand_less_p (*vr0max, vr1min) == 1
8393 && integer_onep (int_const_binop (MINUS_EXPR,
8394 vr1min, *vr0max)))
8395 *vr0max = vr1max;
8396 else if (TREE_CODE (vr1max) == INTEGER_CST
8397 && TREE_CODE (*vr0min) == INTEGER_CST
8398 && operand_less_p (vr1max, *vr0min) == 1
8399 && integer_onep (int_const_binop (MINUS_EXPR,
8400 *vr0min, vr1max)))
8401 *vr0min = vr1min;
8402 /* Else arbitrarily take VR0. */
3928c098
RG
8403 }
8404 }
105b7208
RG
8405 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
8406 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
3928c098 8407 {
105b7208
RG
8408 /* [ ( ) ] or [( ) ] or [ ( )] */
8409 if (*vr0type == VR_RANGE
8410 && vr1type == VR_RANGE)
3928c098 8411 {
105b7208 8412 /* If both are ranges the result is the inner one. */
3928c098
RG
8413 *vr0type = vr1type;
8414 *vr0min = vr1min;
8415 *vr0max = vr1max;
8416 }
105b7208
RG
8417 else if (*vr0type == VR_RANGE
8418 && vr1type == VR_ANTI_RANGE)
8419 {
8420 /* Choose the right gap if the left one is empty. */
8421 if (mineq)
8422 {
8423 if (TREE_CODE (vr1max) == INTEGER_CST)
807e902e
KZ
8424 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8425 build_int_cst (TREE_TYPE (vr1max), 1));
105b7208
RG
8426 else
8427 *vr0min = vr1max;
8428 }
8429 /* Choose the left gap if the right one is empty. */
8430 else if (maxeq)
8431 {
8432 if (TREE_CODE (vr1min) == INTEGER_CST)
8433 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
807e902e 8434 build_int_cst (TREE_TYPE (vr1min), 1));
105b7208
RG
8435 else
8436 *vr0max = vr1min;
8437 }
8438 /* Choose the anti-range if the range is effectively varying. */
8439 else if (vrp_val_is_min (*vr0min)
8440 && vrp_val_is_max (*vr0max))
8441 {
8442 *vr0type = vr1type;
8443 *vr0min = vr1min;
8444 *vr0max = vr1max;
8445 }
8446 /* Else choose the range. */
8447 }
3928c098
RG
8448 else if (*vr0type == VR_ANTI_RANGE
8449 && vr1type == VR_ANTI_RANGE)
8450 /* If both are anti-ranges the result is the outer one. */
8451 ;
8452 else if (*vr0type == VR_ANTI_RANGE
8453 && vr1type == VR_RANGE)
8454 {
8455 /* The intersection is empty. */
8456 *vr0type = VR_UNDEFINED;
8457 *vr0min = NULL_TREE;
8458 *vr0max = NULL_TREE;
8459 }
8460 else
8461 gcc_unreachable ();
8462 }
105b7208
RG
8463 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
8464 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
3928c098 8465 {
105b7208
RG
8466 /* ( [ ] ) or ([ ] ) or ( [ ]) */
8467 if (*vr0type == VR_RANGE
8468 && vr1type == VR_RANGE)
8469 /* Choose the inner range. */
3928c098 8470 ;
105b7208
RG
8471 else if (*vr0type == VR_ANTI_RANGE
8472 && vr1type == VR_RANGE)
8473 {
8474 /* Choose the right gap if the left is empty. */
8475 if (mineq)
8476 {
8477 *vr0type = VR_RANGE;
8478 if (TREE_CODE (*vr0max) == INTEGER_CST)
8479 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
807e902e 8480 build_int_cst (TREE_TYPE (*vr0max), 1));
105b7208
RG
8481 else
8482 *vr0min = *vr0max;
8483 *vr0max = vr1max;
8484 }
8485 /* Choose the left gap if the right is empty. */
8486 else if (maxeq)
8487 {
8488 *vr0type = VR_RANGE;
8489 if (TREE_CODE (*vr0min) == INTEGER_CST)
8490 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
807e902e 8491 build_int_cst (TREE_TYPE (*vr0min), 1));
105b7208
RG
8492 else
8493 *vr0max = *vr0min;
8494 *vr0min = vr1min;
8495 }
8496 /* Choose the anti-range if the range is effectively varying. */
8497 else if (vrp_val_is_min (vr1min)
8498 && vrp_val_is_max (vr1max))
8499 ;
8500 /* Else choose the range. */
8501 else
8502 {
8503 *vr0type = vr1type;
8504 *vr0min = vr1min;
8505 *vr0max = vr1max;
8506 }
8507 }
3928c098
RG
8508 else if (*vr0type == VR_ANTI_RANGE
8509 && vr1type == VR_ANTI_RANGE)
8510 {
8511 /* If both are anti-ranges the result is the outer one. */
8512 *vr0type = vr1type;
8513 *vr0min = vr1min;
8514 *vr0max = vr1max;
8515 }
8516 else if (vr1type == VR_ANTI_RANGE
8517 && *vr0type == VR_RANGE)
8518 {
8519 /* The intersection is empty. */
8520 *vr0type = VR_UNDEFINED;
8521 *vr0min = NULL_TREE;
8522 *vr0max = NULL_TREE;
8523 }
8524 else
8525 gcc_unreachable ();
8526 }
8527 else if ((operand_less_p (vr1min, *vr0max) == 1
8528 || operand_equal_p (vr1min, *vr0max, 0))
105b7208 8529 && operand_less_p (*vr0min, vr1min) == 1)
3928c098 8530 {
105b7208 8531 /* [ ( ] ) or [ ]( ) */
3928c098
RG
8532 if (*vr0type == VR_ANTI_RANGE
8533 && vr1type == VR_ANTI_RANGE)
8534 *vr0max = vr1max;
8535 else if (*vr0type == VR_RANGE
8536 && vr1type == VR_RANGE)
8537 *vr0min = vr1min;
8538 else if (*vr0type == VR_RANGE
8539 && vr1type == VR_ANTI_RANGE)
8540 {
8541 if (TREE_CODE (vr1min) == INTEGER_CST)
8542 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
807e902e 8543 build_int_cst (TREE_TYPE (vr1min), 1));
3928c098
RG
8544 else
8545 *vr0max = vr1min;
8546 }
8547 else if (*vr0type == VR_ANTI_RANGE
8548 && vr1type == VR_RANGE)
8549 {
8550 *vr0type = VR_RANGE;
8551 if (TREE_CODE (*vr0max) == INTEGER_CST)
8552 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
807e902e 8553 build_int_cst (TREE_TYPE (*vr0max), 1));
3928c098
RG
8554 else
8555 *vr0min = *vr0max;
8556 *vr0max = vr1max;
8557 }
8558 else
8559 gcc_unreachable ();
8560 }
8561 else if ((operand_less_p (*vr0min, vr1max) == 1
8562 || operand_equal_p (*vr0min, vr1max, 0))
105b7208 8563 && operand_less_p (vr1min, *vr0min) == 1)
3928c098 8564 {
105b7208 8565 /* ( [ ) ] or ( )[ ] */
3928c098
RG
8566 if (*vr0type == VR_ANTI_RANGE
8567 && vr1type == VR_ANTI_RANGE)
8568 *vr0min = vr1min;
8569 else if (*vr0type == VR_RANGE
8570 && vr1type == VR_RANGE)
8571 *vr0max = vr1max;
8572 else if (*vr0type == VR_RANGE
8573 && vr1type == VR_ANTI_RANGE)
8574 {
8575 if (TREE_CODE (vr1max) == INTEGER_CST)
8576 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
807e902e 8577 build_int_cst (TREE_TYPE (vr1max), 1));
3928c098
RG
8578 else
8579 *vr0min = vr1max;
8580 }
8581 else if (*vr0type == VR_ANTI_RANGE
8582 && vr1type == VR_RANGE)
8583 {
8584 *vr0type = VR_RANGE;
8585 if (TREE_CODE (*vr0min) == INTEGER_CST)
8586 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
807e902e 8587 build_int_cst (TREE_TYPE (*vr0min), 1));
3928c098
RG
8588 else
8589 *vr0max = *vr0min;
8590 *vr0min = vr1min;
8591 }
8592 else
8593 gcc_unreachable ();
8594 }
8595
8596 /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
8597 result for the intersection. That's always a conservatively
68ad1df5
RB
8598 correct estimate unless VR1 is a constant singleton range
8599 in which case we choose that. */
8600 if (vr1type == VR_RANGE
8601 && is_gimple_min_invariant (vr1min)
8602 && vrp_operand_equal_p (vr1min, vr1max))
8603 {
8604 *vr0type = vr1type;
8605 *vr0min = vr1min;
8606 *vr0max = vr1max;
8607 }
3928c098
RG
8608
8609 return;
8610}
8611
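/* Illustrative sketch (not GCC code; helper name invented): what the
   VR_RANGE / VR_RANGE cases of intersect_ranges compute for constant
   bounds.  Returns 0 for an empty intersection, which corresponds to
   VR_UNDEFINED above.  */

static int
toy_intersect (long min0, long max0, long min1, long max1,
	       long *lo, long *hi)
{
  *lo = min0 > min1 ? min0 : min1;   /* larger of the two minima */
  *hi = max0 < max1 ? max0 : max1;   /* smaller of the two maxima */
  return *lo <= *hi;                 /* empty when the bounds cross */
}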
8612
8613/* Intersect the two value-ranges *VR0 and *VR1 and store the result
8614 in *VR0. This may not be the smallest possible such range. */
8615
8616static void
526ceb68 8617vrp_intersect_ranges_1 (value_range *vr0, value_range *vr1)
3928c098 8618{
526ceb68 8619 value_range saved;
3928c098
RG
8620
8621 /* If either range is VR_VARYING the other one wins. */
8622 if (vr1->type == VR_VARYING)
8623 return;
8624 if (vr0->type == VR_VARYING)
8625 {
8626 copy_value_range (vr0, vr1);
8627 return;
8628 }
8629
8630 /* When either range is VR_UNDEFINED the resulting range is
8631 VR_UNDEFINED, too. */
8632 if (vr0->type == VR_UNDEFINED)
8633 return;
8634 if (vr1->type == VR_UNDEFINED)
8635 {
8636 set_value_range_to_undefined (vr0);
8637 return;
8638 }
8639
8640 /* Save the original vr0 so we can return it as the conservative intersection
8641 result when our worker turns things to varying. */
8642 saved = *vr0;
8643 intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
8644 vr1->type, vr1->min, vr1->max);
8645 /* Make sure to canonicalize the result though as the inversion of a
8646 VR_RANGE can still be a VR_RANGE. */
8647 set_and_canonicalize_value_range (vr0, vr0->type,
8648 vr0->min, vr0->max, vr0->equiv);
8649 /* If that failed, use the saved original VR0. */
8650 if (vr0->type == VR_VARYING)
8651 {
8652 *vr0 = saved;
8653 return;
8654 }
8655 /* If the result is VR_UNDEFINED there is no need to mess with
8656 the equivalencies. */
8657 if (vr0->type == VR_UNDEFINED)
8658 return;
8659
8660 /* The resulting set of equivalences for range intersection is the union of
8661 the two sets. */
8662 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
8663 bitmap_ior_into (vr0->equiv, vr1->equiv);
8664 else if (vr1->equiv && !vr0->equiv)
496f8eea
KV
8665 {
8666 vr0->equiv = BITMAP_ALLOC (&vrp_equiv_obstack);
8667 bitmap_copy (vr0->equiv, vr1->equiv);
8668 }
3928c098 8669}
0bca51f0 8670
f90aa46c 8671void
526ceb68 8672vrp_intersect_ranges (value_range *vr0, value_range *vr1)
105b7208
RG
8673{
8674 if (dump_file && (dump_flags & TDF_DETAILS))
8675 {
8676 fprintf (dump_file, "Intersecting\n ");
8677 dump_value_range (dump_file, vr0);
8678 fprintf (dump_file, "\nand\n ");
8679 dump_value_range (dump_file, vr1);
8680 fprintf (dump_file, "\n");
8681 }
8682 vrp_intersect_ranges_1 (vr0, vr1);
8683 if (dump_file && (dump_flags & TDF_DETAILS))
8684 {
8685 fprintf (dump_file, "to\n ");
8686 dump_value_range (dump_file, vr0);
8687 fprintf (dump_file, "\n");
8688 }
8689}
8690
0bca51f0 8691/* Meet operation for value ranges. Given two value ranges VR0 and
32c8bce7
DS
8692 VR1, store in VR0 a range that contains both VR0 and VR1. This
8693 may not be the smallest possible such range. */
0bca51f0
DN
8694
8695static void
f90aa46c 8696vrp_meet_1 (value_range *vr0, const value_range *vr1)
0bca51f0 8697{
526ceb68 8698 value_range saved;
b54e19c2 8699
0bca51f0
DN
8700 if (vr0->type == VR_UNDEFINED)
8701 {
c25a0c60 8702 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
0bca51f0
DN
8703 return;
8704 }
8705
8706 if (vr1->type == VR_UNDEFINED)
8707 {
c25a0c60 8708 /* VR0 already has the resulting range. */
0bca51f0
DN
8709 return;
8710 }
8711
8712 if (vr0->type == VR_VARYING)
8713 {
8714 /* Nothing to do. VR0 already has the resulting range. */
8715 return;
8716 }
8717
8718 if (vr1->type == VR_VARYING)
0bca51f0 8719 {
b565d777 8720 set_value_range_to_varying (vr0);
0bca51f0
DN
8721 return;
8722 }
8723
b54e19c2
RG
8724 saved = *vr0;
8725 union_ranges (&vr0->type, &vr0->min, &vr0->max,
8726 vr1->type, vr1->min, vr1->max);
8727 if (vr0->type == VR_VARYING)
0bca51f0 8728 {
b54e19c2
RG
8729 /* Failed to find an efficient meet. Before giving up and setting
8730 the result to VARYING, see if we can at least derive a useful
8731 anti-range. FIXME, all this nonsense about distinguishing
8732 anti-ranges from ranges is necessary because of the odd
8733 semantics of range_includes_zero_p and friends. */
e8f808b3
RG
8734 if (((saved.type == VR_RANGE
8735 && range_includes_zero_p (saved.min, saved.max) == 0)
8736 || (saved.type == VR_ANTI_RANGE
8737 && range_includes_zero_p (saved.min, saved.max) == 1))
8738 && ((vr1->type == VR_RANGE
8739 && range_includes_zero_p (vr1->min, vr1->max) == 0)
8740 || (vr1->type == VR_ANTI_RANGE
8741 && range_includes_zero_p (vr1->min, vr1->max) == 1)))
b54e19c2
RG
8742 {
8743 set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
8744
8745 /* Since this meet operation did not result from the meeting of
8746 two equivalent names, VR0 cannot have any equivalences. */
8747 if (vr0->equiv)
8748 bitmap_clear (vr0->equiv);
8749 return;
cf35667e 8750 }
227858d1 8751
b54e19c2
RG
8752 set_value_range_to_varying (vr0);
8753 return;
0bca51f0 8754 }
b54e19c2
RG
8755 set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
8756 vr0->equiv);
8757 if (vr0->type == VR_VARYING)
8758 return;
227858d1 8759
cf35667e 8760 /* The resulting set of equivalences is always the intersection of
b54e19c2 8761 the two sets. */
cf35667e
RG
8762 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
8763 bitmap_and_into (vr0->equiv, vr1->equiv);
8764 else if (vr0->equiv && !vr1->equiv)
8765 bitmap_clear (vr0->equiv);
b54e19c2 8766}
cf35667e 8767
f90aa46c
KV
8768void
8769vrp_meet (value_range *vr0, const value_range *vr1)
b54e19c2
RG
8770{
8771 if (dump_file && (dump_flags & TDF_DETAILS))
8772 {
8773 fprintf (dump_file, "Meeting\n ");
8774 dump_value_range (dump_file, vr0);
8775 fprintf (dump_file, "\nand\n ");
8776 dump_value_range (dump_file, vr1);
8777 fprintf (dump_file, "\n");
8778 }
8779 vrp_meet_1 (vr0, vr1);
8780 if (dump_file && (dump_flags & TDF_DETAILS))
8781 {
8782 fprintf (dump_file, "to\n ");
8783 dump_value_range (dump_file, vr0);
8784 fprintf (dump_file, "\n");
e82d7e60 8785 }
0bca51f0
DN
8786}
8787
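/* Illustrative sketch (not GCC code; the enum is an invented stand-in
   for value_range_type): the lattice rules vrp_meet_1 applies before it
   calls union_ranges.  VR_UNDEFINED acts as the identity element of the
   meet and VR_VARYING as the absorbing element.  */

enum toy_kind { TOY_UNDEFINED, TOY_RANGE, TOY_VARYING };

static enum toy_kind
toy_meet_kind (enum toy_kind a, enum toy_kind b)
{
  if (a == TOY_UNDEFINED)
    return b;                        /* identity */
  if (b == TOY_UNDEFINED)
    return a;
  if (a == TOY_VARYING || b == TOY_VARYING)
    return TOY_VARYING;              /* absorbing */
  return TOY_RANGE;                  /* otherwise union the two ranges */
}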
227858d1 8788
0bca51f0
DN
8789/* Visit all arguments for PHI node PHI that flow through executable
8790 edges. If a valid value range can be derived from all the incoming
bb9d2f4d 8791 value ranges, set a new range in VR_RESULT. */
0bca51f0 8792
bb9d2f4d
KV
8793static void
8794extract_range_from_phi_node (gphi *phi, value_range *vr_result)
0bca51f0 8795{
726a989a 8796 size_t i;
0bca51f0 8797 tree lhs = PHI_RESULT (phi);
526ceb68 8798 value_range *lhs_vr = get_value_range (lhs);
0d5a9e78 8799 bool first = true;
fc6827fe 8800 int edges, old_edges;
b09bae68 8801 struct loop *l;
227858d1 8802
0bca51f0
DN
8803 if (dump_file && (dump_flags & TDF_DETAILS))
8804 {
8805 fprintf (dump_file, "\nVisiting PHI node: ");
726a989a 8806 print_gimple_stmt (dump_file, phi, 0, dump_flags);
0bca51f0
DN
8807 }
8808
661d6efd 8809 bool may_simulate_backedge_again = false;
fc6827fe 8810 edges = 0;
726a989a 8811 for (i = 0; i < gimple_phi_num_args (phi); i++)
0bca51f0 8812 {
726a989a 8813 edge e = gimple_phi_arg_edge (phi, i);
0bca51f0
DN
8814
8815 if (dump_file && (dump_flags & TDF_DETAILS))
8816 {
8817 fprintf (dump_file,
6e5799b9 8818 " Argument #%d (%d -> %d %sexecutable)\n",
726a989a 8819 (int) i, e->src->index, e->dest->index,
0bca51f0
DN
8820 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
8821 }
8822
8823 if (e->flags & EDGE_EXECUTABLE)
8824 {
8825 tree arg = PHI_ARG_DEF (phi, i);
526ceb68 8826 value_range vr_arg;
0bca51f0 8827
fc6827fe
ILT
8828 ++edges;
8829
0bca51f0 8830 if (TREE_CODE (arg) == SSA_NAME)
31ab1cc9 8831 {
2650da88
RB
8832 /* See if we are eventually going to change one of the args. */
8833 gimple *def_stmt = SSA_NAME_DEF_STMT (arg);
8834 if (! gimple_nop_p (def_stmt)
661d6efd
RB
8835 && prop_simulate_again_p (def_stmt)
8836 && e->flags & EDGE_DFS_BACK)
8837 may_simulate_backedge_again = true;
2650da88 8838
31ab1cc9 8839 vr_arg = *(get_value_range (arg));
c25a0c60
RB
8840 /* Do not allow equivalences or symbolic ranges to leak in from
8841 backedges. That creates invalid equivalencies.
8842 See PR53465 and PR54767. */
6e5799b9 8843 if (e->flags & EDGE_DFS_BACK)
c25a0c60 8844 {
6e5799b9
RB
8845 if (vr_arg.type == VR_RANGE
8846 || vr_arg.type == VR_ANTI_RANGE)
c25a0c60 8847 {
6e5799b9
RB
8848 vr_arg.equiv = NULL;
8849 if (symbolic_range_p (&vr_arg))
8850 {
8851 vr_arg.type = VR_VARYING;
8852 vr_arg.min = NULL_TREE;
8853 vr_arg.max = NULL_TREE;
8854 }
8855 }
8856 }
8857 else
8858 {
8859 /* If the non-backedge arguments range is VR_VARYING then
8860 we can still try recording a simple equivalence. */
8861 if (vr_arg.type == VR_VARYING)
8862 {
8863 vr_arg.type = VR_RANGE;
8864 vr_arg.min = arg;
8865 vr_arg.max = arg;
8866 vr_arg.equiv = NULL;
c25a0c60
RB
8867 }
8868 }
31ab1cc9 8869 }
0bca51f0
DN
8870 else
8871 {
635bfae0 8872 if (TREE_OVERFLOW_P (arg))
3f5c390d 8873 arg = drop_tree_overflow (arg);
8cf781f0 8874
0bca51f0
DN
8875 vr_arg.type = VR_RANGE;
8876 vr_arg.min = arg;
8877 vr_arg.max = arg;
227858d1 8878 vr_arg.equiv = NULL;
0bca51f0
DN
8879 }
8880
8881 if (dump_file && (dump_flags & TDF_DETAILS))
8882 {
8883 fprintf (dump_file, "\t");
8884 print_generic_expr (dump_file, arg, dump_flags);
6e5799b9 8885 fprintf (dump_file, ": ");
0bca51f0
DN
8886 dump_value_range (dump_file, &vr_arg);
8887 fprintf (dump_file, "\n");
8888 }
8889
0d5a9e78 8890 if (first)
bb9d2f4d 8891 copy_value_range (vr_result, &vr_arg);
0d5a9e78 8892 else
bb9d2f4d 8893 vrp_meet (vr_result, &vr_arg);
0d5a9e78 8894 first = false;
0bca51f0 8895
bb9d2f4d 8896 if (vr_result->type == VR_VARYING)
0bca51f0
DN
8897 break;
8898 }
8899 }
8900
bb9d2f4d 8901 if (vr_result->type == VR_VARYING)
227858d1 8902 goto varying;
bb9d2f4d 8903 else if (vr_result->type == VR_UNDEFINED)
a9b332d4 8904 goto update_range;
0bca51f0 8905
fc6827fe
ILT
8906 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
8907 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
8908
0bca51f0
DN
8909 /* To prevent infinite iterations in the algorithm, derive ranges
8910 when the new value is slightly bigger or smaller than the
fc6827fe
ILT
8911 previous one. We don't do this if we have seen a new executable
8912 edge; this helps us avoid an overflow infinity for conditionals
2f33158f 8913 which are not in a loop. If the old value-range was VR_UNDEFINED
2650da88 8914 use the updated range and iterate one more time. If we will not
661d6efd 8915 simulate this PHI again via the backedge allow us to iterate. */
e3488283 8916 if (edges > 0
7bec30e1 8917 && gimple_phi_num_args (phi) > 1
2f33158f 8918 && edges == old_edges
2650da88 8919 && lhs_vr->type != VR_UNDEFINED
661d6efd 8920 && may_simulate_backedge_again)
e3488283 8921 {
a896172d
RB
8922 /* Compare old and new ranges, fall back to varying if the
8923 values are not comparable. */
bb9d2f4d 8924 int cmp_min = compare_values (lhs_vr->min, vr_result->min);
a896172d
RB
8925 if (cmp_min == -2)
8926 goto varying;
bb9d2f4d 8927 int cmp_max = compare_values (lhs_vr->max, vr_result->max);
a896172d
RB
8928 if (cmp_max == -2)
8929 goto varying;
e3488283
RG
8930
8931 /* For non VR_RANGE or for pointers fall back to varying if
8932 the range changed. */
bb9d2f4d 8933 if ((lhs_vr->type != VR_RANGE || vr_result->type != VR_RANGE
e3488283
RG
8934 || POINTER_TYPE_P (TREE_TYPE (lhs)))
8935 && (cmp_min != 0 || cmp_max != 0))
8936 goto varying;
8937
026c3cfd 8938 /* If the new minimum is larger than the previous one
771c9501
RB
8939 retain the old value. If the new minimum value is smaller
8940 than the previous one and not -INF go all the way to -INF + 1.
8941 In the first case, to avoid infinite bouncing between different
8942 minimums, and in the other case to avoid iterating millions of
8943 times to reach -INF. Going to -INF + 1 also lets the following
8944 iteration compute whether there will be any overflow, at the
8945 expense of one additional iteration. */
8946 if (cmp_min < 0)
bb9d2f4d 8947 vr_result->min = lhs_vr->min;
771c9501 8948 else if (cmp_min > 0
bb9d2f4d
KV
8949 && !vrp_val_is_min (vr_result->min))
8950 vr_result->min
771c9501 8951 = int_const_binop (PLUS_EXPR,
bb9d2f4d
KV
8952 vrp_val_min (TREE_TYPE (vr_result->min)),
8953 build_int_cst (TREE_TYPE (vr_result->min), 1));
771c9501
RB
8954
8955 /* Similarly for the maximum value. */
8956 if (cmp_max > 0)
bb9d2f4d 8957 vr_result->max = lhs_vr->max;
771c9501 8958 else if (cmp_max < 0
bb9d2f4d
KV
8959 && !vrp_val_is_max (vr_result->max))
8960 vr_result->max
771c9501 8961 = int_const_binop (MINUS_EXPR,
bb9d2f4d
KV
8962 vrp_val_max (TREE_TYPE (vr_result->min)),
8963 build_int_cst (TREE_TYPE (vr_result->min), 1));
e3488283
RG
8964
8965 /* If we dropped either bound to +-INF then if this is a loop
8966 PHI node SCEV may know more about its value-range. */
35e2b6e1 8967 if (cmp_min > 0 || cmp_min < 0
e3488283 8968 || cmp_max < 0 || cmp_max > 0)
35e2b6e1
RB
8969 goto scev_check;
8970
8971 goto infinite_check;
0bca51f0
DN
8972 }
8973
2650da88
RB
8974 goto update_range;
8975
8976varying:
bb9d2f4d 8977 set_value_range_to_varying (vr_result);
2650da88
RB
8978
8979scev_check:
8980 /* If this is a loop PHI node SCEV may know more about its value-range.
8981 scev_check can be reached from two paths, one is a fall through from above
8982 "varying" label, the other is direct goto from code block which tries to
8983 avoid infinite simulation. */
8984 if ((l = loop_containing_stmt (phi))
8985 && l->header == gimple_bb (phi))
bb9d2f4d 8986 adjust_range_with_scev (vr_result, l, phi, lhs);
2650da88
RB
8987
8988infinite_check:
8989 /* If we will end up with a (-INF, +INF) range, set it to
8990 VARYING. Same if the previous max value was invalid for
8991 the type and we end up with vr_result.min > vr_result.max. */
bb9d2f4d
KV
8992 if ((vr_result->type == VR_RANGE || vr_result->type == VR_ANTI_RANGE)
8993 && !((vrp_val_is_max (vr_result->max) && vrp_val_is_min (vr_result->min))
8994 || compare_values (vr_result->min, vr_result->max) > 0))
2650da88
RB
8995 ;
8996 else
bb9d2f4d 8997 set_value_range_to_varying (vr_result);
2650da88 8998
0bca51f0
DN
8999 /* If the new range is different than the previous value, keep
9000 iterating. */
a9b332d4 9001update_range:
bb9d2f4d
KV
9002 return;
9003}
9004
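/* Illustrative sketch (not GCC code; the 32-bit bound is assumed for the
   example): the iteration damping used above, reduced to the minimum
   bound.  If the recomputed minimum keeps dropping, it is sent straight
   to TYPE_MIN + 1 instead of stepping down once per propagation pass.  */

static long
toy_damp_min (long old_min, long new_min)
{
  const long type_min = -2147483647L - 1;
  if (new_min > old_min)
    return old_min;                  /* never let the bound bounce upward */
  if (new_min < old_min && new_min != type_min)
    return type_min + 1;             /* jump almost to -INF in one step */
  return new_min;
}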
9005/* Visit all arguments for PHI node PHI that flow through executable
9006 edges. If a valid value range can be derived from all the incoming
9007 value ranges, set a new range for the LHS of PHI. */
9008
9009static enum ssa_prop_result
9010vrp_visit_phi_node (gphi *phi)
9011{
9012 tree lhs = PHI_RESULT (phi);
9013 value_range vr_result = VR_INITIALIZER;
9014 extract_range_from_phi_node (phi, &vr_result);
227858d1 9015 if (update_value_range (lhs, &vr_result))
1936a7d4
RG
9016 {
9017 if (dump_file && (dump_flags & TDF_DETAILS))
9018 {
9019 fprintf (dump_file, "Found new range for ");
9020 print_generic_expr (dump_file, lhs, 0);
9021 fprintf (dump_file, ": ");
9022 dump_value_range (dump_file, &vr_result);
6e5799b9 9023 fprintf (dump_file, "\n");
1936a7d4
RG
9024 }
9025
9c3cb360
JJ
9026 if (vr_result.type == VR_VARYING)
9027 return SSA_PROP_VARYING;
9028
1936a7d4
RG
9029 return SSA_PROP_INTERESTING;
9030 }
0bca51f0
DN
9031
9032 /* Nothing changed, don't add outgoing edges. */
9033 return SSA_PROP_NOT_INTERESTING;
9034}
9035
30821654
PB
9036/* Simplify boolean operations if the source is known
9037 to be already a boolean. */
9038static bool
355fe088 9039simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
30821654
PB
9040{
9041 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
7e29ba60 9042 tree lhs, op0, op1;
30821654
PB
9043 bool need_conversion;
9044
98958241
KT
9045 /* We handle only !=/== case here. */
9046 gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
9047
30821654 9048 op0 = gimple_assign_rhs1 (stmt);
7e29ba60
RG
9049 if (!op_with_boolean_value_range_p (op0))
9050 return false;
30821654 9051
98958241 9052 op1 = gimple_assign_rhs2 (stmt);
7e29ba60
RG
9053 if (!op_with_boolean_value_range_p (op1))
9054 return false;
98958241 9055
7e29ba60
RG
9056 /* Reduce number of cases to handle to NE_EXPR. As there is no
9057 BIT_XNOR_EXPR we cannot replace A == B with a single statement. */
9058 if (rhs_code == EQ_EXPR)
30821654 9059 {
7e29ba60 9060 if (TREE_CODE (op1) == INTEGER_CST)
807e902e
KZ
9061 op1 = int_const_binop (BIT_XOR_EXPR, op1,
9062 build_int_cst (TREE_TYPE (op1), 1));
30821654 9063 else
7e29ba60 9064 return false;
30821654
PB
9065 }
9066
7e29ba60
RG
9067 lhs = gimple_assign_lhs (stmt);
9068 need_conversion
9069 = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
30821654 9070
7e29ba60 9071 /* Make sure to not sign-extend a 1-bit 1 when converting the result. */
e61451e8
RG
9072 if (need_conversion
9073 && !TYPE_UNSIGNED (TREE_TYPE (op0))
7e29ba60
RG
9074 && TYPE_PRECISION (TREE_TYPE (op0)) == 1
9075 && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
30821654
PB
9076 return false;
9077
7e29ba60
RG
9078 /* For A != 0 we can substitute A itself. */
9079 if (integer_zerop (op1))
9080 gimple_assign_set_rhs_with_ops (gsi,
9081 need_conversion
00d66391 9082 ? NOP_EXPR : TREE_CODE (op0), op0);
7e29ba60
RG
9083 /* For A != B we substitute A ^ B. Either with conversion. */
9084 else if (need_conversion)
9085 {
b731b390 9086 tree tem = make_ssa_name (TREE_TYPE (op0));
538dd0b7 9087 gassign *newop
0d0e4a03 9088 = gimple_build_assign (tem, BIT_XOR_EXPR, op0, op1);
7e29ba60 9089 gsi_insert_before (gsi, newop, GSI_SAME_STMT);
0139ba93
MG
9090 if (INTEGRAL_TYPE_P (TREE_TYPE (tem))
9091 && TYPE_PRECISION (TREE_TYPE (tem)) > 1)
9092 set_range_info (tem, VR_RANGE,
9093 wi::zero (TYPE_PRECISION (TREE_TYPE (tem))),
9094 wi::one (TYPE_PRECISION (TREE_TYPE (tem))));
00d66391 9095 gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem);
7e29ba60
RG
9096 }
9097 /* Or without. */
9098 else
9099 gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
30821654 9100 update_stmt (gsi_stmt (*gsi));
d7f336f8 9101 fold_stmt (gsi, follow_single_use_edges);
7e29ba60 9102
30821654
PB
9103 return true;
9104}
9105
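/* Illustrative sketch (not GCC code; helper name invented): the identity
   behind the rewrite above for operands known to be 0 or 1.  A != B has
   the same truth value as A ^ B; the EQ_EXPR case is first reduced to
   the NE_EXPR form by XOR-ing the constant operand with 1.  */

static int
toy_bool_ne (int a, int b)
{
  /* Valid only when a and b are each 0 or 1, which is what
     op_with_boolean_value_range_p establishes above.  */
  return a ^ b;                      /* same truth value as a != b */
}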
1a557723
JL
9106/* Simplify a division or modulo operator to a right shift or
9107 bitwise and if the first operand is unsigned or is greater
f51286f2
JJ
9108 than zero and the second operand is an exact power of two.
9109 For TRUNC_MOD_EXPR op0 % op1 with constant op1, optimize it
9110 into just op0 if op0's range is known to be a subset of
9111 [-op1 + 1, op1 - 1] for signed and [0, op1 - 1] for unsigned
9112 modulo. */
a513fe88 9113
30821654 9114static bool
20b8d734 9115simplify_div_or_mod_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
a513fe88 9116{
726a989a 9117 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
1a557723 9118 tree val = NULL;
726a989a
RB
9119 tree op0 = gimple_assign_rhs1 (stmt);
9120 tree op1 = gimple_assign_rhs2 (stmt);
526ceb68 9121 value_range *vr = get_value_range (op0);
f51286f2
JJ
9122
9123 if (rhs_code == TRUNC_MOD_EXPR
9124 && TREE_CODE (op1) == INTEGER_CST
9125 && tree_int_cst_sgn (op1) == 1
9126 && range_int_cst_p (vr)
9127 && tree_int_cst_lt (vr->max, op1))
9128 {
9129 if (TYPE_UNSIGNED (TREE_TYPE (op0))
9130 || tree_int_cst_sgn (vr->min) >= 0
9131 || tree_int_cst_lt (fold_unary (NEGATE_EXPR, TREE_TYPE (op1), op1),
9132 vr->min))
9133 {
9134 /* If op0 already has the range op0 % op1 has,
9135 then TRUNC_MOD_EXPR won't change anything. */
7dc2f5f1 9136 gimple_assign_set_rhs_from_tree (gsi, op0);
f51286f2
JJ
9137 return true;
9138 }
9139 }
9140
9141 if (!integer_pow2p (op1))
1a557723 9142 {
20b8d734
JJ
9143 /* X % -Y can be only optimized into X % Y either if
9144 X is not INT_MIN, or Y is not -1. Fold it now, as after
9145 remove_range_assertions the range info might be not available
9146 anymore. */
9147 if (rhs_code == TRUNC_MOD_EXPR
9148 && fold_stmt (gsi, follow_single_use_edges))
9149 return true;
9150 return false;
1a557723 9151 }
20b8d734
JJ
9152
9153 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
9154 val = integer_one_node;
1a557723
JL
9155 else
9156 {
12df8a7e
ILT
9157 bool sop = false;
9158
737b0891 9159 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
0c948c27
ILT
9160
9161 if (val
9162 && sop
9163 && integer_onep (val)
9164 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9165 {
726a989a 9166 location_t location;
0c948c27 9167
726a989a
RB
9168 if (!gimple_has_location (stmt))
9169 location = input_location;
0c948c27 9170 else
726a989a 9171 location = gimple_location (stmt);
fab922b1
MLI
9172 warning_at (location, OPT_Wstrict_overflow,
9173 "assuming signed overflow does not occur when "
9174 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
0c948c27 9175 }
1a557723
JL
9176 }
9177
9178 if (val && integer_onep (val))
a513fe88 9179 {
1a557723 9180 tree t;
a513fe88 9181
1a557723
JL
9182 if (rhs_code == TRUNC_DIV_EXPR)
9183 {
45a2c477 9184 t = build_int_cst (integer_type_node, tree_log2 (op1));
726a989a
RB
9185 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
9186 gimple_assign_set_rhs1 (stmt, op0);
9187 gimple_assign_set_rhs2 (stmt, t);
1a557723
JL
9188 }
9189 else
a513fe88 9190 {
1a557723 9191 t = build_int_cst (TREE_TYPE (op1), 1);
d35936ab 9192 t = int_const_binop (MINUS_EXPR, op1, t);
1a557723 9193 t = fold_convert (TREE_TYPE (op0), t);
726a989a
RB
9194
9195 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
9196 gimple_assign_set_rhs1 (stmt, op0);
9197 gimple_assign_set_rhs2 (stmt, t);
1a557723
JL
9198 }
9199
1a557723 9200 update_stmt (stmt);
d7f336f8 9201 fold_stmt (gsi, follow_single_use_edges);
30821654 9202 return true;
1a557723 9203 }
30821654
PB
9204
9205 return false;
1a557723 9206}
a513fe88 9207
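/* Illustrative sketch (not GCC code; helper names invented): the strength
   reduction the function above performs once VRP has proved the first
   operand non-negative (or its type unsigned) and the divisor an exact
   power of two.  */

static unsigned long
toy_div_pow2 (unsigned long x, unsigned shift)
{
  return x >> shift;                 /* equals x / (1UL << shift) */
}

static unsigned long
toy_mod_pow2 (unsigned long x, unsigned long pow2)
{
  return x & (pow2 - 1);             /* equals x % pow2 */
}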
da7db2ce
NS
9208/* Simplify a min or max if the ranges of the two operands are
9209 disjoint. Return true if we do simplify. */
9210
9211static bool
7dc2f5f1 9212simplify_min_or_max_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
da7db2ce
NS
9213{
9214 tree op0 = gimple_assign_rhs1 (stmt);
9215 tree op1 = gimple_assign_rhs2 (stmt);
9216 bool sop = false;
9217 tree val;
9218
9219 val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
9220 (LE_EXPR, op0, op1, &sop));
9221 if (!val)
9222 {
9223 sop = false;
9224 val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
9225 (LT_EXPR, op0, op1, &sop));
9226 }
9227
9228 if (val)
9229 {
9230 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9231 {
9232 location_t location;
9233
9234 if (!gimple_has_location (stmt))
9235 location = input_location;
9236 else
9237 location = gimple_location (stmt);
9238 warning_at (location, OPT_Wstrict_overflow,
9239 "assuming signed overflow does not occur when "
9240 "simplifying %<min/max (X,Y)%> to %<X%> or %<Y%>");
9241 }
9242
9243 /* VAL == TRUE -> OP0 < or <= op1
9244 VAL == FALSE -> OP0 > or >= op1. */
9245 tree res = ((gimple_assign_rhs_code (stmt) == MAX_EXPR)
9246 == integer_zerop (val)) ? op0 : op1;
7dc2f5f1 9247 gimple_assign_set_rhs_from_tree (gsi, res);
da7db2ce
NS
9248 return true;
9249 }
9250
9251 return false;
9252}
9253
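/* Illustrative sketch (not GCC code; helper name invented): the ordering
   test behind the fold above.  When every value of A is known to lie at
   or below every value of B, MIN_EXPR (a, b) is always A and
   MAX_EXPR (a, b) is always B, so the runtime comparison can be dropped.  */

static int
toy_ranges_ordered (long a_max, long b_min)
{
  /* E.g. A in [0, 9] and B in [10, 20].  */
  return a_max <= b_min;
}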
1a557723
JL
9254/* If the operand to an ABS_EXPR is >= 0, then eliminate the
9255 ABS_EXPR. If the operand is <= 0, then simplify the
9256 ABS_EXPR into a NEGATE_EXPR. */
9257
30821654 9258static bool
7dc2f5f1 9259simplify_abs_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
1a557723 9260{
726a989a 9261 tree op = gimple_assign_rhs1 (stmt);
526ceb68 9262 value_range *vr = get_value_range (op);
1a557723 9263
8299dd5c 9264 if (vr)
1a557723 9265 {
8299dd5c 9266 tree val = NULL;
12df8a7e
ILT
9267 bool sop = false;
9268
9269 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
1a557723
JL
9270 if (!val)
9271 {
8299dd5c
NS
9272 /* The range is neither <= 0 nor > 0. Now see if it is
9273 either < 0 or >= 0. */
12df8a7e 9274 sop = false;
8299dd5c 9275 val = compare_range_with_value (LT_EXPR, vr, integer_zero_node,
12df8a7e 9276 &sop);
1a557723 9277 }
a513fe88 9278
8299dd5c 9279 if (val)
1a557723 9280 {
0c948c27
ILT
9281 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9282 {
726a989a 9283 location_t location;
0c948c27 9284
726a989a
RB
9285 if (!gimple_has_location (stmt))
9286 location = input_location;
0c948c27 9287 else
726a989a 9288 location = gimple_location (stmt);
fab922b1
MLI
9289 warning_at (location, OPT_Wstrict_overflow,
9290 "assuming signed overflow does not occur when "
9291 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
0c948c27
ILT
9292 }
9293
726a989a 9294 gimple_assign_set_rhs1 (stmt, op);
8299dd5c 9295 if (integer_zerop (val))
726a989a 9296 gimple_assign_set_rhs_code (stmt, SSA_NAME);
8299dd5c
NS
9297 else
9298 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
1a557723 9299 update_stmt (stmt);
7dc2f5f1 9300 fold_stmt (gsi, follow_single_use_edges);
30821654 9301 return true;
1a557723
JL
9302 }
9303 }
30821654
PB
9304
9305 return false;
1a557723
JL
9306}
9307
8556f58f
JJ
9308/* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
9309 If all the bits that are being cleared by & are already
9310 known to be zero from VR, or all the bits that are being
9311 set by | are already known to be one from VR, the bit
9312 operation is redundant. */
9313
9314static bool
355fe088 9315simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
8556f58f
JJ
9316{
9317 tree op0 = gimple_assign_rhs1 (stmt);
9318 tree op1 = gimple_assign_rhs2 (stmt);
9319 tree op = NULL_TREE;
526ceb68
TS
9320 value_range vr0 = VR_INITIALIZER;
9321 value_range vr1 = VR_INITIALIZER;
807e902e
KZ
9322 wide_int may_be_nonzero0, may_be_nonzero1;
9323 wide_int must_be_nonzero0, must_be_nonzero1;
9324 wide_int mask;
8556f58f
JJ
9325
9326 if (TREE_CODE (op0) == SSA_NAME)
9327 vr0 = *(get_value_range (op0));
9328 else if (is_gimple_min_invariant (op0))
9329 set_value_range_to_value (&vr0, op0, NULL);
9330 else
9331 return false;
9332
9333 if (TREE_CODE (op1) == SSA_NAME)
9334 vr1 = *(get_value_range (op1));
9335 else if (is_gimple_min_invariant (op1))
9336 set_value_range_to_value (&vr1, op1, NULL);
9337 else
9338 return false;
9339
807e902e
KZ
9340 if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0,
9341 &must_be_nonzero0))
8556f58f 9342 return false;
807e902e
KZ
9343 if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1,
9344 &must_be_nonzero1))
8556f58f
JJ
9345 return false;
9346
9347 switch (gimple_assign_rhs_code (stmt))
9348 {
9349 case BIT_AND_EXPR:
27bcd47c 9350 mask = may_be_nonzero0.and_not (must_be_nonzero1);
807e902e 9351 if (mask == 0)
8556f58f
JJ
9352 {
9353 op = op0;
9354 break;
9355 }
27bcd47c 9356 mask = may_be_nonzero1.and_not (must_be_nonzero0);
807e902e 9357 if (mask == 0)
8556f58f
JJ
9358 {
9359 op = op1;
9360 break;
9361 }
9362 break;
9363 case BIT_IOR_EXPR:
27bcd47c 9364 mask = may_be_nonzero0.and_not (must_be_nonzero1);
807e902e 9365 if (mask == 0)
8556f58f
JJ
9366 {
9367 op = op1;
9368 break;
9369 }
27bcd47c 9370 mask = may_be_nonzero1.and_not (must_be_nonzero0);
807e902e 9371 if (mask == 0)
8556f58f
JJ
9372 {
9373 op = op0;
9374 break;
9375 }
9376 break;
9377 default:
9378 gcc_unreachable ();
9379 }
9380
9381 if (op == NULL_TREE)
9382 return false;
9383
00d66391 9384 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op);
8556f58f
JJ
9385 update_stmt (gsi_stmt (*gsi));
9386 return true;
9387}
9388
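/* Illustrative sketch (not GCC code; names invented): the redundancy
   test above written with plain masks.  may0 plays the role of
   may_be_nonzero0 and must1 the role of must_be_nonzero1.  */

static int
toy_and_is_redundant (unsigned long may0, unsigned long must1)
{
  /* x & y equals x whenever every bit that can possibly be set in x is
     guaranteed to be set in y; the BIT_IOR_EXPR case is symmetric.  */
  return (may0 & ~must1) == 0;
}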
d579f20b
JL
9389/* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
9390 a known value range VR.
9391
9392 If there is one and only one value which will satisfy the
7ac753f9
PP
9393 conditional, then return that value. Else return NULL.
9394
9395 If signed overflow must be undefined for the value to satisfy
9396 the conditional, then set *STRICT_OVERFLOW_P to true. */
d579f20b
JL
9397
9398static tree
9399test_for_singularity (enum tree_code cond_code, tree op0,
526ceb68 9400 tree op1, value_range *vr,
7ac753f9 9401 bool *strict_overflow_p)
d579f20b
JL
9402{
9403 tree min = NULL;
9404 tree max = NULL;
9405
6af801f5
JJ
9406 /* Extract minimum/maximum values which satisfy the conditional as it was
9407 written. */
d579f20b
JL
9408 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
9409 {
12df8a7e
ILT
9410 /* This should not be negative infinity; there is no overflow
9411 here. */
d579f20b
JL
9412 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
9413
9414 max = op1;
12df8a7e 9415 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
d579f20b
JL
9416 {
9417 tree one = build_int_cst (TREE_TYPE (op0), 1);
a5ad7269 9418 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
3fe5bcaf
ILT
9419 if (EXPR_P (max))
9420 TREE_NO_WARNING (max) = 1;
d579f20b
JL
9421 }
9422 }
9423 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
9424 {
12df8a7e
ILT
9425 /* This should not be positive infinity; there is no overflow
9426 here. */
d579f20b
JL
9427 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
9428
9429 min = op1;
12df8a7e 9430 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
d579f20b
JL
9431 {
9432 tree one = build_int_cst (TREE_TYPE (op0), 1);
f9fe7aed 9433 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
3fe5bcaf
ILT
9434 if (EXPR_P (min))
9435 TREE_NO_WARNING (min) = 1;
d579f20b
JL
9436 }
9437 }
9438
9439 /* Now refine the minimum and maximum values using any
9440 value range information we have for op0. */
9441 if (min && max)
9442 {
fbd43827 9443 if (compare_values (vr->min, min) == 1)
d579f20b 9444 min = vr->min;
fbd43827 9445 if (compare_values (vr->max, max) == -1)
d579f20b
JL
9446 max = vr->max;
9447
f9fe7aed
JL
9448 /* If the new min/max values have converged to a single value,
9449 then there is only one value which can satisfy the condition,
9450 return that value. */
9451 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
7ac753f9
PP
9452 {
9453 if ((cond_code == LE_EXPR || cond_code == LT_EXPR)
9454 && is_overflow_infinity (vr->max))
9455 *strict_overflow_p = true;
9456 if ((cond_code == GE_EXPR || cond_code == GT_EXPR)
9457 && is_overflow_infinity (vr->min))
9458 *strict_overflow_p = true;
9459
9460 return min;
9461 }
d579f20b
JL
9462 }
9463 return NULL;
9464}
9465
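/* Illustrative sketch (not GCC code; helper name invented): the
   singularity test above for a "<=" comparison on a variable VRP has
   already narrowed to [lo, hi].  Returns 1 and stores the unique
   satisfying value when exactly one value satisfies the test, e.g.
   x in [0, 10] together with x <= 0 leaves only x == 0.  */

static int
toy_singularity_le (long lo, long hi, long bound, long *only)
{
  long max = bound < hi ? bound : hi;  /* refine the maximum with the test */
  long min = lo;                       /* the minimum comes from the range */
  if (min == max)
    {
      *only = min;
      return 1;
    }
  return 0;
}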
ebbd90d8
JL
9466/* Return whether the value range *VR fits in an integer type specified
9467 by DEST_PRECISION and DEST_SGN. */
9468
9469static bool
526ceb68 9470range_fits_type_p (value_range *vr, unsigned dest_precision, signop dest_sgn)
ebbd90d8
JL
9471{
9472 tree src_type;
9473 unsigned src_precision;
807e902e
KZ
9474 widest_int tem;
9475 signop src_sgn;
ebbd90d8
JL
9476
9477 /* We can only handle integral and pointer types. */
9478 src_type = TREE_TYPE (vr->min);
9479 if (!INTEGRAL_TYPE_P (src_type)
9480 && !POINTER_TYPE_P (src_type))
9481 return false;
9482
807e902e 9483 /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
ebbd90d8
JL
9484 and so is an identity transform. */
9485 src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
807e902e
KZ
9486 src_sgn = TYPE_SIGN (src_type);
9487 if ((src_precision < dest_precision
9488 && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
9489 || (src_precision == dest_precision && src_sgn == dest_sgn))
ebbd90d8
JL
9490 return true;
9491
9492 /* Now we can only handle ranges with constant bounds. */
9493 if (vr->type != VR_RANGE
9494 || TREE_CODE (vr->min) != INTEGER_CST
9495 || TREE_CODE (vr->max) != INTEGER_CST)
9496 return false;
9497
807e902e 9498 /* For sign changes, the MSB of the wide_int has to be clear.
ebbd90d8 9499 An unsigned value with its MSB set cannot be represented by
807e902e
KZ
9500 a signed wide_int, while a negative value cannot be represented
9501 by an unsigned wide_int. */
9502 if (src_sgn != dest_sgn
9503 && (wi::lts_p (vr->min, 0) || wi::lts_p (vr->max, 0)))
ebbd90d8
JL
9504 return false;
9505
9506 /* Then we can perform the conversion on both ends and compare
9507 the result for equality. */
807e902e
KZ
9508 tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn);
9509 if (tem != wi::to_widest (vr->min))
ebbd90d8 9510 return false;
807e902e
KZ
9511 tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn);
9512 if (tem != wi::to_widest (vr->max))
ebbd90d8
JL
9513 return false;
9514
9515 return true;
9516}
9517
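/* Illustrative sketch (not GCC code; helper names invented): the
   round-trip check above, specialized to "does [lo, hi] fit in a signed
   8-bit type?".  Each bound is truncated to 8 bits and sign-extended
   again; the range fits exactly when both bounds survive the round trip.  */

static long
toy_sext8 (long v)
{
  return (long) (((unsigned long) v & 0xff) ^ 0x80) - 0x80;
}

static int
toy_fits_int8 (long lo, long hi)
{
  return toy_sext8 (lo) == lo && toy_sext8 (hi) == hi;
}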
22deefcb
RB
9518/* Simplify a conditional using a relational operator to an equality
9519 test if the range information indicates only one value can satisfy
9520 the original conditional. */
1a557723 9521
30821654 9522static bool
538dd0b7 9523simplify_cond_using_ranges (gcond *stmt)
1a557723 9524{
726a989a
RB
9525 tree op0 = gimple_cond_lhs (stmt);
9526 tree op1 = gimple_cond_rhs (stmt);
9527 enum tree_code cond_code = gimple_cond_code (stmt);
1a557723 9528
22deefcb 9529 if (cond_code != NE_EXPR
1a557723
JL
9530 && cond_code != EQ_EXPR
9531 && TREE_CODE (op0) == SSA_NAME
9532 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
9533 && is_gimple_min_invariant (op1))
9534 {
526ceb68 9535 value_range *vr = get_value_range (op0);
b8698a0f 9536
1a557723
JL
9537 /* If we have range information for OP0, then we might be
9538 able to simplify this conditional. */
9539 if (vr->type == VR_RANGE)
9540 {
7ac753f9
PP
9541 enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON;
9542 bool sop = false;
9543 tree new_tree = test_for_singularity (cond_code, op0, op1, vr, &sop);
1a557723 9544
7ac753f9
PP
9545 if (new_tree
9546 && (!sop || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0))))
1a557723 9547 {
d579f20b 9548 if (dump_file)
1a557723 9549 {
d579f20b 9550 fprintf (dump_file, "Simplified relational ");
726a989a 9551 print_gimple_stmt (dump_file, stmt, 0, 0);
d579f20b 9552 fprintf (dump_file, " into ");
a513fe88
JL
9553 }
9554
726a989a
RB
9555 gimple_cond_set_code (stmt, EQ_EXPR);
9556 gimple_cond_set_lhs (stmt, op0);
82d6e6fc 9557 gimple_cond_set_rhs (stmt, new_tree);
726a989a 9558
d579f20b
JL
9559 update_stmt (stmt);
9560
9561 if (dump_file)
a513fe88 9562 {
726a989a 9563 print_gimple_stmt (dump_file, stmt, 0, 0);
d579f20b 9564 fprintf (dump_file, "\n");
a513fe88 9565 }
d579f20b 9566
7ac753f9
PP
9567 if (sop && issue_strict_overflow_warning (wc))
9568 {
9569 location_t location = input_location;
9570 if (gimple_has_location (stmt))
9571 location = gimple_location (stmt);
9572
9573 warning_at (location, OPT_Wstrict_overflow,
9574 "assuming signed overflow does not occur when "
9575 "simplifying conditional");
9576 }
9577
30821654 9578 return true;
a513fe88
JL
9579 }
9580
d579f20b
JL
9581 /* Try again after inverting the condition. We only deal
9582 with integral types here, so no need to worry about
9583 issues with inverting FP comparisons. */
7ac753f9
PP
9584 sop = false;
9585 new_tree = test_for_singularity
9586 (invert_tree_comparison (cond_code, false),
9587 op0, op1, vr, &sop);
d579f20b 9588
7ac753f9
PP
9589 if (new_tree
9590 && (!sop || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0))))
1a557723 9591 {
d579f20b 9592 if (dump_file)
1a557723 9593 {
d579f20b 9594 fprintf (dump_file, "Simplified relational ");
726a989a 9595 print_gimple_stmt (dump_file, stmt, 0, 0);
d579f20b 9596 fprintf (dump_file, " into ");
1a557723 9597 }
d579f20b 9598
726a989a
RB
9599 gimple_cond_set_code (stmt, NE_EXPR);
9600 gimple_cond_set_lhs (stmt, op0);
82d6e6fc 9601 gimple_cond_set_rhs (stmt, new_tree);
726a989a 9602
d579f20b
JL
9603 update_stmt (stmt);
9604
9605 if (dump_file)
9606 {
726a989a 9607 print_gimple_stmt (dump_file, stmt, 0, 0);
d579f20b
JL
9608 fprintf (dump_file, "\n");
9609 }
d579f20b 9610
7ac753f9
PP
9611 if (sop && issue_strict_overflow_warning (wc))
9612 {
9613 location_t location = input_location;
9614 if (gimple_has_location (stmt))
9615 location = gimple_location (stmt);
9616
9617 warning_at (location, OPT_Wstrict_overflow,
9618 "assuming signed overflow does not occur when "
9619 "simplifying conditional");
9620 }
9621
30821654 9622 return true;
1a557723 9623 }
a513fe88
JL
9624 }
9625 }
30821654 9626
ebbd90d8
JL
9627 /* If we have a comparison of an SSA_NAME (OP0) against a constant,
9628 see if OP0 was set by a type conversion where the source of
9629 the conversion is another SSA_NAME with a range that fits
9630 into the range of OP0's type.
a32dfe9d 9631
ebbd90d8
JL
9632 If so, the conversion is redundant as the earlier SSA_NAME can be
9633 used for the comparison directly if we just massage the constant in the
9634 comparison. */
a32dfe9d 9635 if (TREE_CODE (op0) == SSA_NAME
a32dfe9d
JL
9636 && TREE_CODE (op1) == INTEGER_CST)
9637 {
355fe088 9638 gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
a32dfe9d
JL
9639 tree innerop;
9640
9641 if (!is_gimple_assign (def_stmt)
9642 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
9643 return false;
9644
9645 innerop = gimple_assign_rhs1 (def_stmt);
9646
a4ce1258 9647 if (TREE_CODE (innerop) == SSA_NAME
1014b6f5 9648 && !POINTER_TYPE_P (TREE_TYPE (innerop))
1ebd5558
JJ
9649 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)
9650 && desired_pro_or_demotion_p (TREE_TYPE (innerop), TREE_TYPE (op0)))
a32dfe9d 9651 {
526ceb68 9652 value_range *vr = get_value_range (innerop);
a32dfe9d
JL
9653
9654 if (range_int_cst_p (vr)
ebbd90d8
JL
9655 && range_fits_type_p (vr,
9656 TYPE_PRECISION (TREE_TYPE (op0)),
807e902e 9657 TYPE_SIGN (TREE_TYPE (op0)))
2343af65
JL
9658 && int_fits_type_p (op1, TREE_TYPE (innerop))
9659 /* The range must not have overflowed, or if it did overflow
9660 we must not be wrapping/trapping overflow and optimizing
9661 with strict overflow semantics. */
9662 && ((!is_negative_overflow_infinity (vr->min)
9663 && !is_positive_overflow_infinity (vr->max))
9664 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (innerop))))
a32dfe9d 9665 {
2343af65
JL
9666 /* If the range overflowed and the user has asked for warnings
9667 when strict overflow semantics were used to optimize code,
9668 issue an appropriate warning. */
ee68591e
RB
9669 if (cond_code != EQ_EXPR && cond_code != NE_EXPR
9670 && (is_negative_overflow_infinity (vr->min)
9671 || is_positive_overflow_infinity (vr->max))
2343af65
JL
9672 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_CONDITIONAL))
9673 {
9674 location_t location;
9675
9676 if (!gimple_has_location (stmt))
9677 location = input_location;
9678 else
9679 location = gimple_location (stmt);
9680 warning_at (location, OPT_Wstrict_overflow,
1ebd5558
JJ
9681 "assuming signed overflow does not occur when "
9682 "simplifying conditional");
2343af65
JL
9683 }
9684
a32dfe9d
JL
9685 tree newconst = fold_convert (TREE_TYPE (innerop), op1);
9686 gimple_cond_set_lhs (stmt, innerop);
9687 gimple_cond_set_rhs (stmt, newconst);
9688 return true;
9689 }
9690 }
9691 }
9692
30821654 9693 return false;
a513fe88
JL
9694}
9695
b7814a18
RG
9696/* Simplify a switch statement using the value range of the switch
9697 argument. */
9698
30821654 9699static bool
538dd0b7 9700simplify_switch_using_ranges (gswitch *stmt)
b7814a18 9701{
726a989a 9702 tree op = gimple_switch_index (stmt);
48abe922 9703 value_range *vr = NULL;
b7814a18
RG
9704 bool take_default;
9705 edge e;
9706 edge_iterator ei;
9707 size_t i = 0, j = 0, n, n2;
726a989a 9708 tree vec2;
b7814a18 9709 switch_update su;
8bb37e9a 9710 size_t k = 1, l = 0;
b7814a18 9711
92ef7fb1
MLI
9712 if (TREE_CODE (op) == SSA_NAME)
9713 {
9714 vr = get_value_range (op);
b7814a18 9715
92ef7fb1 9716 /* We can only handle integer ranges. */
8bb37e9a
TV
9717 if ((vr->type != VR_RANGE
9718 && vr->type != VR_ANTI_RANGE)
92ef7fb1
MLI
9719 || symbolic_range_p (vr))
9720 return false;
b7814a18 9721
92ef7fb1 9722 /* Find case label for min/max of the value range. */
8bb37e9a 9723 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
92ef7fb1
MLI
9724 }
9725 else if (TREE_CODE (op) == INTEGER_CST)
9726 {
9727 take_default = !find_case_label_index (stmt, 1, op, &i);
9728 if (take_default)
9729 {
9730 i = 1;
9731 j = 0;
9732 }
b8698a0f 9733 else
92ef7fb1
MLI
9734 {
9735 j = i;
9736 }
9737 }
9738 else
30821654 9739 return false;
b7814a18 9740
726a989a 9741 n = gimple_switch_num_labels (stmt);
b7814a18 9742
48abe922
PP
9743 /* We can truncate the case label ranges that partially overlap with OP's
9744 value range. */
9745 size_t min_idx = 1, max_idx = 0;
9746 if (vr != NULL)
9747 find_case_label_range (stmt, vr->min, vr->max, &min_idx, &max_idx);
9748 if (min_idx <= max_idx)
9749 {
9750 tree min_label = gimple_switch_label (stmt, min_idx);
9751 tree max_label = gimple_switch_label (stmt, max_idx);
9752
1addb9e6
PP
9753 /* Avoid changing the type of the case labels when truncating. */
9754 tree case_label_type = TREE_TYPE (CASE_LOW (min_label));
9755 tree vr_min = fold_convert (case_label_type, vr->min);
9756 tree vr_max = fold_convert (case_label_type, vr->max);
9757
48abe922
PP
9758 if (vr->type == VR_RANGE)
9759 {
9760 /* If OP's value range is [2,8] and the low label range is
9761 0 ... 3, truncate the label's range to 2 .. 3. */
1addb9e6 9762 if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
48abe922 9763 && CASE_HIGH (min_label) != NULL_TREE
1addb9e6
PP
9764 && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
9765 CASE_LOW (min_label) = vr_min;
48abe922
PP
9766
9767 /* If OP's value range is [2,8] and the high label range is
9768 7 ... 10, truncate the label's range to 7 .. 8. */
1addb9e6 9769 if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
48abe922 9770 && CASE_HIGH (max_label) != NULL_TREE
1addb9e6
PP
9771 && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
9772 CASE_HIGH (max_label) = vr_max;
48abe922
PP
9773 }
9774 else if (vr->type == VR_ANTI_RANGE)
9775 {
1addb9e6 9776 tree one_cst = build_one_cst (case_label_type);
48abe922
PP
9777
9778 if (min_label == max_label)
9779 {
9780 /* If OP's value range is ~[7,8] and the label's range is
9781 7 ... 10, truncate the label's range to 9 ... 10. */
1addb9e6 9782 if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) == 0
48abe922 9783 && CASE_HIGH (min_label) != NULL_TREE
1addb9e6 9784 && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) > 0)
48abe922 9785 CASE_LOW (min_label)
1addb9e6 9786 = int_const_binop (PLUS_EXPR, vr_max, one_cst);
48abe922
PP
9787
9788 /* If OP's value range is ~[7,8] and the label's range is
9789 5 ... 8, truncate the label's range to 5 ... 6. */
1addb9e6 9790 if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
48abe922 9791 && CASE_HIGH (min_label) != NULL_TREE
1addb9e6 9792 && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) == 0)
48abe922 9793 CASE_HIGH (min_label)
1addb9e6 9794 = int_const_binop (MINUS_EXPR, vr_min, one_cst);
48abe922
PP
9795 }
9796 else
9797 {
9798 /* If OP's value range is ~[2,8] and the low label range is
9799 0 ... 3, truncate the label's range to 0 ... 1. */
1addb9e6 9800 if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
48abe922 9801 && CASE_HIGH (min_label) != NULL_TREE
1addb9e6 9802 && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
48abe922 9803 CASE_HIGH (min_label)
1addb9e6 9804 = int_const_binop (MINUS_EXPR, vr_min, one_cst);
48abe922
PP
9805
9806 /* If OP's value range is ~[2,8] and the high label range is
9807 7 ... 10, truncate the label's range to 9 ... 10. */
1addb9e6 9808 if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
48abe922 9809 && CASE_HIGH (max_label) != NULL_TREE
1addb9e6 9810 && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
48abe922 9811 CASE_LOW (max_label)
1addb9e6 9812 = int_const_binop (PLUS_EXPR, vr_max, one_cst);
48abe922
PP
9813 }
9814 }
9815
9816 /* Canonicalize singleton case ranges. */
9817 if (tree_int_cst_equal (CASE_LOW (min_label), CASE_HIGH (min_label)))
9818 CASE_HIGH (min_label) = NULL_TREE;
9819 if (tree_int_cst_equal (CASE_LOW (max_label), CASE_HIGH (max_label)))
9820 CASE_HIGH (max_label) = NULL_TREE;
9821 }
9822
9823 /* We can also eliminate case labels that lie completely outside OP's value
9824 range. */
9825
b7814a18 9826 /* Bail out if this is just all edges taken. */
726a989a
RB
9827 if (i == 1
9828 && j == n - 1
b7814a18 9829 && take_default)
30821654 9830 return false;
b7814a18
RG
9831
9832 /* Build a new vector of taken case labels. */
8bb37e9a 9833 vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
726a989a 9834 n2 = 0;
b7814a18
RG
9835
9836 /* Add the default edge, if necessary. */
9837 if (take_default)
726a989a
RB
9838 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
9839
9840 for (; i <= j; ++i, ++n2)
9841 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
b7814a18 9842
8bb37e9a
TV
9843 for (; k <= l; ++k, ++n2)
9844 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);
9845
b7814a18
RG
9846 /* Mark needed edges. */
9847 for (i = 0; i < n2; ++i)
9848 {
726a989a 9849 e = find_edge (gimple_bb (stmt),
b7814a18
RG
9850 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
9851 e->aux = (void *)-1;
9852 }
9853
9854 /* Queue not needed edges for later removal. */
726a989a 9855 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
b7814a18
RG
9856 {
9857 if (e->aux == (void *)-1)
9858 {
9859 e->aux = NULL;
9860 continue;
9861 }
9862
9863 if (dump_file && (dump_flags & TDF_DETAILS))
9864 {
9865 fprintf (dump_file, "removing unreachable case label\n");
9866 }
9771b263 9867 to_remove_edges.safe_push (e);
1d86f5e9 9868 e->flags &= ~EDGE_EXECUTABLE;
b7814a18
RG
9869 }
9870
9871 /* And queue an update for the stmt. */
9872 su.stmt = stmt;
9873 su.vec = vec2;
9771b263 9874 to_update_switch_stmts.safe_push (su);
30821654 9875 return false;
b7814a18
RG
9876}
9877
29c5134a
RG
9878/* Simplify an integral conversion from an SSA name in STMT. */
9879
9880static bool
7dc2f5f1 9881simplify_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
29c5134a 9882{
dcc95c20 9883 tree innerop, middleop, finaltype;
355fe088 9884 gimple *def_stmt;
807e902e 9885 signop inner_sgn, middle_sgn, final_sgn;
6ebbd277 9886 unsigned inner_prec, middle_prec, final_prec;
807e902e 9887 widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
dcc95c20
RG
9888
9889 finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
9111c715
RG
9890 if (!INTEGRAL_TYPE_P (finaltype))
9891 return false;
dcc95c20
RG
9892 middleop = gimple_assign_rhs1 (stmt);
9893 def_stmt = SSA_NAME_DEF_STMT (middleop);
29c5134a
RG
9894 if (!is_gimple_assign (def_stmt)
9895 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
9896 return false;
dcc95c20 9897 innerop = gimple_assign_rhs1 (def_stmt);
999c1171
RB
9898 if (TREE_CODE (innerop) != SSA_NAME
9899 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
29c5134a 9900 return false;
dcc95c20 9901
0139ba93
MG
9902 /* Get the value-range of the inner operand. Use get_range_info in
9903 case innerop was created during substitute-and-fold. */
9904 wide_int imin, imax;
9905 if (!INTEGRAL_TYPE_P (TREE_TYPE (innerop))
9906 || get_range_info (innerop, &imin, &imax) != VR_RANGE)
29c5134a 9907 return false;
0139ba93
MG
9908 innermin = widest_int::from (imin, TYPE_SIGN (TREE_TYPE (innerop)));
9909 innermax = widest_int::from (imax, TYPE_SIGN (TREE_TYPE (innerop)));
dcc95c20
RG
9910
 9911  /* Simulate the conversion chain to check whether the result stays
 9912     the same when the middle conversion is removed.  */
6ebbd277
JR
9913 inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
9914 middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
9915 final_prec = TYPE_PRECISION (finaltype);
9916
9917 /* If the first conversion is not injective, the second must not
9918 be widening. */
807e902e
KZ
9919 if (wi::gtu_p (innermax - innermin,
9920 wi::mask <widest_int> (middle_prec, false))
6ebbd277 9921 && middle_prec < final_prec)
29c5134a 9922 return false;
6ebbd277
JR
9923 /* We also want a medium value so that we can track the effect that
9924 narrowing conversions with sign change have. */
807e902e
KZ
9925 inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
9926 if (inner_sgn == UNSIGNED)
9927 innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
6ebbd277 9928 else
807e902e
KZ
9929 innermed = 0;
9930 if (wi::cmp (innermin, innermed, inner_sgn) >= 0
9931 || wi::cmp (innermed, innermax, inner_sgn) >= 0)
6ebbd277
JR
9932 innermed = innermin;
9933
807e902e
KZ
9934 middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
9935 middlemin = wi::ext (innermin, middle_prec, middle_sgn);
9936 middlemed = wi::ext (innermed, middle_prec, middle_sgn);
9937 middlemax = wi::ext (innermax, middle_prec, middle_sgn);
6ebbd277 9938
7d5a0f1b
RG
9939 /* Require that the final conversion applied to both the original
9940 and the intermediate range produces the same result. */
807e902e
KZ
9941 final_sgn = TYPE_SIGN (finaltype);
9942 if (wi::ext (middlemin, final_prec, final_sgn)
9943 != wi::ext (innermin, final_prec, final_sgn)
9944 || wi::ext (middlemed, final_prec, final_sgn)
9945 != wi::ext (innermed, final_prec, final_sgn)
9946 || wi::ext (middlemax, final_prec, final_sgn)
9947 != wi::ext (innermax, final_prec, final_sgn))
dcc95c20
RG
9948 return false;
9949
9950 gimple_assign_set_rhs1 (stmt, innerop);
7dc2f5f1 9951 fold_stmt (gsi, follow_single_use_edges);
29c5134a
RG
9952 return true;
9953}
9954
ebeadd91
RG
9955/* Simplify a conversion from an integral SSA name to float in STMT.  */
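/* For instance (hypothetical range): if u_1 is 'unsigned int' with
   range [0, 1000] and we compute  d_2 = (double) u_1,  the range fits
   a signed type of the same width, so the usually cheaper signed
   int -> double conversion can be used; otherwise a signed mode no
   wider than the input that still holds the range is searched for,
   starting from the narrowest available integer mode.  */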
9956
9957static bool
355fe088
TS
9958simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi,
9959 gimple *stmt)
ebeadd91
RG
9960{
9961 tree rhs1 = gimple_assign_rhs1 (stmt);
526ceb68 9962 value_range *vr = get_value_range (rhs1);
ef4bddc2
RS
9963 machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
9964 machine_mode mode;
ebeadd91 9965 tree tem;
538dd0b7 9966 gassign *conv;
ebeadd91
RG
9967
9968 /* We can only handle constant ranges. */
9969 if (vr->type != VR_RANGE
9970 || TREE_CODE (vr->min) != INTEGER_CST
9971 || TREE_CODE (vr->max) != INTEGER_CST)
9972 return false;
9973
9974 /* First check if we can use a signed type in place of an unsigned. */
9975 if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
9976 && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
9977 != CODE_FOR_nothing)
807e902e 9978 && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
ebeadd91
RG
9979 mode = TYPE_MODE (TREE_TYPE (rhs1));
9980 /* If we can do the conversion in the current input mode do nothing. */
9981 else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
5f4e6de3 9982 TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
ebeadd91
RG
9983 return false;
9984 /* Otherwise search for a mode we can use, starting from the narrowest
9985 integer mode available. */
9986 else
9987 {
9988 mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
9989 do
9990 {
9991 /* If we cannot do a signed conversion to float from mode
9992 or if the value-range does not fit in the signed type
9993 try with a wider mode. */
9994 if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
807e902e 9995 && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
ebeadd91
RG
9996 break;
9997
9998 mode = GET_MODE_WIDER_MODE (mode);
9999 /* But do not widen the input. Instead leave that to the
10000 optabs expansion code. */
10001 if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
10002 return false;
10003 }
10004 while (mode != VOIDmode);
10005 if (mode == VOIDmode)
10006 return false;
10007 }
10008
10009 /* It works, insert a truncation or sign-change before the
10010 float conversion. */
83d5977e 10011 tem = make_ssa_name (build_nonstandard_integer_type
b731b390 10012 (GET_MODE_PRECISION (mode), 0));
0d0e4a03 10013 conv = gimple_build_assign (tem, NOP_EXPR, rhs1);
ebeadd91
RG
10014 gsi_insert_before (gsi, conv, GSI_SAME_STMT);
10015 gimple_assign_set_rhs1 (stmt, tem);
d7f336f8 10016 fold_stmt (gsi, follow_single_use_edges);
ebeadd91
RG
10017
10018 return true;
10019}
10020
97286431
JJ
10021/* Simplify an internal fn call using ranges if possible. */
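/* A sketch with assumed ranges: given  x_1 in [0, 10]  and
   y_2 in [0, 20],  a call  z_3 = UBSAN_CHECK_ADD (x_1, y_2)  cannot
   overflow, so it is replaced by the plain  z_3 = x_1 + y_2.
   For .ADD_OVERFLOW and friends the arithmetic is emitted directly
   (in an unsigned type if it may wrap) and the overflow flag of the
   returned complex value is folded to a compile-time constant.  */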
10022
10023static bool
355fe088 10024simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
97286431
JJ
10025{
10026 enum tree_code subcode;
1304953e
JJ
10027 bool is_ubsan = false;
10028 bool ovf = false;
97286431
JJ
10029 switch (gimple_call_internal_fn (stmt))
10030 {
10031 case IFN_UBSAN_CHECK_ADD:
10032 subcode = PLUS_EXPR;
1304953e 10033 is_ubsan = true;
97286431
JJ
10034 break;
10035 case IFN_UBSAN_CHECK_SUB:
10036 subcode = MINUS_EXPR;
1304953e 10037 is_ubsan = true;
97286431
JJ
10038 break;
10039 case IFN_UBSAN_CHECK_MUL:
1304953e
JJ
10040 subcode = MULT_EXPR;
10041 is_ubsan = true;
10042 break;
10043 case IFN_ADD_OVERFLOW:
10044 subcode = PLUS_EXPR;
10045 break;
10046 case IFN_SUB_OVERFLOW:
10047 subcode = MINUS_EXPR;
10048 break;
10049 case IFN_MUL_OVERFLOW:
97286431
JJ
10050 subcode = MULT_EXPR;
10051 break;
10052 default:
10053 return false;
10054 }
10055
97286431
JJ
10056 tree op0 = gimple_call_arg (stmt, 0);
10057 tree op1 = gimple_call_arg (stmt, 1);
1304953e
JJ
10058 tree type;
10059 if (is_ubsan)
1705cebd
JJ
10060 {
10061 type = TREE_TYPE (op0);
10062 if (VECTOR_TYPE_P (type))
10063 return false;
10064 }
1304953e
JJ
10065 else if (gimple_call_lhs (stmt) == NULL_TREE)
10066 return false;
97286431 10067 else
1304953e
JJ
10068 type = TREE_TYPE (TREE_TYPE (gimple_call_lhs (stmt)));
10069 if (!check_for_binary_op_overflow (subcode, type, op0, op1, &ovf)
10070 || (is_ubsan && ovf))
10071 return false;
97286431 10072
355fe088 10073 gimple *g;
1304953e
JJ
10074 location_t loc = gimple_location (stmt);
10075 if (is_ubsan)
0d0e4a03 10076 g = gimple_build_assign (gimple_call_lhs (stmt), subcode, op0, op1);
368b454d
JJ
10077 else
10078 {
1304953e
JJ
10079 int prec = TYPE_PRECISION (type);
10080 tree utype = type;
10081 if (ovf
10082 || !useless_type_conversion_p (type, TREE_TYPE (op0))
10083 || !useless_type_conversion_p (type, TREE_TYPE (op1)))
10084 utype = build_nonstandard_integer_type (prec, 1);
10085 if (TREE_CODE (op0) == INTEGER_CST)
10086 op0 = fold_convert (utype, op0);
10087 else if (!useless_type_conversion_p (utype, TREE_TYPE (op0)))
10088 {
0d0e4a03 10089 g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op0);
1304953e
JJ
10090 gimple_set_location (g, loc);
10091 gsi_insert_before (gsi, g, GSI_SAME_STMT);
10092 op0 = gimple_assign_lhs (g);
368b454d 10093 }
1304953e
JJ
10094 if (TREE_CODE (op1) == INTEGER_CST)
10095 op1 = fold_convert (utype, op1);
10096 else if (!useless_type_conversion_p (utype, TREE_TYPE (op1)))
10097 {
0d0e4a03 10098 g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op1);
1304953e
JJ
10099 gimple_set_location (g, loc);
10100 gsi_insert_before (gsi, g, GSI_SAME_STMT);
10101 op1 = gimple_assign_lhs (g);
10102 }
0d0e4a03 10103 g = gimple_build_assign (make_ssa_name (utype), subcode, op0, op1);
1304953e
JJ
10104 gimple_set_location (g, loc);
10105 gsi_insert_before (gsi, g, GSI_SAME_STMT);
10106 if (utype != type)
10107 {
0d0e4a03
JJ
10108 g = gimple_build_assign (make_ssa_name (type), NOP_EXPR,
10109 gimple_assign_lhs (g));
1304953e
JJ
10110 gimple_set_location (g, loc);
10111 gsi_insert_before (gsi, g, GSI_SAME_STMT);
10112 }
0d0e4a03
JJ
10113 g = gimple_build_assign (gimple_call_lhs (stmt), COMPLEX_EXPR,
10114 gimple_assign_lhs (g),
10115 build_int_cst (type, ovf));
1304953e
JJ
10116 }
10117 gimple_set_location (g, loc);
97286431
JJ
10118 gsi_replace (gsi, g, false);
10119 return true;
10120}
10121
22d12455
KV
10122/* Return true if VAR is a two-valued variable.  Set *a and *b to the
10123   two values when it is true.  Return false otherwise.  */
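/* For example, a variable with range [4, 5] is two-valued with
   *a == 4 and *b == 5, and so is one with the anti-range
   ~[TYPE_MIN + 1, TYPE_MAX - 1], which leaves only TYPE_MIN and
   TYPE_MAX as possible values.  */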
10124
10125static bool
10126two_valued_val_range_p (tree var, tree *a, tree *b)
10127{
10128 value_range *vr = get_value_range (var);
10129 if ((vr->type != VR_RANGE
10130 && vr->type != VR_ANTI_RANGE)
10131 || TREE_CODE (vr->min) != INTEGER_CST
10132 || TREE_CODE (vr->max) != INTEGER_CST)
10133 return false;
10134
10135 if (vr->type == VR_RANGE
10136 && wi::sub (vr->max, vr->min) == 1)
10137 {
10138 *a = vr->min;
10139 *b = vr->max;
10140 return true;
10141 }
10142
10143 /* ~[TYPE_MIN + 1, TYPE_MAX - 1] */
10144 if (vr->type == VR_ANTI_RANGE
10145 && wi::sub (vr->min, vrp_val_min (TREE_TYPE (var))) == 1
10146 && wi::sub (vrp_val_max (TREE_TYPE (var)), vr->max) == 1)
10147 {
10148 *a = vrp_val_min (TREE_TYPE (var));
10149 *b = vrp_val_max (TREE_TYPE (var));
10150 return true;
10151 }
10152
10153 return false;
10154}
10155
1a557723
JL
10156/* Simplify STMT using ranges if possible. */
10157
ff7ffb8f 10158static bool
30821654 10159simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
1a557723 10160{
355fe088 10161 gimple *stmt = gsi_stmt (*gsi);
726a989a 10162 if (is_gimple_assign (stmt))
1a557723 10163 {
726a989a 10164 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
29c5134a 10165 tree rhs1 = gimple_assign_rhs1 (stmt);
22d12455
KV
10166 tree rhs2 = gimple_assign_rhs2 (stmt);
10167 tree lhs = gimple_assign_lhs (stmt);
10168 tree val1 = NULL_TREE, val2 = NULL_TREE;
10169 use_operand_p use_p;
10170 gimple *use_stmt;
10171
10172 /* Convert:
10173 LHS = CST BINOP VAR
10174 Where VAR is two-valued and LHS is used in GIMPLE_COND only
10175 To:
10176 LHS = VAR == VAL1 ? (CST BINOP VAL1) : (CST BINOP VAL2)
10177
10178 Also handles:
10179 LHS = VAR BINOP CST
10180 Where VAR is two-valued and LHS is used in GIMPLE_COND only
10181 To:
10182 LHS = VAR == VAL1 ? (VAL1 BINOP CST) : (VAL2 BINOP CST) */
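      /* As a concrete (hypothetical) instance: if b_1 has range [0, 1],

	   x_2 = 8 >> b_1;
	   if (x_2 == 8) ...

	 becomes

	   x_2 = b_1 == 0 ? 8 : 4;
	   if (x_2 == 8) ...

	 which the later fold_stmt call can reduce to a test on b_1.  */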
10183
10184 if (TREE_CODE_CLASS (rhs_code) == tcc_binary
10185 && INTEGRAL_TYPE_P (TREE_TYPE (lhs))
10186 && ((TREE_CODE (rhs1) == INTEGER_CST
10187 && TREE_CODE (rhs2) == SSA_NAME)
10188 || (TREE_CODE (rhs2) == INTEGER_CST
10189 && TREE_CODE (rhs1) == SSA_NAME))
10190 && single_imm_use (lhs, &use_p, &use_stmt)
10191 && gimple_code (use_stmt) == GIMPLE_COND)
10192
10193 {
10194 tree new_rhs1 = NULL_TREE;
10195 tree new_rhs2 = NULL_TREE;
10196 tree cmp_var = NULL_TREE;
10197
10198 if (TREE_CODE (rhs2) == SSA_NAME
10199 && two_valued_val_range_p (rhs2, &val1, &val2))
10200 {
10201 /* Optimize RHS1 OP [VAL1, VAL2]. */
10202 new_rhs1 = int_const_binop (rhs_code, rhs1, val1);
10203 new_rhs2 = int_const_binop (rhs_code, rhs1, val2);
10204 cmp_var = rhs2;
10205 }
10206 else if (TREE_CODE (rhs1) == SSA_NAME
10207 && two_valued_val_range_p (rhs1, &val1, &val2))
10208 {
10209 /* Optimize [VAL1, VAL2] OP RHS2. */
10210 new_rhs1 = int_const_binop (rhs_code, val1, rhs2);
10211 new_rhs2 = int_const_binop (rhs_code, val2, rhs2);
10212 cmp_var = rhs1;
10213 }
10214
10215	      /* If we could not find two values or the optimization is invalid,
10216		 as in division by zero, new_rhs1 / new_rhs2 will be NULL_TREE.  */
10217 if (new_rhs1 && new_rhs2)
10218 {
28ea3e97 10219 tree cond = build2 (EQ_EXPR, boolean_type_node, cmp_var, val1);
22d12455
KV
10220 gimple_assign_set_rhs_with_ops (gsi,
10221 COND_EXPR, cond,
10222 new_rhs1,
10223 new_rhs2);
10224 update_stmt (gsi_stmt (*gsi));
d7f336f8 10225 fold_stmt (gsi, follow_single_use_edges);
22d12455
KV
10226 return true;
10227 }
10228 }
1a557723 10229
30821654
PB
10230 switch (rhs_code)
10231 {
10232 case EQ_EXPR:
10233 case NE_EXPR:
98958241
KT
10234 /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
10235	       if the RHS is zero or one, and the LHS is known to be a boolean
10236	       value.  */
29c5134a 10237 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
30821654
PB
10238 return simplify_truth_ops_using_ranges (gsi, stmt);
10239 break;
10240
1a557723
JL
10241 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
10242 and BIT_AND_EXPR respectively if the first operand is greater
f51286f2
JJ
10243 than zero and the second operand is an exact power of two.
10244 Also optimize TRUNC_MOD_EXPR away if the second operand is
10245 constant and the first operand already has the right value
10246 range. */
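	    /* For instance, with x_1 known to lie in [0, 255] (values
	       assumed for illustration):
		 x_1 / 16  ->  x_1 >> 4
		 x_1 % 16  ->  x_1 & 15
	       and  x_1 % 256  folds away to x_1 itself, since the value
	       range already fits below the modulus.  */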
30821654
PB
10247 case TRUNC_DIV_EXPR:
10248 case TRUNC_MOD_EXPR:
f51286f2
JJ
10249 if (TREE_CODE (rhs1) == SSA_NAME
10250 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
20b8d734 10251 return simplify_div_or_mod_using_ranges (gsi, stmt);
30821654 10252 break;
1a557723
JL
10253
10254 /* Transform ABS (X) into X or -X as appropriate. */
30821654 10255 case ABS_EXPR:
29c5134a
RG
10256 if (TREE_CODE (rhs1) == SSA_NAME
10257 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7dc2f5f1 10258 return simplify_abs_using_ranges (gsi, stmt);
30821654
PB
10259 break;
10260
8556f58f
JJ
10261 case BIT_AND_EXPR:
10262 case BIT_IOR_EXPR:
10263 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
10264 if all the bits being cleared are already cleared or
10265 all the bits being set are already set. */
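	  /* E.g. with assumed ranges: if x_1 is in [0, 15] then
	     x_1 & 0xff  simplifies to x_1 (the cleared bits are already
	     zero), and if x_1 is in [8, 15] then  x_1 | 8  simplifies to
	     x_1 (bit 3 is already set).  */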
29c5134a 10266 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
8556f58f
JJ
10267 return simplify_bit_ops_using_ranges (gsi, stmt);
10268 break;
10269
29c5134a
RG
10270 CASE_CONVERT:
10271 if (TREE_CODE (rhs1) == SSA_NAME
10272 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7dc2f5f1 10273 return simplify_conversion_using_ranges (gsi, stmt);
29c5134a
RG
10274 break;
10275
ebeadd91
RG
10276 case FLOAT_EXPR:
10277 if (TREE_CODE (rhs1) == SSA_NAME
10278 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
10279 return simplify_float_conversion_using_ranges (gsi, stmt);
10280 break;
10281
da7db2ce
NS
10282 case MIN_EXPR:
10283 case MAX_EXPR:
7dc2f5f1 10284 return simplify_min_or_max_using_ranges (gsi, stmt);
da7db2ce 10285
30821654
PB
10286 default:
10287 break;
10288 }
1a557723 10289 }
726a989a 10290 else if (gimple_code (stmt) == GIMPLE_COND)
538dd0b7 10291 return simplify_cond_using_ranges (as_a <gcond *> (stmt));
726a989a 10292 else if (gimple_code (stmt) == GIMPLE_SWITCH)
538dd0b7 10293 return simplify_switch_using_ranges (as_a <gswitch *> (stmt));
97286431
JJ
10294 else if (is_gimple_call (stmt)
10295 && gimple_call_internal_p (stmt))
10296 return simplify_internal_call_using_ranges (gsi, stmt);
30821654
PB
10297
10298 return false;
1a557723
JL
10299}
10300
ff7ffb8f
RG
10301/* If the statement pointed to by SI has a predicate whose value can be
10302 computed using the value range information computed by VRP, compute
10303 its value and return true. Otherwise, return false. */
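/* For instance, with x_1 known to be in [1, 5] (an assumed range), the
   condition of  if (x_1 != 0)  evaluates to a constant and the
   GIMPLE_COND is made unconditionally true, while a comparison
   assignment such as  b_2 = x_1 > 0  has its RHS replaced by 1.  */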
10304
10305static bool
10306fold_predicate_in (gimple_stmt_iterator *si)
10307{
10308 bool assignment_p = false;
10309 tree val;
355fe088 10310 gimple *stmt = gsi_stmt (*si);
ff7ffb8f
RG
10311
10312 if (is_gimple_assign (stmt)
10313 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
10314 {
10315 assignment_p = true;
10316 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
10317 gimple_assign_rhs1 (stmt),
10318 gimple_assign_rhs2 (stmt),
10319 stmt);
10320 }
538dd0b7
DM
10321 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
10322 val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
10323 gimple_cond_lhs (cond_stmt),
10324 gimple_cond_rhs (cond_stmt),
ff7ffb8f
RG
10325 stmt);
10326 else
10327 return false;
10328
10329 if (val)
10330 {
10331 if (assignment_p)
10332 val = fold_convert (gimple_expr_type (stmt), val);
b8698a0f 10333
ff7ffb8f
RG
10334 if (dump_file)
10335 {
10336 fprintf (dump_file, "Folding predicate ");
10337 print_gimple_expr (dump_file, stmt, 0, 0);
10338 fprintf (dump_file, " to ");
10339 print_generic_expr (dump_file, val, 0);
10340 fprintf (dump_file, "\n");
10341 }
10342
10343 if (is_gimple_assign (stmt))
10344 gimple_assign_set_rhs_from_tree (si, val);
10345 else
10346 {
10347 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
538dd0b7 10348 gcond *cond_stmt = as_a <gcond *> (stmt);
ff7ffb8f 10349 if (integer_zerop (val))
538dd0b7 10350 gimple_cond_make_false (cond_stmt);
ff7ffb8f 10351 else if (integer_onep (val))
538dd0b7 10352 gimple_cond_make_true (cond_stmt);
ff7ffb8f
RG
10353 else
10354 gcc_unreachable ();
10355 }
10356
10357 return true;
10358 }
10359
10360 return false;
10361}
10362
10363/* Callback for substitute_and_fold folding the stmt at *SI. */
10364
10365static bool
10366vrp_fold_stmt (gimple_stmt_iterator *si)
10367{
10368 if (fold_predicate_in (si))
10369 return true;
10370
10371 return simplify_stmt_using_ranges (si);
10372}
10373
f6c72af4
JL
10374/* Unwindable const/copy equivalences. */
10375const_and_copies *equiv_stack;
2090d6a0 10376
0c948c27
ILT
10377/* A trivial wrapper so that we can present the generic jump threading
10378 code with a simple API for simplifying statements. STMT is the
10379 statement we want to simplify, WITHIN_STMT provides the location
10380 for any overflow warnings. */
10381
2090d6a0 10382static tree
355fe088 10383simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
8e33db8f 10384 class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED)
2090d6a0 10385{
538dd0b7
DM
10386 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
10387 return vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
10388 gimple_cond_lhs (cond_stmt),
10389 gimple_cond_rhs (cond_stmt),
10390 within_stmt);
5562e26e 10391
5c3e5002
PP
10392 /* We simplify a switch statement by trying to determine which case label
10393 will be taken. If we are successful then we return the corresponding
10394 CASE_LABEL_EXPR. */
10395 if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
10396 {
10397 tree op = gimple_switch_index (switch_stmt);
10398 if (TREE_CODE (op) != SSA_NAME)
10399 return NULL_TREE;
10400
10401 value_range *vr = get_value_range (op);
10402 if ((vr->type != VR_RANGE && vr->type != VR_ANTI_RANGE)
10403 || symbolic_range_p (vr))
10404 return NULL_TREE;
10405
10406 if (vr->type == VR_RANGE)
10407 {
10408 size_t i, j;
10409 /* Get the range of labels that contain a part of the operand's
10410 value range. */
10411 find_case_label_range (switch_stmt, vr->min, vr->max, &i, &j);
10412
10413 /* Is there only one such label? */
10414 if (i == j)
10415 {
10416 tree label = gimple_switch_label (switch_stmt, i);
10417
10418 /* The i'th label will be taken only if the value range of the
10419 operand is entirely within the bounds of this label. */
10420 if (CASE_HIGH (label) != NULL_TREE
10421 ? (tree_int_cst_compare (CASE_LOW (label), vr->min) <= 0
10422 && tree_int_cst_compare (CASE_HIGH (label), vr->max) >= 0)
10423 : (tree_int_cst_equal (CASE_LOW (label), vr->min)
10424 && tree_int_cst_equal (vr->min, vr->max)))
10425 return label;
10426 }
10427
10428 /* If there are no such labels then the default label will be
10429 taken. */
10430 if (i > j)
10431 return gimple_switch_label (switch_stmt, 0);
10432 }
10433
10434 if (vr->type == VR_ANTI_RANGE)
10435 {
10436 unsigned n = gimple_switch_num_labels (switch_stmt);
10437 tree min_label = gimple_switch_label (switch_stmt, 1);
10438 tree max_label = gimple_switch_label (switch_stmt, n - 1);
10439
10440 /* The default label will be taken only if the anti-range of the
10441 operand is entirely outside the bounds of all the (non-default)
10442 case labels. */
10443 if (tree_int_cst_compare (vr->min, CASE_LOW (min_label)) <= 0
10444 && (CASE_HIGH (max_label) != NULL_TREE
10445 ? tree_int_cst_compare (vr->max, CASE_HIGH (max_label)) >= 0
10446 : tree_int_cst_compare (vr->max, CASE_LOW (max_label)) >= 0))
10447 return gimple_switch_label (switch_stmt, 0);
10448 }
10449
10450 return NULL_TREE;
10451 }
10452
538dd0b7 10453 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
5562e26e 10454 {
526ceb68 10455 value_range new_vr = VR_INITIALIZER;
538dd0b7 10456 tree lhs = gimple_assign_lhs (assign_stmt);
5562e26e
JL
10457
10458 if (TREE_CODE (lhs) == SSA_NAME
10459 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
10460 || POINTER_TYPE_P (TREE_TYPE (lhs))))
10461 {
538dd0b7 10462 extract_range_from_assignment (&new_vr, assign_stmt);
5562e26e
JL
10463 if (range_int_cst_singleton_p (&new_vr))
10464 return new_vr.min;
10465 }
10466 }
2090d6a0 10467
5562e26e 10468 return NULL_TREE;
2090d6a0
JL
10469}
10470
10471/* Blocks which have more than one predecessor and more than
fa10beec 10472 one successor present jump threading opportunities, i.e.,
2090d6a0
JL
10473 when the block is reached from a specific predecessor, we
10474 may be able to determine which of the outgoing edges will
10475 be traversed. When this optimization applies, we are able
10476 to avoid conditionals at runtime and we may expose secondary
10477 optimization opportunities.
10478
10479 This routine is effectively a driver for the generic jump
10480 threading code. It basically just presents the generic code
10481 with edges that may be suitable for jump threading.
10482
10483 Unlike DOM, we do not iterate VRP if jump threading was successful.
10484 While iterating may expose new opportunities for VRP, it is expected
10485 those opportunities would be very limited and the compile time cost
b8698a0f 10486 to expose those opportunities would be significant.
2090d6a0
JL
10487
10488 As jump threading opportunities are discovered, they are registered
10489 for later realization. */
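/* A typical opportunity, sketched with assumed values:

     if (a_1 > 10)     <- ends block A
       ...
     if (a_1 > 5)      <- block B, also reachable from elsewhere

   When B is entered via the edge on which a_1 > 10 held, the recorded
   range for a_1 makes the second test trivially true, so that
   predecessor edge can be threaded straight to B's true successor.  */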
10490
10491static void
10492identify_jump_threads (void)
10493{
10494 basic_block bb;
538dd0b7 10495 gcond *dummy;
b7814a18
RG
10496 int i;
10497 edge e;
2090d6a0
JL
10498
10499 /* Ugh. When substituting values earlier in this pass we can
10500 wipe the dominance information. So rebuild the dominator
10501 information as we need it within the jump threading code. */
10502 calculate_dominance_info (CDI_DOMINATORS);
10503
10504 /* We do not allow VRP information to be used for jump threading
10505 across a back edge in the CFG. Otherwise it becomes too
10506 difficult to avoid eliminating loop exit tests. Of course
10507 EDGE_DFS_BACK is not accurate at this time so we have to
10508 recompute it. */
10509 mark_dfs_back_edges ();
10510
b7814a18 10511 /* Do not thread across edges we are about to remove. Just marking
b9e59e4f 10512 them as EDGE_IGNORE will do. */
9771b263 10513 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
b9e59e4f 10514 e->flags |= EDGE_IGNORE;
b7814a18 10515
2090d6a0
JL
10516 /* Allocate our unwinder stack to unwind any temporary equivalences
10517 that might be recorded. */
a12cbc57 10518 equiv_stack = new const_and_copies ();
2090d6a0
JL
10519
10520 /* To avoid lots of silly node creation, we create a single
10521 conditional and just modify it in-place when attempting to
10522 thread jumps. */
726a989a
RB
10523 dummy = gimple_build_cond (EQ_EXPR,
10524 integer_zero_node, integer_zero_node,
10525 NULL, NULL);
2090d6a0
JL
10526
10527 /* Walk through all the blocks finding those which present a
10528 potential jump threading opportunity. We could set this up
10529 as a dominator walker and record data during the walk, but
10530 I doubt it's worth the effort for the classes of jump
10531 threading opportunities we are trying to identify at this
10532 point in compilation. */
11cd3bed 10533 FOR_EACH_BB_FN (bb, cfun)
2090d6a0 10534 {
355fe088 10535 gimple *last;
2090d6a0
JL
10536
10537 /* If the generic jump threading code does not find this block
10538 interesting, then there is nothing to do. */
10539 if (! potentially_threadable_block (bb))
10540 continue;
10541
1d93fa5c 10542 last = last_stmt (bb);
2090d6a0 10543
1f3fcdc3 10544 /* We're basically looking for a switch or any kind of conditional with
6261ab0e
JL
10545 integral or pointer type arguments. Note the type of the second
10546 argument will be the same as the first argument, so no need to
215f8d9e
JL
10547 check it explicitly.
10548
10549 We also handle the case where there are no statements in the
10550	 block.  This comes up with forwarder blocks that are not
10551 optimized away because they lead to a loop header. But we do
10552 want to thread through them as we can sometimes thread to the
10553 loop exit which is obviously profitable. */
10554 if (!last
10555 || gimple_code (last) == GIMPLE_SWITCH
1f3fcdc3
JL
10556 || (gimple_code (last) == GIMPLE_COND
10557 && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
10558 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
10559 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
10560 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
10561 || is_gimple_min_invariant (gimple_cond_rhs (last)))))
2090d6a0
JL
10562 {
10563 edge_iterator ei;
2090d6a0
JL
10564
10565 /* We've got a block with multiple predecessors and multiple
1f3fcdc3
JL
10566 successors which also ends in a suitable conditional or
10567 switch statement. For each predecessor, see if we can thread
10568 it to a specific successor. */
2090d6a0
JL
10569 FOR_EACH_EDGE (e, ei, bb->preds)
10570 {
b9e59e4f
JL
10571		  /* Do not thread across edges marked to be ignored, nor across
10572		     abnormal edges in the CFG.  */
10573 if (e->flags & (EDGE_IGNORE | EDGE_COMPLEX))
2090d6a0
JL
10574 continue;
10575
e8ae63bb 10576 thread_across_edge (dummy, e, true, equiv_stack, NULL,
2090d6a0
JL
10577 simplify_stmt_for_jump_threading);
10578 }
10579 }
10580 }
10581
b9e59e4f
JL
10582 /* Clear EDGE_IGNORE. */
10583 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
10584 e->flags &= ~EDGE_IGNORE;
10585
2090d6a0
JL
10586 /* We do not actually update the CFG or SSA graphs at this point as
10587 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
10588 handle ASSERT_EXPRs gracefully. */
10589}
10590
10591/* We identified all the jump threading opportunities earlier, but could
10592 not transform the CFG at that time. This routine transforms the
10593 CFG and arranges for the dominator tree to be rebuilt if necessary.
10594
10595 Note the SSA graph update will occur during the normal TODO
10596 processing by the pass manager. */
10597static void
10598finalize_jump_threads (void)
10599{
b02b9b53 10600 thread_through_all_blocks (false);
f6c72af4 10601 delete equiv_stack;
2090d6a0 10602}
1a557723 10603
973625a0
KV
10604/* Free VRP lattice. */
10605
10606static void
10607vrp_free_lattice ()
10608{
10609 /* Free allocated memory. */
10610 free (vr_value);
10611 free (vr_phi_edge_counts);
10612 bitmap_obstack_release (&vrp_equiv_obstack);
10613 vrp_value_range_pool.release ();
10614
10615 /* So that we can distinguish between VRP data being available
10616 and not available. */
10617 vr_value = NULL;
10618 vr_phi_edge_counts = NULL;
10619}
0bca51f0
DN
10620
10621/* Traverse all the blocks folding conditionals with known ranges. */
10622
10623static void
b0c77505 10624vrp_finalize (bool warn_array_bounds_p)
0bca51f0 10625{
227858d1 10626 size_t i;
d9256277
RG
10627
10628 values_propagated = true;
0bca51f0
DN
10629
10630 if (dump_file)
10631 {
10632 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
10633 dump_all_value_ranges (dump_file);
10634 fprintf (dump_file, "\n");
10635 }
10636
a895a2b8
KV
10637 /* Set value range to non pointer SSA_NAMEs. */
10638 for (i = 0; i < num_vr_values; i++)
0498471b
CL
10639 if (vr_value[i])
10640 {
10641 tree name = ssa_name (i);
a895a2b8 10642
735b8f9f
KV
10643 if (!name
10644 || (vr_value[i]->type == VR_VARYING)
10645 || (vr_value[i]->type == VR_UNDEFINED)
10646 || (TREE_CODE (vr_value[i]->min) != INTEGER_CST)
10647 || (TREE_CODE (vr_value[i]->max) != INTEGER_CST))
10648 continue;
a895a2b8 10649
735b8f9f
KV
10650 if (POINTER_TYPE_P (TREE_TYPE (name))
10651 && ((vr_value[i]->type == VR_RANGE
10652 && range_includes_zero_p (vr_value[i]->min,
10653 vr_value[i]->max) == 0)
10654 || (vr_value[i]->type == VR_ANTI_RANGE
10655 && range_includes_zero_p (vr_value[i]->min,
10656 vr_value[i]->max) == 1)))
10657 set_ptr_nonnull (name);
10658 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
10659 set_range_info (name, vr_value[i]->type, vr_value[i]->min,
10660 vr_value[i]->max);
0498471b 10661 }
a895a2b8 10662
62869a1c 10663 substitute_and_fold (op_with_constant_singleton_value_range, vrp_fold_stmt);
20b8d734
JJ
10664
10665 if (warn_array_bounds && warn_array_bounds_p)
10666 check_all_array_refs ();
10667
10668 /* We must identify jump threading opportunities before we release
10669 the datastructures built by VRP. */
10670 identify_jump_threads ();
973625a0 10671}
20b8d734 10672
973625a0
KV
10673/* evrp_dom_walker visits the basic blocks in dominance order and sets
10674   the Value Ranges (VR) for SSA_NAMEs in the scope.  These VRs are then
10675   used to discover further VRs.  */
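/* For instance, when the walk descends into the block guarded by the
   true edge of  if (x_1 > 0),  a narrowed range for x_1 (e.g. [1, +INF],
   assuming an integer type) is pushed in before_dom_children, used while
   folding the statements dominated by that edge, and restored again in
   after_dom_children.  */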
b16caf72 10676
973625a0
KV
10677class evrp_dom_walker : public dom_walker
10678{
10679public:
10680 evrp_dom_walker ()
10681 : dom_walker (CDI_DOMINATORS), stack (10)
10682 {
973625a0
KV
10683 need_eh_cleanup = BITMAP_ALLOC (NULL);
10684 }
10685 ~evrp_dom_walker ()
10686 {
973625a0
KV
10687 BITMAP_FREE (need_eh_cleanup);
10688 }
10689 virtual edge before_dom_children (basic_block);
10690 virtual void after_dom_children (basic_block);
4280df0a
RB
10691 void push_value_range (tree var, value_range *vr);
10692 value_range *pop_value_range (tree var);
0011af7b 10693 value_range *try_find_new_range (tree op, tree_code code, tree limit);
973625a0
KV
10694
10695 /* Cond_stack holds the old VR. */
4280df0a 10696 auto_vec<std::pair <tree, value_range*> > stack;
973625a0 10697 bitmap need_eh_cleanup;
d9700bdb
RB
10698 auto_vec<gimple *> stmts_to_fixup;
10699 auto_vec<gimple *> stmts_to_remove;
973625a0
KV
10700};
10701
0011af7b 10702/* Find new range for OP such that (OP CODE LIMIT) is true. */
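/* For example (assumed ranges): if OP currently has the range [0, +INF]
   and the tested condition is  OP < 10,  the range extracted from the
   comparison is intersected with the old one, yielding [0, 9]; the
   caller then pushes the old range and installs the new one for the
   dominated region.  */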
5b69c5e5 10703
0011af7b
KV
10704value_range *
10705evrp_dom_walker::try_find_new_range (tree op, tree_code code, tree limit)
5b69c5e5
KV
10706{
10707 value_range vr = VR_INITIALIZER;
10708 value_range *old_vr = get_value_range (op);
10709
10710 /* Discover VR when condition is true. */
10711 extract_range_for_var_from_comparison_expr (op, code, op,
10712 limit, &vr);
10713 if (old_vr->type == VR_RANGE || old_vr->type == VR_ANTI_RANGE)
10714 vrp_intersect_ranges (&vr, old_vr);
10715	  /* If we found any usable VR, return it so the caller can set it on
10716	     the SSA name after pushing the old VR onto the stack.  */
10717 if (vr.type == VR_RANGE || vr.type == VR_ANTI_RANGE)
10718 {
4280df0a
RB
10719 if (old_vr->type == vr.type
10720 && vrp_operand_equal_p (old_vr->min, vr.min)
10721 && vrp_operand_equal_p (old_vr->max, vr.max))
10722 return NULL;
5b69c5e5
KV
10723 value_range *new_vr = vrp_value_range_pool.allocate ();
10724 *new_vr = vr;
0011af7b 10725 return new_vr;
5b69c5e5 10726 }
0011af7b 10727 return NULL;
5b69c5e5
KV
10728}
10729
973625a0
KV
10730/* See if a new scope is entered with a new VR and set that VR for the
10731   ssa_name before visiting the statements in the scope.  */
10732
10733edge
10734evrp_dom_walker::before_dom_children (basic_block bb)
10735{
973625a0 10736 tree op0 = NULL_TREE;
40f683e8
RB
10737 edge_iterator ei;
10738 edge e;
973625a0 10739
4280df0a
RB
10740 if (dump_file && (dump_flags & TDF_DETAILS))
10741 fprintf (dump_file, "Visiting BB%d\n", bb->index);
10742
10743 stack.safe_push (std::make_pair (NULL_TREE, (value_range *)NULL));
40f683e8
RB
10744
10745 edge pred_e = NULL;
10746 FOR_EACH_EDGE (e, ei, bb->preds)
973625a0 10747 {
40f683e8
RB
10748      /* Ignore simple backedges into this block, to allow recording
10749	 conditions in loop headers.  */
10750 if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
10751 continue;
10752 if (! pred_e)
10753 pred_e = e;
10754 else
10755 {
10756 pred_e = NULL;
10757 break;
10758 }
10759 }
10760 if (pred_e)
10761 {
10762 gimple *stmt = last_stmt (pred_e->src);
973625a0
KV
10763 if (stmt
10764 && gimple_code (stmt) == GIMPLE_COND
10765 && (op0 = gimple_cond_lhs (stmt))
10766 && TREE_CODE (op0) == SSA_NAME
65b25c9b
KV
10767 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))
10768 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))))
973625a0 10769 {
4280df0a
RB
10770 if (dump_file && (dump_flags & TDF_DETAILS))
10771 {
10772 fprintf (dump_file, "Visiting controlling predicate ");
10773 print_gimple_stmt (dump_file, stmt, 0, 0);
10774 }
973625a0
KV
10775 /* Entering a new scope. Try to see if we can find a VR
10776 here. */
10777 tree op1 = gimple_cond_rhs (stmt);
10778 tree_code code = gimple_cond_code (stmt);
973625a0
KV
10779
10780 if (TREE_OVERFLOW_P (op1))
10781 op1 = drop_tree_overflow (op1);
10782
10783 /* If condition is false, invert the cond. */
40f683e8 10784 if (pred_e->flags & EDGE_FALSE_VALUE)
973625a0
KV
10785 code = invert_tree_comparison (gimple_cond_code (stmt),
10786 HONOR_NANS (op0));
5b69c5e5 10787 /* Add VR when (OP0 CODE OP1) condition is true. */
0011af7b 10788 value_range *op0_range = try_find_new_range (op0, code, op1);
5b69c5e5
KV
10789
10790 /* Register ranges for y in x < y where
10791 y might have ranges that are useful. */
10792 tree limit;
10793 tree_code new_code;
10794 if (TREE_CODE (op1) == SSA_NAME
10795 && extract_code_and_val_from_cond_with_ops (op1, code,
10796 op0, op1,
10797 false,
10798 &new_code, &limit))
973625a0 10799 {
5b69c5e5 10800 /* Add VR when (OP1 NEW_CODE LIMIT) condition is true. */
0011af7b
KV
10801 value_range *op1_range = try_find_new_range (op1, new_code, limit);
10802 if (op1_range)
10803 push_value_range (op1, op1_range);
973625a0 10804 }
0011af7b
KV
10805
10806 if (op0_range)
10807 push_value_range (op0, op0_range);
973625a0
KV
10808 }
10809 }
10810
10811 /* Visit PHI stmts and discover any new VRs possible. */
b64e8239 10812 bool has_unvisited_preds = false;
973625a0 10813 FOR_EACH_EDGE (e, ei, bb->preds)
b64e8239
RB
10814 if (e->flags & EDGE_EXECUTABLE
10815 && !(e->src->flags & BB_VISITED))
973625a0 10816 {
b64e8239 10817 has_unvisited_preds = true;
973625a0
KV
10818 break;
10819 }
10820
10821 for (gphi_iterator gpi = gsi_start_phis (bb);
10822 !gsi_end_p (gpi); gsi_next (&gpi))
10823 {
10824 gphi *phi = gpi.phi ();
10825 tree lhs = PHI_RESULT (phi);
40f683e8
RB
10826 if (virtual_operand_p (lhs))
10827 continue;
973625a0 10828 value_range vr_result = VR_INITIALIZER;
40f683e8 10829 bool interesting = stmt_interesting_for_vrp (phi);
4280df0a
RB
10830 if (interesting && dump_file && (dump_flags & TDF_DETAILS))
10831 {
10832 fprintf (dump_file, "Visiting PHI node ");
10833 print_gimple_stmt (dump_file, phi, 0, 0);
10834 }
b64e8239 10835 if (!has_unvisited_preds
40f683e8 10836 && interesting)
973625a0
KV
10837 extract_range_from_phi_node (phi, &vr_result);
10838 else
40f683e8
RB
10839 {
10840 set_value_range_to_varying (&vr_result);
10841 /* When we have an unvisited executable predecessor we can't
10842 use PHI arg ranges which may be still UNDEFINED but have
10843 to use VARYING for them. But we can still resort to
10844 SCEV for loop header PHIs. */
10845 struct loop *l;
10846 if (interesting
10847 && (l = loop_containing_stmt (phi))
10848 && l->header == gimple_bb (phi))
10849 adjust_range_with_scev (&vr_result, l, phi, lhs);
10850 }
973625a0 10851 update_value_range (lhs, &vr_result);
d9700bdb
RB
10852
10853 /* Mark PHIs whose lhs we fully propagate for removal. */
10854 tree val = op_with_constant_singleton_value_range (lhs);
10855 if (val && may_propagate_copy (lhs, val))
10856 stmts_to_remove.safe_push (phi);
973625a0
KV
10857 }
10858
b64e8239
RB
10859 edge taken_edge = NULL;
10860
973625a0 10861 /* Visit all other stmts and discover any new VRs possible. */
40f683e8
RB
10862 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
10863 !gsi_end_p (gsi); gsi_next (&gsi))
973625a0
KV
10864 {
10865 gimple *stmt = gsi_stmt (gsi);
973625a0
KV
10866 tree output = NULL_TREE;
10867 gimple *old_stmt = stmt;
10868 bool was_noreturn = (is_gimple_call (stmt)
10869 && gimple_call_noreturn_p (stmt));
10870
4280df0a
RB
10871 if (dump_file && (dump_flags & TDF_DETAILS))
10872 {
10873 fprintf (dump_file, "Visiting stmt ");
10874 print_gimple_stmt (dump_file, stmt, 0, 0);
10875 }
10876
b64e8239
RB
10877 if (gcond *cond = dyn_cast <gcond *> (stmt))
10878 {
10879 vrp_visit_cond_stmt (cond, &taken_edge);
10880 if (taken_edge)
10881 {
10882 if (taken_edge->flags & EDGE_TRUE_VALUE)
10883 gimple_cond_make_true (cond);
10884 else if (taken_edge->flags & EDGE_FALSE_VALUE)
10885 gimple_cond_make_false (cond);
10886 else
10887 gcc_unreachable ();
4280df0a 10888 update_stmt (stmt);
b64e8239
RB
10889 }
10890 }
10891 else if (stmt_interesting_for_vrp (stmt))
973625a0 10892 {
b64e8239 10893 edge taken_edge;
973625a0
KV
10894 value_range vr = VR_INITIALIZER;
10895 extract_range_from_stmt (stmt, &taken_edge, &output, &vr);
10896 if (output
10897 && (vr.type == VR_RANGE || vr.type == VR_ANTI_RANGE))
b64e8239
RB
10898 {
10899 update_value_range (output, &vr);
10900 vr = *get_value_range (output);
973625a0 10901
b64e8239
RB
10902 /* Set the SSA with the value range. */
10903 if (INTEGRAL_TYPE_P (TREE_TYPE (output)))
973625a0 10904 {
b64e8239
RB
10905 if ((vr.type == VR_RANGE
10906 || vr.type == VR_ANTI_RANGE)
10907 && (TREE_CODE (vr.min) == INTEGER_CST)
10908 && (TREE_CODE (vr.max) == INTEGER_CST))
10909 set_range_info (output, vr.type, vr.min, vr.max);
973625a0 10910 }
b64e8239
RB
10911 else if (POINTER_TYPE_P (TREE_TYPE (output))
10912 && ((vr.type == VR_RANGE
10913 && range_includes_zero_p (vr.min,
10914 vr.max) == 0)
10915 || (vr.type == VR_ANTI_RANGE
10916 && range_includes_zero_p (vr.min,
10917 vr.max) == 1)))
10918 set_ptr_nonnull (output);
d9700bdb
RB
10919
10920 /* Mark stmts whose output we fully propagate for removal. */
10921 tree val;
10922 if ((val = op_with_constant_singleton_value_range (output))
10923 && may_propagate_copy (output, val)
10924 && !stmt_could_throw_p (stmt)
10925 && !gimple_has_side_effects (stmt))
10926 {
10927 stmts_to_remove.safe_push (stmt);
10928 continue;
10929 }
973625a0 10930 }
b64e8239
RB
10931 else
10932 set_defs_to_varying (stmt);
10933 }
10934 else
10935 set_defs_to_varying (stmt);
10936
4280df0a
RB
10937 /* See if we can derive a range for any of STMT's operands. */
10938 tree op;
10939 ssa_op_iter i;
10940 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
10941 {
10942 tree value;
10943 enum tree_code comp_code;
10944
10945 /* If OP is used in such a way that we can infer a value
10946 range for it, and we don't find a previous assertion for
10947 it, create a new assertion location node for OP. */
10948 if (infer_value_range (stmt, op, &comp_code, &value))
10949 {
10950 /* If we are able to infer a nonzero value range for OP,
10951 then walk backwards through the use-def chain to see if OP
10952 was set via a typecast.
10953 If so, then we can also infer a nonzero value range
10954 for the operand of the NOP_EXPR. */
10955 if (comp_code == NE_EXPR && integer_zerop (value))
10956 {
10957 tree t = op;
10958 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
10959 while (is_gimple_assign (def_stmt)
10960 && CONVERT_EXPR_CODE_P
10961 (gimple_assign_rhs_code (def_stmt))
10962 && TREE_CODE
10963 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
10964 && POINTER_TYPE_P
10965 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
10966 {
10967 t = gimple_assign_rhs1 (def_stmt);
10968 def_stmt = SSA_NAME_DEF_STMT (t);
10969
10970 /* Add VR when (T COMP_CODE value) condition is
10971 true. */
10972 value_range *op_range
10973 = try_find_new_range (t, comp_code, value);
10974 if (op_range)
10975 push_value_range (t, op_range);
10976 }
10977 }
10978 /* Add VR when (OP COMP_CODE value) condition is true. */
10979 value_range *op_range = try_find_new_range (op,
10980 comp_code, value);
10981 if (op_range)
10982 push_value_range (op, op_range);
10983 }
10984 }
10985
b64e8239
RB
10986 /* Try folding stmts with the VR discovered. */
10987 bool did_replace
10988 = replace_uses_in (stmt, op_with_constant_singleton_value_range);
10989 if (fold_stmt (&gsi, follow_single_use_edges)
10990 || did_replace)
a20d03c8
RB
10991 {
10992 stmt = gsi_stmt (gsi);
10993 update_stmt (stmt);
10994 did_replace = true;
10995 }
b64e8239
RB
10996
10997 if (did_replace)
10998 {
10999 /* If we cleaned up EH information from the statement,
11000 remove EH edges. */
11001 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
11002 bitmap_set_bit (need_eh_cleanup, bb->index);
973625a0 11003
b64e8239
RB
11004 /* If we turned a not noreturn call into a noreturn one
11005 schedule it for fixup. */
11006 if (!was_noreturn
11007 && is_gimple_call (stmt)
11008 && gimple_call_noreturn_p (stmt))
11009 stmts_to_fixup.safe_push (stmt);
11010
11011 if (gimple_assign_single_p (stmt))
973625a0 11012 {
b64e8239
RB
11013 tree rhs = gimple_assign_rhs1 (stmt);
11014 if (TREE_CODE (rhs) == ADDR_EXPR)
11015 recompute_tree_invariant_for_addr_expr (rhs);
973625a0
KV
11016 }
11017 }
973625a0 11018 }
d9700bdb
RB
11019
11020 /* Visit BB successor PHI nodes and replace PHI args. */
11021 FOR_EACH_EDGE (e, ei, bb->succs)
11022 {
11023 for (gphi_iterator gpi = gsi_start_phis (e->dest);
11024 !gsi_end_p (gpi); gsi_next (&gpi))
11025 {
11026 gphi *phi = gpi.phi ();
11027 use_operand_p use_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, e);
11028 tree arg = USE_FROM_PTR (use_p);
11029 if (TREE_CODE (arg) != SSA_NAME
11030 || virtual_operand_p (arg))
11031 continue;
11032 tree val = op_with_constant_singleton_value_range (arg);
11033 if (val && may_propagate_copy (arg, val))
11034 propagate_value (use_p, val);
11035 }
11036 }
11037
973625a0 11038 bb->flags |= BB_VISITED;
b64e8239
RB
11039
11040 return taken_edge;
973625a0
KV
11041}
11042
11043/* Restore/pop VRs valid only for BB when we leave BB. */
11044
11045void
11046evrp_dom_walker::after_dom_children (basic_block bb ATTRIBUTE_UNUSED)
11047{
11048 gcc_checking_assert (!stack.is_empty ());
11049 while (stack.last ().first != NULL_TREE)
11050 pop_value_range (stack.last ().first);
4280df0a 11051 stack.pop ();
973625a0
KV
11052}
11053
11054/* Push the Value Range of VAR to the stack and update it with new VR. */
11055
11056void
4280df0a 11057evrp_dom_walker::push_value_range (tree var, value_range *vr)
973625a0 11058{
4280df0a
RB
11059 if (SSA_NAME_VERSION (var) >= num_vr_values)
11060 return;
11061 if (dump_file && (dump_flags & TDF_DETAILS))
973625a0 11062 {
4280df0a
RB
11063 fprintf (dump_file, "pushing new range for ");
11064 print_generic_expr (dump_file, var, 0);
11065 fprintf (dump_file, ": ");
11066 dump_value_range (dump_file, vr);
11067 fprintf (dump_file, "\n");
973625a0 11068 }
4280df0a
RB
11069 stack.safe_push (std::make_pair (var, get_value_range (var)));
11070 vr_value[SSA_NAME_VERSION (var)] = vr;
973625a0
KV
11071}
11072
11073/* Pop the Value Range from the vrp_stack and update VAR with it. */
11074
11075value_range *
4280df0a 11076evrp_dom_walker::pop_value_range (tree var)
973625a0
KV
11077{
11078 value_range *vr = stack.last ().second;
4280df0a
RB
11079 gcc_checking_assert (var == stack.last ().first);
11080 if (dump_file && (dump_flags & TDF_DETAILS))
973625a0 11081 {
4280df0a
RB
11082 fprintf (dump_file, "popping range for ");
11083 print_generic_expr (dump_file, var, 0);
11084 fprintf (dump_file, ", restoring ");
11085 dump_value_range (dump_file, vr);
11086 fprintf (dump_file, "\n");
973625a0 11087 }
4280df0a 11088 vr_value[SSA_NAME_VERSION (var)] = vr;
973625a0
KV
11089 stack.pop ();
11090 return vr;
11091}
11092
11093
11094/* Main entry point for the early vrp pass which is a simplified non-iterative
11095 version of vrp where basic blocks are visited in dominance order. Value
11096 ranges discovered in early vrp will also be used by ipa-vrp. */
11097
11098static unsigned int
11099execute_early_vrp ()
11100{
11101 edge e;
11102 edge_iterator ei;
11103 basic_block bb;
11104
11105 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
11106 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
11107 scev_initialize ();
11108 calculate_dominance_info (CDI_DOMINATORS);
11109 FOR_EACH_BB_FN (bb, cfun)
11110 {
11111 bb->flags &= ~BB_VISITED;
11112 FOR_EACH_EDGE (e, ei, bb->preds)
11113 e->flags |= EDGE_EXECUTABLE;
11114 }
11115 vrp_initialize_lattice ();
11116
11117 /* Walk stmts in dominance order and propagate VRP. */
11118 evrp_dom_walker walker;
11119 walker.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
11120
d9700bdb
RB
11121 if (dump_file)
11122 {
11123 fprintf (dump_file, "\nValue ranges after Early VRP:\n\n");
11124 dump_all_value_ranges (dump_file);
11125 fprintf (dump_file, "\n");
11126 }
11127
11128 /* Remove stmts in reverse order to make debug stmt creation possible. */
11129 while (! walker.stmts_to_remove.is_empty ())
11130 {
11131 gimple *stmt = walker.stmts_to_remove.pop ();
11132 if (dump_file && dump_flags & TDF_DETAILS)
11133 {
11134 fprintf (dump_file, "Removing dead stmt ");
11135 print_gimple_stmt (dump_file, stmt, 0, 0);
11136 fprintf (dump_file, "\n");
11137 }
11138 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
11139 if (gimple_code (stmt) == GIMPLE_PHI)
11140 remove_phi_node (&gsi, true);
11141 else
11142 {
11143 unlink_stmt_vdef (stmt);
11144 gsi_remove (&gsi, true);
11145 release_defs (stmt);
11146 }
11147 }
11148
973625a0
KV
11149 if (!bitmap_empty_p (walker.need_eh_cleanup))
11150 gimple_purge_all_dead_eh_edges (walker.need_eh_cleanup);
11151
11152 /* Fixup stmts that became noreturn calls. This may require splitting
11153 blocks and thus isn't possible during the dominator walk. Do this
11154   in reverse order so we don't inadvertently remove a stmt we want to
11155   fix up by visiting a dominating now-noreturn call first.  */
11156 while (!walker.stmts_to_fixup.is_empty ())
11157 {
11158 gimple *stmt = walker.stmts_to_fixup.pop ();
11159 fixup_noreturn_call (stmt);
11160 }
11161
973625a0
KV
11162 vrp_free_lattice ();
11163 scev_finalize ();
11164 loop_optimizer_finalize ();
973625a0 11165 return 0;
0bca51f0
DN
11166}
11167
11168
11169/* Main entry point to VRP (Value Range Propagation). This pass is
11170 loosely based on J. R. C. Patterson, ``Accurate Static Branch
11171 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
11172 Programming Language Design and Implementation, pp. 67-78, 1995.
11173 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
11174
11175 This is essentially an SSA-CCP pass modified to deal with ranges
11176 instead of constants.
11177
227858d1
DN
11178 While propagating ranges, we may find that two or more SSA name
11179 have equivalent, though distinct ranges. For instance,
11180
11181 1 x_9 = p_3->a;
11182 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
11183 3 if (p_4 == q_2)
11184 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
11185 5 endif
11186 6 if (q_2)
b8698a0f 11187
227858d1
DN
11188 In the code above, pointer p_5 has range [q_2, q_2], but from the
11189 code we can also determine that p_5 cannot be NULL and, if q_2 had
11190 a non-varying range, p_5's range should also be compatible with it.
11191
8ab5f5c9 11192 These equivalences are created by two expressions: ASSERT_EXPR and
227858d1
DN
11193 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
11194 result of another assertion, then we can use the fact that p_5 and
11195 p_4 are equivalent when evaluating p_5's range.
11196
8ab5f5c9 11197 Together with value ranges, we also propagate these equivalences
227858d1
DN
11198 between names so that we can take advantage of information from
11199 multiple ranges when doing final replacement. Note that this
11200 equivalency relation is transitive but not symmetric.
b8698a0f 11201
227858d1
DN
11202 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
11203 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
11204 in contexts where that assertion does not hold (e.g., in line 6).
11205
0bca51f0
DN
11206 TODO, the main difference between this pass and Patterson's is that
11207 we do not propagate edge probabilities. We only compute whether
11208 edges can be taken or not. That is, instead of having a spectrum
11209 of jump probabilities between 0 and 1, we only deal with 0, 1 and
11210 DON'T KNOW. In the future, it may be worthwhile to propagate
11211 probabilities to aid branch prediction. */
11212
c2924966 11213static unsigned int
b0c77505 11214execute_vrp (bool warn_array_bounds_p)
0bca51f0 11215{
b7814a18
RG
11216 int i;
11217 edge e;
11218 switch_update *su;
11219
b02b9b53 11220 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
d51157de
ZD
11221 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
11222 scev_initialize ();
b02b9b53 11223
c25a0c60
RB
11224 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
11225 Inserting assertions may split edges which will invalidate
11226 EDGE_DFS_BACK. */
09302442
JJ
11227 insert_range_assertions ();
11228
9771b263
DN
11229 to_remove_edges.create (10);
11230 to_update_switch_stmts.create (5);
448ee662 11231 threadedge_initialize_values ();
b7814a18 11232
c25a0c60
RB
11233 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
11234 mark_dfs_back_edges ();
11235
973625a0 11236 vrp_initialize_lattice ();
227858d1
DN
11237 vrp_initialize ();
11238 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
b0c77505 11239 vrp_finalize (warn_array_bounds_p);
973625a0 11240 vrp_free_lattice ();
0bca51f0 11241
61183076 11242 free_numbers_of_iterations_estimates (cfun);
09302442 11243
2090d6a0
JL
11244 /* ASSERT_EXPRs must be removed before finalizing jump threads
11245 as finalizing jump threads calls the CFG cleanup code which
11246 does not properly handle ASSERT_EXPRs. */
0bca51f0 11247 remove_range_assertions ();
59c02d8a
JL
11248
11249 /* If we exposed any new variables, go ahead and put them into
11250 SSA form now, before we handle jump threading. This simplifies
11251 interactions between rewriting of _DECL nodes into SSA form
11252 and rewriting SSA_NAME nodes into SSA form after block
11253 duplication and CFG manipulation. */
11254 update_ssa (TODO_update_ssa);
11255
2090d6a0 11256 finalize_jump_threads ();
0a4bf1d3
RG
11257
11258 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
11259 CFG in a broken state and requires a cfg_cleanup run. */
9771b263 11260 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
0a4bf1d3
RG
11261 remove_edge (e);
11262 /* Update SWITCH_EXPR case label vector. */
9771b263 11263 FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
726a989a
RB
11264 {
11265 size_t j;
11266 size_t n = TREE_VEC_LENGTH (su->vec);
256f88c6 11267 tree label;
726a989a
RB
11268 gimple_switch_set_num_labels (su->stmt, n);
11269 for (j = 0; j < n; j++)
11270 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
256f88c6
RG
11271 /* As we may have replaced the default label with a regular one
11272 make sure to make it a real default label again. This ensures
11273 optimal expansion. */
fd8d363e 11274 label = gimple_switch_label (su->stmt, 0);
256f88c6
RG
11275 CASE_LOW (label) = NULL_TREE;
11276 CASE_HIGH (label) = NULL_TREE;
726a989a 11277 }
0a4bf1d3 11278
9771b263 11279 if (to_remove_edges.length () > 0)
051b9446
RB
11280 {
11281 free_dominance_info (CDI_DOMINATORS);
726338f4 11282 loops_state_set (LOOPS_NEED_FIXUP);
051b9446 11283 }
0a4bf1d3 11284
9771b263
DN
11285 to_remove_edges.release ();
11286 to_update_switch_stmts.release ();
448ee662 11287 threadedge_finalize_values ();
0a4bf1d3 11288
d51157de
ZD
11289 scev_finalize ();
11290 loop_optimizer_finalize ();
c2924966 11291 return 0;
0bca51f0
DN
11292}
11293
27a4cd48
DM
11294namespace {
11295
11296const pass_data pass_data_vrp =
11297{
11298 GIMPLE_PASS, /* type */
11299 "vrp", /* name */
11300 OPTGROUP_NONE, /* optinfo_flags */
27a4cd48
DM
11301 TV_TREE_VRP, /* tv_id */
11302 PROP_ssa, /* properties_required */
11303 0, /* properties_provided */
11304 0, /* properties_destroyed */
11305 0, /* todo_flags_start */
3bea341f 11306 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
0bca51f0 11307};
27a4cd48
DM
11308
11309class pass_vrp : public gimple_opt_pass
11310{
11311public:
c3284718 11312 pass_vrp (gcc::context *ctxt)
b0c77505 11313 : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
27a4cd48
DM
11314 {}
11315
11316 /* opt_pass methods: */
65d3284b 11317 opt_pass * clone () { return new pass_vrp (m_ctxt); }
b0c77505
TV
11318 void set_pass_param (unsigned int n, bool param)
11319 {
11320 gcc_assert (n == 0);
11321 warn_array_bounds_p = param;
11322 }
1a3d085c 11323 virtual bool gate (function *) { return flag_tree_vrp != 0; }
b0c77505
TV
11324 virtual unsigned int execute (function *)
11325 { return execute_vrp (warn_array_bounds_p); }
27a4cd48 11326
b0c77505
TV
11327 private:
11328 bool warn_array_bounds_p;
27a4cd48
DM
11329}; // class pass_vrp
11330
11331} // anon namespace
11332
11333gimple_opt_pass *
11334make_pass_vrp (gcc::context *ctxt)
11335{
11336 return new pass_vrp (ctxt);
11337}
973625a0
KV
11338
11339namespace {
11340
11341const pass_data pass_data_early_vrp =
11342{
11343 GIMPLE_PASS, /* type */
11344 "evrp", /* name */
11345 OPTGROUP_NONE, /* optinfo_flags */
11346 TV_TREE_EARLY_VRP, /* tv_id */
11347 PROP_ssa, /* properties_required */
11348 0, /* properties_provided */
11349 0, /* properties_destroyed */
11350 0, /* todo_flags_start */
11351 ( TODO_cleanup_cfg | TODO_update_ssa | TODO_verify_all ),
11352};
11353
11354class pass_early_vrp : public gimple_opt_pass
11355{
11356public:
11357 pass_early_vrp (gcc::context *ctxt)
11358 : gimple_opt_pass (pass_data_early_vrp, ctxt)
11359 {}
11360
11361 /* opt_pass methods: */
11362 opt_pass * clone () { return new pass_early_vrp (m_ctxt); }
11363 virtual bool gate (function *)
11364 {
11365 return flag_tree_vrp != 0;
11366 }
11367 virtual unsigned int execute (function *)
11368 { return execute_early_vrp (); }
11369
11370}; // class pass_vrp
11371} // anon namespace
11372
11373gimple_opt_pass *
11374make_pass_early_vrp (gcc::context *ctxt)
11375{
11376 return new pass_early_vrp (ctxt);
11377}
11378