]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/tree-vrp.c
Merge in trunk.
[thirdparty/gcc.git] / gcc / tree-vrp.c
1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2013 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "ggc.h"
26 #include "flags.h"
27 #include "tree.h"
28 #include "basic-block.h"
29 #include "tree-ssa.h"
30 #include "tree-pass.h"
31 #include "tree-dump.h"
32 #include "gimple-pretty-print.h"
33 #include "diagnostic-core.h"
34 #include "intl.h"
35 #include "cfgloop.h"
36 #include "tree-scalar-evolution.h"
37 #include "tree-ssa-propagate.h"
38 #include "tree-chrec.h"
39 #include "gimple-fold.h"
40 #include "expr.h"
41 #include "optabs.h"
42
43
44 /* Type of value ranges. See value_range_d for a description of these
45 types. */
46 enum value_range_type { VR_UNDEFINED, VR_RANGE, VR_ANTI_RANGE, VR_VARYING };
47
48 /* Range of values that can be associated with an SSA_NAME after VRP
49 has executed. */
50 struct value_range_d
51 {
52 /* Lattice value represented by this range. */
53 enum value_range_type type;
54
55 /* Minimum and maximum values represented by this range. These
56 values should be interpreted as follows:
57
58 - If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
59 be NULL.
60
61 - If TYPE == VR_RANGE then MIN holds the minimum value and
62 MAX holds the maximum value of the range [MIN, MAX].
63
64 - If TYPE == ANTI_RANGE the variable is known to NOT
65 take any values in the range [MIN, MAX]. */
66 tree min;
67 tree max;
68
69 /* Set of SSA names whose value ranges are equivalent to this one.
70 This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE. */
71 bitmap equiv;
72 };
73
74 typedef struct value_range_d value_range_t;
75
76 #define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }
77
78 /* Set of SSA names found live during the RPO traversal of the function
79 for still active basic-blocks. */
80 static sbitmap *live;
81
82 /* Return true if the SSA name NAME is live on the edge E. */
83
84 static bool
85 live_on_edge (edge e, tree name)
86 {
87 return (live[e->dest->index]
88 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
89 }
90
91 /* Local functions. */
92 static int compare_values (tree val1, tree val2);
93 static int compare_values_warnv (tree val1, tree val2, bool *);
94 static void vrp_meet (value_range_t *, value_range_t *);
95 static void vrp_intersect_ranges (value_range_t *, value_range_t *);
96 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
97 tree, tree, bool, bool *,
98 bool *);
99
100 /* Location information for ASSERT_EXPRs. Each instance of this
101 structure describes an ASSERT_EXPR for an SSA name. Since a single
102 SSA name may have more than one assertion associated with it, these
103 locations are kept in a linked list attached to the corresponding
104 SSA name. */
105 struct assert_locus_d
106 {
107 /* Basic block where the assertion would be inserted. */
108 basic_block bb;
109
110 /* Some assertions need to be inserted on an edge (e.g., assertions
111 generated by COND_EXPRs). In those cases, BB will be NULL. */
112 edge e;
113
114 /* Pointer to the statement that generated this assertion. */
115 gimple_stmt_iterator si;
116
117 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
118 enum tree_code comp_code;
119
120 /* Value being compared against. */
121 tree val;
122
123 /* Expression to compare. */
124 tree expr;
125
126 /* Next node in the linked list. */
127 struct assert_locus_d *next;
128 };
129
130 typedef struct assert_locus_d *assert_locus_t;
131
132 /* If bit I is present, it means that SSA name N_i has a list of
133 assertions that should be inserted in the IL. */
134 static bitmap need_assert_for;
135
136 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I]
137 holds a list of ASSERT_LOCUS_T nodes that describe where
138 ASSERT_EXPRs for SSA name N_I should be inserted. */
139 static assert_locus_t *asserts_for;
140
141 /* Value range array. After propagation, VR_VALUE[I] holds the range
142 of values that SSA name N_I may take. */
143 static unsigned num_vr_values;
144 static value_range_t **vr_value;
145 static bool values_propagated;
146
147 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
148 number of executable edges we saw the last time we visited the
149 node. */
150 static int *vr_phi_edge_counts;
151
152 typedef struct {
153 gimple stmt;
154 tree vec;
155 } switch_update;
156
157 static vec<edge> to_remove_edges;
158 static vec<switch_update> to_update_switch_stmts;
159
160
161 /* Return the maximum value for TYPE. */
162
163 static inline tree
164 vrp_val_max (const_tree type)
165 {
166 if (!INTEGRAL_TYPE_P (type))
167 return NULL_TREE;
168
169 return TYPE_MAX_VALUE (type);
170 }
171
172 /* Return the minimum value for TYPE. */
173
174 static inline tree
175 vrp_val_min (const_tree type)
176 {
177 if (!INTEGRAL_TYPE_P (type))
178 return NULL_TREE;
179
180 return TYPE_MIN_VALUE (type);
181 }
182
183 /* Return whether VAL is equal to the maximum value of its type. This
184 will be true for a positive overflow infinity. We can't do a
185 simple equality comparison with TYPE_MAX_VALUE because C typedefs
186 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
187 to the integer constant with the same value in the type. */
188
189 static inline bool
190 vrp_val_is_max (const_tree val)
191 {
192 tree type_max = vrp_val_max (TREE_TYPE (val));
193 return (val == type_max
194 || (type_max != NULL_TREE
195 && operand_equal_p (val, type_max, 0)));
196 }
197
198 /* Return whether VAL is equal to the minimum value of its type. This
199 will be true for a negative overflow infinity. */
200
201 static inline bool
202 vrp_val_is_min (const_tree val)
203 {
204 tree type_min = vrp_val_min (TREE_TYPE (val));
205 return (val == type_min
206 || (type_min != NULL_TREE
207 && operand_equal_p (val, type_min, 0)));
208 }
209
210
211 /* Return whether TYPE should use an overflow infinity distinct from
212 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
213 represent a signed overflow during VRP computations. An infinity
214 is distinct from a half-range, which will go from some number to
215 TYPE_{MIN,MAX}_VALUE. */
216
217 static inline bool
218 needs_overflow_infinity (const_tree type)
219 {
220 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
221 }
222
/* Return whether TYPE can support our overflow infinity
   representation: we use the TREE_OVERFLOW flag, which only exists
   for constants.  If TYPE doesn't support this, we don't optimize
   cases which would require signed overflow--we drop them to
   VARYING.  */

static inline bool
supports_overflow_infinity (const_tree type)
{
  tree min = vrp_val_min (type), max = vrp_val_max (type);
#ifdef ENABLE_CHECKING
  /* Callers must only ask this question for types that could
     overflow in the first place.  */
  gcc_assert (needs_overflow_infinity (type));
#endif
  /* Both extreme values must exist and be constants so that
     TREE_OVERFLOW can be set on copies of them.  */
  return (min != NULL_TREE
	  && CONSTANT_CLASS_P (min)
	  && max != NULL_TREE
	  && CONSTANT_CLASS_P (max));
}
241
242 /* VAL is the maximum or minimum value of a type. Return a
243 corresponding overflow infinity. */
244
245 static inline tree
246 make_overflow_infinity (tree val)
247 {
248 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
249 val = copy_node (val);
250 TREE_OVERFLOW (val) = 1;
251 return val;
252 }
253
254 /* Return a negative overflow infinity for TYPE. */
255
256 static inline tree
257 negative_overflow_infinity (tree type)
258 {
259 gcc_checking_assert (supports_overflow_infinity (type));
260 return make_overflow_infinity (vrp_val_min (type));
261 }
262
263 /* Return a positive overflow infinity for TYPE. */
264
265 static inline tree
266 positive_overflow_infinity (tree type)
267 {
268 gcc_checking_assert (supports_overflow_infinity (type));
269 return make_overflow_infinity (vrp_val_max (type));
270 }
271
272 /* Return whether VAL is a negative overflow infinity. */
273
274 static inline bool
275 is_negative_overflow_infinity (const_tree val)
276 {
277 return (needs_overflow_infinity (TREE_TYPE (val))
278 && CONSTANT_CLASS_P (val)
279 && TREE_OVERFLOW (val)
280 && vrp_val_is_min (val));
281 }
282
283 /* Return whether VAL is a positive overflow infinity. */
284
285 static inline bool
286 is_positive_overflow_infinity (const_tree val)
287 {
288 return (needs_overflow_infinity (TREE_TYPE (val))
289 && CONSTANT_CLASS_P (val)
290 && TREE_OVERFLOW (val)
291 && vrp_val_is_max (val));
292 }
293
294 /* Return whether VAL is a positive or negative overflow infinity. */
295
296 static inline bool
297 is_overflow_infinity (const_tree val)
298 {
299 return (needs_overflow_infinity (TREE_TYPE (val))
300 && CONSTANT_CLASS_P (val)
301 && TREE_OVERFLOW (val)
302 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
303 }
304
305 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
306
307 static inline bool
308 stmt_overflow_infinity (gimple stmt)
309 {
310 if (is_gimple_assign (stmt)
311 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
312 GIMPLE_SINGLE_RHS)
313 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
314 return false;
315 }
316
317 /* If VAL is now an overflow infinity, return VAL. Otherwise, return
318 the same value with TREE_OVERFLOW clear. This can be used to avoid
319 confusing a regular value with an overflow value. */
320
321 static inline tree
322 avoid_overflow_infinity (tree val)
323 {
324 if (!is_overflow_infinity (val))
325 return val;
326
327 if (vrp_val_is_max (val))
328 return vrp_val_max (TREE_TYPE (val));
329 else
330 {
331 gcc_checking_assert (vrp_val_is_min (val));
332 return vrp_val_min (TREE_TYPE (val));
333 }
334 }
335
336
/* Return true if ARG is marked with the nonnull attribute in the
   current function signature.  ARG must be a PARM_DECL of pointer
   type.  */

static bool
nonnull_arg_p (const_tree arg)
{
  tree t, attrs, fntype;
  unsigned HOST_WIDE_INT arg_num;

  gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));

  /* The static chain decl is always non null.  */
  if (arg == cfun->static_chain_decl)
    return true;

  /* Walk every "nonnull" attribute instance on the function type;
     each instance may carry its own argument-position list.  Note the
     loop variable ATTRS is advanced by lookup_attribute inside the
     body, so each iteration resumes after the previous match.  */
  fntype = TREE_TYPE (current_function_decl);
  for (attrs = TYPE_ATTRIBUTES (fntype); attrs; attrs = TREE_CHAIN (attrs))
    {
      attrs = lookup_attribute ("nonnull", attrs);

      /* If "nonnull" wasn't specified, we know nothing about the argument.  */
      if (attrs == NULL_TREE)
	return false;

      /* If "nonnull" applies to all the arguments, then ARG is non-null.  */
      if (TREE_VALUE (attrs) == NULL_TREE)
	return true;

      /* Get the position number for ARG in the function signature
	 (attribute argument indices are 1-based).  */
      for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
	   t;
	   t = DECL_CHAIN (t), arg_num++)
	{
	  if (t == arg)
	    break;
	}

      gcc_assert (t == arg);

      /* Now see if ARG_NUM is mentioned in the nonnull list.  */
      for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
	{
	  if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
	    return true;
	}
    }

  return false;
}
386
387
388 /* Set value range VR to VR_UNDEFINED. */
389
390 static inline void
391 set_value_range_to_undefined (value_range_t *vr)
392 {
393 vr->type = VR_UNDEFINED;
394 vr->min = vr->max = NULL_TREE;
395 if (vr->equiv)
396 bitmap_clear (vr->equiv);
397 }
398
399
400 /* Set value range VR to VR_VARYING. */
401
402 static inline void
403 set_value_range_to_varying (value_range_t *vr)
404 {
405 vr->type = VR_VARYING;
406 vr->min = vr->max = NULL_TREE;
407 if (vr->equiv)
408 bitmap_clear (vr->equiv);
409 }
410
411
/* Set value range VR to {T, MIN, MAX, EQUIV}.  EQUIV may be NULL or
   may alias VR->equiv; its contents are deep-copied, not shared.  */

static void
set_value_range (value_range_t *vr, enum value_range_type t, tree min,
		 tree max, bitmap equiv)
{
#if defined ENABLE_CHECKING
  /* Check the validity of the range.  */
  if (t == VR_RANGE || t == VR_ANTI_RANGE)
    {
      int cmp;

      gcc_assert (min && max);

      /* An anti-range over the whole integral type would be empty.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
	gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));

      /* MIN must not compare greater than MAX (-2 means incomparable,
	 which is allowed for symbolic bounds).  */
      cmp = compare_values (min, max);
      gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);

      /* At most one bound may be an overflow infinity.  */
      if (needs_overflow_infinity (TREE_TYPE (min)))
	gcc_assert (!is_overflow_infinity (min)
		    || !is_overflow_infinity (max));
    }

  if (t == VR_UNDEFINED || t == VR_VARYING)
    gcc_assert (min == NULL_TREE && max == NULL_TREE);

  if (t == VR_UNDEFINED || t == VR_VARYING)
    gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
#endif

  vr->type = t;
  vr->min = min;
  vr->max = max;

  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.  */
  if (vr->equiv == NULL
      && equiv != NULL)
    vr->equiv = BITMAP_ALLOC (NULL);

  /* If EQUIV aliases VR->equiv the set is already correct; otherwise
     copy or clear.  */
  if (equiv != vr->equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
	bitmap_copy (vr->equiv, equiv);
      else
	bitmap_clear (vr->equiv);
    }
}
462
463
/* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
   This means adjusting T, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */

static void
set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
				  tree min, tree max, bitmap equiv)
{
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (t == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  else if (t == VR_VARYING)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      set_value_range (vr, t, min, max, equiv);
      return;
    }

  /* Wrong order for min and max, to swap them and the VR type we need
     to adjust them.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one, tmp;

      /* For one bit precision if max < min, then the swapped
	 range covers all values, so for VR_RANGE it is varying and
	 for VR_ANTI_RANGE empty range, so drop to varying as well.  */
      if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* Flip [MIN, MAX] into ~[MAX+1, MIN-1] (or vice versa).  */
      one = build_int_cst (TREE_TYPE (min), 1);
      tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
	 that again.  But this represents an empty value range, so drop
	 to varying in this case.  */
      if (tree_int_cst_lt (max, min))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (t == VR_ANTI_RANGE)
    {
      bool is_min = vrp_val_is_min (min);
      bool is_max = vrp_val_is_max (max);

      if (is_min && is_max)
	{
	  /* We cannot deal with empty ranges, drop to varying.
	     ??? This could be VR_UNDEFINED instead.  */
	  set_value_range_to_varying (vr);
	  return;
	}
      else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
	       && (is_min || is_max))
	{
	  /* Non-empty boolean ranges can always be represented
	     as a singleton range.  */
	  if (is_min)
	    min = max = vrp_val_max (TREE_TYPE (min));
	  else
	    min = max = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
	}
      else if (is_min
	       /* As a special exception preserve non-null ranges.  */
	       && !(TYPE_UNSIGNED (TREE_TYPE (min))
		    && integer_zerop (max)))
	{
	  /* ~[TYPE_MIN, MAX] becomes [MAX+1, TYPE_MAX].  */
	  tree one = build_int_cst (TREE_TYPE (max), 1);
	  min = int_const_binop (PLUS_EXPR, max, one);
	  max = vrp_val_max (TREE_TYPE (max));
	  t = VR_RANGE;
	}
      else if (is_max)
	{
	  /* ~[MIN, TYPE_MAX] becomes [TYPE_MIN, MIN-1].  */
	  tree one = build_int_cst (TREE_TYPE (min), 1);
	  max = int_const_binop (MINUS_EXPR, min, one);
	  min = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
	}
    }

  /* Drop [-INF(OVF), +INF(OVF)] to varying.  */
  if (needs_overflow_infinity (TREE_TYPE (min))
      && is_overflow_infinity (min)
      && is_overflow_infinity (max))
    {
      set_value_range_to_varying (vr);
      return;
    }

  set_value_range (vr, t, min, max, equiv);
}
583
/* Copy value range FROM into value range TO.  The equivalence bitmap
   is deep-copied by set_value_range, not shared.  */

static inline void
copy_value_range (value_range_t *to, value_range_t *from)
{
  set_value_range (to, from->type, from->min, from->max, from->equiv);
}
591
/* Set value range VR to the single value VAL (i.e. [VAL, VAL]).  This
   function is only called with values we get from statements, and
   exists to clear the TREE_OVERFLOW flag so that we don't think we
   have an overflow infinity when we shouldn't.  */

static inline void
set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
{
  gcc_assert (is_gimple_min_invariant (val));
  val = avoid_overflow_infinity (val);
  set_value_range (vr, VR_RANGE, val, val, equiv);
}
604
/* Set value range VR to a non-negative range of type TYPE.
   OVERFLOW_INFINITY indicates whether to use an overflow infinity
   rather than TYPE_MAX_VALUE; this should be true if we determine
   that the range is nonnegative based on the assumption that signed
   overflow does not occur.  */

static inline void
set_value_range_to_nonnegative (value_range_t *vr, tree type,
				bool overflow_infinity)
{
  tree zero;

  /* If we cannot represent the requested overflow infinity, give up
     and drop to VARYING rather than claim a wrong upper bound.  */
  if (overflow_infinity && !supports_overflow_infinity (type))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* The resulting range is [0, +INF] or [0, +INF(OVF)].  */
  zero = build_int_cst (type, 0);
  set_value_range (vr, VR_RANGE, zero,
		   (overflow_infinity
		    ? positive_overflow_infinity (type)
		    : TYPE_MAX_VALUE (type)),
		   vr->equiv);
}
630
631 /* Set value range VR to a non-NULL range of type TYPE. */
632
633 static inline void
634 set_value_range_to_nonnull (value_range_t *vr, tree type)
635 {
636 tree zero = build_int_cst (type, 0);
637 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
638 }
639
640
641 /* Set value range VR to a NULL range of type TYPE. */
642
643 static inline void
644 set_value_range_to_null (value_range_t *vr, tree type)
645 {
646 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
647 }
648
649
650 /* Set value range VR to a range of a truthvalue of type TYPE. */
651
652 static inline void
653 set_value_range_to_truthvalue (value_range_t *vr, tree type)
654 {
655 if (TYPE_PRECISION (type) == 1)
656 set_value_range_to_varying (vr);
657 else
658 set_value_range (vr, VR_RANGE,
659 build_int_cst (type, 0), build_int_cst (type, 1),
660 vr->equiv);
661 }
662
663
664 /* If abs (min) < abs (max), set VR to [-max, max], if
665 abs (min) >= abs (max), set VR to [-min, min]. */
666
667 static void
668 abs_extent_range (value_range_t *vr, tree min, tree max)
669 {
670 int cmp;
671
672 gcc_assert (TREE_CODE (min) == INTEGER_CST);
673 gcc_assert (TREE_CODE (max) == INTEGER_CST);
674 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
675 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
676 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
677 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
678 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
679 {
680 set_value_range_to_varying (vr);
681 return;
682 }
683 cmp = compare_values (min, max);
684 if (cmp == -1)
685 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
686 else if (cmp == 0 || cmp == 1)
687 {
688 max = min;
689 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
690 }
691 else
692 {
693 set_value_range_to_varying (vr);
694 return;
695 }
696 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
697 }
698
699
700 /* Return value range information for VAR.
701
702 If we have no values ranges recorded (ie, VRP is not running), then
703 return NULL. Otherwise create an empty range if none existed for VAR. */
704
705 static value_range_t *
706 get_value_range (const_tree var)
707 {
708 static const struct value_range_d vr_const_varying
709 = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
710 value_range_t *vr;
711 tree sym;
712 unsigned ver = SSA_NAME_VERSION (var);
713
714 /* If we have no recorded ranges, then return NULL. */
715 if (! vr_value)
716 return NULL;
717
718 /* If we query the range for a new SSA name return an unmodifiable VARYING.
719 We should get here at most from the substitute-and-fold stage which
720 will never try to change values. */
721 if (ver >= num_vr_values)
722 return CONST_CAST (value_range_t *, &vr_const_varying);
723
724 vr = vr_value[ver];
725 if (vr)
726 return vr;
727
728 /* After propagation finished do not allocate new value-ranges. */
729 if (values_propagated)
730 return CONST_CAST (value_range_t *, &vr_const_varying);
731
732 /* Create a default value range. */
733 vr_value[ver] = vr = XCNEW (value_range_t);
734
735 /* Defer allocating the equivalence set. */
736 vr->equiv = NULL;
737
738 /* If VAR is a default definition of a parameter, the variable can
739 take any value in VAR's type. */
740 if (SSA_NAME_IS_DEFAULT_DEF (var))
741 {
742 sym = SSA_NAME_VAR (var);
743 if (TREE_CODE (sym) == PARM_DECL)
744 {
745 /* Try to use the "nonnull" attribute to create ~[0, 0]
746 anti-ranges for pointers. Note that this is only valid with
747 default definitions of PARM_DECLs. */
748 if (POINTER_TYPE_P (TREE_TYPE (sym))
749 && nonnull_arg_p (sym))
750 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
751 else
752 set_value_range_to_varying (vr);
753 }
754 else if (TREE_CODE (sym) == RESULT_DECL
755 && DECL_BY_REFERENCE (sym))
756 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
757 }
758
759 return vr;
760 }
761
762 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
763
764 static inline bool
765 vrp_operand_equal_p (const_tree val1, const_tree val2)
766 {
767 if (val1 == val2)
768 return true;
769 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
770 return false;
771 if (is_overflow_infinity (val1))
772 return is_overflow_infinity (val2);
773 return true;
774 }
775
776 /* Return true, if the bitmaps B1 and B2 are equal. */
777
778 static inline bool
779 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
780 {
781 return (b1 == b2
782 || ((!b1 || bitmap_empty_p (b1))
783 && (!b2 || bitmap_empty_p (b2)))
784 || (b1 && b2
785 && bitmap_equal_p (b1, b2)));
786 }
787
/* Update the value range and equivalence set for variable VAR to
   NEW_VR.  Return true if NEW_VR is different from VAR's previous
   value.

   NOTE: This function assumes that NEW_VR is a temporary value range
   object created for the sole purpose of updating VAR's range.  The
   storage used by the equivalence set from NEW_VR will be freed by
   this function.  Do not call update_value_range when NEW_VR
   is the range object associated with another SSA name.  */

static inline bool
update_value_range (const_tree var, value_range_t *new_vr)
{
  value_range_t *old_vr;
  bool is_new;

  /* Update the value range, if necessary.  */
  old_vr = get_value_range (var);
  is_new = old_vr->type != new_vr->type
	   || !vrp_operand_equal_p (old_vr->min, new_vr->min)
	   || !vrp_operand_equal_p (old_vr->max, new_vr->max)
	   || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);

  if (is_new)
    {
      /* Do not allow transitions up the lattice.  The following
	 is slightly more awkward than just new_vr->type < old_vr->type
	 because VR_RANGE and VR_ANTI_RANGE need to be considered
	 the same.  We may not have is_new when transitioning to
	 UNDEFINED or from VARYING.  */
      if (new_vr->type == VR_UNDEFINED
	  || old_vr->type == VR_VARYING)
	set_value_range_to_varying (old_vr);
      else
	set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
			 new_vr->equiv);
    }

  /* NEW_VR is a throwaway; release its equivalence storage here.  */
  BITMAP_FREE (new_vr->equiv);

  return is_new;
}
830
831
832 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
833 point where equivalence processing can be turned on/off. */
834
835 static void
836 add_equivalence (bitmap *equiv, const_tree var)
837 {
838 unsigned ver = SSA_NAME_VERSION (var);
839 value_range_t *vr = vr_value[ver];
840
841 if (*equiv == NULL)
842 *equiv = BITMAP_ALLOC (NULL);
843 bitmap_set_bit (*equiv, ver);
844 if (vr && vr->equiv)
845 bitmap_ior_into (*equiv, vr->equiv);
846 }
847
848
849 /* Return true if VR is ~[0, 0]. */
850
851 static inline bool
852 range_is_nonnull (value_range_t *vr)
853 {
854 return vr->type == VR_ANTI_RANGE
855 && integer_zerop (vr->min)
856 && integer_zerop (vr->max);
857 }
858
859
860 /* Return true if VR is [0, 0]. */
861
862 static inline bool
863 range_is_null (value_range_t *vr)
864 {
865 return vr->type == VR_RANGE
866 && integer_zerop (vr->min)
867 && integer_zerop (vr->max);
868 }
869
870 /* Return true if max and min of VR are INTEGER_CST. It's not necessary
871 a singleton. */
872
873 static inline bool
874 range_int_cst_p (value_range_t *vr)
875 {
876 return (vr->type == VR_RANGE
877 && TREE_CODE (vr->max) == INTEGER_CST
878 && TREE_CODE (vr->min) == INTEGER_CST);
879 }
880
881 /* Return true if VR is a INTEGER_CST singleton. */
882
883 static inline bool
884 range_int_cst_singleton_p (value_range_t *vr)
885 {
886 return (range_int_cst_p (vr)
887 && !TREE_OVERFLOW (vr->min)
888 && !TREE_OVERFLOW (vr->max)
889 && tree_int_cst_equal (vr->min, vr->max));
890 }
891
892 /* Return true if value range VR involves at least one symbol. */
893
894 static inline bool
895 symbolic_range_p (value_range_t *vr)
896 {
897 return (!is_gimple_min_invariant (vr->min)
898 || !is_gimple_min_invariant (vr->max));
899 }
900
901 /* Return true if value range VR uses an overflow infinity. */
902
903 static inline bool
904 overflow_infinity_range_p (value_range_t *vr)
905 {
906 return (vr->type == VR_RANGE
907 && (is_overflow_infinity (vr->min)
908 || is_overflow_infinity (vr->max)));
909 }
910
/* Return false if we can not make a valid comparison based on VR;
   this will be the case if it uses an overflow infinity and overflow
   is not undefined (i.e., -fno-strict-overflow is in effect).
   Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
   uses an overflow infinity.  */

static bool
usable_range_p (value_range_t *vr, bool *strict_overflow_p)
{
  gcc_assert (vr->type == VR_RANGE);
  if (is_overflow_infinity (vr->min))
    {
      /* Record the dependence on undefined overflow even when we end
	 up rejecting the range.  */
      *strict_overflow_p = true;
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
	return false;
    }
  if (is_overflow_infinity (vr->max))
    {
      *strict_overflow_p = true;
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
	return false;
    }
  return true;
}
935
936
/* Return true if the result of assignment STMT is known to be
   non-negative.  If the return value is based on the assumption that
   signed overflow is undefined, set *STRICT_OVERFLOW_P to true;
   otherwise, don't change *STRICT_OVERFLOW_P.  */

static bool
gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  /* Dispatch on the rhs shape to the matching fold-const helper.  */
  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_UNARY_RHS:
      return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
					     gimple_expr_type (stmt),
					     gimple_assign_rhs1 (stmt),
					     strict_overflow_p);
    case GIMPLE_BINARY_RHS:
      return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
					      gimple_expr_type (stmt),
					      gimple_assign_rhs1 (stmt),
					      gimple_assign_rhs2 (stmt),
					      strict_overflow_p);
    case GIMPLE_TERNARY_RHS:
      /* No helper exists for ternary codes; conservatively say no.  */
      return false;
    case GIMPLE_SINGLE_RHS:
      return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
					      strict_overflow_p);
    case GIMPLE_INVALID_RHS:
      gcc_unreachable ();
    default:
      gcc_unreachable ();
    }
}
970
971 /* Return true if return value of call STMT is know to be non-negative.
972 If the return value is based on the assumption that signed overflow is
973 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
974 *STRICT_OVERFLOW_P.*/
975
976 static bool
977 gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
978 {
979 tree arg0 = gimple_call_num_args (stmt) > 0 ?
980 gimple_call_arg (stmt, 0) : NULL_TREE;
981 tree arg1 = gimple_call_num_args (stmt) > 1 ?
982 gimple_call_arg (stmt, 1) : NULL_TREE;
983
984 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
985 gimple_call_fndecl (stmt),
986 arg0,
987 arg1,
988 strict_overflow_p);
989 }
990
991 /* Return true if STMT is know to to compute a non-negative value.
992 If the return value is based on the assumption that signed overflow is
993 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
994 *STRICT_OVERFLOW_P.*/
995
996 static bool
997 gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
998 {
999 switch (gimple_code (stmt))
1000 {
1001 case GIMPLE_ASSIGN:
1002 return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
1003 case GIMPLE_CALL:
1004 return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
1005 default:
1006 gcc_unreachable ();
1007 }
1008 }
1009
/* Return true if the result of assignment STMT is known to be
   non-zero.  If the return value is based on the assumption that
   signed overflow is undefined, set *STRICT_OVERFLOW_P to true;
   otherwise, don't change *STRICT_OVERFLOW_P.  */

static bool
gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  /* Dispatch on the rhs shape to the matching fold-const helper.  */
  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_UNARY_RHS:
      return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
					 gimple_expr_type (stmt),
					 gimple_assign_rhs1 (stmt),
					 strict_overflow_p);
    case GIMPLE_BINARY_RHS:
      return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
					  gimple_expr_type (stmt),
					  gimple_assign_rhs1 (stmt),
					  gimple_assign_rhs2 (stmt),
					  strict_overflow_p);
    case GIMPLE_TERNARY_RHS:
      /* No helper exists for ternary codes; conservatively say no.  */
      return false;
    case GIMPLE_SINGLE_RHS:
      return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
					  strict_overflow_p);
    case GIMPLE_INVALID_RHS:
      gcc_unreachable ();
    default:
      gcc_unreachable ();
    }
}
1043
1044 /* Return true if STMT is know to to compute a non-zero value.
1045 If the return value is based on the assumption that signed overflow is
1046 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1047 *STRICT_OVERFLOW_P.*/
1048
1049 static bool
1050 gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
1051 {
1052 switch (gimple_code (stmt))
1053 {
1054 case GIMPLE_ASSIGN:
1055 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
1056 case GIMPLE_CALL:
1057 return gimple_alloca_call_p (stmt);
1058 default:
1059 gcc_unreachable ();
1060 }
1061 }
1062
/* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
   obtained so far.  */

static bool
vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
{
  /* First try the range-independent predicates.  */
  if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
    return true;

  /* If we have an expression of the form &X->a, then the expression
     is nonnull if X is nonnull.  */
  if (is_gimple_assign (stmt)
      && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
    {
      tree expr = gimple_assign_rhs1 (stmt);
      tree base = get_base_address (TREE_OPERAND (expr, 0));

      if (base != NULL_TREE
	  && TREE_CODE (base) == MEM_REF
	  && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
	{
	  /* Consult the recorded range of the base pointer X.  */
	  value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
	  if (range_is_nonnull (vr))
	    return true;
	}
    }

  return false;
}
1092
1093 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1094 a gimple invariant, or SSA_NAME +- CST. */
1095
1096 static bool
1097 valid_value_p (tree expr)
1098 {
1099 if (TREE_CODE (expr) == SSA_NAME)
1100 return true;
1101
1102 if (TREE_CODE (expr) == PLUS_EXPR
1103 || TREE_CODE (expr) == MINUS_EXPR)
1104 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1105 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1106
1107 return is_gimple_min_invariant (expr);
1108 }
1109
/* Return
    1 if VAL < VAL2,
    0 if !(VAL < VAL2),
   -2 if those are incomparable.  */

static inline int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    {
      if (TYPE_UNSIGNED (TREE_TYPE (val)))
	return INT_CST_LT_UNSIGNED (val, val2);
      else
	{
	  if (INT_CST_LT (val, val2))
	    return 1;
	  /* Signed "not less than" deliberately falls through to the
	     overflow-infinity adjustment below.  */
	}
    }
  else
    {
      tree tcmp;

      /* Folding VAL < VAL2 may emit overflow warnings; suppress them,
	 we only care about the constant result here.  */
      fold_defer_overflow_warnings ();

      tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

      fold_undefer_and_ignore_overflow_warnings ();

      /* If folding did not reduce to an integer constant the operands
	 are incomparable.  */
      if (!tcmp
	  || TREE_CODE (tcmp) != INTEGER_CST)
	return -2;

      if (!integer_zerop (tcmp))
	return 1;
    }

  /* val >= val2, not considering overflow infinity.  A -INF overflow
     infinity on VAL (or +INF on VAL2) still compares as strictly less
     unless the other side is the same infinity.  */
  if (is_negative_overflow_infinity (val))
    return is_negative_overflow_infinity (val2) ? 0 : 1;
  else if (is_positive_overflow_infinity (val2))
    return is_positive_overflow_infinity (val) ? 0 : 1;

  return 0;
}
1154
/* Compare two values VAL1 and VAL2.  Return

   -2 if VAL1 and VAL2 cannot be compared at compile-time,
   -1 if VAL1 < VAL2,
    0 if VAL1 == VAL2,
   +1 if VAL1 > VAL2, and
   +2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

static int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
	      == POINTER_TYPE_P (TREE_TYPE (val2)));
  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  if ((TREE_CODE (val1) == SSA_NAME
       || TREE_CODE (val1) == PLUS_EXPR
       || TREE_CODE (val1) == MINUS_EXPR)
      && (TREE_CODE (val2) == SSA_NAME
	  || TREE_CODE (val2) == PLUS_EXPR
	  || TREE_CODE (val2) == MINUS_EXPR))
    {
      tree n1, c1, n2, c2;
      enum tree_code code1, code2;

      /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
	 return -1 or +1 accordingly.  If VAL1 and VAL2 don't use the
	 same name, return -2.  */
      if (TREE_CODE (val1) == SSA_NAME)
	{
	  code1 = SSA_NAME;
	  n1 = val1;
	  c1 = NULL_TREE;
	}
      else
	{
	  code1 = TREE_CODE (val1);
	  n1 = TREE_OPERAND (val1, 0);
	  c1 = TREE_OPERAND (val1, 1);
	  /* Normalize a negative offset: NAME + -CST becomes
	     NAME - CST (and vice versa), so the case analysis below
	     only has to deal with nonnegative offsets.  */
	  if (tree_int_cst_sgn (c1) == -1)
	    {
	      if (is_negative_overflow_infinity (c1))
		return -2;
	      c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
	      if (!c1)
		return -2;
	      code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
	    }
	}

      if (TREE_CODE (val2) == SSA_NAME)
	{
	  code2 = SSA_NAME;
	  n2 = val2;
	  c2 = NULL_TREE;
	}
      else
	{
	  code2 = TREE_CODE (val2);
	  n2 = TREE_OPERAND (val2, 0);
	  c2 = TREE_OPERAND (val2, 1);
	  /* Normalize a negative offset, as above.  */
	  if (tree_int_cst_sgn (c2) == -1)
	    {
	      if (is_negative_overflow_infinity (c2))
		return -2;
	      c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
	      if (!c2)
		return -2;
	      code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
	    }
	}

      /* Both values must use the same name.  */
      if (n1 != n2)
	return -2;

      if (code1 == SSA_NAME
	  && code2 == SSA_NAME)
	/* NAME == NAME  */
	return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
	return -2;

      /* The simplifications below assume NAME [+-] CST does not wrap;
	 record that, unless warnings have been explicitly suppressed
	 on the expressions involved.  */
      if (strict_overflow_p != NULL
	  && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
	  && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      if (code1 == SSA_NAME)
	{
	  if (code2 == PLUS_EXPR)
	    /* NAME < NAME + CST  */
	    return -1;
	  else if (code2 == MINUS_EXPR)
	    /* NAME > NAME - CST  */
	    return 1;
	}
      else if (code1 == PLUS_EXPR)
	{
	  if (code2 == SSA_NAME)
	    /* NAME + CST > NAME  */
	    return 1;
	  else if (code2 == PLUS_EXPR)
	    /* NAME + CST1 > NAME + CST2, if CST1 > CST2  */
	    return compare_values_warnv (c1, c2, strict_overflow_p);
	  else if (code2 == MINUS_EXPR)
	    /* NAME + CST1 > NAME - CST2  */
	    return 1;
	}
      else if (code1 == MINUS_EXPR)
	{
	  if (code2 == SSA_NAME)
	    /* NAME - CST < NAME  */
	    return -1;
	  else if (code2 == PLUS_EXPR)
	    /* NAME - CST1 < NAME + CST2  */
	    return -1;
	  else if (code2 == MINUS_EXPR)
	    /* NAME - CST1 > NAME - CST2, if CST1 < CST2.  Notice that
	       C1 and C2 are swapped in the call to compare_values.  */
	    return compare_values_warnv (c2, c1, strict_overflow_p);
	}

      gcc_unreachable ();
    }

  /* We cannot compare non-constants.  */
  if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values, except for overflow
	 infinities.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	{
	  if (strict_overflow_p != NULL)
	    *strict_overflow_p = true;
	  if (is_negative_overflow_infinity (val1))
	    return is_negative_overflow_infinity (val2) ? 0 : -1;
	  else if (is_negative_overflow_infinity (val2))
	    return 1;
	  else if (is_positive_overflow_infinity (val1))
	    return is_positive_overflow_infinity (val2) ? 0 : 1;
	  else if (is_positive_overflow_infinity (val2))
	    return -1;
	  return -2;
	}

      return tree_int_cst_compare (val1, val2);
    }
  else
    {
      tree t;

      /* First see if VAL1 and VAL2 are not the same.  */
      if (val1 == val2 || operand_equal_p (val1, val2, 0))
	return 0;

      /* If VAL1 is a lower address than VAL2, return -1.  */
      if (operand_less_p (val1, val2) == 1)
	return -1;

      /* If VAL1 is a higher address than VAL2, return +1.  */
      if (operand_less_p (val2, val1) == 1)
	return 1;

      /* If VAL1 is different than VAL2, return +2.
	 For integer constants we either have already returned -1 or 1
	 or they are equivalent.  We still might succeed in proving
	 something about non-trivial operands.  */
      if (TREE_CODE (val1) != INTEGER_CST
	  || TREE_CODE (val2) != INTEGER_CST)
	{
	  t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
	  if (t && integer_onep (t))
	    return 2;
	}

      return -2;
    }
}
1354
1355 /* Compare values like compare_values_warnv, but treat comparisons of
1356 nonconstants which rely on undefined overflow as incomparable. */
1357
1358 static int
1359 compare_values (tree val1, tree val2)
1360 {
1361 bool sop;
1362 int ret;
1363
1364 sop = false;
1365 ret = compare_values_warnv (val1, val2, &sop);
1366 if (sop
1367 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1368 ret = -2;
1369 return ret;
1370 }
1371
1372
1373 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
1374 0 if VAL is not inside [MIN, MAX],
1375 -2 if we cannot tell either way.
1376
1377 Benchmark compile/20001226-1.c compilation time after changing this
1378 function. */
1379
1380 static inline int
1381 value_inside_range (tree val, tree min, tree max)
1382 {
1383 int cmp1, cmp2;
1384
1385 cmp1 = operand_less_p (val, min);
1386 if (cmp1 == -2)
1387 return -2;
1388 if (cmp1 == 1)
1389 return 0;
1390
1391 cmp2 = operand_less_p (max, val);
1392 if (cmp2 == -2)
1393 return -2;
1394
1395 return !cmp2;
1396 }
1397
1398
1399 /* Return true if value ranges VR0 and VR1 have a non-empty
1400 intersection.
1401
1402 Benchmark compile/20001226-1.c compilation time after changing this
1403 function.
1404 */
1405
1406 static inline bool
1407 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1408 {
1409 /* The value ranges do not intersect if the maximum of the first range is
1410 less than the minimum of the second range or vice versa.
1411 When those relations are unknown, we can't do any better. */
1412 if (operand_less_p (vr0->max, vr1->min) != 0)
1413 return false;
1414 if (operand_less_p (vr1->max, vr0->min) != 0)
1415 return false;
1416 return true;
1417 }
1418
1419
1420 /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
1421 include the value zero, -2 if we cannot tell. */
1422
1423 static inline int
1424 range_includes_zero_p (tree min, tree max)
1425 {
1426 tree zero = build_int_cst (TREE_TYPE (min), 0);
1427 return value_inside_range (zero, min, max);
1428 }
1429
1430 /* Return true if *VR is know to only contain nonnegative values. */
1431
1432 static inline bool
1433 value_range_nonnegative_p (value_range_t *vr)
1434 {
1435 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1436 which would return a useful value should be encoded as a
1437 VR_RANGE. */
1438 if (vr->type == VR_RANGE)
1439 {
1440 int result = compare_values (vr->min, integer_zero_node);
1441 return (result == 0 || result == 1);
1442 }
1443
1444 return false;
1445 }
1446
1447 /* Return true if T, an SSA_NAME, is known to be nonnegative. Return
1448 false otherwise or if no value range information is available. */
1449
1450 bool
1451 ssa_name_nonnegative_p (const_tree t)
1452 {
1453 value_range_t *vr = get_value_range (t);
1454
1455 if (INTEGRAL_TYPE_P (t)
1456 && TYPE_UNSIGNED (t))
1457 return true;
1458
1459 if (!vr)
1460 return false;
1461
1462 return value_range_nonnegative_p (vr);
1463 }
1464
1465 /* If *VR has a value rante that is a single constant value return that,
1466 otherwise return NULL_TREE. */
1467
1468 static tree
1469 value_range_constant_singleton (value_range_t *vr)
1470 {
1471 if (vr->type == VR_RANGE
1472 && operand_equal_p (vr->min, vr->max, 0)
1473 && is_gimple_min_invariant (vr->min))
1474 return vr->min;
1475
1476 return NULL_TREE;
1477 }
1478
1479 /* If OP has a value range with a single constant value return that,
1480 otherwise return NULL_TREE. This returns OP itself if OP is a
1481 constant. */
1482
1483 static tree
1484 op_with_constant_singleton_value_range (tree op)
1485 {
1486 if (is_gimple_min_invariant (op))
1487 return op;
1488
1489 if (TREE_CODE (op) != SSA_NAME)
1490 return NULL_TREE;
1491
1492 return value_range_constant_singleton (get_value_range (op));
1493 }
1494
1495 /* Return true if op is in a boolean [0, 1] value-range. */
1496
1497 static bool
1498 op_with_boolean_value_range_p (tree op)
1499 {
1500 value_range_t *vr;
1501
1502 if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
1503 return true;
1504
1505 if (integer_zerop (op)
1506 || integer_onep (op))
1507 return true;
1508
1509 if (TREE_CODE (op) != SSA_NAME)
1510 return false;
1511
1512 vr = get_value_range (op);
1513 return (vr->type == VR_RANGE
1514 && integer_zerop (vr->min)
1515 && integer_onep (vr->max));
1516 }
1517
/* Extract value range information from an ASSERT_EXPR EXPR and store
   it in *VR_P.  */

static void
extract_range_from_assert (value_range_t *vr_p, tree expr)
{
  tree var, cond, limit, min, max, type;
  value_range_t *limit_vr;
  enum tree_code cond_code;

  var = ASSERT_EXPR_VAR (expr);
  cond = ASSERT_EXPR_COND (expr);

  gcc_assert (COMPARISON_CLASS_P (cond));

  /* Find VAR in the ASSERT_EXPR conditional.  A PLUS_EXPR or NOP_EXPR
     operand 0 indicates a range test of the form
     (unsigned)VAR + CST <= LIMIT, handled specially below.  */
  if (var == TREE_OPERAND (cond, 0)
      || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
      || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
    {
      /* If the predicate is of the form VAR COMP LIMIT, then we just
	 take LIMIT from the RHS and use the same comparison code.  */
      cond_code = TREE_CODE (cond);
      limit = TREE_OPERAND (cond, 1);
      cond = TREE_OPERAND (cond, 0);
    }
  else
    {
      /* If the predicate is of the form LIMIT COMP VAR, then we need
	 to flip around the comparison code to create the proper range
	 for VAR.  */
      cond_code = swap_tree_comparison (TREE_CODE (cond));
      limit = TREE_OPERAND (cond, 0);
      cond = TREE_OPERAND (cond, 1);
    }

  limit = avoid_overflow_infinity (limit);

  type = TREE_TYPE (var);
  gcc_assert (limit != var);

  /* For pointer arithmetic, we only keep track of pointer equality
     and inequality.  */
  if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
    {
      set_value_range_to_varying (vr_p);
      return;
    }

  /* If LIMIT is another SSA name and LIMIT has a range of its own,
     try to use LIMIT's range to avoid creating symbolic ranges
     unnecessarily.  */
  limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;

  /* LIMIT's range is only interesting if it has any useful information.  */
  if (limit_vr
      && (limit_vr->type == VR_UNDEFINED
	  || limit_vr->type == VR_VARYING
	  || symbolic_range_p (limit_vr)))
    limit_vr = NULL;

  /* Initially, the new range has the same set of equivalences of
     VAR's range.  This will be revised before returning the final
     value.  Since assertions may be chained via mutually exclusive
     predicates, we will need to trim the set of equivalences before
     we are done.  */
  gcc_assert (vr_p->equiv == NULL);
  add_equivalence (&vr_p->equiv, var);

  /* Extract a new range based on the asserted comparison for VAR and
     LIMIT's value range.  Notice that if LIMIT has an anti-range, we
     will only use it for equality comparisons (EQ_EXPR).  For any
     other kind of assertion, we cannot derive a range from LIMIT's
     anti-range that can be used to describe the new range.  For
     instance, ASSERT_EXPR <x_2, x_2 <= b_4>.  If b_4 is ~[2, 10],
     then b_4 takes on the ranges [-INF, 1] and [11, +INF].  There is
     no single range for x_2 that could describe LE_EXPR, so we might
     as well build the range [b_4, +INF] for it.
     One special case we handle is extracting a range from a
     range test encoded as (unsigned)var + CST <= limit.  */
  if (TREE_CODE (cond) == NOP_EXPR
      || TREE_CODE (cond) == PLUS_EXPR)
    {
      if (TREE_CODE (cond) == PLUS_EXPR)
	{
	  /* VAR + CST <= LIMIT becomes -CST <= VAR <= LIMIT - CST.  */
	  min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
			     TREE_OPERAND (cond, 1));
	  max = int_const_binop (PLUS_EXPR, limit, min);
	  cond = TREE_OPERAND (cond, 0);
	}
      else
	{
	  /* Plain (unsigned)VAR <= LIMIT means 0 <= VAR <= LIMIT.  */
	  min = build_int_cst (TREE_TYPE (var), 0);
	  max = limit;
	}

      /* Make sure to not set TREE_OVERFLOW on the final type
	 conversion.  We are willingly interpreting large positive
	 unsigned values as negative signed values here.  */
      min = force_fit_type (TREE_TYPE (var),
			    wide_int::from (min,
					    TYPE_PRECISION (TREE_TYPE (var)),
					    TYPE_SIGN (TREE_TYPE (min))),
			    0, false);
      max = force_fit_type (TREE_TYPE (var),
			    wide_int::from (max,
					    TYPE_PRECISION (TREE_TYPE (var)),
					    TYPE_SIGN (TREE_TYPE (max))),
			    0, false);

      /* We can transform a max, min range to an anti-range or
	 vice-versa.  Use set_and_canonicalize_value_range which does
	 this for us.  */
      if (cond_code == LE_EXPR)
	set_and_canonicalize_value_range (vr_p, VR_RANGE,
					  min, max, vr_p->equiv);
      else if (cond_code == GT_EXPR)
	set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
					  min, max, vr_p->equiv);
      else
	gcc_unreachable ();
    }
  else if (cond_code == EQ_EXPR)
    {
      enum value_range_type range_type;

      /* On equality, VAR inherits LIMIT's whole range (or the
	 singleton [LIMIT, LIMIT] if no range is recorded).  */
      if (limit_vr)
	{
	  range_type = limit_vr->type;
	  min = limit_vr->min;
	  max = limit_vr->max;
	}
      else
	{
	  range_type = VR_RANGE;
	  min = limit;
	  max = limit;
	}

      set_value_range (vr_p, range_type, min, max, vr_p->equiv);

      /* When asserting the equality VAR == LIMIT and LIMIT is another
	 SSA name, the new range will also inherit the equivalence set
	 from LIMIT.  */
      if (TREE_CODE (limit) == SSA_NAME)
	add_equivalence (&vr_p->equiv, limit);
    }
  else if (cond_code == NE_EXPR)
    {
      /* As described above, when LIMIT's range is an anti-range and
	 this assertion is an inequality (NE_EXPR), then we cannot
	 derive anything from the anti-range.  For instance, if
	 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
	 not imply that VAR's range is [0, 0].  So, in the case of
	 anti-ranges, we just assert the inequality using LIMIT and
	 not its anti-range.

	 If LIMIT_VR is a range, we can only use it to build a new
	 anti-range if LIMIT_VR is a single-valued range.  For
	 instance, if LIMIT_VR is [0, 1], the predicate
	 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
	 Rather, it means that for value 0 VAR should be ~[0, 0]
	 and for value 1, VAR should be ~[1, 1].  We cannot
	 represent these ranges.

	 The only situation in which we can build a valid
	 anti-range is when LIMIT_VR is a single-valued range
	 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX).  In that case,
	 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX].  */
      if (limit_vr
	  && limit_vr->type == VR_RANGE
	  && compare_values (limit_vr->min, limit_vr->max) == 0)
	{
	  min = limit_vr->min;
	  max = limit_vr->max;
	}
      else
	{
	  /* In any other case, we cannot use LIMIT's range to build a
	     valid anti-range.  */
	  min = max = limit;
	}

      /* If MIN and MAX cover the whole range for their type, then
	 just use the original LIMIT.  */
      if (INTEGRAL_TYPE_P (type)
	  && vrp_val_is_min (min)
	  && vrp_val_is_max (max))
	min = max = limit;

      set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
					min, max, vr_p->equiv);
    }
  else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
    {
      min = TYPE_MIN_VALUE (type);

      if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
	max = limit;
      else
	{
	  /* If LIMIT_VR is of the form [N1, N2], we need to build the
	     range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
	     LT_EXPR.  */
	  max = limit_vr->max;
	}

      /* If the maximum value forces us to be out of bounds, simply punt.
	 It would be pointless to try and do anything more since this
	 all should be optimized away above us.  */
      if ((cond_code == LT_EXPR
	   && compare_values (max, min) == 0)
	  || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max)))
	set_value_range_to_varying (vr_p);
      else
	{
	  /* For LT_EXPR, we create the range [MIN, MAX - 1].  */
	  if (cond_code == LT_EXPR)
	    {
	      /* In a signed 1-bit type the constant 1 is not
		 representable, so add -1 instead of subtracting 1.  */
	      if (TYPE_PRECISION (TREE_TYPE (max)) == 1
		  && !TYPE_UNSIGNED (TREE_TYPE (max)))
		max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
				   build_int_cst (TREE_TYPE (max), -1));
	      else
		max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
				   build_int_cst (TREE_TYPE (max), 1));
	      if (EXPR_P (max))
		TREE_NO_WARNING (max) = 1;
	    }

	  set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
	}
    }
  else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
    {
      max = TYPE_MAX_VALUE (type);

      if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
	min = limit;
      else
	{
	  /* If LIMIT_VR is of the form [N1, N2], we need to build the
	     range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
	     GT_EXPR.  */
	  min = limit_vr->min;
	}

      /* If the minimum value forces us to be out of bounds, simply punt.
	 It would be pointless to try and do anything more since this
	 all should be optimized away above us.  */
      if ((cond_code == GT_EXPR
	   && compare_values (min, max) == 0)
	  || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min)))
	set_value_range_to_varying (vr_p);
      else
	{
	  /* For GT_EXPR, we create the range [MIN + 1, MAX].  */
	  if (cond_code == GT_EXPR)
	    {
	      /* In a signed 1-bit type the constant 1 is not
		 representable, so subtract -1 instead of adding 1.  */
	      if (TYPE_PRECISION (TREE_TYPE (min)) == 1
		  && !TYPE_UNSIGNED (TREE_TYPE (min)))
		min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
				   build_int_cst (TREE_TYPE (min), -1));
	      else
		min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
				   build_int_cst (TREE_TYPE (min), 1));
	      if (EXPR_P (min))
		TREE_NO_WARNING (min) = 1;
	    }

	  set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
	}
    }
  else
    gcc_unreachable ();

  /* Finally intersect the new range with what we already know about var.  */
  vrp_intersect_ranges (vr_p, get_value_range (var));
}
1797
1798
1799 /* Extract range information from SSA name VAR and store it in VR. If
1800 VAR has an interesting range, use it. Otherwise, create the
1801 range [VAR, VAR] and return it. This is useful in situations where
1802 we may have conditionals testing values of VARYING names. For
1803 instance,
1804
1805 x_3 = y_5;
1806 if (x_3 > y_5)
1807 ...
1808
1809 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1810 always false. */
1811
1812 static void
1813 extract_range_from_ssa_name (value_range_t *vr, tree var)
1814 {
1815 value_range_t *var_vr = get_value_range (var);
1816
1817 if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
1818 copy_value_range (vr, var_vr);
1819 else
1820 set_value_range (vr, VR_RANGE, var, var, NULL);
1821
1822 add_equivalence (&vr->equiv, var);
1823 }
1824
1825
/* Wrapper around int_const_binop.  If the operation overflows and we
   are not using wrapping arithmetic, then adjust the result to be
   -INF or +INF depending on CODE, VAL1 and VAL2.  This can return
   NULL_TREE if we need to use an overflow infinity representation but
   the type does not support it.  */

static tree
vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
{
  tree res;

  res = int_const_binop (code, val1, val2);

  /* If we are using unsigned arithmetic, operate symbolically
     on -INF and +INF as int_const_binop only handles signed overflow.  */
  if (TYPE_UNSIGNED (TREE_TYPE (val1)))
    {
      int checkz = compare_values (res, val1);
      bool overflow = false;

      /* Ensure that res = val1 [+*] val2 >= val1
         or that res = val1 - val2 <= val1.  */
      if ((code == PLUS_EXPR
	   && !(checkz == 1 || checkz == 0))
	  || (code == MINUS_EXPR
	      && !(checkz == 0 || checkz == -1)))
	{
	  overflow = true;
	}
      /* Checking for multiplication overflow is done by dividing the
	 output of the multiplication by the first input of the
	 multiplication.  If the result of that division operation is
	 not equal to the second input of the multiplication, then the
	 multiplication overflowed.  */
      else if (code == MULT_EXPR && !integer_zerop (val1))
	{
	  tree tmp = int_const_binop (TRUNC_DIV_EXPR,
				      res,
				      val1);
	  int check = compare_values (tmp, val2);

	  if (check != 0)
	    overflow = true;
	}

      /* Mark the result as overflowed so callers see the wrap.  */
      if (overflow)
	{
	  res = copy_node (res);
	  TREE_OVERFLOW (res) = 1;
	}

    }
  else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
    /* If the signed operation wraps then int_const_binop has done
       everything we want.  */
    ;
  /* Signed division of -1/0 overflows and by the time it gets here
     returns NULL_TREE.  */
  else if (!res)
    return NULL_TREE;
  else if ((TREE_OVERFLOW (res)
	    && !TREE_OVERFLOW (val1)
	    && !TREE_OVERFLOW (val2))
	   || is_overflow_infinity (val1)
	   || is_overflow_infinity (val2))
    {
      /* If the operation overflowed but neither VAL1 nor VAL2 are
	 overflown, return -INF or +INF depending on the operation
	 and the combination of signs of the operands.  */
      int sgn1 = tree_int_cst_sgn (val1);
      int sgn2 = tree_int_cst_sgn (val2);

      if (needs_overflow_infinity (TREE_TYPE (res))
	  && !supports_overflow_infinity (TREE_TYPE (res)))
	return NULL_TREE;

      /* We have to punt on adding infinities of different signs,
	 since we can't tell what the sign of the result should be.
	 Likewise for subtracting infinities of the same sign.  */
      if (((code == PLUS_EXPR && sgn1 != sgn2)
	   || (code == MINUS_EXPR && sgn1 == sgn2))
	  && is_overflow_infinity (val1)
	  && is_overflow_infinity (val2))
	return NULL_TREE;

      /* Don't try to handle division or shifting of infinities.  */
      if ((code == TRUNC_DIV_EXPR
	   || code == FLOOR_DIV_EXPR
	   || code == CEIL_DIV_EXPR
	   || code == EXACT_DIV_EXPR
	   || code == ROUND_DIV_EXPR
	   || code == RSHIFT_EXPR)
	  && (is_overflow_infinity (val1)
	      || is_overflow_infinity (val2)))
	return NULL_TREE;

      /* Notice that we only need to handle the restricted set of
	 operations handled by extract_range_from_binary_expr.
	 Among them, only multiplication, addition and subtraction
	 can yield overflow without overflown operands because we
	 are working with integral types only... except in the
	 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
	 for division too.  */

      /* For multiplication, the sign of the overflow is given
	 by the comparison of the signs of the operands.  */
      if ((code == MULT_EXPR && sgn1 == sgn2)
          /* For addition, the operands must be of the same sign
	     to yield an overflow.  Its sign is therefore that
	     of one of the operands, for example the first.  For
	     infinite operands X + -INF is negative, not positive.  */
	  || (code == PLUS_EXPR
	      && (sgn1 >= 0
		  ? !is_negative_overflow_infinity (val2)
		  : is_positive_overflow_infinity (val2)))
	  /* For subtraction, non-infinite operands must be of
	     different signs to yield an overflow.  Its sign is
	     therefore that of the first operand or the opposite of
	     that of the second operand.  A first operand of 0 counts
	     as positive here, for the corner case 0 - (-INF), which
	     overflows, but must yield +INF.  For infinite operands 0
	     - INF is negative, not positive.  */
	  || (code == MINUS_EXPR
	      && (sgn1 >= 0
		  ? !is_positive_overflow_infinity (val2)
		  : is_negative_overflow_infinity (val2)))
	  /* We only get in here with positive shift count, so the
	     overflow direction is the same as the sign of val1.
	     Actually rshift does not overflow at all, but we only
	     handle the case of shifting overflowed -INF and +INF.  */
	  || (code == RSHIFT_EXPR
	      && sgn1 >= 0)
	  /* For division, the only case is -INF / -1 = +INF.  */
	  || code == TRUNC_DIV_EXPR
	  || code == FLOOR_DIV_EXPR
	  || code == CEIL_DIV_EXPR
	  || code == EXACT_DIV_EXPR
	  || code == ROUND_DIV_EXPR)
	return (needs_overflow_infinity (TREE_TYPE (res))
		? positive_overflow_infinity (TREE_TYPE (res))
		: TYPE_MAX_VALUE (TREE_TYPE (res)));
      else
	return (needs_overflow_infinity (TREE_TYPE (res))
		? negative_overflow_infinity (TREE_TYPE (res))
		: TYPE_MIN_VALUE (TREE_TYPE (res)));
    }

  return res;
}
1975
1976
1977 /* For range VR compute two wide_int bitmasks. In *MAY_BE_NONZERO
1978 bitmask if some bit is unset, it means for all numbers in the range
1979 the bit is 0, otherwise it might be 0 or 1. In *MUST_BE_NONZERO
1980 bitmask if some bit is set, it means for all numbers in the range
1981 the bit is 1, otherwise it might be 0 or 1. */
1982
1983 static bool
1984 zero_nonzero_bits_from_vr (const tree expr_type,
1985 value_range_t *vr,
1986 wide_int *may_be_nonzero,
1987 wide_int *must_be_nonzero)
1988 {
1989 *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
1990 *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
1991 if (!range_int_cst_p (vr)
1992 || TREE_OVERFLOW (vr->min)
1993 || TREE_OVERFLOW (vr->max))
1994 return false;
1995
1996 if (range_int_cst_singleton_p (vr))
1997 {
1998 *may_be_nonzero = vr->min;
1999 *must_be_nonzero = *may_be_nonzero;
2000 }
2001 else if (tree_int_cst_sgn (vr->min) >= 0
2002 || tree_int_cst_sgn (vr->max) < 0)
2003 {
2004 wide_int wmin = vr->min;
2005 wide_int wmax = vr->max;
2006 wide_int xor_mask = wmin ^ wmax;
2007 *may_be_nonzero = wmin | wmax;
2008 *must_be_nonzero = wmin & wmax;
2009 if (xor_mask != 0)
2010 {
2011 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
2012 (*may_be_nonzero).get_precision ());
2013 *may_be_nonzero = (*may_be_nonzero) | mask;
2014 *must_be_nonzero = (*must_be_nonzero).and_not (mask);
2015 }
2016 }
2017
2018 return true;
2019 }
2020
2021 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
2022 so that *VR0 U *VR1 == *AR. Returns true if that is possible,
2023 false otherwise. If *AR can be represented with a single range
2024 *VR1 will be VR_UNDEFINED. */
2025
2026 static bool
2027 ranges_from_anti_range (value_range_t *ar,
2028 value_range_t *vr0, value_range_t *vr1)
2029 {
2030 tree type = TREE_TYPE (ar->min);
2031
2032 vr0->type = VR_UNDEFINED;
2033 vr1->type = VR_UNDEFINED;
2034
2035 if (ar->type != VR_ANTI_RANGE
2036 || TREE_CODE (ar->min) != INTEGER_CST
2037 || TREE_CODE (ar->max) != INTEGER_CST
2038 || !vrp_val_min (type)
2039 || !vrp_val_max (type))
2040 return false;
2041
2042 if (!vrp_val_is_min (ar->min))
2043 {
2044 vr0->type = VR_RANGE;
2045 vr0->min = vrp_val_min (type);
2046 vr0->max
2047 = wide_int_to_tree (type,
2048 wide_int (ar->min) - 1);
2049 }
2050 if (!vrp_val_is_max (ar->max))
2051 {
2052 vr1->type = VR_RANGE;
2053 vr1->min
2054 = wide_int_to_tree (type,
2055 wide_int (ar->max) + 1);
2056 vr1->max = vrp_val_max (type);
2057 }
2058 if (vr0->type == VR_UNDEFINED)
2059 {
2060 *vr0 = *vr1;
2061 vr1->type = VR_UNDEFINED;
2062 }
2063
2064 return vr0->type != VR_UNDEFINED;
2065 }
2066
/* Helper to extract a value-range *VR for a multiplicative operation
   *VR0 CODE *VR1.  CODE must be one of the multiplication, division
   or shift codes asserted below, and both input ranges must be of
   the same kind (VR_RANGE, or VR_ANTI_RANGE for MULT_EXPR only).  */

static void
extract_range_from_multiplicative_op_1 (value_range_t *vr,
					enum tree_code code,
					value_range_t *vr0, value_range_t *vr1)
{
  enum value_range_type type;
  tree val[4];
  size_t i;
  tree min, max;
  bool sop;
  int cmp;

  /* Multiplications, divisions and shifts are a bit tricky to handle,
     depending on the mix of signs we have in the two ranges, we
     need to operate on different values to get the minimum and
     maximum values for the new range.  One approach is to figure
     out all the variations of range combinations and do the
     operations.

     However, this involves several calls to compare_values and it
     is pretty convoluted.  It's simpler to do the 4 operations
     (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1)
     and then figure the smallest and largest values to form the
     new range.  */
  gcc_assert (code == MULT_EXPR
	      || code == TRUNC_DIV_EXPR
	      || code == FLOOR_DIV_EXPR
	      || code == CEIL_DIV_EXPR
	      || code == EXACT_DIV_EXPR
	      || code == ROUND_DIV_EXPR
	      || code == RSHIFT_EXPR
	      || code == LSHIFT_EXPR);
  gcc_assert ((vr0->type == VR_RANGE
	       || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
	      && vr0->type == vr1->type);

  type = vr0->type;

  /* Compute the 4 cross operations.  A NULL_TREE result from
     vrp_int_const_binop marks a combination that could not be folded
     to a usable constant; SOP records that so we can give up.  */
  sop = false;
  val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
  if (val[0] == NULL_TREE)
    sop = true;

  /* When VR1 is a singleton, MIN0 OP MAX1 would duplicate val[0].  */
  if (vr1->max == vr1->min)
    val[1] = NULL_TREE;
  else
    {
      val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
      if (val[1] == NULL_TREE)
	sop = true;
    }

  /* Likewise when VR0 is a singleton.  */
  if (vr0->max == vr0->min)
    val[2] = NULL_TREE;
  else
    {
      val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
      if (val[2] == NULL_TREE)
	sop = true;
    }

  /* MAX0 OP MAX1 is redundant if either range is a singleton.  */
  if (vr0->min == vr0->max || vr1->min == vr1->max)
    val[3] = NULL_TREE;
  else
    {
      val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
      if (val[3] == NULL_TREE)
	sop = true;
    }

  if (sop)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Set MIN to the minimum of VAL[i] and MAX to the maximum
     of VAL[i].  */
  min = val[0];
  max = val[0];
  for (i = 1; i < 4; i++)
    {
      /* Stop scanning once MIN or MAX is already known to force a
	 VARYING result below.  */
      if (!is_gimple_min_invariant (min)
	  || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
	  || !is_gimple_min_invariant (max)
	  || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
	break;

      if (val[i])
	{
	  if (!is_gimple_min_invariant (val[i])
	      || (TREE_OVERFLOW (val[i])
		  && !is_overflow_infinity (val[i])))
	    {
	      /* If we found an overflowed value, set MIN and MAX
		 to it so that we set the resulting range to
		 VARYING.  */
	      min = max = val[i];
	      break;
	    }

	  if (compare_values (val[i], min) == -1)
	    min = val[i];

	  if (compare_values (val[i], max) == 1)
	    max = val[i];
	}
    }

  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  But we do accept an overflow infinity
     representation.  */
  if (min == NULL_TREE
      || !is_gimple_min_invariant (min)
      || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
      || max == NULL_TREE
      || !is_gimple_min_invariant (max)
      || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* We punt if:
     1) [-INF, +INF]
     2) [-INF, +-INF(OVF)]
     3) [+-INF(OVF), +INF]
     4) [+-INF(OVF), +-INF(OVF)]
     We learn nothing when we have INF and INF(OVF) on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF] without
     overflow.  */
  if ((vrp_val_is_min (min) || is_overflow_infinity (min))
      && (vrp_val_is_max (max) || is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
    {
      /* If the new range has its limits swapped around (MIN > MAX),
	 then the operation caused one of them to wrap around, mark
	 the new range VARYING.  */
      set_value_range_to_varying (vr);
    }
  else
    set_value_range (vr, type, min, max, NULL);
}
2220
/* Extract range information from a binary operation CODE based on
   the ranges of each of its operands, *VR0 and *VR1 with resulting
   type EXPR_TYPE.  The resulting range is stored in *VR.  */

static void
extract_range_from_binary_expr_1 (value_range_t *vr,
				  enum tree_code code, tree expr_type,
				  value_range_t *vr0_, value_range_t *vr1_)
{
  /* Work on local copies so that the anti-range canonicalization
     below does not clobber the caller's ranges.  */
  value_range_t vr0 = *vr0_, vr1 = *vr1_;
  value_range_t vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
  enum value_range_type type;
  tree min = NULL_TREE, max = NULL_TREE;
  int cmp;

  /* VRP only tracks integral and pointer typed values.  */
  if (!INTEGRAL_TYPE_P (expr_type)
      && !POINTER_TYPE_P (expr_type))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Not all binary expressions can be applied to ranges in a
     meaningful way.  Handle only arithmetic operations.  */
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR
      && code != MULT_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != RSHIFT_EXPR
      && code != LSHIFT_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* If both ranges are UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  /* If one of the ranges is UNDEFINED drop it to VARYING for the following
     code.  At some point we may want to special-case operations that
     have UNDEFINED result for all or some value-ranges of the not UNDEFINED
     operand.  */
  else if (vr0.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr0);
  else if (vr1.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr1);

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express ~[] op X as ([]' op X) U ([]'' op X).  The union is
     computed by recursing on each piece and vrp_meet-ing the two
     results.  */
  if (vr0.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range_t vrres = VR_INITIALIZER;
	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
					    &vrtem1, vr1_);
	  vrp_meet (vr, &vrres);
	}
      return;
    }
  /* Likewise for X op ~[].  */
  if (vr1.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range_t vrres = VR_INITIALIZER;
	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
					    vr0_, &vrtem1);
	  vrp_meet (vr, &vrres);
	}
      return;
    }

  /* The type of the resulting value range defaults to VR0.TYPE.  */
  type = vr0.type;

  /* Refuse to operate on VARYING ranges, ranges of different kinds
     and symbolic ranges.  As an exception, we allow BIT_AND_EXPR
     because we may be able to derive a useful range even if one of
     the operands is VR_VARYING or symbolic range.  Similarly for
     divisions.  TODO, we may be able to derive anti-ranges in
     some cases.  */
  if (code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && (vr0.type == VR_VARYING
	  || vr1.type == VR_VARYING
	  || vr0.type != vr1.type
	  || symbolic_range_p (&vr0)
	  || symbolic_range_p (&vr1)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Now evaluate the expression to determine the new range.  */
  if (POINTER_TYPE_P (expr_type))
    {
      if (code == MIN_EXPR || code == MAX_EXPR)
	{
	  /* For MIN/MAX expressions with pointers, we only care about
	     nullness, if both are non null, then the result is nonnull.
	     If both are null, then the result is null.  Otherwise they
	     are varying.  */
	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) && range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else if (code == POINTER_PLUS_EXPR)
	{
	  /* For pointer types, we are really only interested in asserting
	     whether the expression evaluates to non-NULL.  */
	  if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) && range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else if (code == BIT_AND_EXPR)
	{
	  /* For pointer types, we are really only interested in asserting
	     whether the expression evaluates to non-NULL.  */
	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) || range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else
	set_value_range_to_varying (vr);

      return;
    }

  /* For integer ranges, apply the operation to each end of the
     range and see what we end up with.  */
  if (code == PLUS_EXPR || code == MINUS_EXPR)
    {
      /* If we have a PLUS_EXPR with two VR_RANGE integer constant
	 ranges compute the precise range for such case if possible.  */
      if (range_int_cst_p (&vr0)
	  && range_int_cst_p (&vr1))
	{
	  signop sgn = TYPE_SIGN (expr_type);
	  unsigned int prec = TYPE_PRECISION (expr_type);
	  wide_int min0 = wide_int (vr0.min);
	  wide_int max0 = wide_int (vr0.max);
	  wide_int min1 = wide_int (vr1.min);
	  wide_int max1 = wide_int (vr1.max);
	  wide_int type_min = wi::min_value (TYPE_PRECISION (expr_type), sgn);
	  wide_int type_max = wi::max_value (TYPE_PRECISION (expr_type), sgn);
	  wide_int wmin, wmax;
	  /* Overflow indicators for the computed lower/upper bound:
	     -1 underflow, 0 in range, 1 overflow.  */
	  int min_ovf = 0;
	  int max_ovf = 0;

	  if (code == PLUS_EXPR)
	    {
	      wmin = min0 + min1;
	      wmax = max0 + max1;

	      /* Check for overflow.  */
	      if (wi::cmp (min1, 0, sgn) != wi::cmp (wmin, min0, sgn))
		min_ovf = wi::cmp (min0, wmin, sgn);
	      if (wi::cmp (max1, 0, sgn) != wi::cmp (wmax, max0, sgn))
		max_ovf = wi::cmp (max0, wmax, sgn);
	    }
	  else /* if (code == MINUS_EXPR) */
	    {
	      wmin = min0 - max1;
	      wmax = max0 - min1;

	      if (wi::cmp (0, max1, sgn) != wi::cmp (wmin, min0, sgn))
		min_ovf = wi::cmp (min0, max1, sgn);
	      if (wi::cmp (0, min1, sgn) != wi::cmp (wmax, max0, sgn))
		max_ovf = wi::cmp (max0, min1, sgn);
	    }

	  /* For non-wrapping arithmetic look at possibly smaller
	     value-ranges of the type.  */
	  if (!TYPE_OVERFLOW_WRAPS (expr_type))
	    {
	      if (vrp_val_min (expr_type))
		type_min = wide_int (vrp_val_min (expr_type));
	      if (vrp_val_max (expr_type))
		type_max = wide_int (vrp_val_max (expr_type));
	    }

	  /* Check for type overflow.  */
	  if (min_ovf == 0)
	    {
	      if (wi::cmp (wmin, type_min, sgn) == -1)
		min_ovf = -1;
	      else if (wi::cmp (wmin, type_max, sgn) == 1)
		min_ovf = 1;
	    }
	  if (max_ovf == 0)
	    {
	      if (wi::cmp (wmax, type_min, sgn) == -1)
		max_ovf = -1;
	      else if (wi::cmp (wmax, type_max, sgn) == 1)
		max_ovf = 1;
	    }

	  if (TYPE_OVERFLOW_WRAPS (expr_type))
	    {
	      /* If overflow wraps, truncate the values and adjust the
		 range kind and bounds appropriately.  */
	      wide_int tmin = wide_int::from (wmin, prec, sgn);
	      wide_int tmax = wide_int::from (wmax, prec, sgn);
	      if (min_ovf == max_ovf)
		{
		  /* No overflow or both overflow or underflow.  The
		     range kind stays VR_RANGE.  */
		  min = wide_int_to_tree (expr_type, tmin);
		  max = wide_int_to_tree (expr_type, tmax);
		}
	      else if (min_ovf == -1
		       && max_ovf == 1)
		{
		  /* Underflow and overflow, drop to VR_VARYING.  */
		  set_value_range_to_varying (vr);
		  return;
		}
	      else
		{
		  /* Min underflow or max overflow.  The range kind
		     changes to VR_ANTI_RANGE.  */
		  bool covers = false;
		  wide_int tem = tmin;
		  gcc_assert ((min_ovf == -1 && max_ovf == 0)
			      || (max_ovf == 1 && min_ovf == 0));
		  type = VR_ANTI_RANGE;
		  tmin = tmax + 1;
		  if (wi::cmp (tmin, tmax, sgn) < 0)
		    covers = true;
		  tmax = tem - 1;
		  if (wi::cmp (tmax, tem, sgn) > 0)
		    covers = true;
		  /* If the anti-range would cover nothing, drop to varying.
		     Likewise if the anti-range bounds are outside of the
		     types values.  */
		  if (covers || wi::cmp (tmin, tmax, sgn) > 0)
		    {
		      set_value_range_to_varying (vr);
		      return;
		    }
		  min = wide_int_to_tree (expr_type, tmin);
		  max = wide_int_to_tree (expr_type, tmax);
		}
	    }
	  else
	    {
	      /* If overflow does not wrap, saturate to the types min/max
		 value.  */
	      if (min_ovf == -1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    min = negative_overflow_infinity (expr_type);
		  else
		    min = wide_int_to_tree (expr_type, type_min);
		}
	      else if (min_ovf == 1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    min = positive_overflow_infinity (expr_type);
		  else
		    min = wide_int_to_tree (expr_type, type_max);
		}
	      else
		min = wide_int_to_tree (expr_type, wmin);

	      if (max_ovf == -1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    max = negative_overflow_infinity (expr_type);
		  else
		    max = wide_int_to_tree (expr_type, type_min);
		}
	      else if (max_ovf == 1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    max = positive_overflow_infinity (expr_type);
		  else
		    max = wide_int_to_tree (expr_type, type_max);
		}
	      else
		max = wide_int_to_tree (expr_type, wmax);
	    }
	  /* Propagate overflow-infinity markers from the operands to
	     the result bounds.  */
	  if (needs_overflow_infinity (expr_type)
	      && supports_overflow_infinity (expr_type))
	    {
	      if (is_negative_overflow_infinity (vr0.min)
		  || (code == PLUS_EXPR
		      ? is_negative_overflow_infinity (vr1.min)
		      : is_positive_overflow_infinity (vr1.max)))
		min = negative_overflow_infinity (expr_type);
	      if (is_positive_overflow_infinity (vr0.max)
		  || (code == PLUS_EXPR
		      ? is_positive_overflow_infinity (vr1.max)
		      : is_negative_overflow_infinity (vr1.min)))
		max = positive_overflow_infinity (expr_type);
	    }
	}
      else
	{
	  /* For other cases, for example if we have a PLUS_EXPR with two
	     VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
	     to compute a precise range for such a case.
	     ??? General even mixed range kind operations can be expressed
	     by for example transforming ~[3, 5] + [1, 2] to range-only
	     operations and a union primitive:
	       [-INF, 2] + [1, 2]  U  [5, +INF] + [1, 2]
	           [-INF+1, 4]     U    [6, +INF(OVF)]
	     though usually the union is not exactly representable with
	     a single range or anti-range as the above is
	     [-INF+1, +INF(OVF)] intersected with ~[5, 5]
	     but one could use a scheme similar to equivalences for this.  */
	  set_value_range_to_varying (vr);
	  return;
	}
    }
  else if (code == MIN_EXPR
	   || code == MAX_EXPR)
    {
      if (vr0.type == VR_RANGE
	  && !symbolic_range_p (&vr0))
	{
	  type = VR_RANGE;
	  if (vr1.type == VR_RANGE
	      && !symbolic_range_p (&vr1))
	    {
	      /* For operations that make the resulting range directly
		 proportional to the original ranges, apply the operation to
		 the same end of each range.  */
	      min = vrp_int_const_binop (code, vr0.min, vr1.min);
	      max = vrp_int_const_binop (code, vr0.max, vr1.max);
	    }
	  /* Only one operand has a usable range: widen the unknown
	     side to the type extreme in the direction the operation
	     cannot exceed.  */
	  else if (code == MIN_EXPR)
	    {
	      min = vrp_val_min (expr_type);
	      max = vr0.max;
	    }
	  else if (code == MAX_EXPR)
	    {
	      min = vr0.min;
	      max = vrp_val_max (expr_type);
	    }
	}
      else if (vr1.type == VR_RANGE
	       && !symbolic_range_p (&vr1))
	{
	  type = VR_RANGE;
	  if (code == MIN_EXPR)
	    {
	      min = vrp_val_min (expr_type);
	      max = vr1.max;
	    }
	  else if (code == MAX_EXPR)
	    {
	      min = vr1.min;
	      max = vrp_val_max (expr_type);
	    }
	}
      else
	{
	  set_value_range_to_varying (vr);
	  return;
	}
    }
  else if (code == MULT_EXPR)
    {
      /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
	 drop to varying.  This test requires 2*prec bits if both
	 operands are signed and 2*prec + 2 bits if either is not.  */

      signop sign = TYPE_SIGN (expr_type);
      unsigned int prec = TYPE_PRECISION (expr_type);
      unsigned int prec2 = (prec * 2) + (sign == UNSIGNED ? 2 : 0);

      if (range_int_cst_p (&vr0)
	  && range_int_cst_p (&vr1)
	  && TYPE_OVERFLOW_WRAPS (expr_type))
	{
	  wide_int min0, max0, min1, max1;
	  wide_int prod0, prod1, prod2, prod3;
	  wide_int sizem1 = wi::mask (prec, false, prec2);
	  wide_int size = sizem1 + 1;

	  /* Extend the values using the sign of the result to PREC2.
	     From here on out, everything is just signed math no matter
	     what the input types were.  */
	  min0 = wide_int::from (vr0.min, prec2, sign);
	  max0 = wide_int::from (vr0.max, prec2, sign);
	  min1 = wide_int::from (vr1.min, prec2, sign);
	  max1 = wide_int::from (vr1.max, prec2, sign);

	  /* Canonicalize the intervals.  */
	  if (sign == UNSIGNED)
	    {
	      if (wi::ltu_p (size, min0 + max0))
		{
		  min0 -= size;
		  max0 -= size;
		}

	      if (wi::ltu_p (size, min1 + max1))
		{
		  min1 -= size;
		  max1 -= size;
		}
	    }

	  prod0 = min0 * min1;
	  prod1 = min0 * max1;
	  prod2 = max0 * min1;
	  prod3 = max0 * max1;

	  /* Sort the 4 products so that min is in prod0 and max is in
	     prod3.  */
	  /* min0min1 > max0max1 */
	  if (wi::gts_p (prod0, prod3))
	    {
	      wide_int tmp = prod3;
	      prod3 = prod0;
	      prod0 = tmp;
	    }

	  /* min0max1 > max0min1 */
	  if (wi::gts_p (prod1, prod2))
	    {
	      wide_int tmp = prod2;
	      prod2 = prod1;
	      prod1 = tmp;
	    }

	  if (wi::gts_p (prod0, prod1))
	    {
	      wide_int tmp = prod1;
	      prod1 = prod0;
	      prod0 = tmp;
	    }

	  if (wi::gts_p (prod2, prod3))
	    {
	      wide_int tmp = prod3;
	      prod3 = prod2;
	      prod2 = tmp;
	    }

	  /* diff = max - min.  */
	  prod2 = prod3 - prod0;
	  if (wi::geu_p (prod2, sizem1))
	    {
	      /* the range covers all values.  */
	      set_value_range_to_varying (vr);
	      return;
	    }

	  /* The following should handle the wrapping and selecting
	     VR_ANTI_RANGE for us.  */
	  min = wide_int_to_tree (expr_type, prod0);
	  max = wide_int_to_tree (expr_type, prod3);
	  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
	  return;
	}

      /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
	 drop to VR_VARYING.  It would take more effort to compute a
	 precise range for such a case.  For example, if we have
	 op0 == 65536 and op1 == 65536 with their ranges both being
	 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
	 we cannot claim that the product is in ~[0,0].  Note that we
	 are guaranteed to have vr0.type == vr1.type at this
	 point.  */
      if (vr0.type == VR_ANTI_RANGE
	  && !TYPE_OVERFLOW_UNDEFINED (expr_type))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
      return;
    }
  else if (code == RSHIFT_EXPR
	   || code == LSHIFT_EXPR)
    {
      /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
	 then drop to VR_VARYING.  Outside of this range we get undefined
	 behavior from the shift operation.  We cannot even trust
	 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
	 shifts, and the operation at the tree level may be widened.  */
      if (range_int_cst_p (&vr1)
	  && compare_tree_int (vr1.min, 0) >= 0
	  && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
	{
	  if (code == RSHIFT_EXPR)
	    {
	      extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
	      return;
	    }
	  /* We can map lshifts by constants to MULT_EXPR handling.  */
	  else if (code == LSHIFT_EXPR
		   && range_int_cst_singleton_p (&vr1))
	    {
	      bool saved_flag_wrapv;
	      value_range_t vr1p = VR_INITIALIZER;
	      vr1p.type = VR_RANGE;
	      /* Build the constant 2**shift-count as the multiplier.  */
	      vr1p.min = (wide_int_to_tree
			  (expr_type,
			   wi::set_bit_in_zero (tree_to_shwi (vr1.min),
						TYPE_PRECISION (expr_type))));
	      vr1p.max = vr1p.min;
	      /* We have to use a wrapping multiply though as signed overflow
		 on lshifts is implementation defined in C89.  */
	      saved_flag_wrapv = flag_wrapv;
	      flag_wrapv = 1;
	      extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
						&vr0, &vr1p);
	      flag_wrapv = saved_flag_wrapv;
	      return;
	    }
	  else if (code == LSHIFT_EXPR
		   && range_int_cst_p (&vr0))
	    {
	      int prec = TYPE_PRECISION (expr_type);
	      int overflow_pos = prec;
	      int bound_shift;
	      wide_int bound, complement, low_bound, high_bound;
	      bool uns = TYPE_UNSIGNED (expr_type);
	      bool in_bounds = false;

	      if (!uns)
		overflow_pos -= 1;

	      bound_shift = overflow_pos - tree_to_shwi (vr1.max);
	      /* If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
		 overflow.  However, for that to happen, vr1.max needs to be
		 zero, which means vr1 is a singleton range of zero, which
		 means it should be handled by the previous LSHIFT_EXPR
		 if-clause.  */
	      bound = wi::set_bit_in_zero (bound_shift, prec);
	      complement = ~(bound - 1);

	      if (uns)
		{
		  low_bound = bound;
		  high_bound = complement;
		  if (wi::ltu_p (vr0.max, low_bound))
		    {
		      /* [5, 6] << [1, 2] == [10, 24].  */
		      /* We're shifting out only zeroes, the value increases
			 monotonically.  */
		      in_bounds = true;
		    }
		  else if (wi::ltu_p (high_bound, vr0.min))
		    {
		      /* [0xffffff00, 0xffffffff] << [1, 2]
			 == [0xfffffc00, 0xfffffffe].  */
		      /* We're shifting out only ones, the value decreases
			 monotonically.  */
		      in_bounds = true;
		    }
		}
	      else
		{
		  /* [-1, 1] << [1, 2] == [-4, 4].  */
		  low_bound = complement;
		  high_bound = bound;
		  if (wi::lts_p (vr0.max, high_bound)
		      && wi::lts_p (low_bound, vr0.min))
		    {
		      /* For non-negative numbers, we're shifting out only
			 zeroes, the value increases monotonically.
			 For negative numbers, we're shifting out only ones, the
			 value decreases monotonically.  */
		      in_bounds = true;
		    }
		}

	      if (in_bounds)
		{
		  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
		  return;
		}
	    }
	}
      set_value_range_to_varying (vr);
      return;
    }
  else if (code == TRUNC_DIV_EXPR
	   || code == FLOOR_DIV_EXPR
	   || code == CEIL_DIV_EXPR
	   || code == EXACT_DIV_EXPR
	   || code == ROUND_DIV_EXPR)
    {
      if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
	{
	  /* For division, if op1 has VR_RANGE but op0 does not, something
	     can be deduced just from that range.  Say [min, max] / [4, max]
	     gives [min / 4, max / 4] range.  */
	  if (vr1.type == VR_RANGE
	      && !symbolic_range_p (&vr1)
	      && range_includes_zero_p (vr1.min, vr1.max) == 0)
	    {
	      vr0.type = type = VR_RANGE;
	      vr0.min = vrp_val_min (expr_type);
	      vr0.max = vrp_val_max (expr_type);
	    }
	  else
	    {
	      set_value_range_to_varying (vr);
	      return;
	    }
	}

      /* For divisions, if flag_non_call_exceptions is true, we must
	 not eliminate a division by zero.  */
      if (cfun->can_throw_non_call_exceptions
	  && (vr1.type != VR_RANGE
	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* For divisions, if op0 is VR_RANGE, we can deduce a range
	 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
	 include 0.  */
      if (vr0.type == VR_RANGE
	  && (vr1.type != VR_RANGE
	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
	{
	  tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
	  int cmp;

	  min = NULL_TREE;
	  max = NULL_TREE;
	  if (TYPE_UNSIGNED (expr_type)
	      || value_range_nonnegative_p (&vr1))
	    {
	      /* For unsigned division or when divisor is known
		 to be non-negative, the range has to cover
		 all numbers from 0 to max for positive max
		 and all numbers from min to 0 for negative min.  */
	      cmp = compare_values (vr0.max, zero);
	      if (cmp == -1)
		max = zero;
	      else if (cmp == 0 || cmp == 1)
		max = vr0.max;
	      else
		type = VR_VARYING;
	      cmp = compare_values (vr0.min, zero);
	      if (cmp == 1)
		min = zero;
	      else if (cmp == 0 || cmp == -1)
		min = vr0.min;
	      else
		type = VR_VARYING;
	    }
	  else
	    {
	      /* Otherwise the range is -max .. max or min .. -min
		 depending on which bound is bigger in absolute value,
		 as the division can change the sign.  */
	      abs_extent_range (vr, vr0.min, vr0.max);
	      return;
	    }
	  if (type == VR_VARYING)
	    {
	      set_value_range_to_varying (vr);
	      return;
	    }
	}
      else
	{
	  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
	  return;
	}
    }
  else if (code == TRUNC_MOD_EXPR)
    {
      /* Punt on a divisor range that is symbolic, may contain zero,
	 or starts at the type minimum (|min| would overflow).  */
      if (vr1.type != VR_RANGE
	  || range_includes_zero_p (vr1.min, vr1.max) != 0
	  || vrp_val_is_min (vr1.min))
	{
	  set_value_range_to_varying (vr);
	  return;
	}
      type = VR_RANGE;
      /* Compute MAX <|vr1.min|, |vr1.max|> - 1.  */
      max = fold_unary_to_constant (ABS_EXPR, expr_type, vr1.min);
      if (tree_int_cst_lt (max, vr1.max))
	max = vr1.max;
      max = int_const_binop (MINUS_EXPR, max, build_int_cst (TREE_TYPE (max), 1));
      /* If the dividend is non-negative the modulus will be
	 non-negative as well.  */
      if (TYPE_UNSIGNED (expr_type)
	  || value_range_nonnegative_p (&vr0))
	min = build_int_cst (TREE_TYPE (max), 0);
      else
	min = fold_unary_to_constant (NEGATE_EXPR, expr_type, max);
    }
  else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
    {
      bool int_cst_range0, int_cst_range1;
      wide_int may_be_nonzero0, may_be_nonzero1;
      wide_int must_be_nonzero0, must_be_nonzero1;

      /* Derive known-zero/known-one bit masks from each operand's
	 range.  */
      int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0, &may_be_nonzero0,
						  &must_be_nonzero0);
      int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1, &may_be_nonzero1,
						  &must_be_nonzero1);

      type = VR_RANGE;
      if (code == BIT_AND_EXPR)
	{
	  wide_int wmax;
	  min = wide_int_to_tree (expr_type,
				  must_be_nonzero0 & must_be_nonzero1);
	  wmax = may_be_nonzero0 & may_be_nonzero1;
	  /* If both input ranges contain only negative values we can
	     truncate the result range maximum to the minimum of the
	     input range maxima.  */
	  if (int_cst_range0 && int_cst_range1
	      && tree_int_cst_sgn (vr0.max) < 0
	      && tree_int_cst_sgn (vr1.max) < 0)
	    {
	      wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
	      wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
	    }
	  /* If either input range contains only non-negative values
	     we can truncate the result range maximum to the respective
	     maximum of the input range.  */
	  if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
	    wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
	  if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
	    wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
	  max = wide_int_to_tree (expr_type, wmax);
	}
      else if (code == BIT_IOR_EXPR)
	{
	  wide_int wmin;
	  max = wide_int_to_tree (expr_type,
				  may_be_nonzero0 | may_be_nonzero1);
	  wmin = must_be_nonzero0 | must_be_nonzero1;
	  /* If the input ranges contain only positive values we can
	     truncate the minimum of the result range to the maximum
	     of the input range minima.  */
	  if (int_cst_range0 && int_cst_range1
	      && tree_int_cst_sgn (vr0.min) >= 0
	      && tree_int_cst_sgn (vr1.min) >= 0)
	    {
	      wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
	      wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
	    }
	  /* If either input range contains only negative values
	     we can truncate the minimum of the result range to the
	     respective minimum range.  */
	  if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
	    wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
	  if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
	    wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
	  min = wide_int_to_tree (expr_type, wmin);
	}
      else if (code == BIT_XOR_EXPR)
	{
	  wide_int result_zero_bits, result_one_bits;
	  result_zero_bits = (must_be_nonzero0 & must_be_nonzero1)
			     | ~(may_be_nonzero0 | may_be_nonzero1);
	  result_one_bits = must_be_nonzero0.and_not (may_be_nonzero1)
			    | must_be_nonzero1.and_not (may_be_nonzero0);
	  max = wide_int_to_tree (expr_type, ~result_zero_bits);
	  min = wide_int_to_tree (expr_type, result_one_bits);
	  /* If the range has all positive or all negative values the
	     result is better than VARYING.  */
	  if (tree_int_cst_sgn (min) < 0
	      || tree_int_cst_sgn (max) >= 0)
	    ;
	  else
	    max = min = NULL_TREE;
	}
    }
  else
    gcc_unreachable ();

  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  But we do accept an overflow infinity
     representation.  */
  if (min == NULL_TREE
      || !is_gimple_min_invariant (min)
      || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
      || max == NULL_TREE
      || !is_gimple_min_invariant (max)
      || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* We punt if:
     1) [-INF, +INF]
     2) [-INF, +-INF(OVF)]
     3) [+-INF(OVF), +INF]
     4) [+-INF(OVF), +-INF(OVF)]
     We learn nothing when we have INF and INF(OVF) on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF] without
     overflow.  */
  if ((vrp_val_is_min (min) || is_overflow_infinity (min))
      && (vrp_val_is_max (max) || is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
    {
      /* If the new range has its limits swapped around (MIN > MAX),
	 then the operation caused one of them to wrap around, mark
	 the new range VARYING.  */
      set_value_range_to_varying (vr);
    }
  else
    set_value_range (vr, type, min, max, NULL);
}
3081
3082 /* Extract range information from a binary expression OP0 CODE OP1 based on
3083 the ranges of each of its operands with resulting type EXPR_TYPE.
3084 The resulting range is stored in *VR. */
3085
3086 static void
3087 extract_range_from_binary_expr (value_range_t *vr,
3088 enum tree_code code,
3089 tree expr_type, tree op0, tree op1)
3090 {
3091 value_range_t vr0 = VR_INITIALIZER;
3092 value_range_t vr1 = VR_INITIALIZER;
3093
3094 /* Get value ranges for each operand. For constant operands, create
3095 a new value range with the operand to simplify processing. */
3096 if (TREE_CODE (op0) == SSA_NAME)
3097 vr0 = *(get_value_range (op0));
3098 else if (is_gimple_min_invariant (op0))
3099 set_value_range_to_value (&vr0, op0, NULL);
3100 else
3101 set_value_range_to_varying (&vr0);
3102
3103 if (TREE_CODE (op1) == SSA_NAME)
3104 vr1 = *(get_value_range (op1));
3105 else if (is_gimple_min_invariant (op1))
3106 set_value_range_to_value (&vr1, op1, NULL);
3107 else
3108 set_value_range_to_varying (&vr1);
3109
3110 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
3111 }
3112
3113 /* Extract range information from a unary operation CODE based on
3114 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
   The resulting range is stored in *VR.  */
3116
static void
extract_range_from_unary_expr_1 (value_range_t *vr,
				 enum tree_code code, tree type,
				 value_range_t *vr0_, tree op0_type)
{
  /* Work on a local copy so the ~[] decomposition below can mutate it.  */
  value_range_t vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;

  /* VRP only operates on integral and pointer types.  */
  if (!(INTEGRAL_TYPE_P (op0_type)
	|| POINTER_TYPE_P (op0_type))
      || !(INTEGRAL_TYPE_P (type)
	   || POINTER_TYPE_P (type)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* If VR0 is UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }

  /* Handle operations that we express in terms of others.  */
  if (code == PAREN_EXPR)
    {
      /* PAREN_EXPR is a simple copy.  */
      copy_value_range (vr, &vr0);
      return;
    }
  else if (code == NEGATE_EXPR)
    {
      /* -X is simply 0 - X, so re-use existing code that also handles
	 anti-ranges fine.  */
      value_range_t zero = VR_INITIALIZER;
      set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
      extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
      return;
    }
  else if (code == BIT_NOT_EXPR)
    {
      /* ~X is simply -1 - X, so re-use existing code that also handles
	 anti-ranges fine.  */
      value_range_t minusone = VR_INITIALIZER;
      set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
      extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
					type, &minusone, &vr0);
      return;
    }

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express op ~[]  as (op []') U (op []'').  */
  if (vr0.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_unary_expr_1 (vr, code, type, &vrtem0, op0_type);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range_t vrres = VR_INITIALIZER;
	  extract_range_from_unary_expr_1 (&vrres, code, type,
					   &vrtem1, op0_type);
	  /* Union the results for the two halves of the anti-range.  */
	  vrp_meet (vr, &vrres);
	}
      return;
    }

  if (CONVERT_EXPR_CODE_P (code))
    {
      tree inner_type = op0_type;
      tree outer_type = type;

      /* If the expression evaluates to a pointer, we are only interested in
	 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]).  */
      if (POINTER_TYPE_P (type))
	{
	  if (range_is_nonnull (&vr0))
	    set_value_range_to_nonnull (vr, type);
	  else if (range_is_null (&vr0))
	    set_value_range_to_null (vr, type);
	  else
	    set_value_range_to_varying (vr);
	  return;
	}

      /* If VR0 is varying and we increase the type precision, assume
	 a full range for the following transformation.  */
      if (vr0.type == VR_VARYING
	  && INTEGRAL_TYPE_P (inner_type)
	  && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
	{
	  vr0.type = VR_RANGE;
	  vr0.min = TYPE_MIN_VALUE (inner_type);
	  vr0.max = TYPE_MAX_VALUE (inner_type);
	}

      /* If VR0 is a constant range or anti-range and the conversion is
	 not truncating we can convert the min and max values and
	 canonicalize the resulting range.  Otherwise we can do the
	 conversion if the size of the range is less than what the
	 precision of the target type can represent and the range is
	 not an anti-range.  */
      if ((vr0.type == VR_RANGE
	   || vr0.type == VR_ANTI_RANGE)
	  && TREE_CODE (vr0.min) == INTEGER_CST
	  && TREE_CODE (vr0.max) == INTEGER_CST
	  && (!is_overflow_infinity (vr0.min)
	      || (vr0.type == VR_RANGE
		  && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
		  && needs_overflow_infinity (outer_type)
		  && supports_overflow_infinity (outer_type)))
	  && (!is_overflow_infinity (vr0.max)
	      || (vr0.type == VR_RANGE
		  && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
		  && needs_overflow_infinity (outer_type)
		  && supports_overflow_infinity (outer_type)))
	  && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
	      || (vr0.type == VR_RANGE
		  && integer_zerop (int_const_binop (RSHIFT_EXPR,
		       int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
			 size_int (TYPE_PRECISION (outer_type)))))))
	{
	  tree new_min, new_max;
	  /* Overflow infinities are translated to the corresponding
	     infinity of the outer type; ordinary constants are re-fit.  */
	  if (is_overflow_infinity (vr0.min))
	    new_min = negative_overflow_infinity (outer_type);
	  else
	    new_min = force_fit_type (outer_type,
				      wide_int::from
					(vr0.min,
					 TYPE_PRECISION (outer_type),
					 TYPE_SIGN (TREE_TYPE (vr0.min))),
				      0, false);
	  if (is_overflow_infinity (vr0.max))
	    new_max = positive_overflow_infinity (outer_type);
	  else
	    new_max = force_fit_type (outer_type,
				      wide_int::from
					(vr0.max,
					 TYPE_PRECISION (outer_type),
					 TYPE_SIGN (TREE_TYPE (vr0.max))),
				      0, false);
	  set_and_canonicalize_value_range (vr, vr0.type,
					    new_min, new_max, NULL);
	  return;
	}

      set_value_range_to_varying (vr);
      return;
    }
  else if (code == ABS_EXPR)
    {
      tree min, max;
      int cmp;

      /* Pass through vr0 in the easy cases.  */
      if (TYPE_UNSIGNED (type)
	  || value_range_nonnegative_p (&vr0))
	{
	  copy_value_range (vr, &vr0);
	  return;
	}

      /* For the remaining varying or symbolic ranges we can't do anything
	 useful.  */
      if (vr0.type == VR_VARYING
	  || symbolic_range_p (&vr0))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
         useful range.  */
      if (!TYPE_OVERFLOW_UNDEFINED (type)
	  && ((vr0.type == VR_RANGE
	       && vrp_val_is_min (vr0.min))
	      || (vr0.type == VR_ANTI_RANGE
		  && !vrp_val_is_min (vr0.min))))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* ABS_EXPR may flip the range around, if the original range
	 included negative values.  */
      if (is_overflow_infinity (vr0.min))
	min = positive_overflow_infinity (type);
      else if (!vrp_val_is_min (vr0.min))
	min = fold_unary_to_constant (code, type, vr0.min);
      else if (!needs_overflow_infinity (type))
	min = TYPE_MAX_VALUE (type);
      else if (supports_overflow_infinity (type))
	min = positive_overflow_infinity (type);
      else
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      if (is_overflow_infinity (vr0.max))
	max = positive_overflow_infinity (type);
      else if (!vrp_val_is_min (vr0.max))
	max = fold_unary_to_constant (code, type, vr0.max);
      else if (!needs_overflow_infinity (type))
	max = TYPE_MAX_VALUE (type);
      else if (supports_overflow_infinity (type)
	       /* We shouldn't generate [+INF, +INF] as set_value_range
		  doesn't like this and ICEs.  */
	       && !is_positive_overflow_infinity (min))
	max = positive_overflow_infinity (type);
      else
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      cmp = compare_values (min, max);

      /* If a VR_ANTI_RANGE contains zero, then we have
	 ~[-INF, min(MIN, MAX)].  */
      if (vr0.type == VR_ANTI_RANGE)
	{
	  if (range_includes_zero_p (vr0.min, vr0.max) == 1)
	    {
	      /* Take the lower of the two values.  */
	      if (cmp != 1)
		max = min;

	      /* Create ~[-INF, min (abs(MIN), abs(MAX))]
	         or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
		 flag_wrapv is set and the original anti-range doesn't include
	         TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE.  */
	      if (TYPE_OVERFLOW_WRAPS (type))
		{
		  tree type_min_value = TYPE_MIN_VALUE (type);

		  min = (vr0.min != type_min_value
			 ? int_const_binop (PLUS_EXPR, type_min_value,
					    build_int_cst (TREE_TYPE (type_min_value), 1))
			 : type_min_value);
		}
	      else
		{
		  if (overflow_infinity_range_p (&vr0))
		    min = negative_overflow_infinity (type);
		  else
		    min = TYPE_MIN_VALUE (type);
		}
	    }
	  else
	    {
	      /* All else has failed, so create the range [0, INF], even for
		 flag_wrapv since TYPE_MIN_VALUE is in the original
		 anti-range.  */
	      vr0.type = VR_RANGE;
	      min = build_int_cst (type, 0);
	      if (needs_overflow_infinity (type))
		{
		  if (supports_overflow_infinity (type))
		    max = positive_overflow_infinity (type);
		  else
		    {
		      set_value_range_to_varying (vr);
		      return;
		    }
		}
	      else
		max = TYPE_MAX_VALUE (type);
	    }
	}

      /* If the range contains zero then we know that the minimum value in the
         range will be zero.  */
      else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
	{
	  if (cmp == 1)
	    max = min;
	  min = build_int_cst (type, 0);
	}
      else
	{
	  /* If the range was reversed, swap MIN and MAX.  */
	  if (cmp == 1)
	    {
	      tree t = min;
	      min = max;
	      max = t;
	    }
	}

      cmp = compare_values (min, max);
      if (cmp == -2 || cmp == 1)
	{
	  /* If the new range has its limits swapped around (MIN > MAX),
	     then the operation caused one of them to wrap around, mark
	     the new range VARYING.  */
	  set_value_range_to_varying (vr);
	}
      else
	set_value_range (vr, vr0.type, min, max, NULL);
      return;
    }

  /* For unhandled operations fall back to varying.  */
  set_value_range_to_varying (vr);
  return;
}
3424
3425
3426 /* Extract range information from a unary expression CODE OP0 based on
3427 the range of its operand with resulting type TYPE.
3428 The resulting range is stored in *VR. */
3429
3430 static void
3431 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
3432 tree type, tree op0)
3433 {
3434 value_range_t vr0 = VR_INITIALIZER;
3435
3436 /* Get value ranges for the operand. For constant operands, create
3437 a new value range with the operand to simplify processing. */
3438 if (TREE_CODE (op0) == SSA_NAME)
3439 vr0 = *(get_value_range (op0));
3440 else if (is_gimple_min_invariant (op0))
3441 set_value_range_to_value (&vr0, op0, NULL);
3442 else
3443 set_value_range_to_varying (&vr0);
3444
3445 extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0));
3446 }
3447
3448
3449 /* Extract range information from a conditional expression STMT based on
3450 the ranges of each of its operands and the expression code. */
3451
3452 static void
3453 extract_range_from_cond_expr (value_range_t *vr, gimple stmt)
3454 {
3455 tree op0, op1;
3456 value_range_t vr0 = VR_INITIALIZER;
3457 value_range_t vr1 = VR_INITIALIZER;
3458
3459 /* Get value ranges for each operand. For constant operands, create
3460 a new value range with the operand to simplify processing. */
3461 op0 = gimple_assign_rhs2 (stmt);
3462 if (TREE_CODE (op0) == SSA_NAME)
3463 vr0 = *(get_value_range (op0));
3464 else if (is_gimple_min_invariant (op0))
3465 set_value_range_to_value (&vr0, op0, NULL);
3466 else
3467 set_value_range_to_varying (&vr0);
3468
3469 op1 = gimple_assign_rhs3 (stmt);
3470 if (TREE_CODE (op1) == SSA_NAME)
3471 vr1 = *(get_value_range (op1));
3472 else if (is_gimple_min_invariant (op1))
3473 set_value_range_to_value (&vr1, op1, NULL);
3474 else
3475 set_value_range_to_varying (&vr1);
3476
3477 /* The resulting value range is the union of the operand ranges */
3478 copy_value_range (vr, &vr0);
3479 vrp_meet (vr, &vr1);
3480 }
3481
3482
3483 /* Extract range information from a comparison expression EXPR based
3484 on the range of its operand and the expression code. */
3485
3486 static void
3487 extract_range_from_comparison (value_range_t *vr, enum tree_code code,
3488 tree type, tree op0, tree op1)
3489 {
3490 bool sop = false;
3491 tree val;
3492
3493 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3494 NULL);
3495
3496 /* A disadvantage of using a special infinity as an overflow
3497 representation is that we lose the ability to record overflow
3498 when we don't have an infinity. So we have to ignore a result
3499 which relies on overflow. */
3500
3501 if (val && !is_overflow_infinity (val) && !sop)
3502 {
3503 /* Since this expression was found on the RHS of an assignment,
3504 its type may be different from _Bool. Convert VAL to EXPR's
3505 type. */
3506 val = fold_convert (type, val);
3507 if (is_gimple_min_invariant (val))
3508 set_value_range_to_value (vr, val, vr->equiv);
3509 else
3510 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3511 }
3512 else
3513 /* The result of a comparison is always true or false. */
3514 set_value_range_to_truthvalue (vr, type);
3515 }
3516
3517 /* Try to derive a nonnegative or nonzero range out of STMT relying
3518 primarily on generic routines in fold in conjunction with range data.
3519 Store the result in *VR */
3520
static void
extract_range_basic (value_range_t *vr, gimple stmt)
{
  bool sop = false;
  tree type = gimple_expr_type (stmt);

  /* For calls to normal builtins we can often derive a tight range
     directly from the builtin's semantics.  */
  if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
    {
      tree fndecl = gimple_call_fndecl (stmt), arg;
      int mini, maxi, zerov = 0, prec;

      switch (DECL_FUNCTION_CODE (fndecl))
	{
	case BUILT_IN_CONSTANT_P:
	  /* If the call is __builtin_constant_p and the argument is a
	     function parameter resolve it to false.  This avoids bogus
	     array bound warnings.
	     ??? We could do this as early as inlining is finished.  */
	  arg = gimple_call_arg (stmt, 0);
	  if (TREE_CODE (arg) == SSA_NAME
	      && SSA_NAME_IS_DEFAULT_DEF (arg)
	      && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL)
	    {
	      set_value_range_to_null (vr, type);
	      return;
	    }
	  break;
	  /* Both __builtin_ffs* and __builtin_popcount return
	     [0, prec].  */
	CASE_INT_FN (BUILT_IN_FFS):
	CASE_INT_FN (BUILT_IN_POPCOUNT):
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec;
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      value_range_t *vr0 = get_value_range (arg);
	      /* If arg is non-zero, then ffs or popcount
		 are non-zero.  */
	      if (((vr0->type == VR_RANGE
		    && integer_nonzerop (vr0->min))
		   || (vr0->type == VR_ANTI_RANGE
		       && integer_zerop (vr0->min)))
		  && !TREE_OVERFLOW (vr0->min))
		mini = 1;
	      /* If some high bits are known to be zero,
		 we can decrease the maximum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->max) == INTEGER_CST
		  && !TREE_OVERFLOW (vr0->max))
		maxi = tree_floor_log2 (vr0->max) + 1;
	    }
	  goto bitop_builtin;
	  /* __builtin_parity* returns [0, 1].  */
	CASE_INT_FN (BUILT_IN_PARITY):
	  mini = 0;
	  maxi = 1;
	  goto bitop_builtin;
	  /* __builtin_c[lt]z* return [0, prec-1], except for
	     when the argument is 0, but that is undefined behavior.
	     On many targets where the CLZ RTL or optab value is defined
	     for 0 the value is prec, so include that in the range
	     by default.  */
	CASE_INT_FN (BUILT_IN_CLZ):
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec;
	  if (optab_handler (clz_optab, TYPE_MODE (TREE_TYPE (arg)))
	      != CODE_FOR_nothing
	      && CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
					    zerov)
	      /* Handle only the single common value.  */
	      && zerov != prec)
	    /* Magic value to give up, unless vr0 proves
	       arg is non-zero.  */
	    mini = -2;
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      value_range_t *vr0 = get_value_range (arg);
	      /* From clz of VR_RANGE minimum we can compute
		 result maximum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->min) == INTEGER_CST
		  && !TREE_OVERFLOW (vr0->min))
		{
		  maxi = prec - 1 - tree_floor_log2 (vr0->min);
		  if (maxi != prec)
		    mini = 0;
		}
	      else if (vr0->type == VR_ANTI_RANGE
		       && integer_zerop (vr0->min)
		       && !TREE_OVERFLOW (vr0->min))
		{
		  maxi = prec - 1;
		  mini = 0;
		}
	      if (mini == -2)
		break;
	      /* From clz of VR_RANGE maximum we can compute
		 result minimum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->max) == INTEGER_CST
		  && !TREE_OVERFLOW (vr0->max))
		{
		  mini = prec - 1 - tree_floor_log2 (vr0->max);
		  if (mini == prec)
		    break;
		}
	    }
	  if (mini == -2)
	    break;
	  goto bitop_builtin;
	  /* __builtin_ctz* return [0, prec-1], except for
	     when the argument is 0, but that is undefined behavior.
	     If there is a ctz optab for this mode and
	     CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
	     otherwise just assume 0 won't be seen.  */
	CASE_INT_FN (BUILT_IN_CTZ):
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec - 1;
	  if (optab_handler (ctz_optab, TYPE_MODE (TREE_TYPE (arg)))
	      != CODE_FOR_nothing
	      && CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
					    zerov))
	    {
	      /* Handle only the two common values.  */
	      if (zerov == -1)
		mini = -1;
	      else if (zerov == prec)
		maxi = prec;
	      else
		/* Magic value to give up, unless vr0 proves
		   arg is non-zero.  */
		mini = -2;
	    }
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      value_range_t *vr0 = get_value_range (arg);
	      /* If arg is non-zero, then use [0, prec - 1].  */
	      if (((vr0->type == VR_RANGE
		    && integer_nonzerop (vr0->min))
		   || (vr0->type == VR_ANTI_RANGE
		       && integer_zerop (vr0->min)))
		  && !TREE_OVERFLOW (vr0->min))
		{
		  mini = 0;
		  maxi = prec - 1;
		}
	      /* If some high bits are known to be zero,
		 we can decrease the result maximum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->max) == INTEGER_CST
		  && !TREE_OVERFLOW (vr0->max))
		{
		  maxi = tree_floor_log2 (vr0->max);
		  /* For vr0 [0, 0] give up.  */
		  if (maxi == -1)
		    break;
		}
	    }
	  if (mini == -2)
	    break;
	  goto bitop_builtin;
	  /* __builtin_clrsb* returns [0, prec-1].  */
	CASE_INT_FN (BUILT_IN_CLRSB):
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec - 1;
	  goto bitop_builtin;
	bitop_builtin:
	  /* Common exit for the bit-operation builtins above: the result
	     is known to lie in [mini, maxi].  */
	  set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
			   build_int_cst (type, maxi), NULL);
	  return;
	default:
	  break;
	}
    }
  /* Otherwise fall back to generic fold-based queries: nonnegativity
     and nonzeroness.  */
  if (INTEGRAL_TYPE_P (type)
      && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
    set_value_range_to_nonnegative (vr, type,
				    sop || stmt_overflow_infinity (stmt));
  else if (vrp_stmt_computes_nonzero (stmt, &sop)
	   && !sop)
    set_value_range_to_nonnull (vr, type);
  else
    set_value_range_to_varying (vr);
}
3713
3714
3715 /* Try to compute a useful range out of assignment STMT and store it
3716 in *VR. */
3717
3718 static void
3719 extract_range_from_assignment (value_range_t *vr, gimple stmt)
3720 {
3721 enum tree_code code = gimple_assign_rhs_code (stmt);
3722
3723 if (code == ASSERT_EXPR)
3724 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
3725 else if (code == SSA_NAME)
3726 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
3727 else if (TREE_CODE_CLASS (code) == tcc_binary)
3728 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
3729 gimple_expr_type (stmt),
3730 gimple_assign_rhs1 (stmt),
3731 gimple_assign_rhs2 (stmt));
3732 else if (TREE_CODE_CLASS (code) == tcc_unary)
3733 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
3734 gimple_expr_type (stmt),
3735 gimple_assign_rhs1 (stmt));
3736 else if (code == COND_EXPR)
3737 extract_range_from_cond_expr (vr, stmt);
3738 else if (TREE_CODE_CLASS (code) == tcc_comparison)
3739 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
3740 gimple_expr_type (stmt),
3741 gimple_assign_rhs1 (stmt),
3742 gimple_assign_rhs2 (stmt));
3743 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
3744 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
3745 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
3746 else
3747 set_value_range_to_varying (vr);
3748
3749 if (vr->type == VR_VARYING)
3750 extract_range_basic (vr, stmt);
3751 }
3752
3753 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3754 would be profitable to adjust VR using scalar evolution information
3755 for VAR. If so, update VR with the new limits. */
3756
static void
adjust_range_with_scev (value_range_t *vr, struct loop *loop,
			gimple stmt, tree var)
{
  tree init, step, chrec, tmin, tmax, min, max, type, tem;
  enum ev_direction dir;

  /* TODO.  Don't adjust anti-ranges.  An anti-range may provide
     better opportunities than a regular range, but I'm not sure.  */
  if (vr->type == VR_ANTI_RANGE)
    return;

  chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));

  /* Like in PR19590, scev can return a constant function.  */
  if (is_gimple_min_invariant (chrec))
    {
      set_value_range_to_value (vr, chrec, vr->equiv);
      return;
    }

  /* Only simple affine evolutions can be used below.  */
  if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
    return;

  init = initial_condition_in_loop_num (chrec, loop->num);
  /* Replace INIT/STEP by a known constant singleton value range
     when one is available.  */
  tem = op_with_constant_singleton_value_range (init);
  if (tem)
    init = tem;
  step = evolution_part_in_loop_num (chrec, loop->num);
  tem = op_with_constant_singleton_value_range (step);
  if (tem)
    step = tem;

  /* If STEP is symbolic, we can't know whether INIT will be the
     minimum or maximum value in the range.  Also, unless INIT is
     a simple expression, compare_values and possibly other functions
     in tree-vrp won't be able to handle it.  */
  if (step == NULL_TREE
      || !is_gimple_min_invariant (step)
      || !valid_value_p (init))
    return;

  dir = scev_direction (chrec);
  if (/* Do not adjust ranges if we do not know whether the iv increases
	 or decreases,  ... */
      dir == EV_DIR_UNKNOWN
      /* ... or if it may wrap.  */
      || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
				true))
    return;

  /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
     negative_overflow_infinity and positive_overflow_infinity,
     because we have concluded that the loop probably does not
     wrap.  */

  type = TREE_TYPE (var);
  if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
    tmin = lower_bound_in_type (type, type);
  else
    tmin = TYPE_MIN_VALUE (type);
  if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
    tmax = upper_bound_in_type (type, type);
  else
    tmax = TYPE_MAX_VALUE (type);

  /* Try to use estimated number of iterations for the loop to constrain the
     final value in the evolution.  */
  if (TREE_CODE (step) == INTEGER_CST
      && is_gimple_val (init)
      && (TREE_CODE (init) != SSA_NAME
	  || get_value_range (init)->type == VR_RANGE))
    {
      max_wide_int nit;

      /* We are only entering here for loop header PHI nodes, so using
	 the number of latch executions is the correct thing to use.  */
      if (max_loop_iterations (loop, &nit))
	{
	  value_range_t maxvr = VR_INITIALIZER;
	  max_wide_int wtmp;
	  signop sgn = TYPE_SIGN (TREE_TYPE (step));
	  bool overflow;

	  /* Total change over the whole loop is STEP * NIT.  */
	  wtmp = wi::mul (step, nit, sgn, &overflow);
	  /* If the multiplication overflowed we can't do a meaningful
	     adjustment.  Likewise if the result doesn't fit in the type
	     of the induction variable.  For a signed type we have to
	     check whether the result has the expected signedness which
	     is that of the step as number of iterations is unsigned.  */
	  if (!overflow
	      && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
	      && (sgn == UNSIGNED
		  || wi::gts_p (wtmp, 0) == wi::gts_p (step, 0)))
	    {
	      tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
	      extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
					      TREE_TYPE (init), init, tem);
	      /* Likewise if the addition did.  */
	      if (maxvr.type == VR_RANGE)
		{
		  tmin = maxvr.min;
		  tmax = maxvr.max;
		}
	    }
	}
    }

  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
    {
      min = tmin;
      max = tmax;

      /* For VARYING or UNDEFINED ranges, just about anything we get
	 from scalar evolutions should be better.  */

      if (dir == EV_DIR_DECREASES)
	max = init;
      else
	min = init;

      /* If we would create an invalid range, then just assume we
	 know absolutely nothing.  This may be over-conservative,
	 but it's clearly safe, and should happen only in unreachable
         parts of code, or for invalid programs.  */
      if (compare_values (min, max) == 1)
	return;

      set_value_range (vr, VR_RANGE, min, max, vr->equiv);
    }
  else if (vr->type == VR_RANGE)
    {
      min = vr->min;
      max = vr->max;

      if (dir == EV_DIR_DECREASES)
	{
	  /* INIT is the maximum value.  If INIT is lower than VR->MAX
	     but no smaller than VR->MIN, set VR->MAX to INIT.  */
	  if (compare_values (init, max) == -1)
	    max = init;

	  /* According to the loop information, the variable does not
	     overflow.  If we think it does, probably because of an
	     overflow due to arithmetic on a different INF value,
	     reset now.  */
	  if (is_negative_overflow_infinity (min)
	      || compare_values (min, tmin) == -1)
	    min = tmin;

	}
      else
	{
	  /* If INIT is bigger than VR->MIN, set VR->MIN to INIT.  */
	  if (compare_values (init, min) == 1)
	    min = init;

	  if (is_positive_overflow_infinity (max)
	      || compare_values (tmax, max) == -1)
	    max = tmax;
	}

      /* If we just created an invalid range with the minimum
	 greater than the maximum, we fail conservatively.
	 This should happen only in unreachable
	 parts of code, or for invalid programs.  */
      if (compare_values (min, max) == 1)
	return;

      set_value_range (vr, VR_RANGE, min, max, vr->equiv);
    }
}
3929
3930 /* Return true if VAR may overflow at STMT. This checks any available
3931 loop information to see if we can determine that VAR does not
3932 overflow. */
3933
3934 static bool
3935 vrp_var_may_overflow (tree var, gimple stmt)
3936 {
3937 struct loop *l;
3938 tree chrec, init, step;
3939
3940 if (current_loops == NULL)
3941 return true;
3942
3943 l = loop_containing_stmt (stmt);
3944 if (l == NULL
3945 || !loop_outer (l))
3946 return true;
3947
3948 chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
3949 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3950 return true;
3951
3952 init = initial_condition_in_loop_num (chrec, l->num);
3953 step = evolution_part_in_loop_num (chrec, l->num);
3954
3955 if (step == NULL_TREE
3956 || !is_gimple_min_invariant (step)
3957 || !valid_value_p (init))
3958 return true;
3959
3960 /* If we get here, we know something useful about VAR based on the
3961 loop information. If it wraps, it may overflow. */
3962
3963 if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3964 true))
3965 return true;
3966
3967 if (dump_file && (dump_flags & TDF_DETAILS) != 0)
3968 {
3969 print_generic_expr (dump_file, var, 0);
3970 fprintf (dump_file, ": loop information indicates does not overflow\n");
3971 }
3972
3973 return false;
3974 }
3975
3976
3977 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
3978
3979 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
3980 all the values in the ranges.
3981
3982 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
3983
3984 - Return NULL_TREE if it is not always possible to determine the
3985 value of the comparison.
3986
3987 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
3988 overflow infinity was used in the test. */
3989
3990
static tree
compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
		bool *strict_overflow_p)
{
  /* VARYING or UNDEFINED ranges cannot be compared.  */
  if (vr0->type == VR_VARYING
      || vr0->type == VR_UNDEFINED
      || vr1->type == VR_VARYING
      || vr1->type == VR_UNDEFINED)
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
    {
      /* If both are anti-ranges, then we cannot compute any
	 comparison.  */
      if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
	return NULL_TREE;

      /* These comparisons are never statically computable.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* Equality can be computed only between a range and an
	 anti-range.  ~[VAL1, VAL2] == [VAL1, VAL2] is always false.  */
      if (vr0->type == VR_RANGE)
	{
	  /* To simplify processing, make VR0 the anti-range.  */
	  value_range_t *tmp = vr0;
	  vr0 = vr1;
	  vr1 = tmp;
	}

      gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);

      /* If the anti-range and the range have identical bounds, EQ is
	 always false and NE always true.  */
      if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
	  && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  if (!usable_range_p (vr0, strict_overflow_p)
      || !usable_range_p (vr1, strict_overflow_p))
    return NULL_TREE;

  /* Simplify processing.  If COMP is GT_EXPR or GE_EXPR, switch the
     operands around and change the comparison code.  */
  if (comp == GT_EXPR || comp == GE_EXPR)
    {
      value_range_t *tmp;
      comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
      tmp = vr0;
      vr0 = vr1;
      vr1 = tmp;
    }

  if (comp == EQ_EXPR)
    {
      /* Equality may only be computed if both ranges represent
	 exactly one value.  */
      if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
	  && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
	{
	  int cmp_min = compare_values_warnv (vr0->min, vr1->min,
					      strict_overflow_p);
	  int cmp_max = compare_values_warnv (vr0->max, vr1->max,
					      strict_overflow_p);
	  if (cmp_min == 0 && cmp_max == 0)
	    return boolean_true_node;
	  else if (cmp_min != -2 && cmp_max != -2)
	    return boolean_false_node;
	}
      /* If [V0_MIN, V1_MAX] < [V1_MIN, V1_MAX] then V0 != V1.  */
      else if (compare_values_warnv (vr0->min, vr1->max,
				     strict_overflow_p) == 1
	       || compare_values_warnv (vr1->min, vr0->max,
					strict_overflow_p) == 1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      int cmp1, cmp2;

      /* If VR0 is completely to the left or completely to the right
	 of VR1, they are always different.  Notice that we need to
	 make sure that both comparisons yield similar results to
	 avoid comparing values that cannot be compared at
	 compile-time.  */
      cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
      cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
      if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
	return boolean_true_node;

      /* If VR0 and VR1 represent a single value and are identical,
	 return false.  */
      else if (compare_values_warnv (vr0->min, vr0->max,
				     strict_overflow_p) == 0
	       && compare_values_warnv (vr1->min, vr1->max,
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->min, vr1->min,
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->max, vr1->max,
					strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      else
	return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR0 is to the left of VR1, return true.  */
      tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	{
	  if (overflow_infinity_range_p (vr0)
	      || overflow_infinity_range_p (vr1))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR0 is to the right of VR1, return false.  */
      tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	{
	  if (overflow_infinity_range_p (vr0)
	      || overflow_infinity_range_p (vr1))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  /* All comparison codes were handled above.  */
  gcc_unreachable ();
}
4138
4139
4140 /* Given a value range VR, a value VAL and a comparison code COMP, return
4141 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
4142 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
4143 always returns false. Return NULL_TREE if it is not always
4144 possible to determine the value of the comparison. Also set
4145 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
4146 infinity was used in the test. */
4147
static tree
compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
			  bool *strict_overflow_p)
{
  /* Nothing can be concluded from a varying or undefined range.  */
  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr->type == VR_ANTI_RANGE)
    {
      /* For anti-ranges, the only predicates that we can compute at
	 compile time are equality and inequality.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2.  */
      if (value_inside_range (val, vr->min, vr->max) == 1)
	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  /* Give up on ranges whose bounds are overflow infinities we may not
     rely on (this also records the overflow in *STRICT_OVERFLOW_P).  */
  if (!usable_range_p (vr, strict_overflow_p))
    return NULL_TREE;

  if (comp == EQ_EXPR)
    {
      /* EQ_EXPR may only be computed if VR represents exactly
	 one value.  */
      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
	{
	  int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
	  if (cmp == 0)
	    return boolean_true_node;
	  /* -1/1 mean a strict ordering was established; 2 means the
	     values are known distinct even though their order is
	     unknown.  In every case equality is impossible.  */
	  else if (cmp == -1 || cmp == 1 || cmp == 2)
	    return boolean_false_node;
	}
      /* Even for a multi-valued range, if VAL lies strictly outside
	 [VR->MIN, VR->MAX] equality is impossible.  */
      else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
	       || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      /* If VAL is not inside VR, then they are always different.  */
      if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
	  || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
	return boolean_true_node;

      /* If VR represents exactly one value equal to VAL, then return
	 false.  */
      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
	  && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR is to the left of VAL, return true.  */
      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	{
	  /* The answer depends on an overflow-infinity bound; tell
	     the caller so it can warn about undefined overflow.  */
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR is to the right of VAL, return false.  */
      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }
  else if (comp == GT_EXPR || comp == GE_EXPR)
    {
      int tst;

      /* If VR is to the right of VAL, return true.  */
      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
      if ((comp == GT_EXPR && tst == 1)
	  || (comp == GE_EXPR && (tst == 0 || tst == 1)))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR is to the left of VAL, return false.  */
      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
      if ((comp == GT_EXPR && (tst == -1 || tst == 0))
	  || (comp == GE_EXPR && tst == -1))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  /* Every comparison code handled here is covered by a branch above.  */
  gcc_unreachable ();
}
4267
4268
4269 /* Debugging dumps. */
4270
4271 void dump_value_range (FILE *, value_range_t *);
4272 void debug_value_range (value_range_t *);
4273 void dump_all_value_ranges (FILE *);
4274 void debug_all_value_ranges (void);
4275 void dump_vr_equiv (FILE *, bitmap);
4276 void debug_vr_equiv (bitmap);
4277
4278
4279 /* Dump value range VR to FILE. */
4280
4281 void
4282 dump_value_range (FILE *file, value_range_t *vr)
4283 {
4284 if (vr == NULL)
4285 fprintf (file, "[]");
4286 else if (vr->type == VR_UNDEFINED)
4287 fprintf (file, "UNDEFINED");
4288 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4289 {
4290 tree type = TREE_TYPE (vr->min);
4291
4292 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
4293
4294 if (is_negative_overflow_infinity (vr->min))
4295 fprintf (file, "-INF(OVF)");
4296 else if (INTEGRAL_TYPE_P (type)
4297 && !TYPE_UNSIGNED (type)
4298 && vrp_val_is_min (vr->min))
4299 fprintf (file, "-INF");
4300 else
4301 print_generic_expr (file, vr->min, 0);
4302
4303 fprintf (file, ", ");
4304
4305 if (is_positive_overflow_infinity (vr->max))
4306 fprintf (file, "+INF(OVF)");
4307 else if (INTEGRAL_TYPE_P (type)
4308 && vrp_val_is_max (vr->max))
4309 fprintf (file, "+INF");
4310 else
4311 print_generic_expr (file, vr->max, 0);
4312
4313 fprintf (file, "]");
4314
4315 if (vr->equiv)
4316 {
4317 bitmap_iterator bi;
4318 unsigned i, c = 0;
4319
4320 fprintf (file, " EQUIVALENCES: { ");
4321
4322 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
4323 {
4324 print_generic_expr (file, ssa_name (i), 0);
4325 fprintf (file, " ");
4326 c++;
4327 }
4328
4329 fprintf (file, "} (%u elements)", c);
4330 }
4331 }
4332 else if (vr->type == VR_VARYING)
4333 fprintf (file, "VARYING");
4334 else
4335 fprintf (file, "INVALID RANGE");
4336 }
4337
4338
4339 /* Dump value range VR to stderr. */
4340
4341 DEBUG_FUNCTION void
4342 debug_value_range (value_range_t *vr)
4343 {
4344 dump_value_range (stderr, vr);
4345 fprintf (stderr, "\n");
4346 }
4347
4348
4349 /* Dump value ranges of all SSA_NAMEs to FILE. */
4350
4351 void
4352 dump_all_value_ranges (FILE *file)
4353 {
4354 size_t i;
4355
4356 for (i = 0; i < num_vr_values; i++)
4357 {
4358 if (vr_value[i])
4359 {
4360 print_generic_expr (file, ssa_name (i), 0);
4361 fprintf (file, ": ");
4362 dump_value_range (file, vr_value[i]);
4363 fprintf (file, "\n");
4364 }
4365 }
4366
4367 fprintf (file, "\n");
4368 }
4369
4370
/* Dump the value ranges of all SSA_NAMEs to stderr.  Convenience
   wrapper around dump_all_value_ranges for use from a debugger.  */

DEBUG_FUNCTION void
debug_all_value_ranges (void)
{
  dump_all_value_ranges (stderr);
}
4378
4379
4380 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
4381 create a new SSA name N and return the assertion assignment
4382 'V = ASSERT_EXPR <V, V OP W>'. */
4383
static gimple
build_assert_expr_for (tree cond, tree v)
{
  tree a;
  gimple assertion;

  /* ASSERT_EXPRs can only be built for SSA names constrained by a
     comparison predicate.  */
  gcc_assert (TREE_CODE (v) == SSA_NAME
	      && COMPARISON_CLASS_P (cond));

  a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
  assertion = gimple_build_assign (NULL_TREE, a);

  /* The new ASSERT_EXPR creates a new SSA name that replaces the
     operand of the ASSERT_EXPR.  Create it so the new name and the old one
     are registered in the replacement table so that we can fix the SSA web
     after adding all the ASSERT_EXPRs.  NOTE(review): the statement's LHS
     is left NULL_TREE above; presumably create_new_def_for fills it in
     with the new name -- confirm against its definition.  */
  create_new_def_for (v, assertion, NULL);

  return assertion;
}
4404
4405
/* Return true if STMT is a GIMPLE_COND whose predicate involves
   floating point values (i.e. the type of its LHS operand is a float
   type).  VRP does not process floating point conditionals.  */

static inline bool
fp_predicate (gimple stmt)
{
  GIMPLE_CHECK (stmt, GIMPLE_COND);

  return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
}
4416
4417
4418 /* If the range of values taken by OP can be inferred after STMT executes,
4419 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4420 describes the inferred range. Return true if a range could be
4421 inferred. */
4422
4423 static bool
4424 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
4425 {
4426 *val_p = NULL_TREE;
4427 *comp_code_p = ERROR_MARK;
4428
4429 /* Do not attempt to infer anything in names that flow through
4430 abnormal edges. */
4431 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4432 return false;
4433
4434 /* Similarly, don't infer anything from statements that may throw
4435 exceptions. */
4436 if (stmt_could_throw_p (stmt))
4437 return false;
4438
4439 /* If STMT is the last statement of a basic block with no
4440 successors, there is no point inferring anything about any of its
4441 operands. We would not be able to find a proper insertion point
4442 for the assertion, anyway. */
4443 if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
4444 return false;
4445
4446 /* We can only assume that a pointer dereference will yield
4447 non-NULL if -fdelete-null-pointer-checks is enabled. */
4448 if (flag_delete_null_pointer_checks
4449 && POINTER_TYPE_P (TREE_TYPE (op))
4450 && gimple_code (stmt) != GIMPLE_ASM)
4451 {
4452 unsigned num_uses, num_loads, num_stores;
4453
4454 count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
4455 if (num_loads + num_stores > 0)
4456 {
4457 *val_p = build_int_cst (TREE_TYPE (op), 0);
4458 *comp_code_p = NE_EXPR;
4459 return true;
4460 }
4461 }
4462
4463 return false;
4464 }
4465
4466
4467 void dump_asserts_for (FILE *, tree);
4468 void debug_asserts_for (tree);
4469 void dump_all_asserts (FILE *);
4470 void debug_all_asserts (void);
4471
4472 /* Dump all the registered assertions for NAME to FILE. */
4473
4474 void
4475 dump_asserts_for (FILE *file, tree name)
4476 {
4477 assert_locus_t loc;
4478
4479 fprintf (file, "Assertions to be inserted for ");
4480 print_generic_expr (file, name, 0);
4481 fprintf (file, "\n");
4482
4483 loc = asserts_for[SSA_NAME_VERSION (name)];
4484 while (loc)
4485 {
4486 fprintf (file, "\t");
4487 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
4488 fprintf (file, "\n\tBB #%d", loc->bb->index);
4489 if (loc->e)
4490 {
4491 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4492 loc->e->dest->index);
4493 dump_edge_info (file, loc->e, dump_flags, 0);
4494 }
4495 fprintf (file, "\n\tPREDICATE: ");
4496 print_generic_expr (file, name, 0);
4497 fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]);
4498 print_generic_expr (file, loc->val, 0);
4499 fprintf (file, "\n\n");
4500 loc = loc->next;
4501 }
4502
4503 fprintf (file, "\n");
4504 }
4505
4506
/* Dump all the registered assertions for NAME to stderr.  Convenience
   wrapper around dump_asserts_for for use from a debugger.  */

DEBUG_FUNCTION void
debug_asserts_for (tree name)
{
  dump_asserts_for (stderr, name);
}
4514
4515
4516 /* Dump all the registered assertions for all the names to FILE. */
4517
4518 void
4519 dump_all_asserts (FILE *file)
4520 {
4521 unsigned i;
4522 bitmap_iterator bi;
4523
4524 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4525 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4526 dump_asserts_for (file, ssa_name (i));
4527 fprintf (file, "\n");
4528 }
4529
4530
/* Dump all the registered assertions for all the names to stderr.
   Convenience wrapper around dump_all_asserts for use from a
   debugger.  */

DEBUG_FUNCTION void
debug_all_asserts (void)
{
  dump_all_asserts (stderr);
}
4538
4539
4540 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4541 'EXPR COMP_CODE VAL' at a location that dominates block BB or
4542 E->DEST, then register this location as a possible insertion point
4543 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
4544
4545 BB, E and SI provide the exact insertion point for the new
4546 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
4547 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4548 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4549 must not be NULL. */
4550
static void
register_new_assert_for (tree name, tree expr,
			 enum tree_code comp_code,
			 tree val,
			 basic_block bb,
			 edge e,
			 gimple_stmt_iterator si)
{
  assert_locus_t n, loc, last_loc;
  basic_block dest_bb;

  /* Exactly one of BB and E identifies the insertion point.  */
  gcc_checking_assert (bb == NULL || e == NULL);

  /* Control statements (conditionals, switches) can only take an edge
     insertion; see the function comment.  */
  if (e == NULL)
    gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);

  /* Never build an assert comparing against an integer constant with
     TREE_OVERFLOW set.  This confuses our undefined overflow warning
     machinery.  Rebuilding the constant from its wide_int value
     produces a clean INTEGER_CST without the flag.  */
  if (TREE_CODE (val) == INTEGER_CST
      && TREE_OVERFLOW (val))
    val = wide_int_to_tree (TREE_TYPE (val), val);

  /* The new assertion A will be inserted at BB or E.  We need to
     determine if the new location is dominated by a previously
     registered location for A.  If we are doing an edge insertion,
     assume that A will be inserted at E->DEST.  Note that this is not
     necessarily true.

     If E is a critical edge, it will be split.  But even if E is
     split, the new block will dominate the same set of blocks that
     E->DEST dominates.

     The reverse, however, is not true, blocks dominated by E->DEST
     will not be dominated by the new block created to split E.  So,
     if the insertion location is on a critical edge, we will not use
     the new location to move another assertion previously registered
     at a block dominated by E->DEST.  */
  dest_bb = (bb) ? bb : e->dest;

  /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
     VAL at a block dominating DEST_BB, then we don't need to insert a new
     one.  Similarly, if the same assertion already exists at a block
     dominated by DEST_BB and the new location is not on a critical
     edge, then update the existing location for the assertion (i.e.,
     move the assertion up in the dominance tree).

     Note, this is implemented as a simple linked list because there
     should not be more than a handful of assertions registered per
     name.  If this becomes a performance problem, a table hashed by
     COMP_CODE and VAL could be implemented.  */
  loc = asserts_for[SSA_NAME_VERSION (name)];
  last_loc = loc;
  while (loc)
    {
      /* An existing node matches if predicate code, value and tested
	 expression all agree (pointer equality is checked first as a
	 cheap shortcut).  */
      if (loc->comp_code == comp_code
	  && (loc->val == val
	      || operand_equal_p (loc->val, val, 0))
	  && (loc->expr == expr
	      || operand_equal_p (loc->expr, expr, 0)))
	{
	  /* If E is not a critical edge and DEST_BB
	     dominates the existing location for the assertion, move
	     the assertion up in the dominance tree by updating its
	     location information.  */
	  if ((e == NULL || !EDGE_CRITICAL_P (e))
	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
	    {
	      loc->bb = dest_bb;
	      loc->e = e;
	      loc->si = si;
	      return;
	    }
	}

      /* Update the last node of the list and move to the next one.  */
      last_loc = loc;
      loc = loc->next;
    }

  /* If we didn't find an assertion already registered for
     NAME COMP_CODE VAL, add a new one at the end of the list of
     assertions associated with NAME.  */
  n = XNEW (struct assert_locus_d);
  n->bb = dest_bb;
  n->e = e;
  n->si = si;
  n->comp_code = comp_code;
  n->val = val;
  n->expr = expr;
  n->next = NULL;

  if (last_loc)
    last_loc->next = n;
  else
    asserts_for[SSA_NAME_VERSION (name)] = n;

  /* Remember that NAME needs at least one assertion inserted.  */
  bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
}
4651
4652 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4653 Extract a suitable test code and value and store them into *CODE_P and
4654 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4655
4656 If no extraction was possible, return FALSE, otherwise return TRUE.
4657
4658 If INVERT is true, then we invert the result stored into *CODE_P. */
4659
static bool
extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
					 tree cond_op0, tree cond_op1,
					 bool invert, enum tree_code *code_p,
					 tree *val_p)
{
  enum tree_code comp_code;
  tree val;

  /* We have a comparison of the form NAME COMP VAL
     or VAL COMP NAME.  */
  if (name == cond_op1)
    {
      /* If the predicate is of the form VAL COMP NAME, flip
	 COMP around because we need to register NAME as the
	 first operand in the predicate.  */
      comp_code = swap_tree_comparison (cond_code);
      val = cond_op0;
    }
  else
    {
      /* The comparison is of the form NAME COMP VAL, so the
	 comparison code remains unchanged.  */
      comp_code = cond_code;
      val = cond_op1;
    }

  /* Invert the comparison code as necessary.  */
  if (invert)
    comp_code = invert_tree_comparison (comp_code, 0);

  /* VRP does not handle float types.  */
  if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
    return false;

  /* Do not register always-false predicates.
     FIXME:  this works around a limitation in fold() when dealing with
     enumerations.  Given 'enum { N1, N2 } x;', fold will not
     fold 'if (x > N2)' to 'if (0)'.  */
  if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (val)))
    {
      tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
      tree max = TYPE_MAX_VALUE (TREE_TYPE (val));

      /* X > TYPE_MAX is always false.  */
      if (comp_code == GT_EXPR
	  && (!max
	      || compare_values (val, max) == 0))
	return false;

      /* X < TYPE_MIN is always false.  */
      if (comp_code == LT_EXPR
	  && (!min
	      || compare_values (val, min) == 0))
	return false;
    }
  *code_p = comp_code;
  *val_p = val;
  return true;
}
4719
4720 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
4721 (otherwise return VAL). VAL and MASK must be zero-extended for
4722 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
4723 (to transform signed values into unsigned) and at the end xor
4724 SGNBIT back. */
4725
static wide_int
masked_increment (wide_int val, wide_int mask, wide_int sgnbit,
		  unsigned int prec)
{
  wide_int bit = wi::one (prec), res;
  unsigned int i;

  /* Map signed values into unsigned order by flipping the sign bit
     (SGNBIT is zero for the unsigned case, making this a no-op).  */
  val ^= sgnbit;
  /* Try each bit position from lowest to highest; BIT doubles each
     iteration (bit += bit == bit <<= 1).  */
  for (i = 0; i < prec; i++, bit += bit)
    {
      res = mask;
      /* Only bits present in MASK can be set in the result, so skip
	 positions MASK does not cover.  */
      if ((res & bit) == 0)
	continue;
      /* Round VAL up past this bit: add BIT, clear all lower bits,
	 then restrict to MASK.  */
      res = bit - 1;
      res = (val + bit).and_not (res);
      res &= mask;
      /* If the candidate actually exceeds VAL (unsigned compare), we
	 found the smallest such RES; undo the sign-bit mapping.  */
      if (wi::gtu_p (res, val))
	return res ^ sgnbit;
    }
  /* No RES > VAL with (RES & MASK) == RES exists; return VAL as the
     function contract specifies.  */
  return val ^ sgnbit;
}
4747
4748 /* Try to register an edge assertion for SSA name NAME on edge E for
4749 the condition COND contributing to the conditional jump pointed to by BSI.
4750 Invert the condition COND if INVERT is true.
4751 Return true if an assertion for NAME could be registered. */
4752
4753 static bool
4754 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
4755 enum tree_code cond_code,
4756 tree cond_op0, tree cond_op1, bool invert)
4757 {
4758 tree val;
4759 enum tree_code comp_code;
4760 bool retval = false;
4761
4762 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4763 cond_op0,
4764 cond_op1,
4765 invert, &comp_code, &val))
4766 return false;
4767
4768 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4769 reachable from E. */
4770 if (live_on_edge (e, name)
4771 && !has_single_use (name))
4772 {
4773 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
4774 retval = true;
4775 }
4776
4777 /* In the case of NAME <= CST and NAME being defined as
4778 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
4779 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
4780 This catches range and anti-range tests. */
4781 if ((comp_code == LE_EXPR
4782 || comp_code == GT_EXPR)
4783 && TREE_CODE (val) == INTEGER_CST
4784 && TYPE_UNSIGNED (TREE_TYPE (val)))
4785 {
4786 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4787 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
4788
4789 /* Extract CST2 from the (optional) addition. */
4790 if (is_gimple_assign (def_stmt)
4791 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
4792 {
4793 name2 = gimple_assign_rhs1 (def_stmt);
4794 cst2 = gimple_assign_rhs2 (def_stmt);
4795 if (TREE_CODE (name2) == SSA_NAME
4796 && TREE_CODE (cst2) == INTEGER_CST)
4797 def_stmt = SSA_NAME_DEF_STMT (name2);
4798 }
4799
4800 /* Extract NAME2 from the (optional) sign-changing cast. */
4801 if (gimple_assign_cast_p (def_stmt))
4802 {
4803 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4804 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
4805 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
4806 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
4807 name3 = gimple_assign_rhs1 (def_stmt);
4808 }
4809
4810 /* If name3 is used later, create an ASSERT_EXPR for it. */
4811 if (name3 != NULL_TREE
4812 && TREE_CODE (name3) == SSA_NAME
4813 && (cst2 == NULL_TREE
4814 || TREE_CODE (cst2) == INTEGER_CST)
4815 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
4816 && live_on_edge (e, name3)
4817 && !has_single_use (name3))
4818 {
4819 tree tmp;
4820
4821 /* Build an expression for the range test. */
4822 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
4823 if (cst2 != NULL_TREE)
4824 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4825
4826 if (dump_file)
4827 {
4828 fprintf (dump_file, "Adding assert for ");
4829 print_generic_expr (dump_file, name3, 0);
4830 fprintf (dump_file, " from ");
4831 print_generic_expr (dump_file, tmp, 0);
4832 fprintf (dump_file, "\n");
4833 }
4834
4835 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
4836
4837 retval = true;
4838 }
4839
4840 /* If name2 is used later, create an ASSERT_EXPR for it. */
4841 if (name2 != NULL_TREE
4842 && TREE_CODE (name2) == SSA_NAME
4843 && TREE_CODE (cst2) == INTEGER_CST
4844 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4845 && live_on_edge (e, name2)
4846 && !has_single_use (name2))
4847 {
4848 tree tmp;
4849
4850 /* Build an expression for the range test. */
4851 tmp = name2;
4852 if (TREE_TYPE (name) != TREE_TYPE (name2))
4853 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
4854 if (cst2 != NULL_TREE)
4855 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4856
4857 if (dump_file)
4858 {
4859 fprintf (dump_file, "Adding assert for ");
4860 print_generic_expr (dump_file, name2, 0);
4861 fprintf (dump_file, " from ");
4862 print_generic_expr (dump_file, tmp, 0);
4863 fprintf (dump_file, "\n");
4864 }
4865
4866 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
4867
4868 retval = true;
4869 }
4870 }
4871
4872 /* In the case of post-in/decrement tests like if (i++) ... and uses
4873 of the in/decremented value on the edge the extra name we want to
4874 assert for is not on the def chain of the name compared. Instead
4875 it is in the set of use stmts. */
4876 if ((comp_code == NE_EXPR
4877 || comp_code == EQ_EXPR)
4878 && TREE_CODE (val) == INTEGER_CST)
4879 {
4880 imm_use_iterator ui;
4881 gimple use_stmt;
4882 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
4883 {
4884 /* Cut off to use-stmts that are in the predecessor. */
4885 if (gimple_bb (use_stmt) != e->src)
4886 continue;
4887
4888 if (!is_gimple_assign (use_stmt))
4889 continue;
4890
4891 enum tree_code code = gimple_assign_rhs_code (use_stmt);
4892 if (code != PLUS_EXPR
4893 && code != MINUS_EXPR)
4894 continue;
4895
4896 tree cst = gimple_assign_rhs2 (use_stmt);
4897 if (TREE_CODE (cst) != INTEGER_CST)
4898 continue;
4899
4900 tree name2 = gimple_assign_lhs (use_stmt);
4901 if (live_on_edge (e, name2))
4902 {
4903 cst = int_const_binop (code, val, cst);
4904 register_new_assert_for (name2, name2, comp_code, cst,
4905 NULL, e, bsi);
4906 retval = true;
4907 }
4908 }
4909 }
4910
4911 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
4912 && TREE_CODE (val) == INTEGER_CST)
4913 {
4914 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4915 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
4916 tree val2 = NULL_TREE;
4917 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
4918 wide_int mask = wi::zero (prec);
4919 unsigned int nprec = prec;
4920 enum tree_code rhs_code = ERROR_MARK;
4921
4922 if (is_gimple_assign (def_stmt))
4923 rhs_code = gimple_assign_rhs_code (def_stmt);
4924
4925 /* Add asserts for NAME cmp CST and NAME being defined
4926 as NAME = (int) NAME2. */
4927 if (!TYPE_UNSIGNED (TREE_TYPE (val))
4928 && (comp_code == LE_EXPR || comp_code == LT_EXPR
4929 || comp_code == GT_EXPR || comp_code == GE_EXPR)
4930 && gimple_assign_cast_p (def_stmt))
4931 {
4932 name2 = gimple_assign_rhs1 (def_stmt);
4933 if (CONVERT_EXPR_CODE_P (rhs_code)
4934 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4935 && TYPE_UNSIGNED (TREE_TYPE (name2))
4936 && prec == TYPE_PRECISION (TREE_TYPE (name2))
4937 && (comp_code == LE_EXPR || comp_code == GT_EXPR
4938 || !tree_int_cst_equal (val,
4939 TYPE_MIN_VALUE (TREE_TYPE (val))))
4940 && live_on_edge (e, name2)
4941 && !has_single_use (name2))
4942 {
4943 tree tmp, cst;
4944 enum tree_code new_comp_code = comp_code;
4945
4946 cst = fold_convert (TREE_TYPE (name2),
4947 TYPE_MIN_VALUE (TREE_TYPE (val)));
4948 /* Build an expression for the range test. */
4949 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
4950 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
4951 fold_convert (TREE_TYPE (name2), val));
4952 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
4953 {
4954 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
4955 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
4956 build_int_cst (TREE_TYPE (name2), 1));
4957 }
4958
4959 if (dump_file)
4960 {
4961 fprintf (dump_file, "Adding assert for ");
4962 print_generic_expr (dump_file, name2, 0);
4963 fprintf (dump_file, " from ");
4964 print_generic_expr (dump_file, tmp, 0);
4965 fprintf (dump_file, "\n");
4966 }
4967
4968 register_new_assert_for (name2, tmp, new_comp_code, cst, NULL,
4969 e, bsi);
4970
4971 retval = true;
4972 }
4973 }
4974
4975 /* Add asserts for NAME cmp CST and NAME being defined as
4976 NAME = NAME2 >> CST2.
4977
4978 Extract CST2 from the right shift. */
4979 if (rhs_code == RSHIFT_EXPR)
4980 {
4981 name2 = gimple_assign_rhs1 (def_stmt);
4982 cst2 = gimple_assign_rhs2 (def_stmt);
4983 if (TREE_CODE (name2) == SSA_NAME
4984 && tree_fits_uhwi_p (cst2)
4985 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4986 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
4987 && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val)))
4988 && live_on_edge (e, name2)
4989 && !has_single_use (name2))
4990 {
4991 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
4992 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
4993 }
4994 }
4995 if (val2 != NULL_TREE
4996 && TREE_CODE (val2) == INTEGER_CST
4997 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
4998 TREE_TYPE (val),
4999 val2, cst2), val))
5000 {
5001 enum tree_code new_comp_code = comp_code;
5002 tree tmp, new_val;
5003
5004 tmp = name2;
5005 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
5006 {
5007 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
5008 {
5009 tree type = build_nonstandard_integer_type (prec, 1);
5010 tmp = build1 (NOP_EXPR, type, name2);
5011 val2 = fold_convert (type, val2);
5012 }
5013 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
5014 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
5015 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
5016 }
5017 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
5018 {
5019 wide_int minval
5020 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
5021 new_val = val2;
5022 if (minval == wide_int (new_val))
5023 new_val = NULL_TREE;
5024 }
5025 else
5026 {
5027 wide_int maxval
5028 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
5029 mask |= wide_int (val2);
5030 if (mask == maxval)
5031 new_val = NULL_TREE;
5032 else
5033 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
5034 }
5035
5036 if (new_val)
5037 {
5038 if (dump_file)
5039 {
5040 fprintf (dump_file, "Adding assert for ");
5041 print_generic_expr (dump_file, name2, 0);
5042 fprintf (dump_file, " from ");
5043 print_generic_expr (dump_file, tmp, 0);
5044 fprintf (dump_file, "\n");
5045 }
5046
5047 register_new_assert_for (name2, tmp, new_comp_code, new_val,
5048 NULL, e, bsi);
5049 retval = true;
5050 }
5051 }
5052
5053 /* Add asserts for NAME cmp CST and NAME being defined as
5054 NAME = NAME2 & CST2.
5055
5056 Extract CST2 from the and.
5057
5058 Also handle
5059 NAME = (unsigned) NAME2;
5060 casts where NAME's type is unsigned and has smaller precision
5061 than NAME2's type as if it was NAME = NAME2 & MASK. */
5062 names[0] = NULL_TREE;
5063 names[1] = NULL_TREE;
5064 cst2 = NULL_TREE;
5065 if (rhs_code == BIT_AND_EXPR
5066 || (CONVERT_EXPR_CODE_P (rhs_code)
5067 && TREE_CODE (TREE_TYPE (val)) == INTEGER_TYPE
5068 && TYPE_UNSIGNED (TREE_TYPE (val))
5069 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
5070 > prec
5071 && !retval))
5072 {
5073 name2 = gimple_assign_rhs1 (def_stmt);
5074 if (rhs_code == BIT_AND_EXPR)
5075 cst2 = gimple_assign_rhs2 (def_stmt);
5076 else
5077 {
5078 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
5079 nprec = TYPE_PRECISION (TREE_TYPE (name2));
5080 }
5081 if (TREE_CODE (name2) == SSA_NAME
5082 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5083 && TREE_CODE (cst2) == INTEGER_CST
5084 && !integer_zerop (cst2)
5085 && (nprec > 1
5086 || TYPE_UNSIGNED (TREE_TYPE (val))))
5087 {
5088 gimple def_stmt2 = SSA_NAME_DEF_STMT (name2);
5089 if (gimple_assign_cast_p (def_stmt2))
5090 {
5091 names[1] = gimple_assign_rhs1 (def_stmt2);
5092 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
5093 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
5094 || (TYPE_PRECISION (TREE_TYPE (name2))
5095 != TYPE_PRECISION (TREE_TYPE (names[1])))
5096 || !live_on_edge (e, names[1])
5097 || has_single_use (names[1]))
5098 names[1] = NULL_TREE;
5099 }
5100 if (live_on_edge (e, name2)
5101 && !has_single_use (name2))
5102 names[0] = name2;
5103 }
5104 }
5105 if (names[0] || names[1])
5106 {
5107 wide_int minv, maxv, valv, cst2v;
5108 wide_int tem, sgnbit;
5109 bool valid_p = false, valn = false, cst2n = false;
5110 enum tree_code ccode = comp_code;
5111
5112 valv = wide_int::from (val, nprec, UNSIGNED);
5113 cst2v = wide_int::from (cst2, nprec, UNSIGNED);
5114 if (TYPE_SIGN (TREE_TYPE (val)) == SIGNED)
5115 {
5116 valn = wi::neg_p (wi::sext (valv, nprec));
5117 cst2n = wi::neg_p (wi::sext (cst2v, nprec));
5118 }
5119 /* If CST2 doesn't have most significant bit set,
5120 but VAL is negative, we have comparison like
5121 if ((x & 0x123) > -4) (always true). Just give up. */
5122 if (!cst2n && valn)
5123 ccode = ERROR_MARK;
5124 if (cst2n)
5125 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
5126 else
5127 sgnbit = wi::zero (nprec);
5128 minv = valv & cst2v;
5129 switch (ccode)
5130 {
5131 case EQ_EXPR:
5132 /* Minimum unsigned value for equality is VAL & CST2
5133 (should be equal to VAL, otherwise we probably should
5134 have folded the comparison into false) and
5135 maximum unsigned value is VAL | ~CST2. */
5136 maxv = valv | ~cst2v;
5137 maxv = wi::zext (maxv, nprec);
5138 valid_p = true;
5139 break;
5140
5141 case NE_EXPR:
5142 tem = valv | ~cst2v;
5143 tem = wi::zext (tem, nprec);
5144 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
5145 if (valv == 0)
5146 {
5147 cst2n = false;
5148 sgnbit = wi::zero (nprec);
5149 goto gt_expr;
5150 }
5151 /* If (VAL | ~CST2) is all ones, handle it as
5152 (X & CST2) < VAL. */
5153 if (tem == -1)
5154 {
5155 cst2n = false;
5156 valn = false;
5157 sgnbit = wi::zero (nprec);
5158 goto lt_expr;
5159 }
5160 if (!cst2n && wi::neg_p (wi::sext (cst2v, nprec)))
5161 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
5162 if (sgnbit != 0)
5163 {
5164 if (valv == sgnbit)
5165 {
5166 cst2n = true;
5167 valn = true;
5168 goto gt_expr;
5169 }
5170 if (tem == wi::mask (nprec - 1, false, nprec))
5171 {
5172 cst2n = true;
5173 goto lt_expr;
5174 }
5175 if (!cst2n)
5176 sgnbit = wi::zero (nprec);
5177 }
5178 break;
5179
5180 case GE_EXPR:
5181 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
5182 is VAL and maximum unsigned value is ~0. For signed
5183 comparison, if CST2 doesn't have most significant bit
5184 set, handle it similarly. If CST2 has MSB set,
5185 the minimum is the same, and maximum is ~0U/2. */
5186 if (minv != valv)
5187 {
5188 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
5189 VAL. */
5190 minv = masked_increment (valv, cst2v, sgnbit, nprec);
5191 if (minv == valv)
5192 break;
5193 }
5194 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
5195 valid_p = true;
5196 break;
5197
5198 case GT_EXPR:
5199 gt_expr:
5200 /* Find out smallest MINV where MINV > VAL
5201 && (MINV & CST2) == MINV, if any. If VAL is signed and
5202 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
5203 minv = masked_increment (valv, cst2v, sgnbit, nprec);
5204 if (minv == valv)
5205 break;
5206 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
5207 valid_p = true;
5208 break;
5209
5210 case LE_EXPR:
5211 /* Minimum unsigned value for <= is 0 and maximum
5212 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
5213 Otherwise, find smallest VAL2 where VAL2 > VAL
5214 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5215 as maximum.
5216 For signed comparison, if CST2 doesn't have most
5217 significant bit set, handle it similarly. If CST2 has
5218 MSB set, the maximum is the same and minimum is INT_MIN. */
5219 if (minv == valv)
5220 maxv = valv;
5221 else
5222 {
5223 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5224 if (maxv == valv)
5225 break;
5226 maxv -= 1;
5227 }
5228 maxv |= ~cst2v;
5229 maxv = wi::zext (maxv, nprec);
5230 minv = sgnbit;
5231 valid_p = true;
5232 break;
5233
5234 case LT_EXPR:
5235 lt_expr:
5236 /* Minimum unsigned value for < is 0 and maximum
5237 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
5238 Otherwise, find smallest VAL2 where VAL2 > VAL
5239 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5240 as maximum.
5241 For signed comparison, if CST2 doesn't have most
5242 significant bit set, handle it similarly. If CST2 has
5243 MSB set, the maximum is the same and minimum is INT_MIN. */
5244 if (minv == valv)
5245 {
5246 if (valv == sgnbit)
5247 break;
5248 maxv = valv;
5249 }
5250 else
5251 {
5252 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5253 if (maxv == valv)
5254 break;
5255 }
5256 maxv -= 1;
5257 maxv |= ~cst2v;
5258 maxv = wi::zext (maxv, nprec);
5259 minv = sgnbit;
5260 valid_p = true;
5261 break;
5262
5263 default:
5264 break;
5265 }
5266 if (valid_p
5267 && wi::zext (maxv - minv, nprec) != wi::minus_one (nprec))
5268 {
5269 tree tmp, new_val, type;
5270 int i;
5271
5272 for (i = 0; i < 2; i++)
5273 if (names[i])
5274 {
5275 wide_int maxv2 = maxv;
5276 tmp = names[i];
5277 type = TREE_TYPE (names[i]);
5278 if (!TYPE_UNSIGNED (type))
5279 {
5280 type = build_nonstandard_integer_type (nprec, 1);
5281 tmp = build1 (NOP_EXPR, type, names[i]);
5282 }
5283 if (minv != 0)
5284 {
5285 tmp = build2 (PLUS_EXPR, type, tmp,
5286 wide_int_to_tree (type, -minv));
5287 maxv2 = maxv - minv;
5288 }
5289 new_val = wide_int_to_tree (type, maxv2);
5290
5291 if (dump_file)
5292 {
5293 fprintf (dump_file, "Adding assert for ");
5294 print_generic_expr (dump_file, names[i], 0);
5295 fprintf (dump_file, " from ");
5296 print_generic_expr (dump_file, tmp, 0);
5297 fprintf (dump_file, "\n");
5298 }
5299
5300 register_new_assert_for (names[i], tmp, LE_EXPR,
5301 new_val, NULL, e, bsi);
5302 retval = true;
5303 }
5304 }
5305 }
5306 }
5307
5308 return retval;
5309 }
5310
5311 /* OP is an operand of a truth value expression which is known to have
5312 a particular value. Register any asserts for OP and for any
5313 operands in OP's defining statement.
5314
5315 If CODE is EQ_EXPR, then we want to register OP is zero (false),
5316 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
5317
5318 static bool
5319 register_edge_assert_for_1 (tree op, enum tree_code code,
5320 edge e, gimple_stmt_iterator bsi)
5321 {
5322 bool retval = false;
5323 gimple op_def;
5324 tree val;
5325 enum tree_code rhs_code;
5326
5327 /* We only care about SSA_NAMEs. */
5328 if (TREE_CODE (op) != SSA_NAME)
5329 return false;
5330
5331 /* We know that OP will have a zero or nonzero value. If OP is used
5332 more than once go ahead and register an assert for OP.
5333
5334 The FOUND_IN_SUBGRAPH support is not helpful in this situation as
5335 it will always be set for OP (because OP is used in a COND_EXPR in
5336 the subgraph). */
5337 if (!has_single_use (op))
5338 {
5339 val = build_int_cst (TREE_TYPE (op), 0);
5340 register_new_assert_for (op, op, code, val, NULL, e, bsi);
5341 retval = true;
5342 }
5343
5344 /* Now look at how OP is set. If it's set from a comparison,
5345 a truth operation or some bit operations, then we may be able
5346 to register information about the operands of that assignment. */
5347 op_def = SSA_NAME_DEF_STMT (op);
5348 if (gimple_code (op_def) != GIMPLE_ASSIGN)
5349 return retval;
5350
5351 rhs_code = gimple_assign_rhs_code (op_def);
5352
5353 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
5354 {
5355 bool invert = (code == EQ_EXPR ? true : false);
5356 tree op0 = gimple_assign_rhs1 (op_def);
5357 tree op1 = gimple_assign_rhs2 (op_def);
5358
5359 if (TREE_CODE (op0) == SSA_NAME)
5360 retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
5361 invert);
5362 if (TREE_CODE (op1) == SSA_NAME)
5363 retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
5364 invert);
5365 }
5366 else if ((code == NE_EXPR
5367 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
5368 || (code == EQ_EXPR
5369 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
5370 {
5371 /* Recurse on each operand. */
5372 tree op0 = gimple_assign_rhs1 (op_def);
5373 tree op1 = gimple_assign_rhs2 (op_def);
5374 if (TREE_CODE (op0) == SSA_NAME
5375 && has_single_use (op0))
5376 retval |= register_edge_assert_for_1 (op0, code, e, bsi);
5377 if (TREE_CODE (op1) == SSA_NAME
5378 && has_single_use (op1))
5379 retval |= register_edge_assert_for_1 (op1, code, e, bsi);
5380 }
5381 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
5382 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
5383 {
5384 /* Recurse, flipping CODE. */
5385 code = invert_tree_comparison (code, false);
5386 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5387 code, e, bsi);
5388 }
5389 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
5390 {
5391 /* Recurse through the copy. */
5392 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5393 code, e, bsi);
5394 }
5395 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
5396 {
5397 /* Recurse through the type conversion. */
5398 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5399 code, e, bsi);
5400 }
5401
5402 return retval;
5403 }
5404
5405 /* Try to register an edge assertion for SSA name NAME on edge E for
5406 the condition COND contributing to the conditional jump pointed to by SI.
5407 Return true if an assertion for NAME could be registered. */
5408
5409 static bool
5410 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
5411 enum tree_code cond_code, tree cond_op0,
5412 tree cond_op1)
5413 {
5414 tree val;
5415 enum tree_code comp_code;
5416 bool retval = false;
5417 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
5418
5419 /* Do not attempt to infer anything in names that flow through
5420 abnormal edges. */
5421 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
5422 return false;
5423
5424 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
5425 cond_op0, cond_op1,
5426 is_else_edge,
5427 &comp_code, &val))
5428 return false;
5429
5430 /* Register ASSERT_EXPRs for name. */
5431 retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
5432 cond_op1, is_else_edge);
5433
5434
5435 /* If COND is effectively an equality test of an SSA_NAME against
5436 the value zero or one, then we may be able to assert values
5437 for SSA_NAMEs which flow into COND. */
5438
5439 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
5440 statement of NAME we can assert both operands of the BIT_AND_EXPR
5441 have nonzero value. */
5442 if (((comp_code == EQ_EXPR && integer_onep (val))
5443 || (comp_code == NE_EXPR && integer_zerop (val))))
5444 {
5445 gimple def_stmt = SSA_NAME_DEF_STMT (name);
5446
5447 if (is_gimple_assign (def_stmt)
5448 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
5449 {
5450 tree op0 = gimple_assign_rhs1 (def_stmt);
5451 tree op1 = gimple_assign_rhs2 (def_stmt);
5452 retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
5453 retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
5454 }
5455 }
5456
5457 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
5458 statement of NAME we can assert both operands of the BIT_IOR_EXPR
5459 have zero value. */
5460 if (((comp_code == EQ_EXPR && integer_zerop (val))
5461 || (comp_code == NE_EXPR && integer_onep (val))))
5462 {
5463 gimple def_stmt = SSA_NAME_DEF_STMT (name);
5464
5465 /* For BIT_IOR_EXPR only if NAME == 0 both operands have
5466 necessarily zero value, or if type-precision is one. */
5467 if (is_gimple_assign (def_stmt)
5468 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
5469 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
5470 || comp_code == EQ_EXPR)))
5471 {
5472 tree op0 = gimple_assign_rhs1 (def_stmt);
5473 tree op1 = gimple_assign_rhs2 (def_stmt);
5474 retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
5475 retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
5476 }
5477 }
5478
5479 return retval;
5480 }
5481
5482
5483 /* Determine whether the outgoing edges of BB should receive an
5484 ASSERT_EXPR for each of the operands of BB's LAST statement.
5485 The last statement of BB must be a COND_EXPR.
5486
5487 If any of the sub-graphs rooted at BB have an interesting use of
5488 the predicate operands, an assert location node is added to the
5489 list of assertions for the corresponding operands. */
5490
5491 static bool
5492 find_conditional_asserts (basic_block bb, gimple last)
5493 {
5494 bool need_assert;
5495 gimple_stmt_iterator bsi;
5496 tree op;
5497 edge_iterator ei;
5498 edge e;
5499 ssa_op_iter iter;
5500
5501 need_assert = false;
5502 bsi = gsi_for_stmt (last);
5503
5504 /* Look for uses of the operands in each of the sub-graphs
5505 rooted at BB. We need to check each of the outgoing edges
5506 separately, so that we know what kind of ASSERT_EXPR to
5507 insert. */
5508 FOR_EACH_EDGE (e, ei, bb->succs)
5509 {
5510 if (e->dest == bb)
5511 continue;
5512
5513 /* Register the necessary assertions for each operand in the
5514 conditional predicate. */
5515 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
5516 {
5517 need_assert |= register_edge_assert_for (op, e, bsi,
5518 gimple_cond_code (last),
5519 gimple_cond_lhs (last),
5520 gimple_cond_rhs (last));
5521 }
5522 }
5523
5524 return need_assert;
5525 }
5526
/* Pairs a switch case label with the basic block it jumps to, so that
   find_switch_asserts can sort case labels by destination.  */
struct case_info
{
  tree expr;	   /* The CASE_LABEL_EXPR tree node.  */
  basic_block bb;  /* Destination block of the case label.  */
};
5532
5533 /* Compare two case labels sorting first by the destination bb index
5534 and then by the case value. */
5535
5536 static int
5537 compare_case_labels (const void *p1, const void *p2)
5538 {
5539 const struct case_info *ci1 = (const struct case_info *) p1;
5540 const struct case_info *ci2 = (const struct case_info *) p2;
5541 int idx1 = ci1->bb->index;
5542 int idx2 = ci2->bb->index;
5543
5544 if (idx1 < idx2)
5545 return -1;
5546 else if (idx1 == idx2)
5547 {
5548 /* Make sure the default label is first in a group. */
5549 if (!CASE_LOW (ci1->expr))
5550 return -1;
5551 else if (!CASE_LOW (ci2->expr))
5552 return 1;
5553 else
5554 return tree_int_cst_compare (CASE_LOW (ci1->expr),
5555 CASE_LOW (ci2->expr));
5556 }
5557 else
5558 return 1;
5559 }
5560
/* Determine whether the outgoing edges of BB should receive an
   ASSERT_EXPR for each of the operands of BB's LAST statement.
   The last statement of BB must be a SWITCH_EXPR.

   If any of the sub-graphs rooted at BB have an interesting use of
   the predicate operands, an assert location node is added to the
   list of assertions for the corresponding operands.

   Returns true if any assertion was registered.  */

static bool
find_switch_asserts (basic_block bb, gimple last)
{
  bool need_assert;
  gimple_stmt_iterator bsi;
  tree op;
  edge e;
  struct case_info *ci;
  size_t n = gimple_switch_num_labels (last);
#if GCC_VERSION >= 4000
  unsigned int idx;
#else
  /* Work around GCC 3.4 bug (PR 37086).  */
  volatile unsigned int idx;
#endif

  need_assert = false;
  bsi = gsi_for_stmt (last);
  op = gimple_switch_index (last);
  /* Only an SSA name index can carry range assertions.  */
  if (TREE_CODE (op) != SSA_NAME)
    return false;

  /* Build a vector of case labels sorted by destination label.  */
  ci = XNEWVEC (struct case_info, n);
  for (idx = 0; idx < n; ++idx)
    {
      ci[idx].expr = gimple_switch_label (last, idx);
      ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
    }
  qsort (ci, n, sizeof (struct case_info), compare_case_labels);

  for (idx = 0; idx < n; ++idx)
    {
      tree min, max;
      tree cl = ci[idx].expr;
      basic_block cbb = ci[idx].bb;

      min = CASE_LOW (cl);
      max = CASE_HIGH (cl);

      /* If there are multiple case labels with the same destination
	 we need to combine them to a single value range for the edge.
	 The sort above guarantees such labels are adjacent.  */
      if (idx + 1 < n && cbb == ci[idx + 1].bb)
	{
	  /* Skip labels until the last of the group.  */
	  do {
	    ++idx;
	  } while (idx < n && cbb == ci[idx].bb);
	  --idx;

	  /* Pick up the maximum of the case label range.  */
	  if (CASE_HIGH (ci[idx].expr))
	    max = CASE_HIGH (ci[idx].expr);
	  else
	    max = CASE_LOW (ci[idx].expr);
	}

      /* Nothing to do if the range includes the default label until we
	 can register anti-ranges.  (The default label is the one with
	 no CASE_LOW, sorted first in its group.)  */
      if (min == NULL_TREE)
	continue;

      /* Find the edge to register the assert expr on.  */
      e = find_edge (bb, cbb);

      /* Register the necessary assertions for the operand in the
	 SWITCH_EXPR: OP >= MIN (or OP == MIN for a single-value case),
	 and OP <= MAX when the case covers a range.  */
      need_assert |= register_edge_assert_for (op, e, bsi,
					       max ? GE_EXPR : EQ_EXPR,
					       op,
					       fold_convert (TREE_TYPE (op),
							     min));
      if (max)
	{
	  need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
						   op,
						   fold_convert (TREE_TYPE (op),
								 max));
	}
    }

  XDELETEVEC (ci);
  return need_assert;
}
5653
5654
/* Traverse all the statements in block BB looking for statements that
   may generate useful assertions for the SSA names in their operand.
   If a statement produces a useful assertion A for name N_i, then the
   list of assertions already generated for N_i is scanned to
   determine if A is actually needed.

   If N_i already had the assertion A at a location dominating the
   current location, then nothing needs to be done.  Otherwise, the
   new location for A is recorded instead.

   1- For every statement S in BB, all the variables used by S are
      added to bitmap FOUND_IN_SUBGRAPH.

   2- If statement S uses an operand N in a way that exposes a known
      value range for N, then if N was not already generated by an
      ASSERT_EXPR, create a new assert location for N.  For instance,
      if N is a pointer and the statement dereferences it, we can
      assume that N is not NULL.

   3- COND_EXPRs are a special case of #2.  We can derive range
      information from the predicate but need to insert different
      ASSERT_EXPRs for each of the sub-graphs rooted at the
      conditional block.  If the last statement of BB is a conditional
      expression of the form 'X op Y', then

      a) Remove X and Y from the set FOUND_IN_SUBGRAPH.

      b) If the conditional is the only entry point to the sub-graph
	 corresponding to the THEN_CLAUSE, recurse into it.  On
	 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
	 an ASSERT_EXPR is added for the corresponding variable.

      c) Repeat step (b) on the ELSE_CLAUSE.

      d) Mark X and Y in FOUND_IN_SUBGRAPH.

      For instance,

	    if (a == 9)
	      b = a;
	    else
	      b = c + 1;

      In this case, an assertion on the THEN clause is useful to
      determine that 'a' is always 9 on that edge.  However, an assertion
      on the ELSE clause would be unnecessary.

   4- If BB does not end in a conditional expression, then we recurse
      into BB's dominator children.

   At the end of the recursive traversal, every SSA name will have a
   list of locations where ASSERT_EXPRs should be added.  When a new
   location for name N is found, it is registered by calling
   register_new_assert_for.  That function keeps track of all the
   registered assertions to prevent adding unnecessary assertions.
   For instance, if a pointer P_4 is dereferenced more than once in a
   dominator tree, only the location dominating all the dereference of
   P_4 will receive an ASSERT_EXPR.

   If this function returns true, then it means that there are names
   for which we need to generate ASSERT_EXPRs.  Those assertions are
   inserted by process_assert_insertions.  */

static bool
find_assert_locations_1 (basic_block bb, sbitmap live)
{
  gimple_stmt_iterator si;
  gimple last;
  bool need_assert;

  need_assert = false;
  last = last_stmt (bb);

  /* If BB's last statement is a conditional statement involving integer
     operands, determine if we need to add ASSERT_EXPRs.  */
  if (last
      && gimple_code (last) == GIMPLE_COND
      && !fp_predicate (last)
      && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
    need_assert |= find_conditional_asserts (bb, last);

  /* If BB's last statement is a switch statement involving integer
     operands, determine if we need to add ASSERT_EXPRs.  */
  if (last
      && gimple_code (last) == GIMPLE_SWITCH
      && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
    need_assert |= find_switch_asserts (bb, last);

  /* Traverse all the statements in BB marking used names and looking
     for statements that may infer assertions for their used operands.
     NOTE: the walk is backwards so LIVE reflects liveness *after* the
     current statement; the use/def updates at the bottom of the loop
     depend on this order.  */
  for (si = gsi_last_bb (bb); !gsi_end_p (si); gsi_prev (&si))
    {
      gimple stmt;
      tree op;
      ssa_op_iter i;

      stmt = gsi_stmt (si);

      if (is_gimple_debug (stmt))
	continue;

      /* See if we can derive an assertion for any of STMT's operands.  */
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
	{
	  tree value;
	  enum tree_code comp_code;

	  /* If op is not live beyond this stmt, do not bother to insert
	     asserts for it.  */
	  if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
	    continue;

	  /* If OP is used in such a way that we can infer a value
	     range for it, and we don't find a previous assertion for
	     it, create a new assertion location node for OP.  */
	  if (infer_value_range (stmt, op, &comp_code, &value))
	    {
	      /* If we are able to infer a nonzero value range for OP,
		 then walk backwards through the use-def chain to see if OP
		 was set via a typecast.

		 If so, then we can also infer a nonzero value range
		 for the operand of the NOP_EXPR.  */
	      if (comp_code == NE_EXPR && integer_zerop (value))
		{
		  tree t = op;
		  gimple def_stmt = SSA_NAME_DEF_STMT (t);

		  /* Follow chains of pointer-to-pointer conversions.  */
		  while (is_gimple_assign (def_stmt)
			 && gimple_assign_rhs_code (def_stmt)  == NOP_EXPR
			 && TREE_CODE
			     (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
			 && POINTER_TYPE_P
			     (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
		    {
		      t = gimple_assign_rhs1 (def_stmt);
		      def_stmt = SSA_NAME_DEF_STMT (t);

		      /* Note we want to register the assert for the
			 operand of the NOP_EXPR after SI, not after the
			 conversion.  */
		      if (! has_single_use (t))
			{
			  register_new_assert_for (t, t, comp_code, value,
						   bb, NULL, si);
			  need_assert = true;
			}
		    }
		}

	      register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
	      need_assert = true;
	    }
	}

      /* Update live: uses become live before this statement, defs do
	 not exist before it.  */
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
	bitmap_set_bit (live, SSA_NAME_VERSION (op));
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
	bitmap_clear_bit (live, SSA_NAME_VERSION (op));
    }

  /* Traverse all PHI nodes in BB, updating live.  */
  for (si = gsi_start_phis (bb); !gsi_end_p(si); gsi_next (&si))
    {
      use_operand_p arg_p;
      ssa_op_iter i;
      gimple phi = gsi_stmt (si);
      tree res = gimple_phi_result (phi);

      if (virtual_operand_p (res))
	continue;

      FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
	{
	  tree arg = USE_FROM_PTR (arg_p);
	  if (TREE_CODE (arg) == SSA_NAME)
	    bitmap_set_bit (live, SSA_NAME_VERSION (arg));
	}

      bitmap_clear_bit (live, SSA_NAME_VERSION (res));
    }

  return need_assert;
}
5840
/* Do an RPO walk over the function computing SSA name liveness
   on-the-fly and deciding on assert expressions to insert.
   Returns true if there are assert expressions to be inserted.  */

static bool
find_assert_locations (void)
{
  int *rpo = XNEWVEC (int, last_basic_block);
  int *bb_rpo = XNEWVEC (int, last_basic_block);
  int *last_rpo = XCNEWVEC (int, last_basic_block);
  int rpo_cnt, i;
  bool need_asserts;

  live = XCNEWVEC (sbitmap, last_basic_block);
  rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
  /* Map each block index to its position in the RPO order.  */
  for (i = 0; i < rpo_cnt; ++i)
    bb_rpo[rpo[i]] = i;

  need_asserts = false;
  /* Walk blocks in reverse RPO so that a block's live set is complete
     before being merged into its predecessors.  */
  for (i = rpo_cnt - 1; i >= 0; --i)
    {
      basic_block bb = BASIC_BLOCK (rpo[i]);
      edge e;
      edge_iterator ei;

      /* Lazily allocate the live bitmap for this block.  */
      if (!live[rpo[i]])
	{
	  live[rpo[i]] = sbitmap_alloc (num_ssa_names);
	  bitmap_clear (live[rpo[i]]);
	}

      /* Process BB and update the live information with uses in
	 this block.  */
      need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);

      /* Merge liveness into the predecessor blocks and free it.  */
      if (!bitmap_empty_p (live[rpo[i]]))
	{
	  int pred_rpo = i;
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      int pred = e->src->index;
	      /* Liveness is not propagated over back edges or into the
		 entry block.  */
	      if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
		continue;

	      if (!live[pred])
		{
		  live[pred] = sbitmap_alloc (num_ssa_names);
		  bitmap_clear (live[pred]);
		}
	      bitmap_ior (live[pred], live[pred], live[rpo[i]]);

	      if (bb_rpo[pred] < pred_rpo)
		pred_rpo = bb_rpo[pred];
	    }

	  /* Record the RPO number of the last visited block that needs
	     live information from this block.  */
	  last_rpo[rpo[i]] = pred_rpo;
	}
      else
	{
	  sbitmap_free (live[rpo[i]]);
	  live[rpo[i]] = NULL;
	}

      /* We can free all successors live bitmaps if all their
	 predecessors have been visited already.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (last_rpo[e->dest->index] == i
	    && live[e->dest->index])
	  {
	    sbitmap_free (live[e->dest->index]);
	    live[e->dest->index] = NULL;
	  }
    }

  XDELETEVEC (rpo);
  XDELETEVEC (bb_rpo);
  XDELETEVEC (last_rpo);
  /* Release any live bitmaps not already freed above.  */
  for (i = 0; i < last_basic_block; ++i)
    if (live[i])
      sbitmap_free (live[i]);
  XDELETEVEC (live);

  return need_asserts;
}
5928
5929 /* Create an ASSERT_EXPR for NAME and insert it in the location
5930 indicated by LOC. Return true if we made any edge insertions. */
5931
5932 static bool
5933 process_assert_insertions_for (tree name, assert_locus_t loc)
5934 {
5935 /* Build the comparison expression NAME_i COMP_CODE VAL. */
5936 gimple stmt;
5937 tree cond;
5938 gimple assert_stmt;
5939 edge_iterator ei;
5940 edge e;
5941
5942 /* If we have X <=> X do not insert an assert expr for that. */
5943 if (loc->expr == loc->val)
5944 return false;
5945
5946 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
5947 assert_stmt = build_assert_expr_for (cond, name);
5948 if (loc->e)
5949 {
5950 /* We have been asked to insert the assertion on an edge. This
5951 is used only by COND_EXPR and SWITCH_EXPR assertions. */
5952 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
5953 || (gimple_code (gsi_stmt (loc->si))
5954 == GIMPLE_SWITCH));
5955
5956 gsi_insert_on_edge (loc->e, assert_stmt);
5957 return true;
5958 }
5959
5960 /* Otherwise, we can insert right after LOC->SI iff the
5961 statement must not be the last statement in the block. */
5962 stmt = gsi_stmt (loc->si);
5963 if (!stmt_ends_bb_p (stmt))
5964 {
5965 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
5966 return false;
5967 }
5968
5969 /* If STMT must be the last statement in BB, we can only insert new
5970 assertions on the non-abnormal edge out of BB. Note that since
5971 STMT is not control flow, there may only be one non-abnormal edge
5972 out of BB. */
5973 FOR_EACH_EDGE (e, ei, loc->bb->succs)
5974 if (!(e->flags & EDGE_ABNORMAL))
5975 {
5976 gsi_insert_on_edge (e, assert_stmt);
5977 return true;
5978 }
5979
5980 gcc_unreachable ();
5981 }
5982
5983
5984 /* Process all the insertions registered for every name N_i registered
5985 in NEED_ASSERT_FOR. The list of assertions to be inserted are
5986 found in ASSERTS_FOR[i]. */
5987
5988 static void
5989 process_assert_insertions (void)
5990 {
5991 unsigned i;
5992 bitmap_iterator bi;
5993 bool update_edges_p = false;
5994 int num_asserts = 0;
5995
5996 if (dump_file && (dump_flags & TDF_DETAILS))
5997 dump_all_asserts (dump_file);
5998
5999 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
6000 {
6001 assert_locus_t loc = asserts_for[i];
6002 gcc_assert (loc);
6003
6004 while (loc)
6005 {
6006 assert_locus_t next = loc->next;
6007 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
6008 free (loc);
6009 loc = next;
6010 num_asserts++;
6011 }
6012 }
6013
6014 if (update_edges_p)
6015 gsi_commit_edge_inserts ();
6016
6017 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
6018 num_asserts);
6019 }
6020
6021
/* Traverse the flowgraph looking for conditional jumps to insert range
   expressions.  These range expressions are meant to provide information
   to optimizations that need to reason in terms of value ranges.  They
   will not be expanded into RTL.  For instance, given:

   x = ...
   y = ...
   if (x < y)
     y = x - 2;
   else
     x = y + 3;

   this pass will transform the code into:

   x = ...
   y = ...
   if (x < y)
    {
      x = ASSERT_EXPR <x, x < y>
      y = x - 2
    }
   else
    {
      y = ASSERT_EXPR <y, x <= y>
      x = y + 3
    }

   The idea is that once copy and constant propagation have run, other
   optimizations will be able to determine what ranges of values can 'x'
   take in different paths of the code, simply by checking the reaching
   definition of 'x'.  */

static void
insert_range_assertions (void)
{
  /* Global state consumed by find_assert_locations and
     process_assert_insertions.  */
  need_assert_for = BITMAP_ALLOC (NULL);
  asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);

  /* register_new_assert_for relies on dominance information.  */
  calculate_dominance_info (CDI_DOMINATORS);

  if (find_assert_locations ())
    {
      process_assert_insertions ();
      /* New ASSERT_EXPR definitions invalidate the SSA web; repair it
	 (no new PHI nodes are needed).  */
      update_ssa (TODO_update_ssa_no_phi);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
      dump_function_to_file (current_function_decl, dump_file, dump_flags);
    }

  free (asserts_for);
  BITMAP_FREE (need_assert_for);
}
6077
/* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
   and "struct" hacks. If VRP can determine that the
   array subscript is a constant, check if it is outside valid
   range. If the array subscript is a RANGE, warn if it is
   non-overlapping with valid range.
   IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR
   (taking the address of one-past-the-end is valid).  */

static void
check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
{
  value_range_t* vr = NULL;
  tree low_sub, up_sub;
  tree low_bound, up_bound, up_bound_p1;
  tree base;

  /* Warn at most once per reference.  */
  if (TREE_NO_WARNING (ref))
    return;

  low_sub = up_sub = TREE_OPERAND (ref, 1);
  up_bound = array_ref_up_bound (ref);

  /* Can not check flexible arrays.  */
  if (!up_bound
      || TREE_CODE (up_bound) != INTEGER_CST)
    return;

  /* Accesses to trailing arrays via pointers may access storage
     beyond the types array bounds.  */
  base = get_base_address (ref);
  if (base && TREE_CODE (base) == MEM_REF)
    {
      tree cref, next = NULL_TREE;

      if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
	return;

      /* Look for a FIELD_DECL following the accessed member; if there
	 is none, the array is the trailing member.  */
      cref = TREE_OPERAND (ref, 0);
      if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
	for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
	     next && TREE_CODE (next) != FIELD_DECL;
	     next = DECL_CHAIN (next))
	  ;

      /* If this is the last field in a struct type or a field in a
	 union type do not warn.  */
      if (!next)
	return;
    }

  low_bound = array_ref_low_bound (ref);
  up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
				 build_int_cst (TREE_TYPE (up_bound), 1));

  /* Refine a symbolic subscript with its computed value range.  For a
     VR_RANGE, LOW_SUB becomes the range maximum and UP_SUB the
     minimum; for a VR_ANTI_RANGE they are the excluded bounds.  */
  if (TREE_CODE (low_sub) == SSA_NAME)
    {
      vr = get_value_range (low_sub);
      if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
        {
          low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
          up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
        }
    }

  if (vr && vr->type == VR_ANTI_RANGE)
    {
      /* The anti-range proves the access is out of bounds only when
	 the excluded interval covers the entire valid index range.  */
      if (TREE_CODE (up_sub) == INTEGER_CST
          && tree_int_cst_lt (up_bound, up_sub)
          && TREE_CODE (low_sub) == INTEGER_CST
          && tree_int_cst_lt (low_sub, low_bound))
	{
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is outside array bounds");
	  TREE_NO_WARNING (ref) = 1;
	}
    }
  else if (TREE_CODE (up_sub) == INTEGER_CST
	   && (ignore_off_by_one
	       ? (tree_int_cst_lt (up_bound, up_sub)
		  && !tree_int_cst_equal (up_bound_p1, up_sub))
	       : (tree_int_cst_lt (up_bound, up_sub)
		  || tree_int_cst_equal (up_bound_p1, up_sub))))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Array bound warning for ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
	  fprintf (dump_file, "\n");
	}
      warning_at (location, OPT_Warray_bounds,
		  "array subscript is above array bounds");
      TREE_NO_WARNING (ref) = 1;
    }
  else if (TREE_CODE (low_sub) == INTEGER_CST
           && tree_int_cst_lt (low_sub, low_bound))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Array bound warning for ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
	  fprintf (dump_file, "\n");
	}
      warning_at (location, OPT_Warray_bounds,
		  "array subscript is below array bounds");
      TREE_NO_WARNING (ref) = 1;
    }
}
6184
/* Searches if the expr T, located at LOCATION computes
   address of an ARRAY_REF, and call check_array_ref on it.  */

static void
search_for_addr_array (tree t, location_t location)
{
  /* Follow a chain of single-rhs copies backwards to the defining
     expression of T, so we see through trivial SSA copies.  */
  while (TREE_CODE (t) == SSA_NAME)
    {
      gimple g = SSA_NAME_DEF_STMT (t);

      if (gimple_code (g) != GIMPLE_ASSIGN)
	return;

      if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
	  != GIMPLE_SINGLE_RHS)
	return;

      t = gimple_assign_rhs1 (g);
    }


  /* We are only interested in addresses of ARRAY_REF's.  */
  if (TREE_CODE (t) != ADDR_EXPR)
    return;

  /* Check each ARRAY_REF in the reference chain.  Taking the address
     allows a subscript one past the end, hence ignore_off_by_one.  */
  do
    {
      if (TREE_CODE (t) == ARRAY_REF)
	check_array_ref (location, t, true /*ignore_off_by_one*/);

      t = TREE_OPERAND (t, 0);
    }
  while (handled_component_p (t));

  /* Also diagnose &a + constant-offset folded into MEM_REF [&a, off]
     when the offset, in units of the element size, lies outside the
     array domain.  */
  if (TREE_CODE (t) == MEM_REF
      && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
      && !TREE_NO_WARNING (t))
    {
      tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
      tree low_bound, up_bound, el_sz;
      addr_wide_int idx;
      /* Only handle one-dimensional arrays with a known domain.  */
      if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
	  || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
	  || !TYPE_DOMAIN (TREE_TYPE (tem)))
	return;

      low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
      up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
      el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
      /* All of bounds and element size must be compile-time constants
	 for the index computation below to make sense.  */
      if (!low_bound
	  || TREE_CODE (low_bound) != INTEGER_CST
	  || !up_bound
	  || TREE_CODE (up_bound) != INTEGER_CST
	  || !el_sz
	  || TREE_CODE (el_sz) != INTEGER_CST)
	return;

      /* Convert the byte offset into an element index.  */
      idx = mem_ref_offset (t);
      idx = wi::sdiv_trunc (idx, el_sz);
      if (wi::lts_p (idx, 0))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Array bound warning for ");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
	      fprintf (dump_file, "\n");
	    }
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is below array bounds");
	  TREE_NO_WARNING (t) = 1;
	}
      else if (wi::gts_p (idx, addr_wide_int (up_bound) - low_bound + 1))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Array bound warning for ");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
	      fprintf (dump_file, "\n");
	    }
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is above array bounds");
	  TREE_NO_WARNING (t) = 1;
	}
    }
}
6271
6272 /* walk_tree() callback that checks if *TP is
6273 an ARRAY_REF inside an ADDR_EXPR (in which an array
6274 subscript one outside the valid range is allowed). Call
6275 check_array_ref for each ARRAY_REF found. The location is
6276 passed in DATA. */
6277
6278 static tree
6279 check_array_bounds (tree *tp, int *walk_subtree, void *data)
6280 {
6281 tree t = *tp;
6282 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
6283 location_t location;
6284
6285 if (EXPR_HAS_LOCATION (t))
6286 location = EXPR_LOCATION (t);
6287 else
6288 {
6289 location_t *locp = (location_t *) wi->info;
6290 location = *locp;
6291 }
6292
6293 *walk_subtree = TRUE;
6294
6295 if (TREE_CODE (t) == ARRAY_REF)
6296 check_array_ref (location, t, false /*ignore_off_by_one*/);
6297
6298 if (TREE_CODE (t) == MEM_REF
6299 || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
6300 search_for_addr_array (TREE_OPERAND (t, 0), location);
6301
6302 if (TREE_CODE (t) == ADDR_EXPR)
6303 *walk_subtree = FALSE;
6304
6305 return NULL_TREE;
6306 }
6307
6308 /* Walk over all statements of all reachable BBs and call check_array_bounds
6309 on them. */
6310
6311 static void
6312 check_all_array_refs (void)
6313 {
6314 basic_block bb;
6315 gimple_stmt_iterator si;
6316
6317 FOR_EACH_BB (bb)
6318 {
6319 edge_iterator ei;
6320 edge e;
6321 bool executable = false;
6322
6323 /* Skip blocks that were found to be unreachable. */
6324 FOR_EACH_EDGE (e, ei, bb->preds)
6325 executable |= !!(e->flags & EDGE_EXECUTABLE);
6326 if (!executable)
6327 continue;
6328
6329 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
6330 {
6331 gimple stmt = gsi_stmt (si);
6332 struct walk_stmt_info wi;
6333 if (!gimple_has_location (stmt))
6334 continue;
6335
6336 if (is_gimple_call (stmt))
6337 {
6338 size_t i;
6339 size_t n = gimple_call_num_args (stmt);
6340 for (i = 0; i < n; i++)
6341 {
6342 tree arg = gimple_call_arg (stmt, i);
6343 search_for_addr_array (arg, gimple_location (stmt));
6344 }
6345 }
6346 else
6347 {
6348 memset (&wi, 0, sizeof (wi));
6349 wi.info = CONST_CAST (void *, (const void *)
6350 gimple_location_ptr (stmt));
6351
6352 walk_gimple_op (gsi_stmt (si),
6353 check_array_bounds,
6354 &wi);
6355 }
6356 }
6357 }
6358 }
6359
/* Convert range assertion expressions into the implied copies and
   copy propagate away the copies.  Doing the trivial copy propagation
   here avoids the need to run the full copy propagation pass after
   VRP.

   FIXME, this will eventually lead to copy propagation removing the
   names that had useful range information attached to them.  For
   instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
   then N_i will have the range [3, +INF].

   However, by converting the assertion into the implied copy
   operation N_i = N_j, we will then copy-propagate N_j into the uses
   of N_i and lose the range information.  We may want to hold on to
   ASSERT_EXPRs a little while longer as the ranges could be used in
   things like jump threading.

   The problem with keeping ASSERT_EXPRs around is that passes after
   VRP need to handle them appropriately.

   Another approach would be to make the range information a first
   class property of the SSA_NAME so that it can be queried from
   any pass.  This is made somewhat more complex by the need for
   multiple ranges to be associated with one SSA_NAME.  */

static void
remove_range_assertions (void)
{
  basic_block bb;
  gimple_stmt_iterator si;

  /* Note that the BSI iterator bump happens at the bottom of the
     loop and no bump is necessary if we're removing the statement
     referenced by the current BSI.  */
  FOR_EACH_BB (bb)
    for (si = gsi_start_bb (bb); !gsi_end_p (si);)
      {
	gimple stmt = gsi_stmt (si);
	gimple use_stmt;

	if (is_gimple_assign (stmt)
	    && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
	  {
	    tree rhs = gimple_assign_rhs1 (stmt);
	    tree var;
	    tree cond = fold (ASSERT_EXPR_COND (rhs));
	    use_operand_p use_p;
	    imm_use_iterator iter;

	    /* An assertion whose condition folds to false would mean
	       the assertion was inserted on an impossible path.  */
	    gcc_assert (cond != boolean_false_node);

	    /* Propagate the RHS into every use of the LHS, turning
	       LHS = ASSERT_EXPR <VAR, cond> into plain uses of VAR.  */
	    var = ASSERT_EXPR_VAR (rhs);
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter,
				   gimple_assign_lhs (stmt))
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		{
		  SET_USE (use_p, var);
		  gcc_assert (TREE_CODE (var) == SSA_NAME);
		}

	    /* And finally, remove the copy, it is not needed.  */
	    gsi_remove (&si, true);
	    release_defs (stmt);
	  }
	else
	  gsi_next (&si);
      }
}
6428
6429
6430 /* Return true if STMT is interesting for VRP. */
6431
6432 static bool
6433 stmt_interesting_for_vrp (gimple stmt)
6434 {
6435 if (gimple_code (stmt) == GIMPLE_PHI)
6436 {
6437 tree res = gimple_phi_result (stmt);
6438 return (!virtual_operand_p (res)
6439 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
6440 || POINTER_TYPE_P (TREE_TYPE (res))));
6441 }
6442 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6443 {
6444 tree lhs = gimple_get_lhs (stmt);
6445
6446 /* In general, assignments with virtual operands are not useful
6447 for deriving ranges, with the obvious exception of calls to
6448 builtin functions. */
6449 if (lhs && TREE_CODE (lhs) == SSA_NAME
6450 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6451 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6452 && ((is_gimple_call (stmt)
6453 && gimple_call_fndecl (stmt) != NULL_TREE
6454 && DECL_BUILT_IN (gimple_call_fndecl (stmt)))
6455 || !gimple_vuse (stmt)))
6456 return true;
6457 }
6458 else if (gimple_code (stmt) == GIMPLE_COND
6459 || gimple_code (stmt) == GIMPLE_SWITCH)
6460 return true;
6461
6462 return false;
6463 }
6464
6465
6466 /* Initialize local data structures for VRP. */
6467
6468 static void
6469 vrp_initialize (void)
6470 {
6471 basic_block bb;
6472
6473 values_propagated = false;
6474 num_vr_values = num_ssa_names;
6475 vr_value = XCNEWVEC (value_range_t *, num_vr_values);
6476 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
6477
6478 FOR_EACH_BB (bb)
6479 {
6480 gimple_stmt_iterator si;
6481
6482 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
6483 {
6484 gimple phi = gsi_stmt (si);
6485 if (!stmt_interesting_for_vrp (phi))
6486 {
6487 tree lhs = PHI_RESULT (phi);
6488 set_value_range_to_varying (get_value_range (lhs));
6489 prop_set_simulate_again (phi, false);
6490 }
6491 else
6492 prop_set_simulate_again (phi, true);
6493 }
6494
6495 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
6496 {
6497 gimple stmt = gsi_stmt (si);
6498
6499 /* If the statement is a control insn, then we do not
6500 want to avoid simulating the statement once. Failure
6501 to do so means that those edges will never get added. */
6502 if (stmt_ends_bb_p (stmt))
6503 prop_set_simulate_again (stmt, true);
6504 else if (!stmt_interesting_for_vrp (stmt))
6505 {
6506 ssa_op_iter i;
6507 tree def;
6508 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
6509 set_value_range_to_varying (get_value_range (def));
6510 prop_set_simulate_again (stmt, false);
6511 }
6512 else
6513 prop_set_simulate_again (stmt, true);
6514 }
6515 }
6516 }
6517
6518 /* Return the singleton value-range for NAME or NAME. */
6519
6520 static inline tree
6521 vrp_valueize (tree name)
6522 {
6523 if (TREE_CODE (name) == SSA_NAME)
6524 {
6525 value_range_t *vr = get_value_range (name);
6526 if (vr->type == VR_RANGE
6527 && (vr->min == vr->max
6528 || operand_equal_p (vr->min, vr->max, 0)))
6529 return vr->min;
6530 }
6531 return name;
6532 }
6533
/* Visit assignment or call STMT.  If it produces an interesting
   range, record the LHS SSA name in *OUTPUT_P and return
   SSA_PROP_INTERESTING; otherwise return SSA_PROP_NOT_INTERESTING or
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
{
  tree def, lhs;
  ssa_op_iter iter;
  enum gimple_code code = gimple_code (stmt);
  lhs = gimple_get_lhs (stmt);

  /* We only keep track of ranges in integral and pointer types.  */
  if (TREE_CODE (lhs) == SSA_NAME
      && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	   /* It is valid to have NULL MIN/MAX values on a type.  See
	      build_range_type.  */
	   && TYPE_MIN_VALUE (TREE_TYPE (lhs))
	   && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
	  || POINTER_TYPE_P (TREE_TYPE (lhs))))
    {
      value_range_t new_vr = VR_INITIALIZER;

      /* Try folding the statement to a constant first.  */
      tree tem = gimple_fold_stmt_to_constant (stmt, vrp_valueize);
      if (tem && !is_overflow_infinity (tem))
	/* A constant fold yields the singleton range [tem, tem].  */
	set_value_range (&new_vr, VR_RANGE, tem, tem, NULL);
      /* Then dispatch to value-range extracting functions.  */
      else if (code == GIMPLE_CALL)
	extract_range_basic (&new_vr, stmt);
      else
	extract_range_from_assignment (&new_vr, stmt);

      /* Only report the name when its lattice value changed; the
	 propagator uses this to decide what to re-simulate.  */
      if (update_value_range (lhs, &new_vr))
	{
	  *output_p = lhs;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Found new range for ");
	      print_generic_expr (dump_file, lhs, 0);
	      fprintf (dump_file, ": ");
	      dump_value_range (dump_file, &new_vr);
	      fprintf (dump_file, "\n\n");
	    }

	  if (new_vr.type == VR_VARYING)
	    return SSA_PROP_VARYING;

	  return SSA_PROP_INTERESTING;
	}

      return SSA_PROP_NOT_INTERESTING;
    }

  /* Every other statement produces no useful ranges.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
    set_value_range_to_varying (get_value_range (def));

  return SSA_PROP_VARYING;
}
6594
6595 /* Helper that gets the value range of the SSA_NAME with version I
6596 or a symbolic range containing the SSA_NAME only if the value range
6597 is varying or undefined. */
6598
6599 static inline value_range_t
6600 get_vr_for_comparison (int i)
6601 {
6602 value_range_t vr = *get_value_range (ssa_name (i));
6603
6604 /* If name N_i does not have a valid range, use N_i as its own
6605 range. This allows us to compare against names that may
6606 have N_i in their ranges. */
6607 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
6608 {
6609 vr.type = VR_RANGE;
6610 vr.min = ssa_name (i);
6611 vr.max = ssa_name (i);
6612 }
6613
6614 return vr;
6615 }
6616
/* Compare all the value ranges for names equivalent to VAR with VAL
   using comparison code COMP.  Return the same value returned by
   compare_range_with_value, including the setting of
   *STRICT_OVERFLOW_P.  *STRICT_OVERFLOW_P is set only when a result
   is returned and every comparison that produced it relied on
   undefined signed overflow.  */

static tree
compare_name_with_value (enum tree_code comp, tree var, tree val,
			 bool *strict_overflow_p)
{
  bitmap_iterator bi;
  unsigned i;
  bitmap e;
  tree retval, t;
  int used_strict_overflow;
  bool sop;
  value_range_t equiv_vr;

  /* Get the set of equivalences for VAR.  */
  e = get_value_range (var)->equiv;

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Compare vars' value range with val.  */
  equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
  sop = false;
  retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
  if (retval)
    used_strict_overflow = sop ? 1 : 0;

  /* If the equiv set is empty we have done all work we need to do.  */
  if (e == NULL)
    {
      if (retval
	  && used_strict_overflow > 0)
	*strict_overflow_p = true;
      return retval;
    }

  /* Otherwise compare VAL against every member of the equivalence
     set; all answering members must agree for the result to stand.  */
  EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
    {
      equiv_vr = get_vr_for_comparison (i);
      sop = false;
      t = compare_range_with_value (comp, &equiv_vr, val, &sop);
      if (t)
	{
	  /* If we get different answers from different members
	     of the equivalence set this check must be in a dead
	     code region.  Folding it to a trap representation
	     would be correct here.  For now just return don't-know.  */
	  if (retval != NULL
	      && t != retval)
	    {
	      retval = NULL_TREE;
	      break;
	    }
	  retval = t;

	  /* One overflow-free comparison pins the flag at 0.  */
	  if (!sop)
	    used_strict_overflow = 0;
	  else if (used_strict_overflow < 0)
	    used_strict_overflow = 1;
	}
    }

  if (retval
      && used_strict_overflow > 0)
    *strict_overflow_p = true;

  return retval;
}
6689
6690
/* Given a comparison code COMP and names N1 and N2, compare all the
   ranges equivalent to N1 against all the ranges equivalent to N2
   to determine the value of N1 COMP N2.  Return the same value
   returned by compare_ranges.  Set *STRICT_OVERFLOW_P to indicate
   whether we relied on an overflow infinity in the comparison.  */


static tree
compare_names (enum tree_code comp, tree n1, tree n2,
	       bool *strict_overflow_p)
{
  tree t, retval;
  bitmap e1, e2;
  bitmap_iterator bi1, bi2;
  unsigned i1, i2;
  int used_strict_overflow;
  /* Lazily-allocated scratch bitmaps, reused across calls; they are
     left empty between calls (see the clear_bit calls below).  */
  static bitmap_obstack *s_obstack = NULL;
  static bitmap s_e1 = NULL, s_e2 = NULL;

  /* Compare the ranges of every name equivalent to N1 against the
     ranges of every name equivalent to N2.  */
  e1 = get_value_range (n1)->equiv;
  e2 = get_value_range (n2)->equiv;

  /* Use the fake bitmaps if e1 or e2 are not available.  */
  if (s_obstack == NULL)
    {
      s_obstack = XNEW (bitmap_obstack);
      bitmap_obstack_initialize (s_obstack);
      s_e1 = BITMAP_ALLOC (s_obstack);
      s_e2 = BITMAP_ALLOC (s_obstack);
    }
  if (e1 == NULL)
    e1 = s_e1;
  if (e2 == NULL)
    e2 = s_e2;

  /* Add N1 and N2 to their own set of equivalences to avoid
     duplicating the body of the loop just to check N1 and N2
     ranges.  NOTE: these bits are cleared again on every exit path
     below, since E1/E2 may be the live equivalence sets.  */
  bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_set_bit (e2, SSA_NAME_VERSION (n2));

  /* If the equivalence sets have a common intersection, then the two
     names can be compared without checking their ranges.  */
  if (bitmap_intersect_p (e1, e2))
    {
      bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
      bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));

      /* Equivalent names satisfy exactly the reflexive comparisons.  */
      return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
	     ? boolean_true_node
	     : boolean_false_node;
    }

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Otherwise, compare all the equivalent ranges.  First, add N1 and
     N2 to their own set of equivalences to avoid duplicating the body
     of the loop just to check N1 and N2 ranges.  */
  EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
    {
      value_range_t vr1 = get_vr_for_comparison (i1);

      t = retval = NULL_TREE;
      EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
	{
	  bool sop = false;

	  value_range_t vr2 = get_vr_for_comparison (i2);

	  t = compare_ranges (comp, &vr1, &vr2, &sop);
	  if (t)
	    {
	      /* If we get different answers from different members
		 of the equivalence set this check must be in a dead
		 code region.  Folding it to a trap representation
		 would be correct here.  For now just return don't-know.  */
	      if (retval != NULL
		  && t != retval)
		{
		  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
		  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
		  return NULL_TREE;
		}
	      retval = t;

	      if (!sop)
		used_strict_overflow = 0;
	      else if (used_strict_overflow < 0)
		used_strict_overflow = 1;
	    }
	}

      /* The first member of E1 whose inner loop reaches a verdict
	 decides the whole comparison.  */
      if (retval)
	{
	  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
	  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
	  if (used_strict_overflow > 0)
	    *strict_overflow_p = true;
	  return retval;
	}
    }

  /* None of the equivalent ranges are useful in computing this
     comparison.  */
  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
  return NULL_TREE;
}
6803
6804 /* Helper function for vrp_evaluate_conditional_warnv. */
6805
6806 static tree
6807 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
6808 tree op0, tree op1,
6809 bool * strict_overflow_p)
6810 {
6811 value_range_t *vr0, *vr1;
6812
6813 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
6814 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
6815
6816 if (vr0 && vr1)
6817 return compare_ranges (code, vr0, vr1, strict_overflow_p);
6818 else if (vr0 && vr1 == NULL)
6819 return compare_range_with_value (code, vr0, op1, strict_overflow_p);
6820 else if (vr0 == NULL && vr1)
6821 return (compare_range_with_value
6822 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
6823 return NULL;
6824 }
6825
6826 /* Helper function for vrp_evaluate_conditional_warnv. */
6827
6828 static tree
6829 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
6830 tree op1, bool use_equiv_p,
6831 bool *strict_overflow_p, bool *only_ranges)
6832 {
6833 tree ret;
6834 if (only_ranges)
6835 *only_ranges = true;
6836
6837 /* We only deal with integral and pointer types. */
6838 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
6839 && !POINTER_TYPE_P (TREE_TYPE (op0)))
6840 return NULL_TREE;
6841
6842 if (use_equiv_p)
6843 {
6844 if (only_ranges
6845 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
6846 (code, op0, op1, strict_overflow_p)))
6847 return ret;
6848 *only_ranges = false;
6849 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
6850 return compare_names (code, op0, op1, strict_overflow_p);
6851 else if (TREE_CODE (op0) == SSA_NAME)
6852 return compare_name_with_value (code, op0, op1, strict_overflow_p);
6853 else if (TREE_CODE (op1) == SSA_NAME)
6854 return (compare_name_with_value
6855 (swap_tree_comparison (code), op1, op0, strict_overflow_p));
6856 }
6857 else
6858 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
6859 strict_overflow_p);
6860 return NULL_TREE;
6861 }
6862
/* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
   information.  Return NULL if the conditional can not be evaluated.
   The ranges of all the names equivalent with the operands in COND
   will be used when trying to compute the value.  If the result is
   based on undefined signed overflow, issue a warning if
   appropriate.  */

static tree
vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
{
  bool sop;
  tree ret;
  bool only_ranges;

  /* Some passes and foldings leak constants with overflow flag set
     into the IL.  Avoid doing wrong things with these and bail out.  */
  if ((TREE_CODE (op0) == INTEGER_CST
       && TREE_OVERFLOW (op0))
      || (TREE_CODE (op1) == INTEGER_CST
	  && TREE_OVERFLOW (op1)))
    return NULL_TREE;

  sop = false;
  ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
  						 &only_ranges);

  /* SOP set means the result depends on signed overflow being
     undefined; warn if -Wstrict-overflow asks for it.  */
  if (ret && sop)
    {
      enum warn_strict_overflow_code wc;
      const char* warnmsg;

      if (is_gimple_min_invariant (ret))
	{
	  wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
	  warnmsg = G_("assuming signed overflow does not occur when "
		       "simplifying conditional to constant");
	}
      else
	{
	  wc = WARN_STRICT_OVERFLOW_COMPARISON;
	  warnmsg = G_("assuming signed overflow does not occur when "
		       "simplifying conditional");
	}

      if (issue_strict_overflow_warning (wc))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);
	  warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
	}
    }

  if (warn_type_limits
      && ret && only_ranges
      && TREE_CODE_CLASS (code) == tcc_comparison
      && TREE_CODE (op0) == SSA_NAME)
    {
      /* If the comparison is being folded and the operand on the LHS
	 is being compared against a constant value that is outside of
	 the natural range of OP0's type, then the predicate will
	 always fold regardless of the value of OP0.  If -Wtype-limits
	 was specified, emit a warning.  */
      tree type = TREE_TYPE (op0);
      value_range_t *vr0 = get_value_range (op0);

      if (vr0->type != VR_VARYING
	  && INTEGRAL_TYPE_P (type)
	  && vrp_val_is_min (vr0->min)
	  && vrp_val_is_max (vr0->max)
	  && is_gimple_min_invariant (op1))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);

	  warning_at (location, OPT_Wtype_limits,
		      integer_zerop (ret)
		      ? G_("comparison always false "
                           "due to limited range of data type")
		      : G_("comparison always true "
                           "due to limited range of data type"));
	}
    }

  return ret;
}
6956
6957
/* Visit conditional statement STMT.  If we can determine which edge
   will be taken out of STMT's basic block, record it in
   *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
{
  tree val;
  bool sop;

  *taken_edge_p = NULL;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      tree use;
      ssa_op_iter i;

      fprintf (dump_file, "\nVisiting conditional with predicate: ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
      fprintf (dump_file, "\nWith known ranges\n");

      FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
	{
	  fprintf (dump_file, "\t");
	  print_generic_expr (dump_file, use, 0);
	  fprintf (dump_file, ": ");
	  dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
	}

      fprintf (dump_file, "\n");
    }

  /* Compute the value of the predicate COND by checking the known
     ranges of each of its operands.

     Note that we cannot evaluate all the equivalent ranges here
     because those ranges may not yet be final and with the current
     propagation strategy, we cannot determine when the value ranges
     of the names in the equivalence set have changed.

     For instance, given the following code fragment

        i_5 = PHI <8, i_13>
	...
     	i_14 = ASSERT_EXPR <i_5, i_5 != 0>
	if (i_14 == 1)
	  ...

     Assume that on the first visit to i_14, i_5 has the temporary
     range [8, 8] because the second argument to the PHI function is
     not yet executable.  We derive the range ~[0, 0] for i_14 and the
     equivalence set { i_5 }.  So, when we visit 'if (i_14 == 1)' for
     the first time, since i_14 is equivalent to the range [8, 8], we
     determine that the predicate is always false.

     On the next round of propagation, i_13 is determined to be
     VARYING, which causes i_5 to drop down to VARYING.  So, another
     visit to i_14 is scheduled.  In this second visit, we compute the
     exact same range and equivalence set for i_14, namely ~[0, 0] and
     { i_5 }.  But we did not have the previous range for i_5
     registered, so vrp_visit_assignment thinks that the range for
     i_14 has not changed.  Therefore, the predicate 'if (i_14 == 1)'
     is not visited again, which stops propagation from visiting
     statements in the THEN clause of that if().

     To properly fix this we would need to keep the previous range
     value for the names in the equivalence set.  This way we would've
     discovered that from one visit to the other i_5 changed from
     range [8, 8] to VR_VARYING.

     However, fixing this apparent limitation may not be worth the
     additional checking.  Testing on several code bases (GCC, DLV,
     MICO, TRAMP3D and SPEC2000) showed that doing this results in
     4 more predicates folded in SPEC.  */
  sop = false;

  /* use_equiv_p is false here for the reason explained above.  */
  val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
						 gimple_cond_lhs (stmt),
						 gimple_cond_rhs (stmt),
						 false, &sop, NULL);
  if (val)
    {
      if (!sop)
	*taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
      else
	{
	  /* Do not fold predicates whose result relies on signed
	     overflow being undefined during propagation.  */
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "\nIgnoring predicate evaluation because "
		     "it assumes that signed overflow is undefined");
	  val = NULL_TREE;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nPredicate evaluates to: ");
      if (val == NULL_TREE)
	fprintf (dump_file, "DON'T KNOW\n");
      else
	print_generic_stmt (dump_file, val, 0);
    }

  return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
}
7064
/* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
   that includes the value VAL.  The search is restricted to the range
   [START_IDX, n - 1] where n is the size of VEC.

   If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
   returned.

   If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
   it is placed in IDX and false is returned.

   If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
   returned.  */

static bool
find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
{
  size_t n = gimple_switch_num_labels (stmt);
  size_t low, high;

  /* Find case label for minimum of the value range or the next one.
     At each iteration we are searching in [low, high - 1].  */

  for (low = start_idx, high = n; high != low; )
    {
      tree t;
      int cmp;
      /* Note that i != high, so we never ask for n.  */
      size_t i = (high + low) / 2;
      t = gimple_switch_label (stmt, i);

      /* Cache the result of comparing CASE_LOW and val.  */
      cmp = tree_int_cst_compare (CASE_LOW (t), val);

      if (cmp == 0)
	{
	  /* Ranges cannot be empty.  */
	  *idx = i;
	  return true;
	}
      else if (cmp > 0)
	/* CASE_LOW > VAL: narrow the search to the lower half.  */
	high = i;
      else
	{
	  low = i + 1;
	  /* VAL may still fall inside this label's CASE_LOW..CASE_HIGH
	     range even though CASE_LOW < VAL.  */
	  if (CASE_HIGH (t) != NULL
	      && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
	    {
	      *idx = i;
	      return true;
	    }
        }
    }

  /* Not found: HIGH is the first label greater than VAL, or n.  */
  *idx = high;
  return false;
}
7121
/* Searches the case label vector VEC for the range of CASE_LABELs that is used
   for values between MIN and MAX. The first index is placed in MIN_IDX. The
   last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
   then MAX_IDX < MIN_IDX.
   Returns true if the default label is not needed.  */

static bool
find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
		       size_t *max_idx)
{
  size_t i, j;
  /* find_case_label_index returns false when the value itself has no
     label, i.e. it would go to the default case.  */
  bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
  bool max_take_default = !find_case_label_index (stmt, i, max, &j);

  if (i == j
      && min_take_default
      && max_take_default)
    {
      /* Only the default case label reached.
         Return an empty range.  */
      *min_idx = 1;
      *max_idx = 0;
      return false;
    }
  else
    {
      bool take_default = min_take_default || max_take_default;
      tree low, high;
      size_t k;

      /* J currently points past MAX's label; step back to the last
	 label actually covered by [MIN, MAX].  */
      if (max_take_default)
	j--;

      /* If the case label range is continuous, we do not need
	 the default case label.  Verify that.  */
      high = CASE_LOW (gimple_switch_label (stmt, i));
      if (CASE_HIGH (gimple_switch_label (stmt, i)))
	high = CASE_HIGH (gimple_switch_label (stmt, i));
      for (k = i + 1; k <= j; ++k)
	{
	  low = CASE_LOW (gimple_switch_label (stmt, k));
	  /* A gap between consecutive labels means some values in
	     [MIN, MAX] fall through to the default.  */
	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
	    {
	      take_default = true;
	      break;
	    }
	  high = low;
	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
	    high = CASE_HIGH (gimple_switch_label (stmt, k));
	}

      *min_idx = i;
      *max_idx = j;
      return !take_default;
    }
}
7178
/* Searches the case label vector VEC for the ranges of CASE_LABELs that are
   used in range VR.  The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and
   MAX_IDX2.  If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
   Returns true if the default label is not needed.  */

static bool
find_case_label_ranges (gimple stmt, value_range_t *vr, size_t *min_idx1,
			size_t *max_idx1, size_t *min_idx2,
			size_t *max_idx2)
{
  size_t i, j, k, l;
  unsigned int n = gimple_switch_num_labels (stmt);
  bool take_default;
  tree case_low, case_high;
  tree min = vr->min, max = vr->max;

  gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);

  take_default = !find_case_label_range (stmt, min, max, &i, &j);

  /* Set second range to empty.  */
  *min_idx2 = 1;
  *max_idx2 = 0;

  /* A plain range needs only the single label span [i, j].  */
  if (vr->type == VR_RANGE)
    {
      *min_idx1 = i;
      *max_idx1 = j;
      return !take_default;
    }

  /* VR is an anti-range ~[MIN, MAX] from here on.  */

  /* Set first range to all case labels.  */
  *min_idx1 = 1;
  *max_idx1 = n - 1;

  if (i > j)
    return false;

  /* Make sure all the values of case labels [i , j] are contained in
     range [MIN, MAX].  */
  case_low = CASE_LOW (gimple_switch_label (stmt, i));
  case_high = CASE_HIGH (gimple_switch_label (stmt, j));
  if (tree_int_cst_compare (case_low, min) < 0)
    i += 1;
  if (case_high != NULL_TREE
      && tree_int_cst_compare (max, case_high) < 0)
    j -= 1;

  if (i > j)
    return false;

  /* If the range spans case labels [i, j], the corresponding anti-range spans
     the labels [1, i - 1] and [j + 1, n - 1].  */
  k = j + 1;
  l = n - 1;
  if (k > l)
    {
      /* Upper part is empty.  */
      k = 1;
      l = 0;
    }

  j = i - 1;
  i = 1;
  if (i > j)
    {
      /* Lower part is empty: promote the upper part to the first
	 range slot and leave the second one empty.  */
      i = k;
      j = l;
      k = 1;
      l = 0;
    }

  *min_idx1 = i;
  *max_idx1 = j;
  *min_idx2 = k;
  *max_idx2 = l;
  return false;
}
7256
/* Visit switch statement STMT.  If we can determine which edge
   will be taken out of STMT's basic block, record it in
   *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
{
  tree op, val;
  value_range_t *vr;
  size_t i = 0, j = 0, k, l;
  bool take_default;

  *taken_edge_p = NULL;
  op = gimple_switch_index (stmt);
  /* Only SSA names have recorded value ranges.  */
  if (TREE_CODE (op) != SSA_NAME)
    return SSA_PROP_VARYING;

  vr = get_value_range (op);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting switch expression with operand ");
      print_generic_expr (dump_file, op, 0);
      fprintf (dump_file, " with known range ");
      dump_value_range (dump_file, vr);
      fprintf (dump_file, "\n");
    }

  /* Without a constant (anti-)range there is nothing to decide.  */
  if ((vr->type != VR_RANGE
       && vr->type != VR_ANTI_RANGE)
      || symbolic_range_p (vr))
    return SSA_PROP_VARYING;

  /* Find the single edge that is taken from the switch expression.
     [i, j] and [k, l] are the (up to two) label intervals still
     reachable given VR; take_default is whether the default label
     also remains reachable.  */
  take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);

  /* Check if the range spans no CASE_LABEL.  If so, we only reach the default
     label.  */
  if (j < i)
    {
      gcc_assert (take_default);
      val = gimple_switch_default_label (stmt);
    }
  else
    {
      /* Check if labels with index i to j and maybe the default label
	 are all reaching the same label.  */

      val = gimple_switch_label (stmt, i);
      if (take_default
	  && CASE_LABEL (gimple_switch_default_label (stmt))
	     != CASE_LABEL (val))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "  not a single destination for this "
		     "range\n");
	  return SSA_PROP_VARYING;
	}
      /* All labels in the first interval must share VAL's destination.  */
      for (++i; i <= j; ++i)
	{
	  if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "  not a single destination for this "
			 "range\n");
	      return SSA_PROP_VARYING;
	    }
	}
      /* Likewise for the second interval (empty when l < k).  */
      for (; k <= l; ++k)
	{
	  if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "  not a single destination for this "
			 "range\n");
	      return SSA_PROP_VARYING;
	    }
	}
    }

  *taken_edge_p = find_edge (gimple_bb (stmt),
			     label_to_block (CASE_LABEL (val)));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "  will take edge to ");
      print_generic_stmt (dump_file, CASE_LABEL (val), 0);
    }

  return SSA_PROP_INTERESTING;
}
7348
7349
7350 /* Evaluate statement STMT. If the statement produces a useful range,
7351 return SSA_PROP_INTERESTING and record the SSA name with the
7352 interesting range into *OUTPUT_P.
7353
7354 If STMT is a conditional branch and we can determine its truth
7355 value, the taken edge is recorded in *TAKEN_EDGE_P.
7356
7357 If STMT produces a varying value, return SSA_PROP_VARYING. */
7358
7359 static enum ssa_prop_result
7360 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
7361 {
7362 tree def;
7363 ssa_op_iter iter;
7364
7365 if (dump_file && (dump_flags & TDF_DETAILS))
7366 {
7367 fprintf (dump_file, "\nVisiting statement:\n");
7368 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
7369 fprintf (dump_file, "\n");
7370 }
7371
7372 if (!stmt_interesting_for_vrp (stmt))
7373 gcc_assert (stmt_ends_bb_p (stmt));
7374 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
7375 {
7376 /* In general, assignments with virtual operands are not useful
7377 for deriving ranges, with the obvious exception of calls to
7378 builtin functions. */
7379 if ((is_gimple_call (stmt)
7380 && gimple_call_fndecl (stmt) != NULL_TREE
7381 && DECL_BUILT_IN (gimple_call_fndecl (stmt)))
7382 || !gimple_vuse (stmt))
7383 return vrp_visit_assignment_or_call (stmt, output_p);
7384 }
7385 else if (gimple_code (stmt) == GIMPLE_COND)
7386 return vrp_visit_cond_stmt (stmt, taken_edge_p);
7387 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7388 return vrp_visit_switch_stmt (stmt, taken_edge_p);
7389
7390 /* All other statements produce nothing of interest for VRP, so mark
7391 their outputs varying and prevent further simulation. */
7392 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
7393 set_value_range_to_varying (get_value_range (def));
7394
7395 return SSA_PROP_VARYING;
7396 }
7397
7398 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
7399 { VR1TYPE, VR0MIN, VR0MAX } and store the result
7400 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
7401 possible such range. The resulting range is not canonicalized. */
7402
7403 static void
7404 union_ranges (enum value_range_type *vr0type,
7405 tree *vr0min, tree *vr0max,
7406 enum value_range_type vr1type,
7407 tree vr1min, tree vr1max)
7408 {
7409 bool mineq = operand_equal_p (*vr0min, vr1min, 0);
7410 bool maxeq = operand_equal_p (*vr0max, vr1max, 0);
7411
7412 /* [] is vr0, () is vr1 in the following classification comments. */
7413 if (mineq && maxeq)
7414 {
7415 /* [( )] */
7416 if (*vr0type == vr1type)
7417 /* Nothing to do for equal ranges. */
7418 ;
7419 else if ((*vr0type == VR_RANGE
7420 && vr1type == VR_ANTI_RANGE)
7421 || (*vr0type == VR_ANTI_RANGE
7422 && vr1type == VR_RANGE))
7423 {
7424 /* For anti-range with range union the result is varying. */
7425 goto give_up;
7426 }
7427 else
7428 gcc_unreachable ();
7429 }
7430 else if (operand_less_p (*vr0max, vr1min) == 1
7431 || operand_less_p (vr1max, *vr0min) == 1)
7432 {
7433 /* [ ] ( ) or ( ) [ ]
7434 If the ranges have an empty intersection, result of the union
7435 operation is the anti-range or if both are anti-ranges
7436 it covers all. */
7437 if (*vr0type == VR_ANTI_RANGE
7438 && vr1type == VR_ANTI_RANGE)
7439 goto give_up;
7440 else if (*vr0type == VR_ANTI_RANGE
7441 && vr1type == VR_RANGE)
7442 ;
7443 else if (*vr0type == VR_RANGE
7444 && vr1type == VR_ANTI_RANGE)
7445 {
7446 *vr0type = vr1type;
7447 *vr0min = vr1min;
7448 *vr0max = vr1max;
7449 }
7450 else if (*vr0type == VR_RANGE
7451 && vr1type == VR_RANGE)
7452 {
7453 /* The result is the convex hull of both ranges. */
7454 if (operand_less_p (*vr0max, vr1min) == 1)
7455 {
7456 /* If the result can be an anti-range, create one. */
7457 if (TREE_CODE (*vr0max) == INTEGER_CST
7458 && TREE_CODE (vr1min) == INTEGER_CST
7459 && vrp_val_is_min (*vr0min)
7460 && vrp_val_is_max (vr1max))
7461 {
7462 tree min = int_const_binop (PLUS_EXPR,
7463 *vr0max,
7464 build_int_cst (TREE_TYPE (*vr0max), 1));
7465 tree max = int_const_binop (MINUS_EXPR,
7466 vr1min,
7467 build_int_cst (TREE_TYPE (vr1min), 1));
7468 if (!operand_less_p (max, min))
7469 {
7470 *vr0type = VR_ANTI_RANGE;
7471 *vr0min = min;
7472 *vr0max = max;
7473 }
7474 else
7475 *vr0max = vr1max;
7476 }
7477 else
7478 *vr0max = vr1max;
7479 }
7480 else
7481 {
7482 /* If the result can be an anti-range, create one. */
7483 if (TREE_CODE (vr1max) == INTEGER_CST
7484 && TREE_CODE (*vr0min) == INTEGER_CST
7485 && vrp_val_is_min (vr1min)
7486 && vrp_val_is_max (*vr0max))
7487 {
7488 tree min = int_const_binop (PLUS_EXPR,
7489 vr1max,
7490 build_int_cst (TREE_TYPE (vr1max), 1));
7491 tree max = int_const_binop (MINUS_EXPR,
7492 *vr0min,
7493 build_int_cst (TREE_TYPE (*vr0min), 1));
7494 if (!operand_less_p (max, min))
7495 {
7496 *vr0type = VR_ANTI_RANGE;
7497 *vr0min = min;
7498 *vr0max = max;
7499 }
7500 else
7501 *vr0min = vr1min;
7502 }
7503 else
7504 *vr0min = vr1min;
7505 }
7506 }
7507 else
7508 gcc_unreachable ();
7509 }
7510 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
7511 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
7512 {
7513 /* [ ( ) ] or [( ) ] or [ ( )] */
7514 if (*vr0type == VR_RANGE
7515 && vr1type == VR_RANGE)
7516 ;
7517 else if (*vr0type == VR_ANTI_RANGE
7518 && vr1type == VR_ANTI_RANGE)
7519 {
7520 *vr0type = vr1type;
7521 *vr0min = vr1min;
7522 *vr0max = vr1max;
7523 }
7524 else if (*vr0type == VR_ANTI_RANGE
7525 && vr1type == VR_RANGE)
7526 {
7527 /* Arbitrarily choose the right or left gap. */
7528 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
7529 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
7530 build_int_cst (TREE_TYPE (vr1min), 1));
7531 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
7532 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
7533 build_int_cst (TREE_TYPE (vr1max), 1));
7534 else
7535 goto give_up;
7536 }
7537 else if (*vr0type == VR_RANGE
7538 && vr1type == VR_ANTI_RANGE)
7539 /* The result covers everything. */
7540 goto give_up;
7541 else
7542 gcc_unreachable ();
7543 }
7544 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
7545 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
7546 {
7547 /* ( [ ] ) or ([ ] ) or ( [ ]) */
7548 if (*vr0type == VR_RANGE
7549 && vr1type == VR_RANGE)
7550 {
7551 *vr0type = vr1type;
7552 *vr0min = vr1min;
7553 *vr0max = vr1max;
7554 }
7555 else if (*vr0type == VR_ANTI_RANGE
7556 && vr1type == VR_ANTI_RANGE)
7557 ;
7558 else if (*vr0type == VR_RANGE
7559 && vr1type == VR_ANTI_RANGE)
7560 {
7561 *vr0type = VR_ANTI_RANGE;
7562 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
7563 {
7564 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
7565 build_int_cst (TREE_TYPE (*vr0min), 1));
7566 *vr0min = vr1min;
7567 }
7568 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
7569 {
7570 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
7571 build_int_cst (TREE_TYPE (*vr0max), 1));
7572 *vr0max = vr1max;
7573 }
7574 else
7575 goto give_up;
7576 }
7577 else if (*vr0type == VR_ANTI_RANGE
7578 && vr1type == VR_RANGE)
7579 /* The result covers everything. */
7580 goto give_up;
7581 else
7582 gcc_unreachable ();
7583 }
7584 else if ((operand_less_p (vr1min, *vr0max) == 1
7585 || operand_equal_p (vr1min, *vr0max, 0))
7586 && operand_less_p (*vr0min, vr1min) == 1)
7587 {
7588 /* [ ( ] ) or [ ]( ) */
7589 if (*vr0type == VR_RANGE
7590 && vr1type == VR_RANGE)
7591 *vr0max = vr1max;
7592 else if (*vr0type == VR_ANTI_RANGE
7593 && vr1type == VR_ANTI_RANGE)
7594 *vr0min = vr1min;
7595 else if (*vr0type == VR_ANTI_RANGE
7596 && vr1type == VR_RANGE)
7597 {
7598 if (TREE_CODE (vr1min) == INTEGER_CST)
7599 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
7600 build_int_cst (TREE_TYPE (vr1min), 1));
7601 else
7602 goto give_up;
7603 }
7604 else if (*vr0type == VR_RANGE
7605 && vr1type == VR_ANTI_RANGE)
7606 {
7607 if (TREE_CODE (*vr0max) == INTEGER_CST)
7608 {
7609 *vr0type = vr1type;
7610 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
7611 build_int_cst (TREE_TYPE (*vr0max), 1));
7612 *vr0max = vr1max;
7613 }
7614 else
7615 goto give_up;
7616 }
7617 else
7618 gcc_unreachable ();
7619 }
7620 else if ((operand_less_p (*vr0min, vr1max) == 1
7621 || operand_equal_p (*vr0min, vr1max, 0))
7622 && operand_less_p (vr1min, *vr0min) == 1)
7623 {
7624 /* ( [ ) ] or ( )[ ] */
7625 if (*vr0type == VR_RANGE
7626 && vr1type == VR_RANGE)
7627 *vr0min = vr1min;
7628 else if (*vr0type == VR_ANTI_RANGE
7629 && vr1type == VR_ANTI_RANGE)
7630 *vr0max = vr1max;
7631 else if (*vr0type == VR_ANTI_RANGE
7632 && vr1type == VR_RANGE)
7633 {
7634 if (TREE_CODE (vr1max) == INTEGER_CST)
7635 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
7636 build_int_cst (TREE_TYPE (vr1max), 1));
7637 else
7638 goto give_up;
7639 }
7640 else if (*vr0type == VR_RANGE
7641 && vr1type == VR_ANTI_RANGE)
7642 {
7643 if (TREE_CODE (*vr0min) == INTEGER_CST)
7644 {
7645 *vr0type = vr1type;
7646 *vr0min = vr1min;
7647 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
7648 build_int_cst (TREE_TYPE (*vr0min), 1));
7649 }
7650 else
7651 goto give_up;
7652 }
7653 else
7654 gcc_unreachable ();
7655 }
7656 else
7657 goto give_up;
7658
7659 return;
7660
7661 give_up:
7662 *vr0type = VR_VARYING;
7663 *vr0min = NULL_TREE;
7664 *vr0max = NULL_TREE;
7665 }
7666
/* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
   { VR1TYPE, VR1MIN, VR1MAX } and store the result
   in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
   possible such range.  The resulting range is not canonicalized.
   If no better answer is found, VR0 is left unchanged as a
   conservative (over-approximate) result.  */

static void
intersect_ranges (enum value_range_type *vr0type,
		  tree *vr0min, tree *vr0max,
		  enum value_range_type vr1type,
		  tree vr1min, tree vr1max)
{
  bool mineq = operand_equal_p (*vr0min, vr1min, 0);
  bool maxeq = operand_equal_p (*vr0max, vr1max, 0);

  /* [] is vr0, () is vr1 in the following classification comments.  */
  if (mineq && maxeq)
    {
      /* [(  )] */
      if (*vr0type == vr1type)
	/* Nothing to do for equal ranges.  */
	;
      else if ((*vr0type == VR_RANGE
		&& vr1type == VR_ANTI_RANGE)
	       || (*vr0type == VR_ANTI_RANGE
		   && vr1type == VR_RANGE))
	{
	  /* For anti-range with range intersection the result is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if (operand_less_p (*vr0max, vr1min) == 1
	   || operand_less_p (vr1max, *vr0min) == 1)
    {
      /* [ ] ( ) or ( ) [ ]
	 If the ranges have an empty intersection, the result of the
	 intersect operation is the range for intersecting an
	 anti-range with a range or empty when intersecting two ranges.  */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_ANTI_RANGE)
	/* vr0 lies entirely inside the anti-range's allowed region;
	   keep vr0.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If the anti-ranges are adjacent to each other merge them.  */
	  if (TREE_CODE (*vr0max) == INTEGER_CST
	      && TREE_CODE (vr1min) == INTEGER_CST
	      && operand_less_p (*vr0max, vr1min) == 1
	      && integer_onep (int_const_binop (MINUS_EXPR,
						vr1min, *vr0max)))
	    *vr0max = vr1max;
	  else if (TREE_CODE (vr1max) == INTEGER_CST
		   && TREE_CODE (*vr0min) == INTEGER_CST
		   && operand_less_p (vr1max, *vr0min) == 1
		   && integer_onep (int_const_binop (MINUS_EXPR,
						     *vr0min, vr1max)))
	    *vr0min = vr1min;
	  /* Else arbitrarily take VR0.  */
	}
    }
  else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
    {
      /* [ (  ) ] or [(  ) ] or [ (  )] */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	{
	  /* If both are ranges the result is the inner one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* Choose the right gap if the left one is empty.  */
	  if (mineq)
	    {
	      if (TREE_CODE (vr1max) == INTEGER_CST)
		*vr0min = int_const_binop (PLUS_EXPR, vr1max,
					   build_int_cst (TREE_TYPE (vr1max), 1));
	      else
		*vr0min = vr1max;
	    }
	  /* Choose the left gap if the right one is empty.  */
	  else if (maxeq)
	    {
	      if (TREE_CODE (vr1min) == INTEGER_CST)
		*vr0max = int_const_binop (MINUS_EXPR, vr1min,
					   build_int_cst (TREE_TYPE (vr1min), 1));
	      else
		*vr0max = vr1min;
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (*vr0min)
		   && vrp_val_is_max (*vr0max))
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	  /* Else choose the range.  */
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	/* If both are anti-ranges the result is the outer one.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
    {
      /* ( [  ] ) or ([  ] ) or ( [  ]) */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	/* Choose the inner range.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Choose the right gap if the left is empty.  */
	  if (mineq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0max) == INTEGER_CST)
		*vr0min = int_const_binop (PLUS_EXPR, *vr0max,
					   build_int_cst (TREE_TYPE (*vr0max), 1));
	      else
		*vr0min = *vr0max;
	      *vr0max = vr1max;
	    }
	  /* Choose the left gap if the right is empty.  */
	  else if (maxeq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0min) == INTEGER_CST)
		*vr0max = int_const_binop (MINUS_EXPR, *vr0min,
					   build_int_cst (TREE_TYPE (*vr0min), 1));
	      else
		*vr0max = *vr0min;
	      *vr0min = vr1min;
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (vr1min)
		   && vrp_val_is_max (vr1max))
	    ;
	  /* Else choose the range.  */
	  else
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If both are anti-ranges the result is the outer one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (vr1type == VR_ANTI_RANGE
	       && *vr0type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (vr1min, *vr0max) == 1
	    || operand_equal_p (vr1min, *vr0max, 0))
	   && operand_less_p (*vr0min, vr1min) == 1)
    {
      /* [  (  ]  ) or [  ](  ) */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
				       build_int_cst (TREE_TYPE (vr1min), 1));
	  else
	    *vr0max = vr1min;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_RANGE;
	  /* Note: *vr0max is computed from the old *vr0max before it
	     is overwritten below; the ordering here is load-bearing.  */
	  if (TREE_CODE (*vr0max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
				       build_int_cst (TREE_TYPE (*vr0max), 1));
	  else
	    *vr0min = *vr0max;
	  *vr0max = vr1max;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (*vr0min, vr1max) == 1
	    || operand_equal_p (*vr0min, vr1max, 0))
	   && operand_less_p (vr1min, *vr0min) == 1)
    {
      /* (  [  )  ] or (  )[  ] */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
				       build_int_cst (TREE_TYPE (vr1max), 1));
	  else
	    *vr0min = vr1max;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_RANGE;
	  /* Use the old *vr0min before overwriting it below.  */
	  if (TREE_CODE (*vr0min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
				       build_int_cst (TREE_TYPE (*vr0min), 1));
	  else
	    *vr0max = *vr0min;
	  *vr0min = vr1min;
	}
      else
	gcc_unreachable ();
    }

  /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
     result for the intersection.  That's always a conservative
     correct estimate.  */

  return;
}
7941
7942
/* Intersect the two value-ranges *VR0 and *VR1 and store the result
   in *VR0.  This may not be the smallest possible such range.  */

static void
vrp_intersect_ranges_1 (value_range_t *vr0, value_range_t *vr1)
{
  value_range_t saved;

  /* If either range is VR_VARYING the other one wins.  */
  if (vr1->type == VR_VARYING)
    return;
  if (vr0->type == VR_VARYING)
    {
      copy_value_range (vr0, vr1);
      return;
    }

  /* When either range is VR_UNDEFINED the resulting range is
     VR_UNDEFINED, too.  */
  if (vr0->type == VR_UNDEFINED)
    return;
  if (vr1->type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr0);
      return;
    }

  /* Save the original vr0 so we can return it as conservative intersection
     result when our worker turns things to varying.  */
  saved = *vr0;
  intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
		    vr1->type, vr1->min, vr1->max);
  /* Make sure to canonicalize the result though as the inversion of a
     VR_RANGE can still be a VR_RANGE.  */
  set_and_canonicalize_value_range (vr0, vr0->type,
				    vr0->min, vr0->max, vr0->equiv);
  /* If that failed, use the saved original VR0.  */
  if (vr0->type == VR_VARYING)
    {
      *vr0 = saved;
      return;
    }
  /* If the result is VR_UNDEFINED there is no need to mess with
     the equivalencies.  */
  if (vr0->type == VR_UNDEFINED)
    return;

  /* The resulting set of equivalences for range intersection is the union of
     the two sets.  */
  if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
    bitmap_ior_into (vr0->equiv, vr1->equiv);
  else if (vr1->equiv && !vr0->equiv)
    /* NOTE(review): this branch is taken exactly when vr0->equiv is NULL,
       yet bitmap_copy writes through it — looks like a latent null-pointer
       dereference; presumably vr0->equiv should be allocated first.
       Confirm whether this path is reachable before changing it.  */
    bitmap_copy (vr0->equiv, vr1->equiv);
}
7997
7998 static void
7999 vrp_intersect_ranges (value_range_t *vr0, value_range_t *vr1)
8000 {
8001 if (dump_file && (dump_flags & TDF_DETAILS))
8002 {
8003 fprintf (dump_file, "Intersecting\n ");
8004 dump_value_range (dump_file, vr0);
8005 fprintf (dump_file, "\nand\n ");
8006 dump_value_range (dump_file, vr1);
8007 fprintf (dump_file, "\n");
8008 }
8009 vrp_intersect_ranges_1 (vr0, vr1);
8010 if (dump_file && (dump_flags & TDF_DETAILS))
8011 {
8012 fprintf (dump_file, "to\n ");
8013 dump_value_range (dump_file, vr0);
8014 fprintf (dump_file, "\n");
8015 }
8016 }
8017
/* Meet operation for value ranges.  Given two value ranges VR0 and
   VR1, store in VR0 a range that contains both VR0 and VR1.  This
   may not be the smallest possible such range.  */

static void
vrp_meet_1 (value_range_t *vr0, value_range_t *vr1)
{
  value_range_t saved;

  /* UNDEFINED is the identity of the meet: the other operand wins.  */
  if (vr0->type == VR_UNDEFINED)
    {
      set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
      return;
    }

  if (vr1->type == VR_UNDEFINED)
    {
      /* VR0 already has the resulting range.  */
      return;
    }

  /* VARYING absorbs everything.  */
  if (vr0->type == VR_VARYING)
    {
      /* Nothing to do.  VR0 already has the resulting range.  */
      return;
    }

  if (vr1->type == VR_VARYING)
    {
      set_value_range_to_varying (vr0);
      return;
    }

  /* Keep a copy so we can still derive a nonnull range below if the
     union gives up.  */
  saved = *vr0;
  union_ranges (&vr0->type, &vr0->min, &vr0->max,
		vr1->type, vr1->min, vr1->max);
  if (vr0->type == VR_VARYING)
    {
      /* Failed to find an efficient meet.  Before giving up and setting
	 the result to VARYING, see if we can at least derive a useful
	 anti-range.  FIXME, all this nonsense about distinguishing
	 anti-ranges from ranges is necessary because of the odd
	 semantics of range_includes_zero_p and friends.  */
      if (((saved.type == VR_RANGE
	    && range_includes_zero_p (saved.min, saved.max) == 0)
	   || (saved.type == VR_ANTI_RANGE
	       && range_includes_zero_p (saved.min, saved.max) == 1))
	  && ((vr1->type == VR_RANGE
	       && range_includes_zero_p (vr1->min, vr1->max) == 0)
	      || (vr1->type == VR_ANTI_RANGE
		  && range_includes_zero_p (vr1->min, vr1->max) == 1)))
	{
	  /* Both operands exclude zero, so the union does too.  */
	  set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));

	  /* Since this meet operation did not result from the meeting of
	     two equivalent names, VR0 cannot have any equivalences.  */
	  if (vr0->equiv)
	    bitmap_clear (vr0->equiv);
	  return;
	}

      set_value_range_to_varying (vr0);
      return;
    }
  set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
				    vr0->equiv);
  if (vr0->type == VR_VARYING)
    return;

  /* The resulting set of equivalences is always the intersection of
     the two sets.  */
  if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
    bitmap_and_into (vr0->equiv, vr1->equiv);
  else if (vr0->equiv && !vr1->equiv)
    bitmap_clear (vr0->equiv);
}
8094
8095 static void
8096 vrp_meet (value_range_t *vr0, value_range_t *vr1)
8097 {
8098 if (dump_file && (dump_flags & TDF_DETAILS))
8099 {
8100 fprintf (dump_file, "Meeting\n ");
8101 dump_value_range (dump_file, vr0);
8102 fprintf (dump_file, "\nand\n ");
8103 dump_value_range (dump_file, vr1);
8104 fprintf (dump_file, "\n");
8105 }
8106 vrp_meet_1 (vr0, vr1);
8107 if (dump_file && (dump_flags & TDF_DETAILS))
8108 {
8109 fprintf (dump_file, "to\n ");
8110 dump_value_range (dump_file, vr0);
8111 fprintf (dump_file, "\n");
8112 }
8113 }
8114
8115
/* Visit all arguments for PHI node PHI that flow through executable
   edges.  If a valid value range can be derived from all the incoming
   value ranges, set a new range for the LHS of PHI.  Returns
   SSA_PROP_INTERESTING when the LHS range changed, SSA_PROP_VARYING
   when it was pessimized to varying, SSA_PROP_NOT_INTERESTING
   otherwise.  */

static enum ssa_prop_result
vrp_visit_phi_node (gimple phi)
{
  size_t i;
  tree lhs = PHI_RESULT (phi);
  value_range_t *lhs_vr = get_value_range (lhs);
  value_range_t vr_result = VR_INITIALIZER;
  bool first = true;
  int edges, old_edges;
  struct loop *l;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  /* Meet the ranges of all arguments flowing in over executable edges.  */
  edges = 0;
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      edge e = gimple_phi_arg_edge (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "\n    Argument #%d (%d -> %d %sexecutable)\n",
		   (int) i, e->src->index, e->dest->index,
		   (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
	}

      if (e->flags & EDGE_EXECUTABLE)
	{
	  tree arg = PHI_ARG_DEF (phi, i);
	  value_range_t vr_arg;

	  ++edges;

	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      vr_arg = *(get_value_range (arg));
	      /* Do not allow equivalences or symbolic ranges to leak in from
		 backedges.  That creates invalid equivalencies.
		 See PR53465 and PR54767.  */
	      if (e->flags & EDGE_DFS_BACK
		  && (vr_arg.type == VR_RANGE
		      || vr_arg.type == VR_ANTI_RANGE))
		{
		  vr_arg.equiv = NULL;
		  if (symbolic_range_p (&vr_arg))
		    {
		      vr_arg.type = VR_VARYING;
		      vr_arg.min = NULL_TREE;
		      vr_arg.max = NULL_TREE;
		    }
		}
	    }
	  else
	    {
	      /* A constant argument yields the singleton range [arg, arg];
		 strip any overflow-infinity marker first.  */
	      if (is_overflow_infinity (arg))
		{
		  arg = copy_node (arg);
		  TREE_OVERFLOW (arg) = 0;
		}

	      vr_arg.type = VR_RANGE;
	      vr_arg.min = arg;
	      vr_arg.max = arg;
	      vr_arg.equiv = NULL;
	    }

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t");
	      print_generic_expr (dump_file, arg, dump_flags);
	      fprintf (dump_file, "\n\tValue: ");
	      dump_value_range (dump_file, &vr_arg);
	      fprintf (dump_file, "\n");
	    }

	  if (first)
	    copy_value_range (&vr_result, &vr_arg);
	  else
	    vrp_meet (&vr_result, &vr_arg);
	  first = false;

	  /* Once varying, further arguments cannot improve the result.  */
	  if (vr_result.type == VR_VARYING)
	    break;
	}
    }

  if (vr_result.type == VR_VARYING)
    goto varying;
  else if (vr_result.type == VR_UNDEFINED)
    goto update_range;

  old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
  vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;

  /* To prevent infinite iterations in the algorithm, derive ranges
     when the new value is slightly bigger or smaller than the
     previous one.  We don't do this if we have seen a new executable
     edge; this helps us avoid an overflow infinity for conditionals
     which are not in a loop.  If the old value-range was VR_UNDEFINED
     use the updated range and iterate one more time.  */
  if (edges > 0
      && gimple_phi_num_args (phi) > 1
      && edges == old_edges
      && lhs_vr->type != VR_UNDEFINED)
    {
      int cmp_min = compare_values (lhs_vr->min, vr_result.min);
      int cmp_max = compare_values (lhs_vr->max, vr_result.max);

      /* For non VR_RANGE or for pointers fall back to varying if
	 the range changed.  */
      if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
	   || POINTER_TYPE_P (TREE_TYPE (lhs)))
	  && (cmp_min != 0 || cmp_max != 0))
	goto varying;

      /* If the new minimum is smaller or larger than the previous
	 one, go all the way to -INF.  In the first case, to avoid
	 iterating millions of times to reach -INF, and in the
	 other case to avoid infinite bouncing between different
	 minimums.  (The > 0 || < 0 form is just cmp_min != 0.)  */
      if (cmp_min > 0 || cmp_min < 0)
	{
	  if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
	      || !vrp_var_may_overflow (lhs, phi))
	    vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
	  else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
	    vr_result.min =
		negative_overflow_infinity (TREE_TYPE (vr_result.min));
	}

      /* Similarly, if the new maximum is smaller or larger than
	 the previous one, go all the way to +INF.  */
      if (cmp_max < 0 || cmp_max > 0)
	{
	  if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
	      || !vrp_var_may_overflow (lhs, phi))
	    vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
	  else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
	    vr_result.max =
		positive_overflow_infinity (TREE_TYPE (vr_result.max));
	}

      /* If we dropped either bound to +-INF then if this is a loop
	 PHI node SCEV may known more about its value-range.  */
      if ((cmp_min > 0 || cmp_min < 0
	   || cmp_max < 0 || cmp_max > 0)
	  && current_loops
	  && (l = loop_containing_stmt (phi))
	  && l->header == gimple_bb (phi))
	adjust_range_with_scev (&vr_result, l, phi, lhs);

      /* If we will end up with a (-INF, +INF) range, set it to
	 VARYING.  Same if the previous max value was invalid for
	 the type and we end up with vr_result.min > vr_result.max.  */
      if ((vrp_val_is_max (vr_result.max)
	   && vrp_val_is_min (vr_result.min))
	  || compare_values (vr_result.min,
			     vr_result.max) > 0)
	goto varying;
    }

  /* If the new range is different than the previous value, keep
     iterating.  */
update_range:
  if (update_value_range (lhs, &vr_result))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Found new range for ");
	  print_generic_expr (dump_file, lhs, 0);
	  fprintf (dump_file, ": ");
	  dump_value_range (dump_file, &vr_result);
	  fprintf (dump_file, "\n\n");
	}

      return SSA_PROP_INTERESTING;
    }

  /* Nothing changed, don't add outgoing edges.  */
  return SSA_PROP_NOT_INTERESTING;

  /* No match found.  Set the LHS to VARYING.  */
varying:
  set_value_range_to_varying (lhs_vr);
  return SSA_PROP_VARYING;
}
8310
/* Simplify boolean operations if the source is known
   to be already a boolean.  STMT is a GIMPLE_ASSIGN with code
   EQ_EXPR or NE_EXPR; GSI points at it.  Returns true if the
   statement was rewritten in place.  */
static bool
simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
  tree lhs, op0, op1;
  bool need_conversion;

  /* We handle only !=/== case here.  */
  gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);

  /* Both operands must be known to take only the values 0 or 1.  */
  op0 = gimple_assign_rhs1 (stmt);
  if (!op_with_boolean_value_range_p (op0))
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!op_with_boolean_value_range_p (op1))
    return false;

  /* Reduce number of cases to handle to NE_EXPR.  As there is no
     BIT_XNOR_EXPR we cannot replace A == B with a single statement.
     A == CST is turned into A != (CST ^ 1) instead; for a non-constant
     OP1 we give up.  */
  if (rhs_code == EQ_EXPR)
    {
      if (TREE_CODE (op1) == INTEGER_CST)
	op1 = int_const_binop (BIT_XOR_EXPR, op1,
			       build_int_cst (TREE_TYPE (op1), 1));
      else
	return false;
    }

  lhs = gimple_assign_lhs (stmt);
  need_conversion
    = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));

  /* Make sure to not sign-extend a 1-bit 1 when converting the result.  */
  if (need_conversion
      && !TYPE_UNSIGNED (TREE_TYPE (op0))
      && TYPE_PRECISION (TREE_TYPE (op0)) == 1
      && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
    return false;

  /* For A != 0 we can substitute A itself.  */
  if (integer_zerop (op1))
    gimple_assign_set_rhs_with_ops (gsi,
				    need_conversion
				    ? NOP_EXPR : TREE_CODE (op0),
				    op0, NULL_TREE);
  /* For A != B we substitute A ^ B.  Either with conversion.  */
  else if (need_conversion)
    {
      /* Compute the XOR in OP0's type first, then convert to LHS's type.  */
      tree tem = make_ssa_name (TREE_TYPE (op0), NULL);
      gimple newop = gimple_build_assign_with_ops (BIT_XOR_EXPR, tem, op0, op1);
      gsi_insert_before (gsi, newop, GSI_SAME_STMT);
      gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem, NULL_TREE);
    }
  /* Or without.  */
  else
    gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
  update_stmt (gsi_stmt (*gsi));

  return true;
}
8374
/* Simplify a division or modulo operator to a right shift or
   bitwise and if the first operand is unsigned or is greater
   than zero and the second operand is an exact power of two.

   STMT is a GIMPLE_ASSIGN with code TRUNC_DIV_EXPR or TRUNC_MOD_EXPR;
   the caller (simplify_stmt_using_ranges) has already verified that
   the second operand satisfies integer_pow2p.  Returns true if STMT
   was rewritten in place.  */

static bool
simplify_div_or_mod_using_ranges (gimple stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
  /* VAL is set to a nonzero constant when OP0 is known non-negative.  */
  tree val = NULL;
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);
  value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));

  if (TYPE_UNSIGNED (TREE_TYPE (op0)))
    {
      /* Unsigned operands are trivially non-negative.  */
      val = integer_one_node;
    }
  else
    {
      bool sop = false;

      /* Ask the range machinery whether OP0 >= 0 always holds; SOP is
	 set when the answer relies on signed overflow being undefined.  */
      val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);

      if (val
	  && sop
	  && integer_onep (val)
	  && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);
	  warning_at (location, OPT_Wstrict_overflow,
		      "assuming signed overflow does not occur when "
		      "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
	}
    }

  if (val && integer_onep (val))
    {
      tree t;

      if (rhs_code == TRUNC_DIV_EXPR)
	{
	  /* X / 2**N  ->  X >> N.  */
	  t = build_int_cst (integer_type_node, tree_log2 (op1));
	  gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
	  gimple_assign_set_rhs1 (stmt, op0);
	  gimple_assign_set_rhs2 (stmt, t);
	}
      else
	{
	  /* X % 2**N  ->  X & (2**N - 1).  */
	  t = build_int_cst (TREE_TYPE (op1), 1);
	  t = int_const_binop (MINUS_EXPR, op1, t);
	  t = fold_convert (TREE_TYPE (op0), t);

	  gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
	  gimple_assign_set_rhs1 (stmt, op0);
	  gimple_assign_set_rhs2 (stmt, t);
	}

      update_stmt (stmt);
      return true;
    }

  return false;
}
8443
/* If the operand to an ABS_EXPR is >= 0, then eliminate the
   ABS_EXPR.  If the operand is <= 0, then simplify the
   ABS_EXPR into a NEGATE_EXPR.

   Returns true if STMT (a GIMPLE_ASSIGN with code ABS_EXPR) was
   rewritten in place.  */

static bool
simplify_abs_using_ranges (gimple stmt)
{
  /* VAL answers the question "is OP <= 0?": integer_one_node means
     yes (negate), integer_zero_node means no (identity copy).  */
  tree val = NULL;
  tree op = gimple_assign_rhs1 (stmt);
  tree type = TREE_TYPE (op);
  value_range_t *vr = get_value_range (op);

  if (TYPE_UNSIGNED (type))
    {
      /* An unsigned value is never negative: abs is the identity.  */
      val = integer_zero_node;
    }
  else if (vr)
    {
      bool sop = false;

      val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
      if (!val)
	{
	  /* OP <= 0 was undecidable; try OP >= 0 and invert the
	     answer so VAL keeps the "is OP <= 0?" meaning.  */
	  sop = false;
	  val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
					  &sop);

	  if (val)
	    {
	      if (integer_zerop (val))
		val = integer_one_node;
	      else if (integer_onep (val))
		val = integer_zero_node;
	    }
	}

      if (val
	  && (integer_onep (val) || integer_zerop (val)))
	{
	  if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
	    {
	      location_t location;

	      if (!gimple_has_location (stmt))
		location = input_location;
	      else
		location = gimple_location (stmt);
	      warning_at (location, OPT_Wstrict_overflow,
			  "assuming signed overflow does not occur when "
			  "simplifying %<abs (X)%> to %<X%> or %<-X%>");
	    }

	  gimple_assign_set_rhs1 (stmt, op);
	  if (integer_onep (val))
	    gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
	  else
	    /* SSA_NAME as the rhs code makes the statement a plain
	       copy of OP.  */
	    gimple_assign_set_rhs_code (stmt, SSA_NAME);
	  update_stmt (stmt);
	  return true;
	}
    }

  return false;
}
8508
/* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
   If all the bits that are being cleared by & are already
   known to be zero from VR, or all the bits that are being
   set by | are already known to be one from VR, the bit
   operation is redundant.

   Returns true if STMT was replaced (via GSI) by a copy of one
   of its operands.  */

static bool
simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);
  /* The operand the whole expression reduces to, if any.  */
  tree op = NULL_TREE;
  value_range_t vr0 = VR_INITIALIZER;
  value_range_t vr1 = VR_INITIALIZER;
  wide_int may_be_nonzero0, may_be_nonzero1;
  wide_int must_be_nonzero0, must_be_nonzero1;
  wide_int mask;

  /* Each operand must be an SSA name with a range or a constant.  */
  if (TREE_CODE (op0) == SSA_NAME)
    vr0 = *(get_value_range (op0));
  else if (is_gimple_min_invariant (op0))
    set_value_range_to_value (&vr0, op0, NULL);
  else
    return false;

  if (TREE_CODE (op1) == SSA_NAME)
    vr1 = *(get_value_range (op1));
  else if (is_gimple_min_invariant (op1))
    set_value_range_to_value (&vr1, op1, NULL);
  else
    return false;

  if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0, &must_be_nonzero0))
    return false;
  if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1, &must_be_nonzero1))
    return false;

  switch (gimple_assign_rhs_code (stmt))
    {
    case BIT_AND_EXPR:
      /* If every bit that may be set in OP0 is known set in OP1,
	 then OP0 & OP1 == OP0, and symmetrically for OP1.  */
      mask = may_be_nonzero0.and_not (must_be_nonzero1);
      if (mask == 0)
	{
	  op = op0;
	  break;
	}
      mask = may_be_nonzero1.and_not (must_be_nonzero0);
      if (mask == 0)
	{
	  op = op1;
	  break;
	}
      break;
    case BIT_IOR_EXPR:
      /* If every bit that may be set in OP0 is known set in OP1,
	 then OP0 | OP1 == OP1, and symmetrically for OP0.  */
      mask = may_be_nonzero0.and_not (must_be_nonzero1);
      if (mask == 0)
	{
	  op = op1;
	  break;
	}
      mask = may_be_nonzero1.and_not (must_be_nonzero0);
      if (mask == 0)
	{
	  op = op0;
	  break;
	}
      break;
    default:
      gcc_unreachable ();
    }

  if (op == NULL_TREE)
    return false;

  gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op, NULL);
  update_stmt (gsi_stmt (*gsi));
  return true;
}
8587
8588 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
8589 a known value range VR.
8590
8591 If there is one and only one value which will satisfy the
8592 conditional, then return that value. Else return NULL. */
8593
8594 static tree
8595 test_for_singularity (enum tree_code cond_code, tree op0,
8596 tree op1, value_range_t *vr)
8597 {
8598 tree min = NULL;
8599 tree max = NULL;
8600
8601 /* Extract minimum/maximum values which satisfy the
8602 the conditional as it was written. */
8603 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
8604 {
8605 /* This should not be negative infinity; there is no overflow
8606 here. */
8607 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
8608
8609 max = op1;
8610 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
8611 {
8612 tree one = build_int_cst (TREE_TYPE (op0), 1);
8613 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
8614 if (EXPR_P (max))
8615 TREE_NO_WARNING (max) = 1;
8616 }
8617 }
8618 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
8619 {
8620 /* This should not be positive infinity; there is no overflow
8621 here. */
8622 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
8623
8624 min = op1;
8625 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
8626 {
8627 tree one = build_int_cst (TREE_TYPE (op0), 1);
8628 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
8629 if (EXPR_P (min))
8630 TREE_NO_WARNING (min) = 1;
8631 }
8632 }
8633
8634 /* Now refine the minimum and maximum values using any
8635 value range information we have for op0. */
8636 if (min && max)
8637 {
8638 if (compare_values (vr->min, min) == 1)
8639 min = vr->min;
8640 if (compare_values (vr->max, max) == -1)
8641 max = vr->max;
8642
8643 /* If the new min/max values have converged to a single value,
8644 then there is only one value which can satisfy the condition,
8645 return that value. */
8646 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
8647 return min;
8648 }
8649 return NULL;
8650 }
8651
/* Return whether the value range *VR fits in an integer type with
   precision DEST_PRECISION and signedness DEST_SGN.  */

static bool
range_fits_type_p (value_range_t *vr, unsigned dest_precision, signop dest_sgn)
{
  tree src_type;
  unsigned src_precision;
  max_wide_int tem;
  signop src_sgn;

  /* We can only handle integral and pointer types.  */
  src_type = TREE_TYPE (vr->min);
  if (!INTEGRAL_TYPE_P (src_type)
      && !POINTER_TYPE_P (src_type))
    return false;

  /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
     and so is an identity transform.  */
  src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
  src_sgn = TYPE_SIGN (src_type);
  if ((src_precision < dest_precision
       && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
      || (src_precision == dest_precision && src_sgn == dest_sgn))
    return true;

  /* Now we can only handle ranges with constant bounds.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* For sign changes, the MSB of the wide_int has to be clear.
     An unsigned value with its MSB set cannot be represented by
     a signed wide_int, while a negative value cannot be represented
     by an unsigned wide_int.  */
  if (src_sgn != dest_sgn
      && (wi::lts_p (vr->min, 0) || wi::lts_p (vr->max, 0)))
    return false;

  /* Then we can perform the conversion on both ends and compare
     the result for equality.  If the conversion is lossless for
     both bounds the whole range fits.  */
  tem = wi::ext (vr->min, dest_precision, dest_sgn);
  if (tem != vr->min)
    return false;
  tem = wi::ext (vr->max, dest_precision, dest_sgn);
  if (tem != vr->max)
    return false;

  return true;
}
8703
/* Simplify a conditional using a relational operator to an equality
   test if the range information indicates only one value can satisfy
   the original conditional.  Returns true if STMT (a GIMPLE_COND)
   was rewritten in place.  */

static bool
simplify_cond_using_ranges (gimple stmt)
{
  tree op0 = gimple_cond_lhs (stmt);
  tree op1 = gimple_cond_rhs (stmt);
  enum tree_code cond_code = gimple_cond_code (stmt);

  /* First transformation: a relational test OP0 <relop> CST where only
     a single value of OP0 can make the test true (or false) becomes an
     equality (or inequality) test against that value.  */
  if (cond_code != NE_EXPR
      && cond_code != EQ_EXPR
      && TREE_CODE (op0) == SSA_NAME
      && INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && is_gimple_min_invariant (op1))
    {
      value_range_t *vr = get_value_range (op0);

      /* If we have range information for OP0, then we might be
	 able to simplify this conditional.  */
      if (vr->type == VR_RANGE)
	{
	  tree new_tree = test_for_singularity (cond_code, op0, op1, vr);

	  if (new_tree)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Simplified relational ");
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, " into ");
		}

	      /* Only NEW_TREE satisfies the condition: test for it
		 directly.  */
	      gimple_cond_set_code (stmt, EQ_EXPR);
	      gimple_cond_set_lhs (stmt, op0);
	      gimple_cond_set_rhs (stmt, new_tree);

	      update_stmt (stmt);

	      if (dump_file)
		{
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, "\n");
		}

	      return true;
	    }

	  /* Try again after inverting the condition.  We only deal
	     with integral types here, so no need to worry about
	     issues with inverting FP comparisons.  */
	  cond_code = invert_tree_comparison (cond_code, false);
	  new_tree = test_for_singularity (cond_code, op0, op1, vr);

	  if (new_tree)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Simplified relational ");
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, " into ");
		}

	      /* Only NEW_TREE falsifies the condition: test OP0 != it.  */
	      gimple_cond_set_code (stmt, NE_EXPR);
	      gimple_cond_set_lhs (stmt, op0);
	      gimple_cond_set_rhs (stmt, new_tree);

	      update_stmt (stmt);

	      if (dump_file)
		{
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, "\n");
		}

	      return true;
	    }
	}
    }

  /* If we have a comparison of an SSA_NAME (OP0) against a constant,
     see if OP0 was set by a type conversion where the source of
     the conversion is another SSA_NAME with a range that fits
     into the range of OP0's type.

     If so, the conversion is redundant as the earlier SSA_NAME can be
     used for the comparison directly if we just massage the constant in the
     comparison.  */
  if (TREE_CODE (op0) == SSA_NAME
      && TREE_CODE (op1) == INTEGER_CST)
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (op0);
      tree innerop;

      if (!is_gimple_assign (def_stmt)
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
	return false;

      innerop = gimple_assign_rhs1 (def_stmt);

      if (TREE_CODE (innerop) == SSA_NAME
	  && !POINTER_TYPE_P (TREE_TYPE (innerop)))
	{
	  value_range_t *vr = get_value_range (innerop);

	  if (range_int_cst_p (vr)
	      && range_fits_type_p (vr,
				    TYPE_PRECISION (TREE_TYPE (op0)),
				    TYPE_SIGN (TREE_TYPE (op0)))
	      && int_fits_type_p (op1, TREE_TYPE (innerop))
	      /* The range must not have overflowed, or if it did overflow
		 we must not be wrapping/trapping overflow and optimizing
		 with strict overflow semantics.  */
	      && ((!is_negative_overflow_infinity (vr->min)
		   && !is_positive_overflow_infinity (vr->max))
		  || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (innerop))))
	    {
	      /* If the range overflowed and the user has asked for warnings
		 when strict overflow semantics were used to optimize code,
		 issue an appropriate warning.  */
	      if ((is_negative_overflow_infinity (vr->min)
		   || is_positive_overflow_infinity (vr->max))
		  && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_CONDITIONAL))
		{
		  location_t location;

		  if (!gimple_has_location (stmt))
		    location = input_location;
		  else
		    location = gimple_location (stmt);
		  warning_at (location, OPT_Wstrict_overflow,
			      "assuming signed overflow does not occur when "
			      "simplifying conditional");
		}

	      /* Compare INNEROP directly against OP1 converted to its
		 type; the conversion defining OP0 becomes dead.  */
	      tree newconst = fold_convert (TREE_TYPE (innerop), op1);
	      gimple_cond_set_lhs (stmt, innerop);
	      gimple_cond_set_rhs (stmt, newconst);
	      return true;
	    }
	}
    }

  return false;
}
8850
/* Simplify a switch statement using the value range of the switch
   argument.  Unreachable case labels are dropped: edges to them are
   queued on TO_REMOVE_EDGES, and the new label vector is queued on
   TO_UPDATE_SWITCH_STMTS for later application.  Always returns
   false as STMT itself is not modified here.  */

static bool
simplify_switch_using_ranges (gimple stmt)
{
  tree op = gimple_switch_index (stmt);
  value_range_t *vr;
  bool take_default;
  edge e;
  edge_iterator ei;
  /* [I, J] and [K, L] are the (up to two) label-index ranges that
     remain reachable; an empty range has its upper bound below its
     lower bound.  */
  size_t i = 0, j = 0, n, n2;
  tree vec2;
  switch_update su;
  size_t k = 1, l = 0;

  if (TREE_CODE (op) == SSA_NAME)
    {
      vr = get_value_range (op);

      /* We can only handle integer ranges.  */
      if ((vr->type != VR_RANGE
	   && vr->type != VR_ANTI_RANGE)
	  || symbolic_range_p (vr))
	return false;

      /* Find case label for min/max of the value range.  */
      take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
    }
  else if (TREE_CODE (op) == INTEGER_CST)
    {
      /* A constant index selects at most a single label.  */
      take_default = !find_case_label_index (stmt, 1, op, &i);
      if (take_default)
	{
	  i = 1;
	  j = 0;
	}
      else
	{
	  j = i;
	}
    }
  else
    return false;

  n = gimple_switch_num_labels (stmt);

  /* Bail out if this is just all edges taken.  */
  if (i == 1
      && j == n - 1
      && take_default)
    return false;

  /* Build a new vector of taken case labels.  */
  vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
  n2 = 0;

  /* Add the default edge, if necessary.  */
  if (take_default)
    TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);

  for (; i <= j; ++i, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);

  for (; k <= l; ++k, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);

  /* Mark needed edges with an AUX sentinel.  */
  for (i = 0; i < n2; ++i)
    {
      e = find_edge (gimple_bb (stmt),
		     label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
      e->aux = (void *)-1;
    }

  /* Queue not needed edges for later removal, clearing the sentinel
     on the edges we keep.  */
  FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
    {
      if (e->aux == (void *)-1)
	{
	  e->aux = NULL;
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "removing unreachable case label\n");
	}
      to_remove_edges.safe_push (e);
      e->flags &= ~EDGE_EXECUTABLE;
    }

  /* And queue an update for the stmt.  */
  su.stmt = stmt;
  su.vec = vec2;
  to_update_switch_stmts.safe_push (su);
  return false;
}
8949
/* Simplify an integral conversion from an SSA name in STMT.
   STMT converts MIDDLEOP to FINALTYPE, and MIDDLEOP was itself
   produced by converting INNEROP.  If dropping the intermediate
   conversion cannot change the result for any value in INNEROP's
   range, rewrite STMT to convert INNEROP directly.  Returns true
   if STMT was changed.  */

static bool
simplify_conversion_using_ranges (gimple stmt)
{
  tree innerop, middleop, finaltype;
  gimple def_stmt;
  value_range_t *innervr;
  signop inner_sgn, middle_sgn, final_sgn;
  unsigned inner_prec, middle_prec, final_prec;
  max_wide_int innermin, innermed, innermax, middlemin, middlemed, middlemax;

  finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
  if (!INTEGRAL_TYPE_P (finaltype))
    return false;
  middleop = gimple_assign_rhs1 (stmt);
  def_stmt = SSA_NAME_DEF_STMT (middleop);
  if (!is_gimple_assign (def_stmt)
      || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
    return false;
  innerop = gimple_assign_rhs1 (def_stmt);
  if (TREE_CODE (innerop) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
    return false;

  /* Get the value-range of the inner operand.  Only constant integer
     ranges can be simulated below.  */
  innervr = get_value_range (innerop);
  if (innervr->type != VR_RANGE
      || TREE_CODE (innervr->min) != INTEGER_CST
      || TREE_CODE (innervr->max) != INTEGER_CST)
    return false;

  /* Simulate the conversion chain to check if the result is equal if
     the middle conversion is removed.  */
  innermin = innervr->min;
  innermax = innervr->max;

  inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
  middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
  final_prec = TYPE_PRECISION (finaltype);

  /* If the first conversion is not injective, the second must not
     be widening.  */
  if (wi::gtu_p (innermax - innermin,
		 wi::mask <max_wide_int> (middle_prec, false))
      && middle_prec < final_prec)
    return false;
  /* We also want a medium value so that we can track the effect that
     narrowing conversions with sign change have.  */
  inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
  if (inner_sgn == UNSIGNED)
    innermed = wi::shifted_mask <max_wide_int> (1, inner_prec - 1, false);
  else
    innermed = 0;
  /* Only keep the medium value if it lies strictly inside the range;
     otherwise fall back to the minimum so the checks below degenerate
     harmlessly.  */
  if (wi::cmp (innermin, innermed, inner_sgn) >= 0
      || wi::cmp (innermed, innermax, inner_sgn) >= 0)
    innermed = innermin;

  middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
  middlemin = wi::ext (innermin, middle_prec, middle_sgn);
  middlemed = wi::ext (innermed, middle_prec, middle_sgn);
  middlemax = wi::ext (innermax, middle_prec, middle_sgn);

  /* Require that the final conversion applied to both the original
     and the intermediate range produces the same result.  */
  final_sgn = TYPE_SIGN (finaltype);
  if (wi::ext (middlemin, final_prec, final_sgn)
      != wi::ext (innermin, final_prec, final_sgn)
      || wi::ext (middlemed, final_prec, final_sgn)
	 != wi::ext (innermed, final_prec, final_sgn)
      || wi::ext (middlemax, final_prec, final_sgn)
	 != wi::ext (innermax, final_prec, final_sgn))
    return false;

  gimple_assign_set_rhs1 (stmt, innerop);
  update_stmt (stmt);
  return true;
}
9028
/* Simplify a conversion from integral SSA name to float in STMT.
   If the target cannot convert directly from the source mode, but
   the value range proves the value fits a (possibly narrower) signed
   mode that the target can convert, insert a truncation/sign-change
   to that mode before the float conversion.  Returns true if STMT
   was changed.  */

static bool
simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  value_range_t *vr = get_value_range (rhs1);
  enum machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
  enum machine_mode mode;
  tree tem;
  gimple conv;

  /* We can only handle constant ranges.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* First check if we can use a signed type in place of an unsigned.  */
  if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
      && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
	  != CODE_FOR_nothing)
      && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
    mode = TYPE_MODE (TREE_TYPE (rhs1));
  /* If we can do the conversion in the current input mode do nothing.  */
  else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
			TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
    return false;
  /* Otherwise search for a mode we can use, starting from the narrowest
     integer mode available.  */
  else
    {
      mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
      do
	{
	  /* If we cannot do a signed conversion to float from mode
	     or if the value-range does not fit in the signed type
	     try with a wider mode.  */
	  if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
	      && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
	    break;

	  mode = GET_MODE_WIDER_MODE (mode);
	  /* But do not widen the input.  Instead leave that to the
	     optabs expansion code.  */
	  if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
	    return false;
	}
      while (mode != VOIDmode);
      if (mode == VOIDmode)
	return false;
    }

  /* It works, insert a truncation or sign-change before the
     float conversion.  */
  tem = make_ssa_name (build_nonstandard_integer_type
			 (GET_MODE_PRECISION (mode), 0), NULL);
  conv = gimple_build_assign_with_ops (NOP_EXPR, tem, rhs1, NULL_TREE);
  gsi_insert_before (gsi, conv, GSI_SAME_STMT);
  gimple_assign_set_rhs1 (stmt, tem);
  update_stmt (stmt);

  return true;
}
9093
9094 /* Simplify STMT using ranges if possible. */
9095
9096 static bool
9097 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
9098 {
9099 gimple stmt = gsi_stmt (*gsi);
9100
9101 if (is_gimple_assign (stmt))
9102 {
9103 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
9104 tree rhs1 = gimple_assign_rhs1 (stmt);
9105
9106 switch (rhs_code)
9107 {
9108 case EQ_EXPR:
9109 case NE_EXPR:
9110 /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
9111 if the RHS is zero or one, and the LHS are known to be boolean
9112 values. */
9113 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9114 return simplify_truth_ops_using_ranges (gsi, stmt);
9115 break;
9116
9117 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
9118 and BIT_AND_EXPR respectively if the first operand is greater
9119 than zero and the second operand is an exact power of two. */
9120 case TRUNC_DIV_EXPR:
9121 case TRUNC_MOD_EXPR:
9122 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
9123 && integer_pow2p (gimple_assign_rhs2 (stmt)))
9124 return simplify_div_or_mod_using_ranges (stmt);
9125 break;
9126
9127 /* Transform ABS (X) into X or -X as appropriate. */
9128 case ABS_EXPR:
9129 if (TREE_CODE (rhs1) == SSA_NAME
9130 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9131 return simplify_abs_using_ranges (stmt);
9132 break;
9133
9134 case BIT_AND_EXPR:
9135 case BIT_IOR_EXPR:
9136 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
9137 if all the bits being cleared are already cleared or
9138 all the bits being set are already set. */
9139 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9140 return simplify_bit_ops_using_ranges (gsi, stmt);
9141 break;
9142
9143 CASE_CONVERT:
9144 if (TREE_CODE (rhs1) == SSA_NAME
9145 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9146 return simplify_conversion_using_ranges (stmt);
9147 break;
9148
9149 case FLOAT_EXPR:
9150 if (TREE_CODE (rhs1) == SSA_NAME
9151 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9152 return simplify_float_conversion_using_ranges (gsi, stmt);
9153 break;
9154
9155 default:
9156 break;
9157 }
9158 }
9159 else if (gimple_code (stmt) == GIMPLE_COND)
9160 return simplify_cond_using_ranges (stmt);
9161 else if (gimple_code (stmt) == GIMPLE_SWITCH)
9162 return simplify_switch_using_ranges (stmt);
9163
9164 return false;
9165 }
9166
/* If the statement pointed by SI has a predicate whose value can be
   computed using the value range information computed by VRP, compute
   its value and return true.  Otherwise, return false.  Handles
   comparison assignments and GIMPLE_COND statements.  */

static bool
fold_predicate_in (gimple_stmt_iterator *si)
{
  bool assignment_p = false;
  tree val;
  gimple stmt = gsi_stmt (*si);

  if (is_gimple_assign (stmt)
      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
    {
      assignment_p = true;
      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
				      gimple_assign_rhs1 (stmt),
				      gimple_assign_rhs2 (stmt),
				      stmt);
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    val = vrp_evaluate_conditional (gimple_cond_code (stmt),
				    gimple_cond_lhs (stmt),
				    gimple_cond_rhs (stmt),
				    stmt);
  else
    return false;

  if (val)
    {
      /* For an assignment the computed boolean must be converted to
	 the LHS's type before substituting it.  */
      if (assignment_p)
	val = fold_convert (gimple_expr_type (stmt), val);

      if (dump_file)
	{
	  fprintf (dump_file, "Folding predicate ");
	  print_gimple_expr (dump_file, stmt, 0, 0);
	  fprintf (dump_file, " to ");
	  print_generic_expr (dump_file, val, 0);
	  fprintf (dump_file, "\n");
	}

      if (is_gimple_assign (stmt))
	gimple_assign_set_rhs_from_tree (si, val);
      else
	{
	  /* Turn the conditional into an unconditional false or true.  */
	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
	  if (integer_zerop (val))
	    gimple_cond_make_false (stmt);
	  else if (integer_onep (val))
	    gimple_cond_make_true (stmt);
	  else
	    gcc_unreachable ();
	}

      return true;
    }

  return false;
}
9227
9228 /* Callback for substitute_and_fold folding the stmt at *SI. */
9229
9230 static bool
9231 vrp_fold_stmt (gimple_stmt_iterator *si)
9232 {
9233 if (fold_predicate_in (si))
9234 return true;
9235
9236 return simplify_stmt_using_ranges (si);
9237 }
9238
/* Stack of dest,src equivalency pairs that need to be restored after
   each attempt to thread a block's incoming edge to an outgoing edge.

   A NULL entry is used to mark the end of pairs which need to be
   restored.  Allocated and consumed by the jump-threading driver
   (identify_jump_threads) below.  */
static vec<tree> equiv_stack;
9245
9246 /* A trivial wrapper so that we can present the generic jump threading
9247 code with a simple API for simplifying statements. STMT is the
9248 statement we want to simplify, WITHIN_STMT provides the location
9249 for any overflow warnings. */
9250
9251 static tree
9252 simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
9253 {
9254 if (gimple_code (stmt) == GIMPLE_COND)
9255 return vrp_evaluate_conditional (gimple_cond_code (stmt),
9256 gimple_cond_lhs (stmt),
9257 gimple_cond_rhs (stmt), within_stmt);
9258
9259 if (gimple_code (stmt) == GIMPLE_ASSIGN)
9260 {
9261 value_range_t new_vr = VR_INITIALIZER;
9262 tree lhs = gimple_assign_lhs (stmt);
9263
9264 if (TREE_CODE (lhs) == SSA_NAME
9265 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
9266 || POINTER_TYPE_P (TREE_TYPE (lhs))))
9267 {
9268 extract_range_from_assignment (&new_vr, stmt);
9269 if (range_int_cst_singleton_p (&new_vr))
9270 return new_vr.min;
9271 }
9272 }
9273
9274 return NULL_TREE;
9275 }
9276
9277 /* Blocks which have more than one predecessor and more than
9278 one successor present jump threading opportunities, i.e.,
9279 when the block is reached from a specific predecessor, we
9280 may be able to determine which of the outgoing edges will
9281 be traversed. When this optimization applies, we are able
9282 to avoid conditionals at runtime and we may expose secondary
9283 optimization opportunities.
9284
9285 This routine is effectively a driver for the generic jump
9286 threading code. It basically just presents the generic code
9287 with edges that may be suitable for jump threading.
9288
9289 Unlike DOM, we do not iterate VRP if jump threading was successful.
9290 While iterating may expose new opportunities for VRP, it is expected
9291 those opportunities would be very limited and the compile time cost
9292 to expose those opportunities would be significant.
9293
9294 As jump threading opportunities are discovered, they are registered
9295 for later realization. */
9296
9297 static void
9298 identify_jump_threads (void)
9299 {
9300 basic_block bb;
9301 gimple dummy;
9302 int i;
9303 edge e;
9304
9305 /* Ugh. When substituting values earlier in this pass we can
9306 wipe the dominance information. So rebuild the dominator
9307 information as we need it within the jump threading code. */
9308 calculate_dominance_info (CDI_DOMINATORS);
9309
9310 /* We do not allow VRP information to be used for jump threading
9311 across a back edge in the CFG. Otherwise it becomes too
9312 difficult to avoid eliminating loop exit tests. Of course
9313 EDGE_DFS_BACK is not accurate at this time so we have to
9314 recompute it. */
9315 mark_dfs_back_edges ();
9316
9317 /* Do not thread across edges we are about to remove. Just marking
9318 them as EDGE_DFS_BACK will do. */
9319 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
9320 e->flags |= EDGE_DFS_BACK;
9321
9322 /* Allocate our unwinder stack to unwind any temporary equivalences
9323 that might be recorded. */
9324 equiv_stack.create (20);
9325
9326 /* To avoid lots of silly node creation, we create a single
9327 conditional and just modify it in-place when attempting to
9328 thread jumps. */
9329 dummy = gimple_build_cond (EQ_EXPR,
9330 integer_zero_node, integer_zero_node,
9331 NULL, NULL);
9332
9333 /* Walk through all the blocks finding those which present a
9334 potential jump threading opportunity. We could set this up
9335 as a dominator walker and record data during the walk, but
9336 I doubt it's worth the effort for the classes of jump
9337 threading opportunities we are trying to identify at this
9338 point in compilation. */
9339 FOR_EACH_BB (bb)
9340 {
9341 gimple last;
9342
9343 /* If the generic jump threading code does not find this block
9344 interesting, then there is nothing to do. */
9345 if (! potentially_threadable_block (bb))
9346 continue;
9347
9348 /* We only care about blocks ending in a COND_EXPR. While there
9349 may be some value in handling SWITCH_EXPR here, I doubt it's
9350 terribly important. */
9351 last = gsi_stmt (gsi_last_bb (bb));
9352
9353 /* We're basically looking for a switch or any kind of conditional with
9354 integral or pointer type arguments. Note the type of the second
9355 argument will be the same as the first argument, so no need to
9356 check it explicitly. */
9357 if (gimple_code (last) == GIMPLE_SWITCH
9358 || (gimple_code (last) == GIMPLE_COND
9359 && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
9360 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
9361 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
9362 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
9363 || is_gimple_min_invariant (gimple_cond_rhs (last)))))
9364 {
9365 edge_iterator ei;
9366
9367 /* We've got a block with multiple predecessors and multiple
9368 successors which also ends in a suitable conditional or
9369 switch statement. For each predecessor, see if we can thread
9370 it to a specific successor. */
9371 FOR_EACH_EDGE (e, ei, bb->preds)
9372 {
9373 /* Do not thread across back edges or abnormal edges
9374 in the CFG. */
9375 if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
9376 continue;
9377
9378 thread_across_edge (dummy, e, true, &equiv_stack,
9379 simplify_stmt_for_jump_threading);
9380 }
9381 }
9382 }
9383
9384 /* We do not actually update the CFG or SSA graphs at this point as
9385 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
9386 handle ASSERT_EXPRs gracefully. */
9387 }
9388
9389 /* We identified all the jump threading opportunities earlier, but could
9390 not transform the CFG at that time. This routine transforms the
9391 CFG and arranges for the dominator tree to be rebuilt if necessary.
9392
9393 Note the SSA graph update will occur during the normal TODO
9394 processing by the pass manager. */
static void
finalize_jump_threads (void)
{
  /* Realize all the jump threads registered earlier by
     identify_jump_threads.  NOTE(review): the FALSE argument
     presumably disables special loop-header handling — confirm
     against thread_through_all_blocks' declaration.  */
  thread_through_all_blocks (false);
  /* Release the equivalence unwinder stack allocated in
     identify_jump_threads.  */
  equiv_stack.release ();
}
9401
9402
9403 /* Traverse all the blocks folding conditionals with known ranges. */
9404
9405 static void
9406 vrp_finalize (void)
9407 {
9408 size_t i;
9409
9410 values_propagated = true;
9411
9412 if (dump_file)
9413 {
9414 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
9415 dump_all_value_ranges (dump_file);
9416 fprintf (dump_file, "\n");
9417 }
9418
9419 substitute_and_fold (op_with_constant_singleton_value_range,
9420 vrp_fold_stmt, false);
9421
9422 if (warn_array_bounds)
9423 check_all_array_refs ();
9424
9425 /* We must identify jump threading opportunities before we release
9426 the datastructures built by VRP. */
9427 identify_jump_threads ();
9428
9429 /* Free allocated memory. */
9430 for (i = 0; i < num_vr_values; i++)
9431 if (vr_value[i])
9432 {
9433 BITMAP_FREE (vr_value[i]->equiv);
9434 free (vr_value[i]);
9435 }
9436
9437 free (vr_value);
9438 free (vr_phi_edge_counts);
9439
9440 /* So that we can distinguish between VRP data being available
9441 and not available. */
9442 vr_value = NULL;
9443 vr_phi_edge_counts = NULL;
9444 }
9445
9446
9447 /* Main entry point to VRP (Value Range Propagation). This pass is
9448 loosely based on J. R. C. Patterson, ``Accurate Static Branch
9449 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
9450 Programming Language Design and Implementation, pp. 67-78, 1995.
9451 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
9452
9453 This is essentially an SSA-CCP pass modified to deal with ranges
9454 instead of constants.
9455
9456 While propagating ranges, we may find that two or more SSA name
9457 have equivalent, though distinct ranges. For instance,
9458
9459 1 x_9 = p_3->a;
9460 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
9461 3 if (p_4 == q_2)
9462 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
9463 5 endif
9464 6 if (q_2)
9465
9466 In the code above, pointer p_5 has range [q_2, q_2], but from the
9467 code we can also determine that p_5 cannot be NULL and, if q_2 had
9468 a non-varying range, p_5's range should also be compatible with it.
9469
9470 These equivalences are created by two expressions: ASSERT_EXPR and
9471 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
9472 result of another assertion, then we can use the fact that p_5 and
9473 p_4 are equivalent when evaluating p_5's range.
9474
9475 Together with value ranges, we also propagate these equivalences
9476 between names so that we can take advantage of information from
9477 multiple ranges when doing final replacement. Note that this
9478 equivalency relation is transitive but not symmetric.
9479
9480 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
9481 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
9482 in contexts where that assertion does not hold (e.g., in line 6).
9483
9484 TODO, the main difference between this pass and Patterson's is that
9485 we do not propagate edge probabilities. We only compute whether
9486 edges can be taken or not. That is, instead of having a spectrum
9487 of jump probabilities between 0 and 1, we only deal with 0, 1 and
9488 DON'T KNOW. In the future, it may be worthwhile to propagate
9489 probabilities to aid branch prediction. */
9490
9491 static unsigned int
9492 execute_vrp (void)
9493 {
9494 int i;
9495 edge e;
9496 switch_update *su;
9497
9498 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
9499 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
9500 scev_initialize ();
9501
9502 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
9503 Inserting assertions may split edges which will invalidate
9504 EDGE_DFS_BACK. */
9505 insert_range_assertions ();
9506
9507 to_remove_edges.create (10);
9508 to_update_switch_stmts.create (5);
9509 threadedge_initialize_values ();
9510
9511 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
9512 mark_dfs_back_edges ();
9513
9514 vrp_initialize ();
9515 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
9516 vrp_finalize ();
9517
9518 free_numbers_of_iterations_estimates ();
9519
9520 /* ASSERT_EXPRs must be removed before finalizing jump threads
9521 as finalizing jump threads calls the CFG cleanup code which
9522 does not properly handle ASSERT_EXPRs. */
9523 remove_range_assertions ();
9524
9525 /* If we exposed any new variables, go ahead and put them into
9526 SSA form now, before we handle jump threading. This simplifies
9527 interactions between rewriting of _DECL nodes into SSA form
9528 and rewriting SSA_NAME nodes into SSA form after block
9529 duplication and CFG manipulation. */
9530 update_ssa (TODO_update_ssa);
9531
9532 finalize_jump_threads ();
9533
9534 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
9535 CFG in a broken state and requires a cfg_cleanup run. */
9536 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
9537 remove_edge (e);
9538 /* Update SWITCH_EXPR case label vector. */
9539 FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
9540 {
9541 size_t j;
9542 size_t n = TREE_VEC_LENGTH (su->vec);
9543 tree label;
9544 gimple_switch_set_num_labels (su->stmt, n);
9545 for (j = 0; j < n; j++)
9546 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
9547 /* As we may have replaced the default label with a regular one
9548 make sure to make it a real default label again. This ensures
9549 optimal expansion. */
9550 label = gimple_switch_label (su->stmt, 0);
9551 CASE_LOW (label) = NULL_TREE;
9552 CASE_HIGH (label) = NULL_TREE;
9553 }
9554
9555 if (to_remove_edges.length () > 0)
9556 {
9557 free_dominance_info (CDI_DOMINATORS);
9558 if (current_loops)
9559 loops_state_set (LOOPS_NEED_FIXUP);
9560 }
9561
9562 to_remove_edges.release ();
9563 to_update_switch_stmts.release ();
9564 threadedge_finalize_values ();
9565
9566 scev_finalize ();
9567 loop_optimizer_finalize ();
9568 return 0;
9569 }
9570
9571 static bool
9572 gate_vrp (void)
9573 {
9574 return flag_tree_vrp != 0;
9575 }
9576
namespace {

/* Static pass descriptor consumed by the pass manager.  The field
   order matches struct pass_data; the trailing comments name each
   field.  */
const pass_data pass_data_vrp =
{
  GIMPLE_PASS, /* type */
  "vrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa
    | TODO_verify_ssa
    | TODO_verify_flow ), /* todo_flags_finish */
};

/* Pass object wrapping the VRP gate and execute entry points.  */
class pass_vrp : public gimple_opt_pass
{
public:
  pass_vrp(gcc::context *ctxt)
    : gimple_opt_pass(pass_data_vrp, ctxt)
  {}

  /* opt_pass methods: */
  /* clone lets the pass manager schedule VRP more than once in a
     pipeline by creating fresh instances.  */
  opt_pass * clone () { return new pass_vrp (ctxt_); }
  bool gate () { return gate_vrp (); }
  unsigned int execute () { return execute_vrp (); }

}; // class pass_vrp

} // anon namespace
9611
/* Factory used by the pass manager to instantiate the VRP pass for
   compiler context CTXT.  Caller owns the returned pass object.  */

gimple_opt_pass *
make_pass_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt);
}