1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "ggc.h"
26 #include "flags.h"
27 #include "tree.h"
28 #include "basic-block.h"
29 #include "tree-flow.h"
30 #include "tree-pass.h"
31 #include "tree-dump.h"
32 #include "timevar.h"
33 #include "diagnostic.h"
34 #include "toplev.h"
35 #include "intl.h"
36 #include "cfgloop.h"
37 #include "tree-scalar-evolution.h"
38 #include "tree-ssa-propagate.h"
39 #include "tree-chrec.h"
40
41
42 /* Set of SSA names found live during the RPO traversal of the function
43 for still active basic-blocks. */
44 static sbitmap *live;
45
46 /* Return true if the SSA name NAME is live on the edge E. */
47
48 static bool
49 live_on_edge (edge e, tree name)
50 {
51 return (live[e->dest->index]
52 && TEST_BIT (live[e->dest->index], SSA_NAME_VERSION (name)));
53 }
54
55 /* Local functions. */
56 static int compare_values (tree val1, tree val2);
57 static int compare_values_warnv (tree val1, tree val2, bool *);
58 static void vrp_meet (value_range_t *, value_range_t *);
59 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
60 tree, tree, bool, bool *,
61 bool *);
62
63 /* Location information for ASSERT_EXPRs. Each instance of this
64 structure describes an ASSERT_EXPR for an SSA name. Since a single
65 SSA name may have more than one assertion associated with it, these
66 locations are kept in a linked list attached to the corresponding
67 SSA name. */
68 struct assert_locus_d
69 {
70 /* Basic block where the assertion would be inserted. */
71 basic_block bb;
72
73 /* Some assertions need to be inserted on an edge (e.g., assertions
74 generated by COND_EXPRs). In those cases, BB will be NULL. */
75 edge e;
76
77 /* Pointer to the statement that generated this assertion. */
78 gimple_stmt_iterator si;
79
80 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
81 enum tree_code comp_code;
82
83 /* Value being compared against. */
84 tree val;
85
86 /* Expression to compare. */
87 tree expr;
88
89 /* Next node in the linked list. */
90 struct assert_locus_d *next;
91 };
92
93 typedef struct assert_locus_d *assert_locus_t;
94
95 /* If bit I is present, it means that SSA name N_i has a list of
96 assertions that should be inserted in the IL. */
97 static bitmap need_assert_for;
98
99 /* Array of assertion location lists.  ASSERTS_FOR[I]
100 holds a list of ASSERT_LOCUS_T nodes that describe where
101 ASSERT_EXPRs for SSA name N_I should be inserted. */
102 static assert_locus_t *asserts_for;
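
/* Illustration only (this loop is a sketch, not code used by this file):
   the assertions recorded for an SSA name NAME would be visited with a
   walk along the NEXT pointers, e.g.

     assert_locus_t loc;
     for (loc = asserts_for[SSA_NAME_VERSION (name)]; loc; loc = loc->next)
       ... process LOC ...

   where each node carries the block or edge, the comparison code and the
   value needed to materialize one ASSERT_EXPR.  */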
103
104 /* Value range array. After propagation, VR_VALUE[I] holds the range
105 of values that SSA name N_I may take. */
106 static value_range_t **vr_value;
107
108 /* For a PHI node which sets SSA name N_I, VR_PHI_EDGE_COUNTS[I] holds
109 the number of executable edges we saw the last time we visited the
110 node. */
111 static int *vr_phi_edge_counts;
112
113 typedef struct {
114 gimple stmt;
115 tree vec;
116 } switch_update;
117
118 static VEC (edge, heap) *to_remove_edges;
119 DEF_VEC_O(switch_update);
120 DEF_VEC_ALLOC_O(switch_update, heap);
121 static VEC (switch_update, heap) *to_update_switch_stmts;
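
/* Sketch of how the machinery above is meant to be used (illustrative
   only; the local SU below is invented for this example).  A deferred
   switch update is queued with the VEC API, roughly

     switch_update su;
     su.stmt = stmt;
     su.vec = vec;
     VEC_safe_push (switch_update, heap, to_update_switch_stmts, &su);

   and the queued updates are applied once propagation has finished and
   it is safe to rewrite the statements.  */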
122
123
124 /* Return the maximum value for TYPE's base type. */
125
126 static inline tree
127 vrp_val_max (const_tree type)
128 {
129 if (!INTEGRAL_TYPE_P (type))
130 return NULL_TREE;
131
132 /* For integer sub-types the values for the base type are relevant. */
133 if (TREE_TYPE (type))
134 type = TREE_TYPE (type);
135
136 return TYPE_MAX_VALUE (type);
137 }
138
139 /* Return the minimum value for TYPE's base type. */
140
141 static inline tree
142 vrp_val_min (const_tree type)
143 {
144 if (!INTEGRAL_TYPE_P (type))
145 return NULL_TREE;
146
147 /* For integer sub-types the values for the base type are relevant. */
148 if (TREE_TYPE (type))
149 type = TREE_TYPE (type);
150
151 return TYPE_MIN_VALUE (type);
152 }
153
154 /* Return whether VAL is equal to the maximum value of its type. This
155 will be true for a positive overflow infinity. We can't do a
156 simple equality comparison with TYPE_MAX_VALUE because C typedefs
157 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
158 to the integer constant with the same value in the type. */
159
160 static inline bool
161 vrp_val_is_max (const_tree val)
162 {
163 tree type_max = vrp_val_max (TREE_TYPE (val));
164 return (val == type_max
165 || (type_max != NULL_TREE
166 && operand_equal_p (val, type_max, 0)));
167 }
168
169 /* Return whether VAL is equal to the minimum value of its type. This
170 will be true for a negative overflow infinity. */
171
172 static inline bool
173 vrp_val_is_min (const_tree val)
174 {
175 tree type_min = vrp_val_min (TREE_TYPE (val));
176 return (val == type_min
177 || (type_min != NULL_TREE
178 && operand_equal_p (val, type_min, 0)));
179 }
180
181
182 /* Return whether TYPE should use an overflow infinity distinct from
183 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
184 represent a signed overflow during VRP computations. An infinity
185 is distinct from a half-range, which will go from some number to
186 TYPE_{MIN,MAX}_VALUE. */
187
188 static inline bool
189 needs_overflow_infinity (const_tree type)
190 {
191 return (INTEGRAL_TYPE_P (type)
192 && !TYPE_OVERFLOW_WRAPS (type)
193 /* Integer sub-types never overflow as they are never
194 operands of arithmetic operators. */
195 && !(TREE_TYPE (type) && TREE_TYPE (type) != type));
196 }
197
198 /* Return whether TYPE can support our overflow infinity
199 representation: we use the TREE_OVERFLOW flag, which only exists
200 for constants. If TYPE doesn't support this, we don't optimize
201 cases which would require signed overflow--we drop them to
202 VARYING. */
203
204 static inline bool
205 supports_overflow_infinity (const_tree type)
206 {
207 tree min = vrp_val_min (type), max = vrp_val_max (type);
208 #ifdef ENABLE_CHECKING
209 gcc_assert (needs_overflow_infinity (type));
210 #endif
211 return (min != NULL_TREE
212 && CONSTANT_CLASS_P (min)
213 && max != NULL_TREE
214 && CONSTANT_CLASS_P (max));
215 }
216
217 /* VAL is the maximum or minimum value of a type. Return a
218 corresponding overflow infinity. */
219
220 static inline tree
221 make_overflow_infinity (tree val)
222 {
223 #ifdef ENABLE_CHECKING
224 gcc_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
225 #endif
226 val = copy_node (val);
227 TREE_OVERFLOW (val) = 1;
228 return val;
229 }
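
/* Aside: a minimal self-contained sketch of the representation used
   above, with a toy struct instead of trees.  The toy_* names are
   invented for this illustration and are not used by VRP.  */

#include <limits.h>   /* For INT_MAX in the sketch below.  */

struct toy_val
{
  int value;       /* The numeric value; INT_MIN or INT_MAX at the extremes.  */
  int overflowed;  /* Plays the role of TREE_OVERFLOW.  */
};

/* Counterpart of make_overflow_infinity: tag a copy of VAL as having
   reached its extreme value because of signed overflow.  */

static struct toy_val
toy_make_overflow_infinity (struct toy_val val)
{
  val.overflowed = 1;
  return val;
}

/* Counterpart of is_positive_overflow_infinity (defined below): an
   overflow infinity is an extreme value that also carries the flag.  */

static int
toy_is_positive_overflow_infinity (struct toy_val val)
{
  return val.overflowed && val.value == INT_MAX;
}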
230
231 /* Return a negative overflow infinity for TYPE. */
232
233 static inline tree
234 negative_overflow_infinity (tree type)
235 {
236 #ifdef ENABLE_CHECKING
237 gcc_assert (supports_overflow_infinity (type));
238 #endif
239 return make_overflow_infinity (vrp_val_min (type));
240 }
241
242 /* Return a positive overflow infinity for TYPE. */
243
244 static inline tree
245 positive_overflow_infinity (tree type)
246 {
247 #ifdef ENABLE_CHECKING
248 gcc_assert (supports_overflow_infinity (type));
249 #endif
250 return make_overflow_infinity (vrp_val_max (type));
251 }
252
253 /* Return whether VAL is a negative overflow infinity. */
254
255 static inline bool
256 is_negative_overflow_infinity (const_tree val)
257 {
258 return (needs_overflow_infinity (TREE_TYPE (val))
259 && CONSTANT_CLASS_P (val)
260 && TREE_OVERFLOW (val)
261 && vrp_val_is_min (val));
262 }
263
264 /* Return whether VAL is a positive overflow infinity. */
265
266 static inline bool
267 is_positive_overflow_infinity (const_tree val)
268 {
269 return (needs_overflow_infinity (TREE_TYPE (val))
270 && CONSTANT_CLASS_P (val)
271 && TREE_OVERFLOW (val)
272 && vrp_val_is_max (val));
273 }
274
275 /* Return whether VAL is a positive or negative overflow infinity. */
276
277 static inline bool
278 is_overflow_infinity (const_tree val)
279 {
280 return (needs_overflow_infinity (TREE_TYPE (val))
281 && CONSTANT_CLASS_P (val)
282 && TREE_OVERFLOW (val)
283 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
284 }
285
286 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
287
288 static inline bool
289 stmt_overflow_infinity (gimple stmt)
290 {
291 if (is_gimple_assign (stmt)
292 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
293 GIMPLE_SINGLE_RHS)
294 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
295 return false;
296 }
297
298 /* If VAL is not an overflow infinity, return VAL. Otherwise, return
299 the same value with TREE_OVERFLOW clear. This can be used to avoid
300 confusing a regular value with an overflow value. */
301
302 static inline tree
303 avoid_overflow_infinity (tree val)
304 {
305 if (!is_overflow_infinity (val))
306 return val;
307
308 if (vrp_val_is_max (val))
309 return vrp_val_max (TREE_TYPE (val));
310 else
311 {
312 #ifdef ENABLE_CHECKING
313 gcc_assert (vrp_val_is_min (val));
314 #endif
315 return vrp_val_min (TREE_TYPE (val));
316 }
317 }
318
319
320 /* Return true if ARG is marked with the nonnull attribute in the
321 current function signature. */
322
323 static bool
324 nonnull_arg_p (const_tree arg)
325 {
326 tree t, attrs, fntype;
327 unsigned HOST_WIDE_INT arg_num;
328
329 gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));
330
331 /* The static chain decl is always non-null. */
332 if (arg == cfun->static_chain_decl)
333 return true;
334
335 fntype = TREE_TYPE (current_function_decl);
336 attrs = lookup_attribute ("nonnull", TYPE_ATTRIBUTES (fntype));
337
338 /* If "nonnull" wasn't specified, we know nothing about the argument. */
339 if (attrs == NULL_TREE)
340 return false;
341
342 /* If "nonnull" applies to all the arguments, then ARG is non-null. */
343 if (TREE_VALUE (attrs) == NULL_TREE)
344 return true;
345
346 /* Get the position number for ARG in the function signature. */
347 for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
348 t;
349 t = TREE_CHAIN (t), arg_num++)
350 {
351 if (t == arg)
352 break;
353 }
354
355 gcc_assert (t == arg);
356
357 /* Now see if ARG_NUM is mentioned in the nonnull list. */
358 for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
359 {
360 if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
361 return true;
362 }
363
364 return false;
365 }
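
/* Illustration (example user code, not taken from GCC itself; the name
   FIRST_BYTE is invented).  Given a declaration carrying the attribute
   checked above, nonnull_arg_p returns true for the default definition
   of P, so VRP can start P off as ~[0, 0] and fold the null test away.  */

static int first_byte (const unsigned char *p)
  __attribute__ ((nonnull (1)));

static int
first_byte (const unsigned char *p)
{
  if (p == 0)   /* Dead once P is known to be non-null.  */
    return -1;
  return *p;
}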
366
367
368 /* Set value range VR to VR_VARYING. */
369
370 static inline void
371 set_value_range_to_varying (value_range_t *vr)
372 {
373 vr->type = VR_VARYING;
374 vr->min = vr->max = NULL_TREE;
375 if (vr->equiv)
376 bitmap_clear (vr->equiv);
377 }
378
379
380 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
381
382 static void
383 set_value_range (value_range_t *vr, enum value_range_type t, tree min,
384 tree max, bitmap equiv)
385 {
386 #if defined ENABLE_CHECKING
387 /* Check the validity of the range. */
388 if (t == VR_RANGE || t == VR_ANTI_RANGE)
389 {
390 int cmp;
391
392 gcc_assert (min && max);
393
394 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
395 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
396
397 cmp = compare_values (min, max);
398 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
399
400 if (needs_overflow_infinity (TREE_TYPE (min)))
401 gcc_assert (!is_overflow_infinity (min)
402 || !is_overflow_infinity (max));
403 }
404
405 if (t == VR_UNDEFINED || t == VR_VARYING)
406 gcc_assert (min == NULL_TREE && max == NULL_TREE);
407
408 if (t == VR_UNDEFINED || t == VR_VARYING)
409 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
410 #endif
411
412 vr->type = t;
413 vr->min = min;
414 vr->max = max;
415
416 /* Since updating the equivalence set involves deep copying the
417 bitmaps, only do it if absolutely necessary. */
418 if (vr->equiv == NULL
419 && equiv != NULL)
420 vr->equiv = BITMAP_ALLOC (NULL);
421
422 if (equiv != vr->equiv)
423 {
424 if (equiv && !bitmap_empty_p (equiv))
425 bitmap_copy (vr->equiv, equiv);
426 else
427 bitmap_clear (vr->equiv);
428 }
429 }
430
431
432 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
433 This means adjusting T, MIN and MAX so that a wrapping range with
434 MAX < MIN, covering [MIN, type_max] U [type_min, MAX], is represented
435 as the anti-range ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
436 In corner cases where MAX+1 or MIN-1 wraps this will fall back
437 to varying.
438 This routine exists to ease canonicalization in the case where we
439 extract ranges from var + CST op limit. */
440
441 static void
442 set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
443 tree min, tree max, bitmap equiv)
444 {
445 /* Nothing to canonicalize for symbolic or unknown or varying ranges. */
446 if ((t != VR_RANGE
447 && t != VR_ANTI_RANGE)
448 || TREE_CODE (min) != INTEGER_CST
449 || TREE_CODE (max) != INTEGER_CST)
450 {
451 set_value_range (vr, t, min, max, equiv);
452 return;
453 }
454
455 /* If MIN and MAX are in the wrong order the range wraps around; adjust
456 the bounds to MAX+1 and MIN-1 and invert the range type below. */
457 if (tree_int_cst_lt (max, min))
458 {
459 tree one = build_int_cst (TREE_TYPE (min), 1);
460 tree tmp = int_const_binop (PLUS_EXPR, max, one, 0);
461 max = int_const_binop (MINUS_EXPR, min, one, 0);
462 min = tmp;
463
464 /* There's one corner case, if we had [C+1, C] before we now have
465 that again. But this represents an empty value range, so drop
466 to varying in this case. */
467 if (tree_int_cst_lt (max, min))
468 {
469 set_value_range_to_varying (vr);
470 return;
471 }
472
473 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
474 }
475
476 /* Anti-ranges that can be represented as ranges should be so. */
477 if (t == VR_ANTI_RANGE)
478 {
479 bool is_min = vrp_val_is_min (min);
480 bool is_max = vrp_val_is_max (max);
481
482 if (is_min && is_max)
483 {
484 /* We cannot deal with empty ranges, drop to varying. */
485 set_value_range_to_varying (vr);
486 return;
487 }
488 else if (is_min
489 /* As a special exception preserve non-null ranges. */
490 && !(TYPE_UNSIGNED (TREE_TYPE (min))
491 && integer_zerop (max)))
492 {
493 tree one = build_int_cst (TREE_TYPE (max), 1);
494 min = int_const_binop (PLUS_EXPR, max, one, 0);
495 max = vrp_val_max (TREE_TYPE (max));
496 t = VR_RANGE;
497 }
498 else if (is_max)
499 {
500 tree one = build_int_cst (TREE_TYPE (min), 1);
501 max = int_const_binop (MINUS_EXPR, min, one, 0);
502 min = vrp_val_min (TREE_TYPE (min));
503 t = VR_RANGE;
504 }
505 }
506
507 set_value_range (vr, t, min, max, equiv);
508 }
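
/* Standalone illustration of the wrap-around case handled above, using
   unsigned char arithmetic instead of trees (the toy_* name is invented
   and not used by VRP).  A wrapping "range" such as [250, 5] stands for
   [250, 255] U [0, 5] and canonicalizes to the anti-range ~[6, 249].  */

static void
toy_canonicalize_wrapped_range (unsigned char min, unsigned char max,
                                unsigned char *anti_min,
                                unsigned char *anti_max)
{
  /* The caller passes a wrapping range, i.e. MAX < MIN; mirror the
     MAX + 1 and MIN - 1 computation performed above.  */
  *anti_min = (unsigned char) (max + 1);
  *anti_max = (unsigned char) (min - 1);
}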
509
510 /* Copy value range FROM into value range TO. */
511
512 static inline void
513 copy_value_range (value_range_t *to, value_range_t *from)
514 {
515 set_value_range (to, from->type, from->min, from->max, from->equiv);
516 }
517
518 /* Set value range VR to a single value. This function is only called
519 with values we get from statements, and exists to clear the
520 TREE_OVERFLOW flag so that we don't think we have an overflow
521 infinity when we shouldn't. */
522
523 static inline void
524 set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
525 {
526 gcc_assert (is_gimple_min_invariant (val));
527 val = avoid_overflow_infinity (val);
528 set_value_range (vr, VR_RANGE, val, val, equiv);
529 }
530
531 /* Set value range VR to a non-negative range of type TYPE.
532 OVERFLOW_INFINITY indicates whether to use an overflow infinity
533 rather than TYPE_MAX_VALUE; this should be true if we determine
534 that the range is nonnegative based on the assumption that signed
535 overflow does not occur. */
536
537 static inline void
538 set_value_range_to_nonnegative (value_range_t *vr, tree type,
539 bool overflow_infinity)
540 {
541 tree zero;
542
543 if (overflow_infinity && !supports_overflow_infinity (type))
544 {
545 set_value_range_to_varying (vr);
546 return;
547 }
548
549 zero = build_int_cst (type, 0);
550 set_value_range (vr, VR_RANGE, zero,
551 (overflow_infinity
552 ? positive_overflow_infinity (type)
553 : TYPE_MAX_VALUE (type)),
554 vr->equiv);
555 }
556
557 /* Set value range VR to a non-NULL range of type TYPE. */
558
559 static inline void
560 set_value_range_to_nonnull (value_range_t *vr, tree type)
561 {
562 tree zero = build_int_cst (type, 0);
563 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
564 }
565
566
567 /* Set value range VR to a NULL range of type TYPE. */
568
569 static inline void
570 set_value_range_to_null (value_range_t *vr, tree type)
571 {
572 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
573 }
574
575
576 /* Set value range VR to a range of a truthvalue of type TYPE. */
577
578 static inline void
579 set_value_range_to_truthvalue (value_range_t *vr, tree type)
580 {
581 if (TYPE_PRECISION (type) == 1)
582 set_value_range_to_varying (vr);
583 else
584 set_value_range (vr, VR_RANGE,
585 build_int_cst (type, 0), build_int_cst (type, 1),
586 vr->equiv);
587 }
588
589
590 /* Set value range VR to VR_UNDEFINED. */
591
592 static inline void
593 set_value_range_to_undefined (value_range_t *vr)
594 {
595 vr->type = VR_UNDEFINED;
596 vr->min = vr->max = NULL_TREE;
597 if (vr->equiv)
598 bitmap_clear (vr->equiv);
599 }
600
601
602 /* If abs (min) < abs (max), set VR to [-max, max]; if
603 abs (min) >= abs (max), set VR to [-min, min]. */
604
605 static void
606 abs_extent_range (value_range_t *vr, tree min, tree max)
607 {
608 int cmp;
609
610 gcc_assert (TREE_CODE (min) == INTEGER_CST);
611 gcc_assert (TREE_CODE (max) == INTEGER_CST);
612 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
613 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
614 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
615 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
616 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
617 {
618 set_value_range_to_varying (vr);
619 return;
620 }
621 cmp = compare_values (min, max);
622 if (cmp == -1)
623 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
624 else if (cmp == 0 || cmp == 1)
625 {
626 max = min;
627 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
628 }
629 else
630 {
631 set_value_range_to_varying (vr);
632 return;
633 }
634 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
635 }
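
/* Worked examples for abs_extent_range (illustrative only): on [-3, 7]
   the absolute values are 3 and 7, so the result is [-7, 7]; on [-9, 4]
   they are 9 and 4, so the result is [-9, 9].  */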
636
637
638 /* Return value range information for VAR.
639
640 If we have no value ranges recorded (i.e., VRP is not running), then
641 return NULL. Otherwise create an empty range if none existed for VAR. */
642
643 static value_range_t *
644 get_value_range (const_tree var)
645 {
646 value_range_t *vr;
647 tree sym;
648 unsigned ver = SSA_NAME_VERSION (var);
649
650 /* If we have no recorded ranges, then return NULL. */
651 if (! vr_value)
652 return NULL;
653
654 vr = vr_value[ver];
655 if (vr)
656 return vr;
657
658 /* Create a default value range. */
659 vr_value[ver] = vr = XCNEW (value_range_t);
660
661 /* Defer allocating the equivalence set. */
662 vr->equiv = NULL;
663
664 /* If VAR is a default definition, the variable can take any value
665 in VAR's type. */
666 sym = SSA_NAME_VAR (var);
667 if (SSA_NAME_IS_DEFAULT_DEF (var))
668 {
669 /* Try to use the "nonnull" attribute to create ~[0, 0]
670 anti-ranges for pointers. Note that this is only valid with
671 default definitions of PARM_DECLs. */
672 if (TREE_CODE (sym) == PARM_DECL
673 && POINTER_TYPE_P (TREE_TYPE (sym))
674 && nonnull_arg_p (sym))
675 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
676 else
677 set_value_range_to_varying (vr);
678 }
679
680 return vr;
681 }
682
683 /* Return true if VAL1 and VAL2 are equal values for VRP purposes. */
684
685 static inline bool
686 vrp_operand_equal_p (const_tree val1, const_tree val2)
687 {
688 if (val1 == val2)
689 return true;
690 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
691 return false;
692 if (is_overflow_infinity (val1))
693 return is_overflow_infinity (val2);
694 return true;
695 }
696
697 /* Return true if the bitmaps B1 and B2 are equal. */
698
699 static inline bool
700 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
701 {
702 return (b1 == b2
703 || (b1 && b2
704 && bitmap_equal_p (b1, b2)));
705 }
706
707 /* Update the value range and equivalence set for variable VAR to
708 NEW_VR. Return true if NEW_VR is different from VAR's previous
709 value.
710
711 NOTE: This function assumes that NEW_VR is a temporary value range
712 object created for the sole purpose of updating VAR's range. The
713 storage used by the equivalence set from NEW_VR will be freed by
714 this function. Do not call update_value_range when NEW_VR
715 is the range object associated with another SSA name. */
716
717 static inline bool
718 update_value_range (const_tree var, value_range_t *new_vr)
719 {
720 value_range_t *old_vr;
721 bool is_new;
722
723 /* Update the value range, if necessary. */
724 old_vr = get_value_range (var);
725 is_new = old_vr->type != new_vr->type
726 || !vrp_operand_equal_p (old_vr->min, new_vr->min)
727 || !vrp_operand_equal_p (old_vr->max, new_vr->max)
728 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
729
730 if (is_new)
731 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
732 new_vr->equiv);
733
734 BITMAP_FREE (new_vr->equiv);
735
736 return is_new;
737 }
738
739
740 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
741 point where equivalence processing can be turned on/off. */
742
743 static void
744 add_equivalence (bitmap *equiv, const_tree var)
745 {
746 unsigned ver = SSA_NAME_VERSION (var);
747 value_range_t *vr = vr_value[ver];
748
749 if (*equiv == NULL)
750 *equiv = BITMAP_ALLOC (NULL);
751 bitmap_set_bit (*equiv, ver);
752 if (vr && vr->equiv)
753 bitmap_ior_into (*equiv, vr->equiv);
754 }
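
/* For example (illustrative): after i_6 = ASSERT_EXPR <i_5, i_5 > 10>,
   the range built for i_6 gets i_5's version added to its equivalence
   bitmap together with everything already in i_5's own set, so facts
   learned about either name can later be applied to the other.  */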
755
756
757 /* Return true if VR is ~[0, 0]. */
758
759 static inline bool
760 range_is_nonnull (value_range_t *vr)
761 {
762 return vr->type == VR_ANTI_RANGE
763 && integer_zerop (vr->min)
764 && integer_zerop (vr->max);
765 }
766
767
768 /* Return true if VR is [0, 0]. */
769
770 static inline bool
771 range_is_null (value_range_t *vr)
772 {
773 return vr->type == VR_RANGE
774 && integer_zerop (vr->min)
775 && integer_zerop (vr->max);
776 }
777
778
779 /* Return true if value range VR involves at least one symbol. */
780
781 static inline bool
782 symbolic_range_p (value_range_t *vr)
783 {
784 return (!is_gimple_min_invariant (vr->min)
785 || !is_gimple_min_invariant (vr->max));
786 }
787
788 /* Return true if value range VR uses an overflow infinity. */
789
790 static inline bool
791 overflow_infinity_range_p (value_range_t *vr)
792 {
793 return (vr->type == VR_RANGE
794 && (is_overflow_infinity (vr->min)
795 || is_overflow_infinity (vr->max)));
796 }
797
798 /* Return false if we can not make a valid comparison based on VR;
799 this will be the case if it uses an overflow infinity and overflow
800 is not undefined (i.e., -fno-strict-overflow is in effect).
801 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
802 uses an overflow infinity. */
803
804 static bool
805 usable_range_p (value_range_t *vr, bool *strict_overflow_p)
806 {
807 gcc_assert (vr->type == VR_RANGE);
808 if (is_overflow_infinity (vr->min))
809 {
810 *strict_overflow_p = true;
811 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
812 return false;
813 }
814 if (is_overflow_infinity (vr->max))
815 {
816 *strict_overflow_p = true;
817 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
818 return false;
819 }
820 return true;
821 }
822
823
824 /* Like tree_expr_nonnegative_warnv_p, but this function uses value
825 ranges obtained so far. */
826
827 static bool
828 vrp_expr_computes_nonnegative (tree expr, bool *strict_overflow_p)
829 {
830 return (tree_expr_nonnegative_warnv_p (expr, strict_overflow_p)
831 || (TREE_CODE (expr) == SSA_NAME
832 && ssa_name_nonnegative_p (expr)));
833 }
834
835 /* Return true if the result of assignment STMT is known to be non-negative.
836 If the return value is based on the assumption that signed overflow is
837 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
838 *STRICT_OVERFLOW_P.*/
839
840 static bool
841 gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
842 {
843 enum tree_code code = gimple_assign_rhs_code (stmt);
844 switch (get_gimple_rhs_class (code))
845 {
846 case GIMPLE_UNARY_RHS:
847 return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
848 gimple_expr_type (stmt),
849 gimple_assign_rhs1 (stmt),
850 strict_overflow_p);
851 case GIMPLE_BINARY_RHS:
852 return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
853 gimple_expr_type (stmt),
854 gimple_assign_rhs1 (stmt),
855 gimple_assign_rhs2 (stmt),
856 strict_overflow_p);
857 case GIMPLE_SINGLE_RHS:
858 return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
859 strict_overflow_p);
860 case GIMPLE_INVALID_RHS:
861 gcc_unreachable ();
862 default:
863 gcc_unreachable ();
864 }
865 }
866
867 /* Return true if the return value of call STMT is known to be non-negative.
868 If the return value is based on the assumption that signed overflow is
869 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
870 *STRICT_OVERFLOW_P.*/
871
872 static bool
873 gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
874 {
875 tree arg0 = gimple_call_num_args (stmt) > 0 ?
876 gimple_call_arg (stmt, 0) : NULL_TREE;
877 tree arg1 = gimple_call_num_args (stmt) > 1 ?
878 gimple_call_arg (stmt, 1) : NULL_TREE;
879
880 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
881 gimple_call_fndecl (stmt),
882 arg0,
883 arg1,
884 strict_overflow_p);
885 }
886
887 /* Return true if STMT is known to compute a non-negative value.
888 If the return value is based on the assumption that signed overflow is
889 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
890 *STRICT_OVERFLOW_P.*/
891
892 static bool
893 gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
894 {
895 switch (gimple_code (stmt))
896 {
897 case GIMPLE_ASSIGN:
898 return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
899 case GIMPLE_CALL:
900 return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
901 default:
902 gcc_unreachable ();
903 }
904 }
905
906 /* Return true if the result of assignment STMT is known to be non-zero.
907 If the return value is based on the assumption that signed overflow is
908 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
909 *STRICT_OVERFLOW_P.*/
910
911 static bool
912 gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
913 {
914 enum tree_code code = gimple_assign_rhs_code (stmt);
915 switch (get_gimple_rhs_class (code))
916 {
917 case GIMPLE_UNARY_RHS:
918 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
919 gimple_expr_type (stmt),
920 gimple_assign_rhs1 (stmt),
921 strict_overflow_p);
922 case GIMPLE_BINARY_RHS:
923 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
924 gimple_expr_type (stmt),
925 gimple_assign_rhs1 (stmt),
926 gimple_assign_rhs2 (stmt),
927 strict_overflow_p);
928 case GIMPLE_SINGLE_RHS:
929 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
930 strict_overflow_p);
931 case GIMPLE_INVALID_RHS:
932 gcc_unreachable ();
933 default:
934 gcc_unreachable ();
935 }
936 }
937
938 /* Return true if STMT is known to compute a non-zero value.
939 If the return value is based on the assumption that signed overflow is
940 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
941 *STRICT_OVERFLOW_P.*/
942
943 static bool
944 gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
945 {
946 switch (gimple_code (stmt))
947 {
948 case GIMPLE_ASSIGN:
949 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
950 case GIMPLE_CALL:
951 return gimple_alloca_call_p (stmt);
952 default:
953 gcc_unreachable ();
954 }
955 }
956
957 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
958 obtained so far. */
959
960 static bool
961 vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
962 {
963 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
964 return true;
965
966 /* If we have an expression of the form &X->a, then the expression
967 is nonnull if X is nonnull. */
968 if (is_gimple_assign (stmt)
969 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
970 {
971 tree expr = gimple_assign_rhs1 (stmt);
972 tree base = get_base_address (TREE_OPERAND (expr, 0));
973
974 if (base != NULL_TREE
975 && TREE_CODE (base) == INDIRECT_REF
976 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
977 {
978 value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
979 if (range_is_nonnull (vr))
980 return true;
981 }
982 }
983
984 return false;
985 }
986
987 /* Returns true if EXPR is a valid value (as expected by compare_values) --
988 a gimple invariant, or SSA_NAME +- CST. */
989
990 static bool
991 valid_value_p (tree expr)
992 {
993 if (TREE_CODE (expr) == SSA_NAME)
994 return true;
995
996 if (TREE_CODE (expr) == PLUS_EXPR
997 || TREE_CODE (expr) == MINUS_EXPR)
998 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
999 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1000
1001 return is_gimple_min_invariant (expr);
1002 }
1003
1004 /* Return
1005 1 if VAL < VAL2
1006 0 if !(VAL < VAL2)
1007 -2 if those are incomparable. */
1008 static inline int
1009 operand_less_p (tree val, tree val2)
1010 {
1011 /* LT is folded faster than GE and others. Inline the common case. */
1012 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1013 {
1014 if (TYPE_UNSIGNED (TREE_TYPE (val)))
1015 return INT_CST_LT_UNSIGNED (val, val2);
1016 else
1017 {
1018 if (INT_CST_LT (val, val2))
1019 return 1;
1020 }
1021 }
1022 else
1023 {
1024 tree tcmp;
1025
1026 fold_defer_overflow_warnings ();
1027
1028 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1029
1030 fold_undefer_and_ignore_overflow_warnings ();
1031
1032 if (!tcmp
1033 || TREE_CODE (tcmp) != INTEGER_CST)
1034 return -2;
1035
1036 if (!integer_zerop (tcmp))
1037 return 1;
1038 }
1039
1040 /* val >= val2, not considering overflow infinity. */
1041 if (is_negative_overflow_infinity (val))
1042 return is_negative_overflow_infinity (val2) ? 0 : 1;
1043 else if (is_positive_overflow_infinity (val2))
1044 return is_positive_overflow_infinity (val) ? 0 : 1;
1045
1046 return 0;
1047 }
1048
1049 /* Compare two values VAL1 and VAL2. Return
1050
1051 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1052 -1 if VAL1 < VAL2,
1053 0 if VAL1 == VAL2,
1054 +1 if VAL1 > VAL2, and
1055 +2 if VAL1 != VAL2
1056
1057 This is similar to tree_int_cst_compare but supports pointer values
1058 and values that cannot be compared at compile time.
1059
1060 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1061 true if the return value is only valid if we assume that signed
1062 overflow is undefined. */
1063
1064 static int
1065 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1066 {
1067 if (val1 == val2)
1068 return 0;
1069
1070 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1071 both integers. */
1072 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1073 == POINTER_TYPE_P (TREE_TYPE (val2)));
1074 /* Convert the two values into the same type. This is needed because
1075 sizetype causes sign extension even for unsigned types. */
1076 val2 = fold_convert (TREE_TYPE (val1), val2);
1077 STRIP_USELESS_TYPE_CONVERSION (val2);
1078
1079 if ((TREE_CODE (val1) == SSA_NAME
1080 || TREE_CODE (val1) == PLUS_EXPR
1081 || TREE_CODE (val1) == MINUS_EXPR)
1082 && (TREE_CODE (val2) == SSA_NAME
1083 || TREE_CODE (val2) == PLUS_EXPR
1084 || TREE_CODE (val2) == MINUS_EXPR))
1085 {
1086 tree n1, c1, n2, c2;
1087 enum tree_code code1, code2;
1088
1089 /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
1090 return -1 or +1 accordingly. If VAL1 and VAL2 don't use the
1091 same name, return -2. */
1092 if (TREE_CODE (val1) == SSA_NAME)
1093 {
1094 code1 = SSA_NAME;
1095 n1 = val1;
1096 c1 = NULL_TREE;
1097 }
1098 else
1099 {
1100 code1 = TREE_CODE (val1);
1101 n1 = TREE_OPERAND (val1, 0);
1102 c1 = TREE_OPERAND (val1, 1);
1103 if (tree_int_cst_sgn (c1) == -1)
1104 {
1105 if (is_negative_overflow_infinity (c1))
1106 return -2;
1107 c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
1108 if (!c1)
1109 return -2;
1110 code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1111 }
1112 }
1113
1114 if (TREE_CODE (val2) == SSA_NAME)
1115 {
1116 code2 = SSA_NAME;
1117 n2 = val2;
1118 c2 = NULL_TREE;
1119 }
1120 else
1121 {
1122 code2 = TREE_CODE (val2);
1123 n2 = TREE_OPERAND (val2, 0);
1124 c2 = TREE_OPERAND (val2, 1);
1125 if (tree_int_cst_sgn (c2) == -1)
1126 {
1127 if (is_negative_overflow_infinity (c2))
1128 return -2;
1129 c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
1130 if (!c2)
1131 return -2;
1132 code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1133 }
1134 }
1135
1136 /* Both values must use the same name. */
1137 if (n1 != n2)
1138 return -2;
1139
1140 if (code1 == SSA_NAME
1141 && code2 == SSA_NAME)
1142 /* NAME == NAME */
1143 return 0;
1144
1145 /* If overflow is defined we cannot simplify more. */
1146 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1147 return -2;
1148
1149 if (strict_overflow_p != NULL
1150 && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
1151 && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
1152 *strict_overflow_p = true;
1153
1154 if (code1 == SSA_NAME)
1155 {
1156 if (code2 == PLUS_EXPR)
1157 /* NAME < NAME + CST */
1158 return -1;
1159 else if (code2 == MINUS_EXPR)
1160 /* NAME > NAME - CST */
1161 return 1;
1162 }
1163 else if (code1 == PLUS_EXPR)
1164 {
1165 if (code2 == SSA_NAME)
1166 /* NAME + CST > NAME */
1167 return 1;
1168 else if (code2 == PLUS_EXPR)
1169 /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */
1170 return compare_values_warnv (c1, c2, strict_overflow_p);
1171 else if (code2 == MINUS_EXPR)
1172 /* NAME + CST1 > NAME - CST2 */
1173 return 1;
1174 }
1175 else if (code1 == MINUS_EXPR)
1176 {
1177 if (code2 == SSA_NAME)
1178 /* NAME - CST < NAME */
1179 return -1;
1180 else if (code2 == PLUS_EXPR)
1181 /* NAME - CST1 < NAME + CST2 */
1182 return -1;
1183 else if (code2 == MINUS_EXPR)
1184 /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that
1185 C1 and C2 are swapped in the call to compare_values. */
1186 return compare_values_warnv (c2, c1, strict_overflow_p);
1187 }
1188
1189 gcc_unreachable ();
1190 }
1191
1192 /* We cannot compare non-constants. */
1193 if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
1194 return -2;
1195
1196 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1197 {
1198 /* We cannot compare overflowed values, except for overflow
1199 infinities. */
1200 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1201 {
1202 if (strict_overflow_p != NULL)
1203 *strict_overflow_p = true;
1204 if (is_negative_overflow_infinity (val1))
1205 return is_negative_overflow_infinity (val2) ? 0 : -1;
1206 else if (is_negative_overflow_infinity (val2))
1207 return 1;
1208 else if (is_positive_overflow_infinity (val1))
1209 return is_positive_overflow_infinity (val2) ? 0 : 1;
1210 else if (is_positive_overflow_infinity (val2))
1211 return -1;
1212 return -2;
1213 }
1214
1215 return tree_int_cst_compare (val1, val2);
1216 }
1217 else
1218 {
1219 tree t;
1220
1221 /* First see if VAL1 and VAL2 are not the same. */
1222 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1223 return 0;
1224
1225 /* If VAL1 is a lower address than VAL2, return -1. */
1226 if (operand_less_p (val1, val2) == 1)
1227 return -1;
1228
1229 /* If VAL1 is a higher address than VAL2, return +1. */
1230 if (operand_less_p (val2, val1) == 1)
1231 return 1;
1232
1233 /* If VAL1 is different than VAL2, return +2.
1234 For integer constants we either have already returned -1 or 1
1235 or they are equivalent. We still might succeed in proving
1236 something about non-trivial operands. */
1237 if (TREE_CODE (val1) != INTEGER_CST
1238 || TREE_CODE (val2) != INTEGER_CST)
1239 {
1240 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1241 if (t && integer_onep (t))
1242 return 2;
1243 }
1244
1245 return -2;
1246 }
1247 }
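
/* Self-contained sketch of the five-way result convention implemented
   above (not GCC code; the toy_* names are invented).  Operands are
   modelled as optional constants, where an unknown value stands in for
   a symbolic SSA name.  */

struct toy_operand
{
  int known;    /* Zero models an operand with no usable constant.  */
  long cst;
};

static int
toy_compare_values (struct toy_operand a, struct toy_operand b)
{
  if (!a.known || !b.known)
    return -2;          /* Cannot be compared at compile time.  */
  if (a.cst < b.cst)
    return -1;
  if (a.cst > b.cst)
    return 1;
  return 0;             /* Equal; the +2 "known different but unordered"
                           answer only arises for pointer operands.  */
}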
1248
1249 /* Compare values like compare_values_warnv, but treat comparisons of
1250 nonconstants which rely on undefined overflow as incomparable. */
1251
1252 static int
1253 compare_values (tree val1, tree val2)
1254 {
1255 bool sop;
1256 int ret;
1257
1258 sop = false;
1259 ret = compare_values_warnv (val1, val2, &sop);
1260 if (sop
1261 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1262 ret = -2;
1263 return ret;
1264 }
1265
1266
1267 /* Return 1 if VAL is inside value range VR (VR->MIN <= VAL <= VR->MAX),
1268 0 if VAL is not inside VR,
1269 -2 if we cannot tell either way.
1270
1271 FIXME, the current semantics of this function are a bit quirky
1272 when taken in the context of VRP. In here we do not care
1273 about VR's type. If VR is the anti-range ~[3, 5] the call
1274 value_inside_range (4, VR) will return 1.
1275
1276 This is counter-intuitive in a strict sense, but the callers
1277 currently expect this. They are calling the function
1278 merely to determine whether VR->MIN <= VAL <= VR->MAX. The
1279 callers are applying the VR_RANGE/VR_ANTI_RANGE semantics
1280 themselves.
1281
1282 This also applies to value_ranges_intersect_p and
1283 range_includes_zero_p. The semantics of VR_RANGE and
1284 VR_ANTI_RANGE should be encoded here, but that also means
1285 adapting the users of these functions to the new semantics.
1286
1287 Benchmark compile/20001226-1.c compilation time after changing this
1288 function. */
1289
1290 static inline int
1291 value_inside_range (tree val, value_range_t * vr)
1292 {
1293 int cmp1, cmp2;
1294
1295 cmp1 = operand_less_p (val, vr->min);
1296 if (cmp1 == -2)
1297 return -2;
1298 if (cmp1 == 1)
1299 return 0;
1300
1301 cmp2 = operand_less_p (vr->max, val);
1302 if (cmp2 == -2)
1303 return -2;
1304
1305 return !cmp2;
1306 }
1307
1308
1309 /* Return true if value ranges VR0 and VR1 have a non-empty
1310 intersection.
1311
1312 Benchmark compile/20001226-1.c compilation time after changing this
1313 function.
1314 */
1315
1316 static inline bool
1317 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1318 {
1319 /* The value ranges do not intersect if the maximum of the first range is
1320 less than the minimum of the second range or vice versa.
1321 When those relations are unknown, we can't do any better. */
1322 if (operand_less_p (vr0->max, vr1->min) != 0)
1323 return false;
1324 if (operand_less_p (vr1->max, vr0->min) != 0)
1325 return false;
1326 return true;
1327 }
1328
1329
1330 /* Return true if VR includes the value zero, false otherwise. FIXME,
1331 currently this will return false for an anti-range like ~[-4, 3].
1332 This will be wrong when the semantics of value_inside_range are
1333 modified (currently the users of this function expect these
1334 semantics). */
1335
1336 static inline bool
1337 range_includes_zero_p (value_range_t *vr)
1338 {
1339 tree zero;
1340
1341 gcc_assert (vr->type != VR_UNDEFINED
1342 && vr->type != VR_VARYING
1343 && !symbolic_range_p (vr));
1344
1345 zero = build_int_cst (TREE_TYPE (vr->min), 0);
1346 return (value_inside_range (zero, vr) == 1);
1347 }
1348
1349 /* Return true if T, an SSA_NAME, is known to be nonnegative. Return
1350 false otherwise or if no value range information is available. */
1351
1352 bool
1353 ssa_name_nonnegative_p (const_tree t)
1354 {
1355 value_range_t *vr = get_value_range (t);
1356
1357 if (!vr)
1358 return false;
1359
1360 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1361 which would return a useful value should be encoded as a VR_RANGE. */
1362 if (vr->type == VR_RANGE)
1363 {
1364 int result = compare_values (vr->min, integer_zero_node);
1365
1366 return (result == 0 || result == 1);
1367 }
1368 return false;
1369 }
1370
1371 /* If OP has a value range with a single constant value return that,
1372 otherwise return NULL_TREE. This returns OP itself if OP is a
1373 constant. */
1374
1375 static tree
1376 op_with_constant_singleton_value_range (tree op)
1377 {
1378 value_range_t *vr;
1379
1380 if (is_gimple_min_invariant (op))
1381 return op;
1382
1383 if (TREE_CODE (op) != SSA_NAME)
1384 return NULL_TREE;
1385
1386 vr = get_value_range (op);
1387 if (vr->type == VR_RANGE
1388 && operand_equal_p (vr->min, vr->max, 0)
1389 && is_gimple_min_invariant (vr->min))
1390 return vr->min;
1391
1392 return NULL_TREE;
1393 }
1394
1395
1396 /* Extract value range information from an ASSERT_EXPR EXPR and store
1397 it in *VR_P. */
1398
1399 static void
1400 extract_range_from_assert (value_range_t *vr_p, tree expr)
1401 {
1402 tree var, cond, limit, min, max, type;
1403 value_range_t *var_vr, *limit_vr;
1404 enum tree_code cond_code;
1405
1406 var = ASSERT_EXPR_VAR (expr);
1407 cond = ASSERT_EXPR_COND (expr);
1408
1409 gcc_assert (COMPARISON_CLASS_P (cond));
1410
1411 /* Find VAR in the ASSERT_EXPR conditional. */
1412 if (var == TREE_OPERAND (cond, 0)
1413 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1414 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1415 {
1416 /* If the predicate is of the form VAR COMP LIMIT, then we just
1417 take LIMIT from the RHS and use the same comparison code. */
1418 cond_code = TREE_CODE (cond);
1419 limit = TREE_OPERAND (cond, 1);
1420 cond = TREE_OPERAND (cond, 0);
1421 }
1422 else
1423 {
1424 /* If the predicate is of the form LIMIT COMP VAR, then we need
1425 to flip around the comparison code to create the proper range
1426 for VAR. */
1427 cond_code = swap_tree_comparison (TREE_CODE (cond));
1428 limit = TREE_OPERAND (cond, 0);
1429 cond = TREE_OPERAND (cond, 1);
1430 }
1431
1432 limit = avoid_overflow_infinity (limit);
1433
1434 type = TREE_TYPE (limit);
1435 gcc_assert (limit != var);
1436
1437 /* For pointer arithmetic, we only keep track of pointer equality
1438 and inequality. */
1439 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1440 {
1441 set_value_range_to_varying (vr_p);
1442 return;
1443 }
1444
1445 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1446 try to use LIMIT's range to avoid creating symbolic ranges
1447 unnecessarily. */
1448 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1449
1450 /* LIMIT's range is only interesting if it has any useful information. */
1451 if (limit_vr
1452 && (limit_vr->type == VR_UNDEFINED
1453 || limit_vr->type == VR_VARYING
1454 || symbolic_range_p (limit_vr)))
1455 limit_vr = NULL;
1456
1457 /* Initially, the new range has the same set of equivalences of
1458 VAR's range. This will be revised before returning the final
1459 value. Since assertions may be chained via mutually exclusive
1460 predicates, we will need to trim the set of equivalences before
1461 we are done. */
1462 gcc_assert (vr_p->equiv == NULL);
1463 add_equivalence (&vr_p->equiv, var);
1464
1465 /* Extract a new range based on the asserted comparison for VAR and
1466 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1467 will only use it for equality comparisons (EQ_EXPR). For any
1468 other kind of assertion, we cannot derive a range from LIMIT's
1469 anti-range that can be used to describe the new range. For
1470 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1471 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1472 no single range for x_2 that could describe LE_EXPR, so we might
1473 as well build the range [b_4, +INF] for it.
1474 One special case we handle is extracting a range from a
1475 range test encoded as (unsigned)var + CST <= limit. */
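 /* For instance (illustrative source-level input, not taken from a real
    test case), a bounds check written as
      if ((unsigned) (x - 10) <= 90)
    i.e. roughly 10 <= x <= 100 for a signed X, reaches this point in
    exactly that NOP_EXPR/PLUS_EXPR form and is handled below.  */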
1476 if (TREE_CODE (cond) == NOP_EXPR
1477 || TREE_CODE (cond) == PLUS_EXPR)
1478 {
1479 if (TREE_CODE (cond) == PLUS_EXPR)
1480 {
1481 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
1482 TREE_OPERAND (cond, 1));
1483 max = int_const_binop (PLUS_EXPR, limit, min, 0);
1484 cond = TREE_OPERAND (cond, 0);
1485 }
1486 else
1487 {
1488 min = build_int_cst (TREE_TYPE (var), 0);
1489 max = limit;
1490 }
1491
1492 /* Make sure to not set TREE_OVERFLOW on the final type
1493 conversion. We are willingly interpreting large positive
1494 unsigned values as negative signed values here. */
1495 min = force_fit_type_double (TREE_TYPE (var), TREE_INT_CST_LOW (min),
1496 TREE_INT_CST_HIGH (min), 0, false);
1497 max = force_fit_type_double (TREE_TYPE (var), TREE_INT_CST_LOW (max),
1498 TREE_INT_CST_HIGH (max), 0, false);
1499
1500 /* We can transform a max, min range to an anti-range or
1501 vice-versa. Use set_and_canonicalize_value_range which does
1502 this for us. */
1503 if (cond_code == LE_EXPR)
1504 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1505 min, max, vr_p->equiv);
1506 else if (cond_code == GT_EXPR)
1507 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1508 min, max, vr_p->equiv);
1509 else
1510 gcc_unreachable ();
1511 }
1512 else if (cond_code == EQ_EXPR)
1513 {
1514 enum value_range_type range_type;
1515
1516 if (limit_vr)
1517 {
1518 range_type = limit_vr->type;
1519 min = limit_vr->min;
1520 max = limit_vr->max;
1521 }
1522 else
1523 {
1524 range_type = VR_RANGE;
1525 min = limit;
1526 max = limit;
1527 }
1528
1529 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1530
1531 /* When asserting the equality VAR == LIMIT and LIMIT is another
1532 SSA name, the new range will also inherit the equivalence set
1533 from LIMIT. */
1534 if (TREE_CODE (limit) == SSA_NAME)
1535 add_equivalence (&vr_p->equiv, limit);
1536 }
1537 else if (cond_code == NE_EXPR)
1538 {
1539 /* As described above, when LIMIT's range is an anti-range and
1540 this assertion is an inequality (NE_EXPR), then we cannot
1541 derive anything from the anti-range. For instance, if
1542 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1543 not imply that VAR's range is [0, 0]. So, in the case of
1544 anti-ranges, we just assert the inequality using LIMIT and
1545 not its anti-range.
1546
1547 If LIMIT_VR is a range, we can only use it to build a new
1548 anti-range if LIMIT_VR is a single-valued range. For
1549 instance, if LIMIT_VR is [0, 1], the predicate
1550 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1551 Rather, it means that for value 0 VAR should be ~[0, 0]
1552 and for value 1, VAR should be ~[1, 1]. We cannot
1553 represent these ranges.
1554
1555 The only situation in which we can build a valid
1556 anti-range is when LIMIT_VR is a single-valued range
1557 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1558 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1559 if (limit_vr
1560 && limit_vr->type == VR_RANGE
1561 && compare_values (limit_vr->min, limit_vr->max) == 0)
1562 {
1563 min = limit_vr->min;
1564 max = limit_vr->max;
1565 }
1566 else
1567 {
1568 /* In any other case, we cannot use LIMIT's range to build a
1569 valid anti-range. */
1570 min = max = limit;
1571 }
1572
1573 /* If MIN and MAX cover the whole range for their type, then
1574 just use the original LIMIT. */
1575 if (INTEGRAL_TYPE_P (type)
1576 && vrp_val_is_min (min)
1577 && vrp_val_is_max (max))
1578 min = max = limit;
1579
1580 set_value_range (vr_p, VR_ANTI_RANGE, min, max, vr_p->equiv);
1581 }
1582 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1583 {
1584 min = TYPE_MIN_VALUE (type);
1585
1586 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1587 max = limit;
1588 else
1589 {
1590 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1591 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1592 LT_EXPR. */
1593 max = limit_vr->max;
1594 }
1595
1596 /* If the maximum value forces us to be out of bounds, simply punt.
1597 It would be pointless to try and do anything more since this
1598 all should be optimized away above us. */
1599 if ((cond_code == LT_EXPR
1600 && compare_values (max, min) == 0)
1601 || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max)))
1602 set_value_range_to_varying (vr_p);
1603 else
1604 {
1605 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1606 if (cond_code == LT_EXPR)
1607 {
1608 tree one = build_int_cst (type, 1);
1609 max = fold_build2 (MINUS_EXPR, type, max, one);
1610 if (EXPR_P (max))
1611 TREE_NO_WARNING (max) = 1;
1612 }
1613
1614 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1615 }
1616 }
1617 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1618 {
1619 max = TYPE_MAX_VALUE (type);
1620
1621 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1622 min = limit;
1623 else
1624 {
1625 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1626 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1627 GT_EXPR. */
1628 min = limit_vr->min;
1629 }
1630
1631 /* If the minimum value forces us to be out of bounds, simply punt.
1632 It would be pointless to try and do anything more since this
1633 all should be optimized away above us. */
1634 if ((cond_code == GT_EXPR
1635 && compare_values (min, max) == 0)
1636 || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min)))
1637 set_value_range_to_varying (vr_p);
1638 else
1639 {
1640 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1641 if (cond_code == GT_EXPR)
1642 {
1643 tree one = build_int_cst (type, 1);
1644 min = fold_build2 (PLUS_EXPR, type, min, one);
1645 if (EXPR_P (min))
1646 TREE_NO_WARNING (min) = 1;
1647 }
1648
1649 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1650 }
1651 }
1652 else
1653 gcc_unreachable ();
1654
1655 /* If VAR already had a known range, it may happen that the new
1656 range we have computed and VAR's range are not compatible. For
1657 instance,
1658
1659 if (p_5 == NULL)
1660 p_6 = ASSERT_EXPR <p_5, p_5 == NULL>;
1661 x_7 = p_6->fld;
1662 p_8 = ASSERT_EXPR <p_6, p_6 != NULL>;
1663
1664 While the above comes from a faulty program, it will cause an ICE
1665 later because p_8 and p_6 will have incompatible ranges and at
1666 the same time will be considered equivalent. A similar situation
1667 would arise from
1668
1669 if (i_5 > 10)
1670 i_6 = ASSERT_EXPR <i_5, i_5 > 10>;
1671 if (i_5 < 5)
1672 i_7 = ASSERT_EXPR <i_6, i_6 < 5>;
1673
1674 Again i_6 and i_7 will have incompatible ranges. It would be
1675 pointless to try and do anything with i_7's range because
1676 anything dominated by 'if (i_5 < 5)' will be optimized away.
1677 Note that, due to the way in which simulation proceeds, the statement
1678 i_7 = ASSERT_EXPR <...> would never be visited because the
1679 conditional 'if (i_5 < 5)' always evaluates to false. However,
1680 this extra check does not hurt and may protect against future
1681 changes to VRP that may get into a situation similar to the
1682 NULL pointer dereference example.
1683
1684 Note that these compatibility tests are only needed when dealing
1685 with ranges or a mix of range and anti-range. If VAR_VR and VR_P
1686 are both anti-ranges, they will always be compatible, because two
1687 anti-ranges will always have a non-empty intersection. */
1688
1689 var_vr = get_value_range (var);
1690
1691 /* We may need to make adjustments when VR_P and VAR_VR are numeric
1692 ranges or anti-ranges. */
1693 if (vr_p->type == VR_VARYING
1694 || vr_p->type == VR_UNDEFINED
1695 || var_vr->type == VR_VARYING
1696 || var_vr->type == VR_UNDEFINED
1697 || symbolic_range_p (vr_p)
1698 || symbolic_range_p (var_vr))
1699 return;
1700
1701 if (var_vr->type == VR_RANGE && vr_p->type == VR_RANGE)
1702 {
1703 /* If the two ranges have a non-empty intersection, we can
1704 refine the resulting range. Since the assert expression
1705 creates an equivalency and at the same time it asserts a
1706 predicate, we can take the intersection of the two ranges to
1707 get better precision. */
1708 if (value_ranges_intersect_p (var_vr, vr_p))
1709 {
1710 /* Use the larger of the two minimums. */
1711 if (compare_values (vr_p->min, var_vr->min) == -1)
1712 min = var_vr->min;
1713 else
1714 min = vr_p->min;
1715
1716 /* Use the smaller of the two maximums. */
1717 if (compare_values (vr_p->max, var_vr->max) == 1)
1718 max = var_vr->max;
1719 else
1720 max = vr_p->max;
1721
1722 set_value_range (vr_p, vr_p->type, min, max, vr_p->equiv);
1723 }
1724 else
1725 {
1726 /* The two ranges do not intersect, set the new range to
1727 VARYING, because we will not be able to do anything
1728 meaningful with it. */
1729 set_value_range_to_varying (vr_p);
1730 }
1731 }
1732 else if ((var_vr->type == VR_RANGE && vr_p->type == VR_ANTI_RANGE)
1733 || (var_vr->type == VR_ANTI_RANGE && vr_p->type == VR_RANGE))
1734 {
1735 /* A range and an anti-range will cancel each other only if
1736 their ends are the same. For instance, in the example above,
1737 p_8's range ~[0, 0] and p_6's range [0, 0] are incompatible,
1738 so VR_P should be set to VR_VARYING. */
1739 if (compare_values (var_vr->min, vr_p->min) == 0
1740 && compare_values (var_vr->max, vr_p->max) == 0)
1741 set_value_range_to_varying (vr_p);
1742 else
1743 {
1744 tree min, max, anti_min, anti_max, real_min, real_max;
1745 int cmp;
1746
1747 /* We want to compute the logical AND of the two ranges;
1748 there are three cases to consider.
1749
1750
1751 1. The VR_ANTI_RANGE range is completely within the
1752 VR_RANGE and the endpoints of the ranges are
1753 different. In that case the resulting range
1754 should be whichever range is more precise.
1755 Typically that will be the VR_RANGE.
1756
1757 2. The VR_ANTI_RANGE is completely disjoint from
1758 the VR_RANGE. In this case the resulting range
1759 should be the VR_RANGE.
1760
1761 3. There is some overlap between the VR_ANTI_RANGE
1762 and the VR_RANGE.
1763
1764 3a. If the high limit of the VR_ANTI_RANGE resides
1765 within the VR_RANGE, then the result is a new
1766 VR_RANGE starting at the high limit of the
1767 VR_ANTI_RANGE + 1 and extending to the
1768 high limit of the original VR_RANGE.
1769
1770 3b. If the low limit of the VR_ANTI_RANGE resides
1771 within the VR_RANGE, then the result is a new
1772 VR_RANGE starting at the low limit of the original
1773 VR_RANGE and extending to the low limit of the
1774 VR_ANTI_RANGE - 1. */
1775 if (vr_p->type == VR_ANTI_RANGE)
1776 {
1777 anti_min = vr_p->min;
1778 anti_max = vr_p->max;
1779 real_min = var_vr->min;
1780 real_max = var_vr->max;
1781 }
1782 else
1783 {
1784 anti_min = var_vr->min;
1785 anti_max = var_vr->max;
1786 real_min = vr_p->min;
1787 real_max = vr_p->max;
1788 }
1789
1790
1791 /* Case 1, VR_ANTI_RANGE completely within VR_RANGE,
1792 not including any endpoints. */
1793 if (compare_values (anti_max, real_max) == -1
1794 && compare_values (anti_min, real_min) == 1)
1795 {
1796 /* If the range is covering the whole valid range of
1797 the type keep the anti-range. */
1798 if (!vrp_val_is_min (real_min)
1799 || !vrp_val_is_max (real_max))
1800 set_value_range (vr_p, VR_RANGE, real_min,
1801 real_max, vr_p->equiv);
1802 }
1803 /* Case 2, VR_ANTI_RANGE completely disjoint from
1804 VR_RANGE. */
1805 else if (compare_values (anti_min, real_max) == 1
1806 || compare_values (anti_max, real_min) == -1)
1807 {
1808 set_value_range (vr_p, VR_RANGE, real_min,
1809 real_max, vr_p->equiv);
1810 }
1811 /* Case 3a, the anti-range extends into the low
1812 part of the real range. Thus creating a new
1813 low bound for the real range. */
1814 else if (((cmp = compare_values (anti_max, real_min)) == 1
1815 || cmp == 0)
1816 && compare_values (anti_max, real_max) == -1)
1817 {
1818 gcc_assert (!is_positive_overflow_infinity (anti_max));
1819 if (needs_overflow_infinity (TREE_TYPE (anti_max))
1820 && vrp_val_is_max (anti_max))
1821 {
1822 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1823 {
1824 set_value_range_to_varying (vr_p);
1825 return;
1826 }
1827 min = positive_overflow_infinity (TREE_TYPE (var_vr->min));
1828 }
1829 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1830 min = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min),
1831 anti_max,
1832 build_int_cst (TREE_TYPE (var_vr->min), 1));
1833 else
1834 min = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (var_vr->min),
1835 anti_max, size_int (1));
1836 max = real_max;
1837 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1838 }
1839 /* Case 3b, the anti-range extends into the high
1840 part of the real range, creating a new upper
1841 bound for the real range. */
1842 else if (compare_values (anti_min, real_min) == 1
1843 && ((cmp = compare_values (anti_min, real_max)) == -1
1844 || cmp == 0))
1845 {
1846 gcc_assert (!is_negative_overflow_infinity (anti_min));
1847 if (needs_overflow_infinity (TREE_TYPE (anti_min))
1848 && vrp_val_is_min (anti_min))
1849 {
1850 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1851 {
1852 set_value_range_to_varying (vr_p);
1853 return;
1854 }
1855 max = negative_overflow_infinity (TREE_TYPE (var_vr->min));
1856 }
1857 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1858 max = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min),
1859 anti_min,
1860 build_int_cst (TREE_TYPE (var_vr->min), 1));
1861 else
1862 max = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (var_vr->min),
1863 anti_min,
1864 size_int (-1));
1865 min = real_min;
1866 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1867 }
1868 }
1869 }
1870 }
1871
1872
1873 /* Extract range information from SSA name VAR and store it in VR. If
1874 VAR has an interesting range, use it. Otherwise, create the
1875 range [VAR, VAR] and return it. This is useful in situations where
1876 we may have conditionals testing values of VARYING names. For
1877 instance,
1878
1879 x_3 = y_5;
1880 if (x_3 > y_5)
1881 ...
1882
1883 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1884 always false. */
1885
1886 static void
1887 extract_range_from_ssa_name (value_range_t *vr, tree var)
1888 {
1889 value_range_t *var_vr = get_value_range (var);
1890
1891 if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
1892 copy_value_range (vr, var_vr);
1893 else
1894 set_value_range (vr, VR_RANGE, var, var, NULL);
1895
1896 add_equivalence (&vr->equiv, var);
1897 }
1898
1899
1900 /* Wrapper around int_const_binop. If the operation overflows and we
1901 are not using wrapping arithmetic, then adjust the result to be
1902 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
1903 NULL_TREE if we need to use an overflow infinity representation but
1904 the type does not support it. */
1905
1906 static tree
1907 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
1908 {
1909 tree res;
1910
1911 res = int_const_binop (code, val1, val2, 0);
1912
1913 /* For types that wrap on overflow, detect overflow explicitly by
1914 comparing the result against the operands; non-wrapping types are handled symbolically as -INF and +INF below. */
1915 if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
1916 {
1917 int checkz = compare_values (res, val1);
1918 bool overflow = false;
1919
1920 /* Ensure that res = val1 [+*] val2 >= val1
1921 or that res = val1 - val2 <= val1. */
1922 if ((code == PLUS_EXPR
1923 && !(checkz == 1 || checkz == 0))
1924 || (code == MINUS_EXPR
1925 && !(checkz == 0 || checkz == -1)))
1926 {
1927 overflow = true;
1928 }
1929 /* Checking for multiplication overflow is done by dividing the
1930 output of the multiplication by the first input of the
1931 multiplication. If the result of that division operation is
1932 not equal to the second input of the multiplication, then the
1933 multiplication overflowed. */
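/* For instance, with 32-bit unsigned operands, 0x10000 * 0x10000
   wraps to 0; dividing 0 by 0x10000 gives 0, which is not equal to
   the second operand, so the overflow is detected and flagged on
   the result below.  */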
1934 else if (code == MULT_EXPR && !integer_zerop (val1))
1935 {
1936 tree tmp = int_const_binop (TRUNC_DIV_EXPR,
1937 res,
1938 val1, 0);
1939 int check = compare_values (tmp, val2);
1940
1941 if (check != 0)
1942 overflow = true;
1943 }
1944
1945 if (overflow)
1946 {
1947 res = copy_node (res);
1948 TREE_OVERFLOW (res) = 1;
1949 }
1950
1951 }
1952 else if ((TREE_OVERFLOW (res)
1953 && !TREE_OVERFLOW (val1)
1954 && !TREE_OVERFLOW (val2))
1955 || is_overflow_infinity (val1)
1956 || is_overflow_infinity (val2))
1957 {
1958 /* If the operation overflowed but neither VAL1 nor VAL2 are
1959 overflown, return -INF or +INF depending on the operation
1960 and the combination of signs of the operands. */
1961 int sgn1 = tree_int_cst_sgn (val1);
1962 int sgn2 = tree_int_cst_sgn (val2);
1963
1964 if (needs_overflow_infinity (TREE_TYPE (res))
1965 && !supports_overflow_infinity (TREE_TYPE (res)))
1966 return NULL_TREE;
1967
1968 /* We have to punt on adding infinities of different signs,
1969 since we can't tell what the sign of the result should be.
1970 Likewise for subtracting infinities of the same sign. */
1971 if (((code == PLUS_EXPR && sgn1 != sgn2)
1972 || (code == MINUS_EXPR && sgn1 == sgn2))
1973 && is_overflow_infinity (val1)
1974 && is_overflow_infinity (val2))
1975 return NULL_TREE;
1976
1977 /* Don't try to handle division or shifting of infinities. */
1978 if ((code == TRUNC_DIV_EXPR
1979 || code == FLOOR_DIV_EXPR
1980 || code == CEIL_DIV_EXPR
1981 || code == EXACT_DIV_EXPR
1982 || code == ROUND_DIV_EXPR
1983 || code == RSHIFT_EXPR)
1984 && (is_overflow_infinity (val1)
1985 || is_overflow_infinity (val2)))
1986 return NULL_TREE;
1987
1988 /* Notice that we only need to handle the restricted set of
1989 operations handled by extract_range_from_binary_expr.
1990 Among them, only multiplication, addition and subtraction
1991 can yield overflow without overflown operands because we
1992 are working with integral types only... except in the
1993 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
1994 for division too. */
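/* For instance, if adding two positive constants of a signed type
   overflows, sgn1 == sgn2 >= 0 and the condition below yields +INF
   (or TYPE_MAX_VALUE when no overflow infinity is needed).  */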
1995
1996 /* For multiplication, the sign of the overflow is given
1997 by the comparison of the signs of the operands. */
1998 if ((code == MULT_EXPR && sgn1 == sgn2)
1999 /* For addition, the operands must be of the same sign
2000 to yield an overflow. Its sign is therefore that
2001 of one of the operands, for example the first. For
2002 infinite operands X + -INF is negative, not positive. */
2003 || (code == PLUS_EXPR
2004 && (sgn1 >= 0
2005 ? !is_negative_overflow_infinity (val2)
2006 : is_positive_overflow_infinity (val2)))
2007 /* For subtraction, non-infinite operands must be of
2008 different signs to yield an overflow. Its sign is
2009 therefore that of the first operand or the opposite of
2010 that of the second operand. A first operand of 0 counts
2011 as positive here, for the corner case 0 - (-INF), which
2012 overflows, but must yield +INF. For infinite operands 0
2013 - INF is negative, not positive. */
2014 || (code == MINUS_EXPR
2015 && (sgn1 >= 0
2016 ? !is_positive_overflow_infinity (val2)
2017 : is_negative_overflow_infinity (val2)))
2018 /* We only get in here with positive shift count, so the
2019 overflow direction is the same as the sign of val1.
2020 Actually rshift does not overflow at all, but we only
2021 handle the case of shifting overflowed -INF and +INF. */
2022 || (code == RSHIFT_EXPR
2023 && sgn1 >= 0)
2024 /* For division, the only case is -INF / -1 = +INF. */
2025 || code == TRUNC_DIV_EXPR
2026 || code == FLOOR_DIV_EXPR
2027 || code == CEIL_DIV_EXPR
2028 || code == EXACT_DIV_EXPR
2029 || code == ROUND_DIV_EXPR)
2030 return (needs_overflow_infinity (TREE_TYPE (res))
2031 ? positive_overflow_infinity (TREE_TYPE (res))
2032 : TYPE_MAX_VALUE (TREE_TYPE (res)));
2033 else
2034 return (needs_overflow_infinity (TREE_TYPE (res))
2035 ? negative_overflow_infinity (TREE_TYPE (res))
2036 : TYPE_MIN_VALUE (TREE_TYPE (res)));
2037 }
2038
2039 return res;
2040 }
2041
2042
2043 /* Extract range information from a binary expression EXPR based on
2044 the ranges of each of its operands and the expression code. */
2045
2046 static void
2047 extract_range_from_binary_expr (value_range_t *vr,
2048 enum tree_code code,
2049 tree expr_type, tree op0, tree op1)
2050 {
2051 enum value_range_type type;
2052 tree min, max;
2053 int cmp;
2054 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2055 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2056
2057 /* Not all binary expressions can be applied to ranges in a
2058 meaningful way. Handle only arithmetic operations. */
2059 if (code != PLUS_EXPR
2060 && code != MINUS_EXPR
2061 && code != POINTER_PLUS_EXPR
2062 && code != MULT_EXPR
2063 && code != TRUNC_DIV_EXPR
2064 && code != FLOOR_DIV_EXPR
2065 && code != CEIL_DIV_EXPR
2066 && code != EXACT_DIV_EXPR
2067 && code != ROUND_DIV_EXPR
2068 && code != RSHIFT_EXPR
2069 && code != MIN_EXPR
2070 && code != MAX_EXPR
2071 && code != BIT_AND_EXPR
2072 && code != BIT_IOR_EXPR
2073 && code != TRUTH_AND_EXPR
2074 && code != TRUTH_OR_EXPR)
2075 {
2076 /* We can still do constant propagation here. */
2077 tree const_op0 = op_with_constant_singleton_value_range (op0);
2078 tree const_op1 = op_with_constant_singleton_value_range (op1);
2079 if (const_op0 || const_op1)
2080 {
2081 tree tem = fold_binary (code, expr_type,
2082 const_op0 ? const_op0 : op0,
2083 const_op1 ? const_op1 : op1);
2084 if (tem
2085 && is_gimple_min_invariant (tem)
2086 && !is_overflow_infinity (tem))
2087 {
2088 set_value_range (vr, VR_RANGE, tem, tem, NULL);
2089 return;
2090 }
2091 }
2092 set_value_range_to_varying (vr);
2093 return;
2094 }
2095
2096 /* Get value ranges for each operand. For constant operands, create
2097 a new value range with the operand to simplify processing. */
2098 if (TREE_CODE (op0) == SSA_NAME)
2099 vr0 = *(get_value_range (op0));
2100 else if (is_gimple_min_invariant (op0))
2101 set_value_range_to_value (&vr0, op0, NULL);
2102 else
2103 set_value_range_to_varying (&vr0);
2104
2105 if (TREE_CODE (op1) == SSA_NAME)
2106 vr1 = *(get_value_range (op1));
2107 else if (is_gimple_min_invariant (op1))
2108 set_value_range_to_value (&vr1, op1, NULL);
2109 else
2110 set_value_range_to_varying (&vr1);
2111
2112 /* If either range is UNDEFINED, so is the result. */
2113 if (vr0.type == VR_UNDEFINED || vr1.type == VR_UNDEFINED)
2114 {
2115 set_value_range_to_undefined (vr);
2116 return;
2117 }
2118
2119 /* The type of the resulting value range defaults to VR0.TYPE. */
2120 type = vr0.type;
2121
2122 /* Refuse to operate on VARYING ranges, ranges of different kinds
2123 and symbolic ranges. As an exception, we allow BIT_AND_EXPR
2124 because we may be able to derive a useful range even if one of
2125 the operands is VR_VARYING or a symbolic range. Similarly for
2126 the TRUTH_*_EXPRs and divisions. TODO, we may be able to derive
2127 anti-ranges in some cases. */
2128 if (code != BIT_AND_EXPR
2129 && code != TRUTH_AND_EXPR
2130 && code != TRUTH_OR_EXPR
2131 && code != TRUNC_DIV_EXPR
2132 && code != FLOOR_DIV_EXPR
2133 && code != CEIL_DIV_EXPR
2134 && code != EXACT_DIV_EXPR
2135 && code != ROUND_DIV_EXPR
2136 && (vr0.type == VR_VARYING
2137 || vr1.type == VR_VARYING
2138 || vr0.type != vr1.type
2139 || symbolic_range_p (&vr0)
2140 || symbolic_range_p (&vr1)))
2141 {
2142 set_value_range_to_varying (vr);
2143 return;
2144 }
2145
2146 /* Now evaluate the expression to determine the new range. */
2147 if (POINTER_TYPE_P (expr_type)
2148 || POINTER_TYPE_P (TREE_TYPE (op0))
2149 || POINTER_TYPE_P (TREE_TYPE (op1)))
2150 {
2151 if (code == MIN_EXPR || code == MAX_EXPR)
2152 {
2153 /* For MIN/MAX expressions with pointers, we only care about
2154 nullness: if both are non-null, then the result is non-null.
2155 If both are null, then the result is null. Otherwise the
2156 result is varying. */
2157 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2158 set_value_range_to_nonnull (vr, expr_type);
2159 else if (range_is_null (&vr0) && range_is_null (&vr1))
2160 set_value_range_to_null (vr, expr_type);
2161 else
2162 set_value_range_to_varying (vr);
2163
2164 return;
2165 }
2166 gcc_assert (code == POINTER_PLUS_EXPR);
2167 /* For pointer types, we are really only interested in asserting
2168 whether the expression evaluates to non-NULL. */
2169 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2170 set_value_range_to_nonnull (vr, expr_type);
2171 else if (range_is_null (&vr0) && range_is_null (&vr1))
2172 set_value_range_to_null (vr, expr_type);
2173 else
2174 set_value_range_to_varying (vr);
2175
2176 return;
2177 }
2178
2179 /* For integer ranges, apply the operation to each end of the
2180 range and see what we end up with. */
2181 if (code == TRUTH_AND_EXPR
2182 || code == TRUTH_OR_EXPR)
2183 {
2184 /* If one of the operands is zero, we know that the whole
2185 expression evaluates to zero. */
2186 if (code == TRUTH_AND_EXPR
2187 && ((vr0.type == VR_RANGE
2188 && integer_zerop (vr0.min)
2189 && integer_zerop (vr0.max))
2190 || (vr1.type == VR_RANGE
2191 && integer_zerop (vr1.min)
2192 && integer_zerop (vr1.max))))
2193 {
2194 type = VR_RANGE;
2195 min = max = build_int_cst (expr_type, 0);
2196 }
2197 /* If one of the operands is one, we know that the whole
2198 expression evaluates to one. */
2199 else if (code == TRUTH_OR_EXPR
2200 && ((vr0.type == VR_RANGE
2201 && integer_onep (vr0.min)
2202 && integer_onep (vr0.max))
2203 || (vr1.type == VR_RANGE
2204 && integer_onep (vr1.min)
2205 && integer_onep (vr1.max))))
2206 {
2207 type = VR_RANGE;
2208 min = max = build_int_cst (expr_type, 1);
2209 }
2210 else if (vr0.type != VR_VARYING
2211 && vr1.type != VR_VARYING
2212 && vr0.type == vr1.type
2213 && !symbolic_range_p (&vr0)
2214 && !overflow_infinity_range_p (&vr0)
2215 && !symbolic_range_p (&vr1)
2216 && !overflow_infinity_range_p (&vr1))
2217 {
2218 /* Boolean expressions cannot be folded with int_const_binop. */
2219 min = fold_binary (code, expr_type, vr0.min, vr1.min);
2220 max = fold_binary (code, expr_type, vr0.max, vr1.max);
2221 }
2222 else
2223 {
2224 /* The result of a TRUTH_*_EXPR is always true or false. */
2225 set_value_range_to_truthvalue (vr, expr_type);
2226 return;
2227 }
2228 }
2229 else if (code == PLUS_EXPR
2230 || code == MIN_EXPR
2231 || code == MAX_EXPR)
2232 {
2233 /* If we have a PLUS_EXPR with two VR_ANTI_RANGEs, drop to
2234 VR_VARYING. It would take more effort to compute a precise
2235 range for such a case. For example, if we have op0 == 1 and
2236 op1 == -1 with their ranges both being ~[0,0], we would have
2237 op0 + op1 == 0, so we cannot claim that the sum is in ~[0,0].
2238 Note that we are guaranteed to have vr0.type == vr1.type at
2239 this point. */
2240 if (code == PLUS_EXPR && vr0.type == VR_ANTI_RANGE)
2241 {
2242 set_value_range_to_varying (vr);
2243 return;
2244 }
2245
2246 /* For operations that make the resulting range directly
2247 proportional to the original ranges, apply the operation to
2248 the same end of each range. */
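/* E.g., [1, 5] + [2, 10] yields [3, 15], and MIN_EXPR of the same
   two ranges yields [1, 5].  */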
2249 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2250 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2251
2252 /* If both additions overflowed, the range kind is still correct.
2253 This happens regularly with subtracting something in unsigned
2254 arithmetic.
2255 ??? See PR30318 for all the cases we do not handle. */
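/* E.g., for a 32-bit unsigned type, [5, 10] + [0xffffffff, 0xffffffff]
   (i.e. subtracting one) wraps both bounds and still gives the
   correct range [4, 9].  */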
2256 if (code == PLUS_EXPR
2257 && (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2258 && (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2259 {
2260 min = build_int_cst_wide (TREE_TYPE (min),
2261 TREE_INT_CST_LOW (min),
2262 TREE_INT_CST_HIGH (min));
2263 max = build_int_cst_wide (TREE_TYPE (max),
2264 TREE_INT_CST_LOW (max),
2265 TREE_INT_CST_HIGH (max));
2266 }
2267 }
2268 else if (code == MULT_EXPR
2269 || code == TRUNC_DIV_EXPR
2270 || code == FLOOR_DIV_EXPR
2271 || code == CEIL_DIV_EXPR
2272 || code == EXACT_DIV_EXPR
2273 || code == ROUND_DIV_EXPR
2274 || code == RSHIFT_EXPR)
2275 {
2276 tree val[4];
2277 size_t i;
2278 bool sop;
2279
2280 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2281 drop to VR_VARYING. It would take more effort to compute a
2282 precise range for such a case. For example, if we have
2283 op0 == 65536 and op1 == 65536 with their ranges both being
2284 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2285 we cannot claim that the product is in ~[0,0]. Note that we
2286 are guaranteed to have vr0.type == vr1.type at this
2287 point. */
2288 if (code == MULT_EXPR
2289 && vr0.type == VR_ANTI_RANGE
2290 && !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0)))
2291 {
2292 set_value_range_to_varying (vr);
2293 return;
2294 }
2295
2296 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2297 then drop to VR_VARYING. Outside of this range we get undefined
2298 behavior from the shift operation. We cannot even trust
2299 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2300 shifts, and the operation at the tree level may be widened. */
2301 if (code == RSHIFT_EXPR)
2302 {
2303 if (vr1.type == VR_ANTI_RANGE
2304 || !vrp_expr_computes_nonnegative (op1, &sop)
2305 || (operand_less_p
2306 (build_int_cst (TREE_TYPE (vr1.max),
2307 TYPE_PRECISION (expr_type) - 1),
2308 vr1.max) != 0))
2309 {
2310 set_value_range_to_varying (vr);
2311 return;
2312 }
2313 }
2314
2315 else if ((code == TRUNC_DIV_EXPR
2316 || code == FLOOR_DIV_EXPR
2317 || code == CEIL_DIV_EXPR
2318 || code == EXACT_DIV_EXPR
2319 || code == ROUND_DIV_EXPR)
2320 && (vr0.type != VR_RANGE || symbolic_range_p (&vr0)))
2321 {
2322 /* For division, if op1 has VR_RANGE but op0 does not, something
2323 can be deduced just from that range. Say [min, max] / [4, max]
2324 gives the range [min / 4, max / 4]. */
2325 if (vr1.type == VR_RANGE
2326 && !symbolic_range_p (&vr1)
2327 && !range_includes_zero_p (&vr1))
2328 {
2329 vr0.type = type = VR_RANGE;
2330 vr0.min = vrp_val_min (TREE_TYPE (op0));
2331 vr0.max = vrp_val_max (TREE_TYPE (op0));
2332 }
2333 else
2334 {
2335 set_value_range_to_varying (vr);
2336 return;
2337 }
2338 }
2339
2340 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2341 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2342 include 0. */
2343 if ((code == TRUNC_DIV_EXPR
2344 || code == FLOOR_DIV_EXPR
2345 || code == CEIL_DIV_EXPR
2346 || code == EXACT_DIV_EXPR
2347 || code == ROUND_DIV_EXPR)
2348 && vr0.type == VR_RANGE
2349 && (vr1.type != VR_RANGE
2350 || symbolic_range_p (&vr1)
2351 || range_includes_zero_p (&vr1)))
2352 {
2353 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2354 int cmp;
2355
2356 sop = false;
2357 min = NULL_TREE;
2358 max = NULL_TREE;
2359 if (vrp_expr_computes_nonnegative (op1, &sop) && !sop)
2360 {
2361 /* For unsigned division or when divisor is known
2362 to be non-negative, the range has to cover
2363 all numbers from 0 to max for positive max
2364 and all numbers from min to 0 for negative min. */
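/* E.g., when op0 is [4, 20] and the divisor is only known to be
   non-negative, the result is conservatively [0, 20].  */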
2365 cmp = compare_values (vr0.max, zero);
2366 if (cmp == -1)
2367 max = zero;
2368 else if (cmp == 0 || cmp == 1)
2369 max = vr0.max;
2370 else
2371 type = VR_VARYING;
2372 cmp = compare_values (vr0.min, zero);
2373 if (cmp == 1)
2374 min = zero;
2375 else if (cmp == 0 || cmp == -1)
2376 min = vr0.min;
2377 else
2378 type = VR_VARYING;
2379 }
2380 else
2381 {
2382 /* Otherwise the range is -max .. max or min .. -min
2383 depending on which bound is bigger in absolute value,
2384 as the division can change the sign. */
2385 abs_extent_range (vr, vr0.min, vr0.max);
2386 return;
2387 }
2388 if (type == VR_VARYING)
2389 {
2390 set_value_range_to_varying (vr);
2391 return;
2392 }
2393 }
2394
2395 /* Multiplications and divisions are a bit tricky to handle,
2396 depending on the mix of signs we have in the two ranges, we
2397 need to operate on different values to get the minimum and
2398 maximum values for the new range. One approach is to figure
2399 out all the variations of range combinations and do the
2400 operations.
2401
2402 However, this involves several calls to compare_values and it
2403 is pretty convoluted. It's simpler to do the 4 operations
2404 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP
2405 MAX1) and then figure the smallest and largest values to form
2406 the new range. */
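/* E.g., [-3, 5] * [2, 4] computes the cross products -6, -12, 10
   and 20, giving the new range [-12, 20].  */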
2407 else
2408 {
2409 gcc_assert ((vr0.type == VR_RANGE
2410 || (code == MULT_EXPR && vr0.type == VR_ANTI_RANGE))
2411 && vr0.type == vr1.type);
2412
2413 /* Compute the 4 cross operations. */
2414 sop = false;
2415 val[0] = vrp_int_const_binop (code, vr0.min, vr1.min);
2416 if (val[0] == NULL_TREE)
2417 sop = true;
2418
2419 if (vr1.max == vr1.min)
2420 val[1] = NULL_TREE;
2421 else
2422 {
2423 val[1] = vrp_int_const_binop (code, vr0.min, vr1.max);
2424 if (val[1] == NULL_TREE)
2425 sop = true;
2426 }
2427
2428 if (vr0.max == vr0.min)
2429 val[2] = NULL_TREE;
2430 else
2431 {
2432 val[2] = vrp_int_const_binop (code, vr0.max, vr1.min);
2433 if (val[2] == NULL_TREE)
2434 sop = true;
2435 }
2436
2437 if (vr0.min == vr0.max || vr1.min == vr1.max)
2438 val[3] = NULL_TREE;
2439 else
2440 {
2441 val[3] = vrp_int_const_binop (code, vr0.max, vr1.max);
2442 if (val[3] == NULL_TREE)
2443 sop = true;
2444 }
2445
2446 if (sop)
2447 {
2448 set_value_range_to_varying (vr);
2449 return;
2450 }
2451
2452 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2453 of VAL[i]. */
2454 min = val[0];
2455 max = val[0];
2456 for (i = 1; i < 4; i++)
2457 {
2458 if (!is_gimple_min_invariant (min)
2459 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2460 || !is_gimple_min_invariant (max)
2461 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2462 break;
2463
2464 if (val[i])
2465 {
2466 if (!is_gimple_min_invariant (val[i])
2467 || (TREE_OVERFLOW (val[i])
2468 && !is_overflow_infinity (val[i])))
2469 {
2470 /* If we found an overflowed value, set MIN and MAX
2471 to it so that we set the resulting range to
2472 VARYING. */
2473 min = max = val[i];
2474 break;
2475 }
2476
2477 if (compare_values (val[i], min) == -1)
2478 min = val[i];
2479
2480 if (compare_values (val[i], max) == 1)
2481 max = val[i];
2482 }
2483 }
2484 }
2485 }
2486 else if (code == MINUS_EXPR)
2487 {
2488 /* If we have a MINUS_EXPR with two VR_ANTI_RANGEs, drop to
2489 VR_VARYING. It would take more effort to compute a precise
2490 range for such a case. For example, if we have op0 == 1 and
2491 op1 == 1 with their ranges both being ~[0,0], we would have
2492 op0 - op1 == 0, so we cannot claim that the difference is in
2493 ~[0,0]. Note that we are guaranteed to have
2494 vr0.type == vr1.type at this point. */
2495 if (vr0.type == VR_ANTI_RANGE)
2496 {
2497 set_value_range_to_varying (vr);
2498 return;
2499 }
2500
2501 /* For MINUS_EXPR, apply the operation to the opposite ends of
2502 each range. */
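/* E.g., [1, 10] - [2, 5] yields [1 - 5, 10 - 2] = [-4, 8].  */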
2503 min = vrp_int_const_binop (code, vr0.min, vr1.max);
2504 max = vrp_int_const_binop (code, vr0.max, vr1.min);
2505 }
2506 else if (code == BIT_AND_EXPR)
2507 {
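/* When either operand is a single non-negative constant C, the
   conservative result of the AND is [0, C]; e.g., x & 0xff always
   lies in [0, 255].  */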
2508 if (vr0.type == VR_RANGE
2509 && vr0.min == vr0.max
2510 && TREE_CODE (vr0.max) == INTEGER_CST
2511 && !TREE_OVERFLOW (vr0.max)
2512 && tree_int_cst_sgn (vr0.max) >= 0)
2513 {
2514 min = build_int_cst (expr_type, 0);
2515 max = vr0.max;
2516 }
2517 else if (vr1.type == VR_RANGE
2518 && vr1.min == vr1.max
2519 && TREE_CODE (vr1.max) == INTEGER_CST
2520 && !TREE_OVERFLOW (vr1.max)
2521 && tree_int_cst_sgn (vr1.max) >= 0)
2522 {
2523 type = VR_RANGE;
2524 min = build_int_cst (expr_type, 0);
2525 max = vr1.max;
2526 }
2527 else
2528 {
2529 set_value_range_to_varying (vr);
2530 return;
2531 }
2532 }
2533 else if (code == BIT_IOR_EXPR)
2534 {
2535 if (vr0.type == VR_RANGE
2536 && vr1.type == VR_RANGE
2537 && TREE_CODE (vr0.min) == INTEGER_CST
2538 && TREE_CODE (vr1.min) == INTEGER_CST
2539 && TREE_CODE (vr0.max) == INTEGER_CST
2540 && TREE_CODE (vr1.max) == INTEGER_CST
2541 && tree_int_cst_sgn (vr0.min) >= 0
2542 && tree_int_cst_sgn (vr1.min) >= 0)
2543 {
2544 double_int vr0_max = tree_to_double_int (vr0.max);
2545 double_int vr1_max = tree_to_double_int (vr1.max);
2546 double_int ior_max;
2547
2548 /* Set all bits to the right of the most significant one to 1.
2549 For example, [0, 4] | [4, 4] = [4, 7]. */
2550 ior_max.low = vr0_max.low | vr1_max.low;
2551 ior_max.high = vr0_max.high | vr1_max.high;
2552 if (ior_max.high != 0)
2553 {
2554 ior_max.low = ~(unsigned HOST_WIDE_INT)0u;
2555 ior_max.high |= ((HOST_WIDE_INT) 1
2556 << floor_log2 (ior_max.high)) - 1;
2557 }
2558 else if (ior_max.low != 0)
2559 ior_max.low |= ((unsigned HOST_WIDE_INT) 1u
2560 << floor_log2 (ior_max.low)) - 1;
2561
2562 /* Both of these endpoints are conservative. */
2563 min = vrp_int_const_binop (MAX_EXPR, vr0.min, vr1.min);
2564 max = double_int_to_tree (expr_type, ior_max);
2565 }
2566 else
2567 {
2568 set_value_range_to_varying (vr);
2569 return;
2570 }
2571 }
2572 else
2573 gcc_unreachable ();
2574
2575 /* If either MIN or MAX overflowed, then set the resulting range to
2576 VARYING. But we do accept an overflow infinity
2577 representation. */
2578 if (min == NULL_TREE
2579 || !is_gimple_min_invariant (min)
2580 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2581 || max == NULL_TREE
2582 || !is_gimple_min_invariant (max)
2583 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2584 {
2585 set_value_range_to_varying (vr);
2586 return;
2587 }
2588
2589 /* We punt if:
2590 1) [-INF, +INF]
2591 2) [-INF, +-INF(OVF)]
2592 3) [+-INF(OVF), +INF]
2593 4) [+-INF(OVF), +-INF(OVF)]
2594 We learn nothing when we have INF and INF(OVF) on both sides.
2595 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2596 overflow. */
2597 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2598 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2599 {
2600 set_value_range_to_varying (vr);
2601 return;
2602 }
2603
2604 cmp = compare_values (min, max);
2605 if (cmp == -2 || cmp == 1)
2606 {
2607 /* If the new range has its limits swapped around (MIN > MAX),
2608 then the operation caused one of them to wrap around, mark
2609 the new range VARYING. */
2610 set_value_range_to_varying (vr);
2611 }
2612 else
2613 set_value_range (vr, type, min, max, NULL);
2614 }
2615
2616
2617 /* Extract range information from a unary expression EXPR based on
2618 the range of its operand and the expression code. */
2619
2620 static void
2621 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
2622 tree type, tree op0)
2623 {
2624 tree min, max;
2625 int cmp;
2626 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2627
2628 /* Refuse to operate on certain unary expressions for which we
2629 cannot easily determine a resulting range. */
2630 if (code == FIX_TRUNC_EXPR
2631 || code == FLOAT_EXPR
2632 || code == BIT_NOT_EXPR
2633 || code == CONJ_EXPR)
2634 {
2635 /* We can still do constant propagation here. */
2636 if ((op0 = op_with_constant_singleton_value_range (op0)) != NULL_TREE)
2637 {
2638 tree tem = fold_unary (code, type, op0);
2639 if (tem
2640 && is_gimple_min_invariant (tem)
2641 && !is_overflow_infinity (tem))
2642 {
2643 set_value_range (vr, VR_RANGE, tem, tem, NULL);
2644 return;
2645 }
2646 }
2647 set_value_range_to_varying (vr);
2648 return;
2649 }
2650
2651 /* Get value ranges for the operand. For constant operands, create
2652 a new value range with the operand to simplify processing. */
2653 if (TREE_CODE (op0) == SSA_NAME)
2654 vr0 = *(get_value_range (op0));
2655 else if (is_gimple_min_invariant (op0))
2656 set_value_range_to_value (&vr0, op0, NULL);
2657 else
2658 set_value_range_to_varying (&vr0);
2659
2660 /* If VR0 is UNDEFINED, so is the result. */
2661 if (vr0.type == VR_UNDEFINED)
2662 {
2663 set_value_range_to_undefined (vr);
2664 return;
2665 }
2666
2667 /* Refuse to operate on symbolic ranges, or if the operand is
2668 neither a pointer nor an integral type. */
2669 if ((!INTEGRAL_TYPE_P (TREE_TYPE (op0))
2670 && !POINTER_TYPE_P (TREE_TYPE (op0)))
2671 || (vr0.type != VR_VARYING
2672 && symbolic_range_p (&vr0)))
2673 {
2674 set_value_range_to_varying (vr);
2675 return;
2676 }
2677
2678 /* If the expression involves pointers, we are only interested in
2679 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
2680 if (POINTER_TYPE_P (type) || POINTER_TYPE_P (TREE_TYPE (op0)))
2681 {
2682 bool sop;
2683
2684 sop = false;
2685 if (range_is_nonnull (&vr0)
2686 || (tree_unary_nonzero_warnv_p (code, type, op0, &sop)
2687 && !sop))
2688 set_value_range_to_nonnull (vr, type);
2689 else if (range_is_null (&vr0))
2690 set_value_range_to_null (vr, type);
2691 else
2692 set_value_range_to_varying (vr);
2693
2694 return;
2695 }
2696
2697 /* Handle unary expressions on integer ranges. */
2698 if (CONVERT_EXPR_CODE_P (code)
2699 && INTEGRAL_TYPE_P (type)
2700 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2701 {
2702 tree inner_type = TREE_TYPE (op0);
2703 tree outer_type = type;
2704
2705 /* Always use base-types here. This is important for the
2706 correct signedness. */
2707 if (TREE_TYPE (inner_type))
2708 inner_type = TREE_TYPE (inner_type);
2709 if (TREE_TYPE (outer_type))
2710 outer_type = TREE_TYPE (outer_type);
2711
2712 /* If VR0 is varying and we increase the type precision, assume
2713 a full range for the following transformation. */
2714 if (vr0.type == VR_VARYING
2715 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
2716 {
2717 vr0.type = VR_RANGE;
2718 vr0.min = TYPE_MIN_VALUE (inner_type);
2719 vr0.max = TYPE_MAX_VALUE (inner_type);
2720 }
2721
2722 /* If VR0 is a constant range or anti-range and the conversion is
2723 not truncating we can convert the min and max values and
2724 canonicalize the resulting range. Otherwise we can do the
2725 conversion if the size of the range is less than what the
2726 precision of the target type can represent and the range is
2727 not an anti-range. */
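/* For instance, widening the 8-bit range [0, 100] to a 32-bit type
   keeps [0, 100], while truncating the 32-bit range [300, 400] to an
   8-bit unsigned type gives [44, 144], since 400 - 300 fits in 8 bits
   and the values wrap onto a contiguous range.  */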
2728 if ((vr0.type == VR_RANGE
2729 || vr0.type == VR_ANTI_RANGE)
2730 && TREE_CODE (vr0.min) == INTEGER_CST
2731 && TREE_CODE (vr0.max) == INTEGER_CST
2732 && !is_overflow_infinity (vr0.min)
2733 && !is_overflow_infinity (vr0.max)
2734 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
2735 || (vr0.type == VR_RANGE
2736 && integer_zerop (int_const_binop (RSHIFT_EXPR,
2737 int_const_binop (MINUS_EXPR, vr0.max, vr0.min, 0),
2738 size_int (TYPE_PRECISION (outer_type)), 0)))))
2739 {
2740 tree new_min, new_max;
2741 new_min = force_fit_type_double (outer_type,
2742 TREE_INT_CST_LOW (vr0.min),
2743 TREE_INT_CST_HIGH (vr0.min), 0, 0);
2744 new_max = force_fit_type_double (outer_type,
2745 TREE_INT_CST_LOW (vr0.max),
2746 TREE_INT_CST_HIGH (vr0.max), 0, 0);
2747 set_and_canonicalize_value_range (vr, vr0.type,
2748 new_min, new_max, NULL);
2749 return;
2750 }
2751
2752 set_value_range_to_varying (vr);
2753 return;
2754 }
2755
2756 /* Conversion of a VR_VARYING value to a wider type can result
2757 in a usable range. So wait until after we've handled conversions
2758 before dropping the result to VR_VARYING if we had a source
2759 operand that is VR_VARYING. */
2760 if (vr0.type == VR_VARYING)
2761 {
2762 set_value_range_to_varying (vr);
2763 return;
2764 }
2765
2766 /* Apply the operation to each end of the range and see what we end
2767 up with. */
2768 if (code == NEGATE_EXPR
2769 && !TYPE_UNSIGNED (type))
2770 {
2771 /* NEGATE_EXPR flips the range around. We need to treat
2772 TYPE_MIN_VALUE specially. */
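/* E.g., negating [-3, 7] gives [-7, 3].  TYPE_MIN_VALUE needs the
   special handling below because -TYPE_MIN_VALUE does not fit in
   the type.  */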
2773 if (is_positive_overflow_infinity (vr0.max))
2774 min = negative_overflow_infinity (type);
2775 else if (is_negative_overflow_infinity (vr0.max))
2776 min = positive_overflow_infinity (type);
2777 else if (!vrp_val_is_min (vr0.max))
2778 min = fold_unary_to_constant (code, type, vr0.max);
2779 else if (needs_overflow_infinity (type))
2780 {
2781 if (supports_overflow_infinity (type)
2782 && !is_overflow_infinity (vr0.min)
2783 && !vrp_val_is_min (vr0.min))
2784 min = positive_overflow_infinity (type);
2785 else
2786 {
2787 set_value_range_to_varying (vr);
2788 return;
2789 }
2790 }
2791 else
2792 min = TYPE_MIN_VALUE (type);
2793
2794 if (is_positive_overflow_infinity (vr0.min))
2795 max = negative_overflow_infinity (type);
2796 else if (is_negative_overflow_infinity (vr0.min))
2797 max = positive_overflow_infinity (type);
2798 else if (!vrp_val_is_min (vr0.min))
2799 max = fold_unary_to_constant (code, type, vr0.min);
2800 else if (needs_overflow_infinity (type))
2801 {
2802 if (supports_overflow_infinity (type))
2803 max = positive_overflow_infinity (type);
2804 else
2805 {
2806 set_value_range_to_varying (vr);
2807 return;
2808 }
2809 }
2810 else
2811 max = TYPE_MIN_VALUE (type);
2812 }
2813 else if (code == NEGATE_EXPR
2814 && TYPE_UNSIGNED (type))
2815 {
2816 if (!range_includes_zero_p (&vr0))
2817 {
2818 max = fold_unary_to_constant (code, type, vr0.min);
2819 min = fold_unary_to_constant (code, type, vr0.max);
2820 }
2821 else
2822 {
2823 if (range_is_null (&vr0))
2824 set_value_range_to_null (vr, type);
2825 else
2826 set_value_range_to_varying (vr);
2827 return;
2828 }
2829 }
2830 else if (code == ABS_EXPR
2831 && !TYPE_UNSIGNED (type))
2832 {
2833 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv, so we can't get a
2834 useful range. */
2835 if (!TYPE_OVERFLOW_UNDEFINED (type)
2836 && ((vr0.type == VR_RANGE
2837 && vrp_val_is_min (vr0.min))
2838 || (vr0.type == VR_ANTI_RANGE
2839 && !vrp_val_is_min (vr0.min)
2840 && !range_includes_zero_p (&vr0))))
2841 {
2842 set_value_range_to_varying (vr);
2843 return;
2844 }
2845
2846 /* ABS_EXPR may flip the range around, if the original range
2847 included negative values. */
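/* E.g., ABS of [-5, 3] becomes [0, 5] because the range contains
   zero, while ABS of [-7, -2] becomes [2, 7].  */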
2848 if (is_overflow_infinity (vr0.min))
2849 min = positive_overflow_infinity (type);
2850 else if (!vrp_val_is_min (vr0.min))
2851 min = fold_unary_to_constant (code, type, vr0.min);
2852 else if (!needs_overflow_infinity (type))
2853 min = TYPE_MAX_VALUE (type);
2854 else if (supports_overflow_infinity (type))
2855 min = positive_overflow_infinity (type);
2856 else
2857 {
2858 set_value_range_to_varying (vr);
2859 return;
2860 }
2861
2862 if (is_overflow_infinity (vr0.max))
2863 max = positive_overflow_infinity (type);
2864 else if (!vrp_val_is_min (vr0.max))
2865 max = fold_unary_to_constant (code, type, vr0.max);
2866 else if (!needs_overflow_infinity (type))
2867 max = TYPE_MAX_VALUE (type);
2868 else if (supports_overflow_infinity (type)
2869 /* We shouldn't generate [+INF, +INF] as set_value_range
2870 doesn't like this and ICEs. */
2871 && !is_positive_overflow_infinity (min))
2872 max = positive_overflow_infinity (type);
2873 else
2874 {
2875 set_value_range_to_varying (vr);
2876 return;
2877 }
2878
2879 cmp = compare_values (min, max);
2880
2881 /* If a VR_ANTI_RANGE contains zero, then we have
2882 ~[-INF, min(MIN, MAX)]. */
2883 if (vr0.type == VR_ANTI_RANGE)
2884 {
2885 if (range_includes_zero_p (&vr0))
2886 {
2887 /* Take the lower of the two values. */
2888 if (cmp != 1)
2889 max = min;
2890
2891 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
2892 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
2893 flag_wrapv is set and the original anti-range doesn't include
2894 TYPE_MIN_VALUE; remember that -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
2895 if (TYPE_OVERFLOW_WRAPS (type))
2896 {
2897 tree type_min_value = TYPE_MIN_VALUE (type);
2898
2899 min = (vr0.min != type_min_value
2900 ? int_const_binop (PLUS_EXPR, type_min_value,
2901 integer_one_node, 0)
2902 : type_min_value);
2903 }
2904 else
2905 {
2906 if (overflow_infinity_range_p (&vr0))
2907 min = negative_overflow_infinity (type);
2908 else
2909 min = TYPE_MIN_VALUE (type);
2910 }
2911 }
2912 else
2913 {
2914 /* All else has failed, so create the range [0, INF], even for
2915 flag_wrapv since TYPE_MIN_VALUE is in the original
2916 anti-range. */
2917 vr0.type = VR_RANGE;
2918 min = build_int_cst (type, 0);
2919 if (needs_overflow_infinity (type))
2920 {
2921 if (supports_overflow_infinity (type))
2922 max = positive_overflow_infinity (type);
2923 else
2924 {
2925 set_value_range_to_varying (vr);
2926 return;
2927 }
2928 }
2929 else
2930 max = TYPE_MAX_VALUE (type);
2931 }
2932 }
2933
2934 /* If the range contains zero then we know that the minimum value in the
2935 range will be zero. */
2936 else if (range_includes_zero_p (&vr0))
2937 {
2938 if (cmp == 1)
2939 max = min;
2940 min = build_int_cst (type, 0);
2941 }
2942 else
2943 {
2944 /* If the range was reversed, swap MIN and MAX. */
2945 if (cmp == 1)
2946 {
2947 tree t = min;
2948 min = max;
2949 max = t;
2950 }
2951 }
2952 }
2953 else
2954 {
2955 /* Otherwise, operate on each end of the range. */
2956 min = fold_unary_to_constant (code, type, vr0.min);
2957 max = fold_unary_to_constant (code, type, vr0.max);
2958
2959 if (needs_overflow_infinity (type))
2960 {
2961 gcc_assert (code != NEGATE_EXPR && code != ABS_EXPR);
2962
2963 /* If both sides have overflowed, we don't know
2964 anything. */
2965 if ((is_overflow_infinity (vr0.min)
2966 || TREE_OVERFLOW (min))
2967 && (is_overflow_infinity (vr0.max)
2968 || TREE_OVERFLOW (max)))
2969 {
2970 set_value_range_to_varying (vr);
2971 return;
2972 }
2973
2974 if (is_overflow_infinity (vr0.min))
2975 min = vr0.min;
2976 else if (TREE_OVERFLOW (min))
2977 {
2978 if (supports_overflow_infinity (type))
2979 min = (tree_int_cst_sgn (min) >= 0
2980 ? positive_overflow_infinity (TREE_TYPE (min))
2981 : negative_overflow_infinity (TREE_TYPE (min)));
2982 else
2983 {
2984 set_value_range_to_varying (vr);
2985 return;
2986 }
2987 }
2988
2989 if (is_overflow_infinity (vr0.max))
2990 max = vr0.max;
2991 else if (TREE_OVERFLOW (max))
2992 {
2993 if (supports_overflow_infinity (type))
2994 max = (tree_int_cst_sgn (max) >= 0
2995 ? positive_overflow_infinity (TREE_TYPE (max))
2996 : negative_overflow_infinity (TREE_TYPE (max)));
2997 else
2998 {
2999 set_value_range_to_varying (vr);
3000 return;
3001 }
3002 }
3003 }
3004 }
3005
3006 cmp = compare_values (min, max);
3007 if (cmp == -2 || cmp == 1)
3008 {
3009 /* If the new range has its limits swapped around (MIN > MAX),
3010 then the operation caused one of them to wrap around, mark
3011 the new range VARYING. */
3012 set_value_range_to_varying (vr);
3013 }
3014 else
3015 set_value_range (vr, vr0.type, min, max, NULL);
3016 }
3017
3018
3019 /* Extract range information from a conditional expression EXPR based on
3020 the ranges of each of its operands and the expression code. */
3021
3022 static void
3023 extract_range_from_cond_expr (value_range_t *vr, tree expr)
3024 {
3025 tree op0, op1;
3026 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3027 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3028
3029 /* Get value ranges for each operand. For constant operands, create
3030 a new value range with the operand to simplify processing. */
3031 op0 = COND_EXPR_THEN (expr);
3032 if (TREE_CODE (op0) == SSA_NAME)
3033 vr0 = *(get_value_range (op0));
3034 else if (is_gimple_min_invariant (op0))
3035 set_value_range_to_value (&vr0, op0, NULL);
3036 else
3037 set_value_range_to_varying (&vr0);
3038
3039 op1 = COND_EXPR_ELSE (expr);
3040 if (TREE_CODE (op1) == SSA_NAME)
3041 vr1 = *(get_value_range (op1));
3042 else if (is_gimple_min_invariant (op1))
3043 set_value_range_to_value (&vr1, op1, NULL);
3044 else
3045 set_value_range_to_varying (&vr1);
3046
3047 /* The resulting value range is the union of the operand ranges. */
3048 vrp_meet (&vr0, &vr1);
3049 copy_value_range (vr, &vr0);
3050 }
3051
3052
3053 /* Extract range information from a comparison expression EXPR based
3054 on the range of its operand and the expression code. */
3055
3056 static void
3057 extract_range_from_comparison (value_range_t *vr, enum tree_code code,
3058 tree type, tree op0, tree op1)
3059 {
3060 bool sop = false;
3061 tree val;
3062
3063 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3064 NULL);
3065
3066 /* A disadvantage of using a special infinity as an overflow
3067 representation is that we lose the ability to record overflow
3068 when we don't have an infinity. So we have to ignore a result
3069 which relies on overflow. */
3070
3071 if (val && !is_overflow_infinity (val) && !sop)
3072 {
3073 /* Since this expression was found on the RHS of an assignment,
3074 its type may be different from _Bool. Convert VAL to EXPR's
3075 type. */
3076 val = fold_convert (type, val);
3077 if (is_gimple_min_invariant (val))
3078 set_value_range_to_value (vr, val, vr->equiv);
3079 else
3080 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3081 }
3082 else
3083 /* The result of a comparison is always true or false. */
3084 set_value_range_to_truthvalue (vr, type);
3085 }
3086
3087 /* Try to derive a nonnegative or nonzero range out of STMT relying
3088 primarily on generic routines in fold in conjunction with range data.
3089 Store the result in *VR. */
3090
3091 static void
3092 extract_range_basic (value_range_t *vr, gimple stmt)
3093 {
3094 bool sop = false;
3095 tree type = gimple_expr_type (stmt);
3096
3097 if (INTEGRAL_TYPE_P (type)
3098 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
3099 set_value_range_to_nonnegative (vr, type,
3100 sop || stmt_overflow_infinity (stmt));
3101 else if (vrp_stmt_computes_nonzero (stmt, &sop)
3102 && !sop)
3103 set_value_range_to_nonnull (vr, type);
3104 else
3105 set_value_range_to_varying (vr);
3106 }
3107
3108
3109 /* Try to compute a useful range out of assignment STMT and store it
3110 in *VR. */
3111
3112 static void
3113 extract_range_from_assignment (value_range_t *vr, gimple stmt)
3114 {
3115 enum tree_code code = gimple_assign_rhs_code (stmt);
3116
3117 if (code == ASSERT_EXPR)
3118 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
3119 else if (code == SSA_NAME)
3120 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
3121 else if (TREE_CODE_CLASS (code) == tcc_binary
3122 || code == TRUTH_AND_EXPR
3123 || code == TRUTH_OR_EXPR
3124 || code == TRUTH_XOR_EXPR)
3125 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
3126 gimple_expr_type (stmt),
3127 gimple_assign_rhs1 (stmt),
3128 gimple_assign_rhs2 (stmt));
3129 else if (TREE_CODE_CLASS (code) == tcc_unary)
3130 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
3131 gimple_expr_type (stmt),
3132 gimple_assign_rhs1 (stmt));
3133 else if (code == COND_EXPR)
3134 extract_range_from_cond_expr (vr, gimple_assign_rhs1 (stmt));
3135 else if (TREE_CODE_CLASS (code) == tcc_comparison)
3136 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
3137 gimple_expr_type (stmt),
3138 gimple_assign_rhs1 (stmt),
3139 gimple_assign_rhs2 (stmt));
3140 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
3141 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
3142 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
3143 else
3144 set_value_range_to_varying (vr);
3145
3146 if (vr->type == VR_VARYING)
3147 extract_range_basic (vr, stmt);
3148 }
3149
3150 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3151 would be profitable to adjust VR using scalar evolution information
3152 for VAR. If so, update VR with the new limits. */
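/* For instance, if VAR is an induction variable with evolution
   {0, +, 1} that is not known to wrap, a VARYING range for it can
   be tightened to [0, TYPE_MAX_VALUE].  */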
3153
3154 static void
3155 adjust_range_with_scev (value_range_t *vr, struct loop *loop,
3156 gimple stmt, tree var)
3157 {
3158 tree init, step, chrec, tmin, tmax, min, max, type;
3159 enum ev_direction dir;
3160
3161 /* TODO. Don't adjust anti-ranges. An anti-range may provide
3162 better opportunities than a regular range, but I'm not sure. */
3163 if (vr->type == VR_ANTI_RANGE)
3164 return;
3165
3166 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
3167
3168 /* Like in PR19590, scev can return a constant function. */
3169 if (is_gimple_min_invariant (chrec))
3170 {
3171 set_value_range_to_value (vr, chrec, vr->equiv);
3172 return;
3173 }
3174
3175 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3176 return;
3177
3178 init = initial_condition_in_loop_num (chrec, loop->num);
3179 step = evolution_part_in_loop_num (chrec, loop->num);
3180
3181 /* If STEP is symbolic, we can't know whether INIT will be the
3182 minimum or maximum value in the range. Also, unless INIT is
3183 a simple expression, compare_values and possibly other functions
3184 in tree-vrp won't be able to handle it. */
3185 if (step == NULL_TREE
3186 || !is_gimple_min_invariant (step)
3187 || !valid_value_p (init))
3188 return;
3189
3190 dir = scev_direction (chrec);
3191 if (/* Do not adjust ranges if we do not know whether the iv increases
3192 or decreases, ... */
3193 dir == EV_DIR_UNKNOWN
3194 /* ... or if it may wrap. */
3195 || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3196 true))
3197 return;
3198
3199 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
3200 negative_overflow_infinity and positive_overflow_infinity,
3201 because we have concluded that the loop probably does not
3202 wrap. */
3203
3204 type = TREE_TYPE (var);
3205 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
3206 tmin = lower_bound_in_type (type, type);
3207 else
3208 tmin = TYPE_MIN_VALUE (type);
3209 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
3210 tmax = upper_bound_in_type (type, type);
3211 else
3212 tmax = TYPE_MAX_VALUE (type);
3213
3214 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3215 {
3216 min = tmin;
3217 max = tmax;
3218
3219 /* For VARYING or UNDEFINED ranges, just about anything we get
3220 from scalar evolutions should be better. */
3221
3222 if (dir == EV_DIR_DECREASES)
3223 max = init;
3224 else
3225 min = init;
3226
3227 /* If we would create an invalid range, then just assume we
3228 know absolutely nothing. This may be over-conservative,
3229 but it's clearly safe, and should happen only in unreachable
3230 parts of code, or for invalid programs. */
3231 if (compare_values (min, max) == 1)
3232 return;
3233
3234 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3235 }
3236 else if (vr->type == VR_RANGE)
3237 {
3238 min = vr->min;
3239 max = vr->max;
3240
3241 if (dir == EV_DIR_DECREASES)
3242 {
3243 /* INIT is the maximum value. If INIT is lower than VR->MAX
3244 but no smaller than VR->MIN, set VR->MAX to INIT. */
3245 if (compare_values (init, max) == -1)
3246 {
3247 max = init;
3248
3249 /* If we just created an invalid range with the minimum
3250 greater than the maximum, we fail conservatively.
3251 This should happen only in unreachable
3252 parts of code, or for invalid programs. */
3253 if (compare_values (min, max) == 1)
3254 return;
3255 }
3256
3257 /* According to the loop information, the variable does not
3258 overflow. If we think it does, probably because of an
3259 overflow due to arithmetic on a different INF value,
3260 reset now. */
3261 if (is_negative_overflow_infinity (min))
3262 min = tmin;
3263 }
3264 else
3265 {
3266 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
3267 if (compare_values (init, min) == 1)
3268 {
3269 min = init;
3270
3271 /* Again, avoid creating invalid range by failing. */
3272 if (compare_values (min, max) == 1)
3273 return;
3274 }
3275
3276 if (is_positive_overflow_infinity (max))
3277 max = tmax;
3278 }
3279
3280 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3281 }
3282 }
3283
3284 /* Return true if VAR may overflow at STMT. This checks any available
3285 loop information to see if we can determine that VAR does not
3286 overflow. */
3287
3288 static bool
3289 vrp_var_may_overflow (tree var, gimple stmt)
3290 {
3291 struct loop *l;
3292 tree chrec, init, step;
3293
3294 if (current_loops == NULL)
3295 return true;
3296
3297 l = loop_containing_stmt (stmt);
3298 if (l == NULL)
3299 return true;
3300
3301 chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
3302 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3303 return true;
3304
3305 init = initial_condition_in_loop_num (chrec, l->num);
3306 step = evolution_part_in_loop_num (chrec, l->num);
3307
3308 if (step == NULL_TREE
3309 || !is_gimple_min_invariant (step)
3310 || !valid_value_p (init))
3311 return true;
3312
3313 /* If we get here, we know something useful about VAR based on the
3314 loop information. If it wraps, it may overflow. */
3315
3316 if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3317 true))
3318 return true;
3319
3320 if (dump_file && (dump_flags & TDF_DETAILS) != 0)
3321 {
3322 print_generic_expr (dump_file, var, 0);
3323 fprintf (dump_file, ": loop information indicates it does not overflow\n");
3324 }
3325
3326 return false;
3327 }
3328
3329
3330 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
3331
3332 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
3333 all the values in the ranges.
3334
3335 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
3336
3337 - Return NULL_TREE if it is not always possible to determine the
3338 value of the comparison.
3339
3340 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
3341 overflow infinity was used in the test. */
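/* For example, [1, 5] < [6, 10] always evaluates to true, whereas
   [1, 5] < [3, 10] cannot be decided and yields NULL_TREE.  */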
3342
3343
3344 static tree
3345 compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
3346 bool *strict_overflow_p)
3347 {
3348 /* VARYING or UNDEFINED ranges cannot be compared. */
3349 if (vr0->type == VR_VARYING
3350 || vr0->type == VR_UNDEFINED
3351 || vr1->type == VR_VARYING
3352 || vr1->type == VR_UNDEFINED)
3353 return NULL_TREE;
3354
3355 /* Anti-ranges need to be handled separately. */
3356 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
3357 {
3358 /* If both are anti-ranges, then we cannot compute any
3359 comparison. */
3360 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
3361 return NULL_TREE;
3362
3363 /* These comparisons are never statically computable. */
3364 if (comp == GT_EXPR
3365 || comp == GE_EXPR
3366 || comp == LT_EXPR
3367 || comp == LE_EXPR)
3368 return NULL_TREE;
3369
3370 /* Equality can be computed only between a range and an
3371 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
3372 if (vr0->type == VR_RANGE)
3373 {
3374 /* To simplify processing, make VR0 the anti-range. */
3375 value_range_t *tmp = vr0;
3376 vr0 = vr1;
3377 vr1 = tmp;
3378 }
3379
3380 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
3381
3382 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
3383 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
3384 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3385
3386 return NULL_TREE;
3387 }
3388
3389 if (!usable_range_p (vr0, strict_overflow_p)
3390 || !usable_range_p (vr1, strict_overflow_p))
3391 return NULL_TREE;
3392
3393 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
3394 operands around and change the comparison code. */
3395 if (comp == GT_EXPR || comp == GE_EXPR)
3396 {
3397 value_range_t *tmp;
3398 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
3399 tmp = vr0;
3400 vr0 = vr1;
3401 vr1 = tmp;
3402 }
3403
3404 if (comp == EQ_EXPR)
3405 {
3406 /* Equality may only be computed if both ranges represent
3407 exactly one value. */
3408 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
3409 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
3410 {
3411 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
3412 strict_overflow_p);
3413 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
3414 strict_overflow_p);
3415 if (cmp_min == 0 && cmp_max == 0)
3416 return boolean_true_node;
3417 else if (cmp_min != -2 && cmp_max != -2)
3418 return boolean_false_node;
3419 }
3420 /* If the ranges [V0_MIN, V0_MAX] and [V1_MIN, V1_MAX] do not overlap, then V0 != V1. */
3421 else if (compare_values_warnv (vr0->min, vr1->max,
3422 strict_overflow_p) == 1
3423 || compare_values_warnv (vr1->min, vr0->max,
3424 strict_overflow_p) == 1)
3425 return boolean_false_node;
3426
3427 return NULL_TREE;
3428 }
3429 else if (comp == NE_EXPR)
3430 {
3431 int cmp1, cmp2;
3432
3433 /* If VR0 is completely to the left or completely to the right
3434 of VR1, they are always different. Notice that we need to
3435 make sure that both comparisons yield similar results to
3436 avoid comparing values that cannot be compared at
3437 compile-time. */
3438 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3439 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3440 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
3441 return boolean_true_node;
3442
3443 /* If VR0 and VR1 represent a single value and are identical,
3444 return false. */
3445 else if (compare_values_warnv (vr0->min, vr0->max,
3446 strict_overflow_p) == 0
3447 && compare_values_warnv (vr1->min, vr1->max,
3448 strict_overflow_p) == 0
3449 && compare_values_warnv (vr0->min, vr1->min,
3450 strict_overflow_p) == 0
3451 && compare_values_warnv (vr0->max, vr1->max,
3452 strict_overflow_p) == 0)
3453 return boolean_false_node;
3454
3455 /* Otherwise, they may or may not be different. */
3456 else
3457 return NULL_TREE;
3458 }
3459 else if (comp == LT_EXPR || comp == LE_EXPR)
3460 {
3461 int tst;
3462
3463 /* If VR0 is to the left of VR1, return true. */
3464 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3465 if ((comp == LT_EXPR && tst == -1)
3466 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3467 {
3468 if (overflow_infinity_range_p (vr0)
3469 || overflow_infinity_range_p (vr1))
3470 *strict_overflow_p = true;
3471 return boolean_true_node;
3472 }
3473
3474 /* If VR0 is to the right of VR1, return false. */
3475 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3476 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3477 || (comp == LE_EXPR && tst == 1))
3478 {
3479 if (overflow_infinity_range_p (vr0)
3480 || overflow_infinity_range_p (vr1))
3481 *strict_overflow_p = true;
3482 return boolean_false_node;
3483 }
3484
3485 /* Otherwise, we don't know. */
3486 return NULL_TREE;
3487 }
3488
3489 gcc_unreachable ();
3490 }
3491
3492
3493 /* Given a value range VR, a value VAL and a comparison code COMP, return
3494 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
3495 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
3496 always returns false. Return NULL_TREE if it is not always
3497 possible to determine the value of the comparison. Also set
3498 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
3499 infinity was used in the test. */
3500
3501 static tree
3502 compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
3503 bool *strict_overflow_p)
3504 {
3505 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3506 return NULL_TREE;
3507
3508 /* Anti-ranges need to be handled separately. */
3509 if (vr->type == VR_ANTI_RANGE)
3510 {
3511 /* For anti-ranges, the only predicates that we can compute at
3512 compile time are equality and inequality. */
3513 if (comp == GT_EXPR
3514 || comp == GE_EXPR
3515 || comp == LT_EXPR
3516 || comp == LE_EXPR)
3517 return NULL_TREE;
3518
3519 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
3520 if (value_inside_range (val, vr) == 1)
3521 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3522
3523 return NULL_TREE;
3524 }
3525
3526 if (!usable_range_p (vr, strict_overflow_p))
3527 return NULL_TREE;
3528
3529 if (comp == EQ_EXPR)
3530 {
3531 /* EQ_EXPR may only be computed if VR represents exactly
3532 one value. */
3533 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
3534 {
3535 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
3536 if (cmp == 0)
3537 return boolean_true_node;
3538 else if (cmp == -1 || cmp == 1 || cmp == 2)
3539 return boolean_false_node;
3540 }
3541 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
3542 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
3543 return boolean_false_node;
3544
3545 return NULL_TREE;
3546 }
3547 else if (comp == NE_EXPR)
3548 {
3549 /* If VAL is not inside VR, then they are always different. */
3550 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
3551 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
3552 return boolean_true_node;
3553
3554 /* If VR represents exactly one value equal to VAL, then return
3555 false. */
3556 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
3557 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
3558 return boolean_false_node;
3559
3560 /* Otherwise, they may or may not be different. */
3561 return NULL_TREE;
3562 }
3563 else if (comp == LT_EXPR || comp == LE_EXPR)
3564 {
3565 int tst;
3566
3567 /* If VR is to the left of VAL, return true. */
3568 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3569 if ((comp == LT_EXPR && tst == -1)
3570 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3571 {
3572 if (overflow_infinity_range_p (vr))
3573 *strict_overflow_p = true;
3574 return boolean_true_node;
3575 }
3576
3577 /* If VR is to the right of VAL, return false. */
3578 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3579 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3580 || (comp == LE_EXPR && tst == 1))
3581 {
3582 if (overflow_infinity_range_p (vr))
3583 *strict_overflow_p = true;
3584 return boolean_false_node;
3585 }
3586
3587 /* Otherwise, we don't know. */
3588 return NULL_TREE;
3589 }
3590 else if (comp == GT_EXPR || comp == GE_EXPR)
3591 {
3592 int tst;
3593
3594 /* If VR is to the right of VAL, return true. */
3595 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3596 if ((comp == GT_EXPR && tst == 1)
3597 || (comp == GE_EXPR && (tst == 0 || tst == 1)))
3598 {
3599 if (overflow_infinity_range_p (vr))
3600 *strict_overflow_p = true;
3601 return boolean_true_node;
3602 }
3603
3604 /* If VR is to the left of VAL, return false. */
3605 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3606 if ((comp == GT_EXPR && (tst == -1 || tst == 0))
3607 || (comp == GE_EXPR && tst == -1))
3608 {
3609 if (overflow_infinity_range_p (vr))
3610 *strict_overflow_p = true;
3611 return boolean_false_node;
3612 }
3613
3614 /* Otherwise, we don't know. */
3615 return NULL_TREE;
3616 }
3617
3618 gcc_unreachable ();
3619 }
3620
3621
3622 /* Debugging dumps. */
3623
3624 void dump_value_range (FILE *, value_range_t *);
3625 void debug_value_range (value_range_t *);
3626 void dump_all_value_ranges (FILE *);
3627 void debug_all_value_ranges (void);
3628 void dump_vr_equiv (FILE *, bitmap);
3629 void debug_vr_equiv (bitmap);
3630
3631
3632 /* Dump value range VR to FILE. */
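/* The output is roughly of the form "[5, +INF]" for a range, "~[0, 0]"
   for an anti-range, optionally followed by
   "EQUIVALENCES: { i_3 j_7 } (2 elements)" listing equivalent names
   (illustrative example, not verbatim output).  */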
3633
3634 void
3635 dump_value_range (FILE *file, value_range_t *vr)
3636 {
3637 if (vr == NULL)
3638 fprintf (file, "[]");
3639 else if (vr->type == VR_UNDEFINED)
3640 fprintf (file, "UNDEFINED");
3641 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
3642 {
3643 tree type = TREE_TYPE (vr->min);
3644
3645 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
3646
3647 if (is_negative_overflow_infinity (vr->min))
3648 fprintf (file, "-INF(OVF)");
3649 else if (INTEGRAL_TYPE_P (type)
3650 && !TYPE_UNSIGNED (type)
3651 && vrp_val_is_min (vr->min))
3652 fprintf (file, "-INF");
3653 else
3654 print_generic_expr (file, vr->min, 0);
3655
3656 fprintf (file, ", ");
3657
3658 if (is_positive_overflow_infinity (vr->max))
3659 fprintf (file, "+INF(OVF)");
3660 else if (INTEGRAL_TYPE_P (type)
3661 && vrp_val_is_max (vr->max))
3662 fprintf (file, "+INF");
3663 else
3664 print_generic_expr (file, vr->max, 0);
3665
3666 fprintf (file, "]");
3667
3668 if (vr->equiv)
3669 {
3670 bitmap_iterator bi;
3671 unsigned i, c = 0;
3672
3673 fprintf (file, " EQUIVALENCES: { ");
3674
3675 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
3676 {
3677 print_generic_expr (file, ssa_name (i), 0);
3678 fprintf (file, " ");
3679 c++;
3680 }
3681
3682 fprintf (file, "} (%u elements)", c);
3683 }
3684 }
3685 else if (vr->type == VR_VARYING)
3686 fprintf (file, "VARYING");
3687 else
3688 fprintf (file, "INVALID RANGE");
3689 }
3690
3691
3692 /* Dump value range VR to stderr. */
3693
3694 void
3695 debug_value_range (value_range_t *vr)
3696 {
3697 dump_value_range (stderr, vr);
3698 fprintf (stderr, "\n");
3699 }
3700
3701
3702 /* Dump value ranges of all SSA_NAMEs to FILE. */
3703
3704 void
3705 dump_all_value_ranges (FILE *file)
3706 {
3707 size_t i;
3708
3709 for (i = 0; i < num_ssa_names; i++)
3710 {
3711 if (vr_value[i])
3712 {
3713 print_generic_expr (file, ssa_name (i), 0);
3714 fprintf (file, ": ");
3715 dump_value_range (file, vr_value[i]);
3716 fprintf (file, "\n");
3717 }
3718 }
3719
3720 fprintf (file, "\n");
3721 }
3722
3723
3724 /* Dump all value ranges to stderr. */
3725
3726 void
3727 debug_all_value_ranges (void)
3728 {
3729 dump_all_value_ranges (stderr);
3730 }
3731
3732
3733 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
3734 create a new SSA name N and return the assertion assignment
3735 'N = ASSERT_EXPR <V, V OP W>'. */
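/* For instance (illustrative), given COND 'x_1 > 10' and V x_1, this
   returns the assignment 'x_2 = ASSERT_EXPR <x_1, x_1 > 10>' where x_2
   is a fresh copy of x_1; for COND '!x_1' it returns 'x_2 = 0' and for
   COND 'x_1' it returns 'x_2 = 1'.  */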
3736
3737 static gimple
3738 build_assert_expr_for (tree cond, tree v)
3739 {
3740 tree n;
3741 gimple assertion;
3742
3743 gcc_assert (TREE_CODE (v) == SSA_NAME);
3744 n = duplicate_ssa_name (v, NULL);
3745
3746 if (COMPARISON_CLASS_P (cond))
3747 {
3748 tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
3749 assertion = gimple_build_assign (n, a);
3750 }
3751 else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
3752 {
3753 /* Given !V, build the assignment N = false. */
3754 tree op0 = TREE_OPERAND (cond, 0);
3755 gcc_assert (op0 == v);
3756 assertion = gimple_build_assign (n, boolean_false_node);
3757 }
3758 else if (TREE_CODE (cond) == SSA_NAME)
3759 {
3760 /* Given V, build the assignment N = true. */
3761 gcc_assert (v == cond);
3762 assertion = gimple_build_assign (n, boolean_true_node);
3763 }
3764 else
3765 gcc_unreachable ();
3766
3767 SSA_NAME_DEF_STMT (n) = assertion;
3768
3769 /* The new ASSERT_EXPR creates a new SSA name that replaces the
3770 operand of the ASSERT_EXPR. Register the new name and the old one
3771 in the replacement table so that we can fix the SSA web after
3772 adding all the ASSERT_EXPRs. */
3773 register_new_name_mapping (n, v);
3774
3775 return assertion;
3776 }
3777
3778
3779 /* Return true if STMT is a conditional whose predicate involves
3780 floating point values. */
3781
3782 static inline bool
3783 fp_predicate (gimple stmt)
3784 {
3785 GIMPLE_CHECK (stmt, GIMPLE_COND);
3786
3787 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
3788 }
3789
3790
3791 /* If the range of values taken by OP can be inferred after STMT executes,
3792 return the comparison code (COMP_CODE_P) and value (VAL_P) that
3793 describe the inferred range. Return true if a range could be
3794 inferred. */
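/* For instance (illustrative), after a statement such as 'tmp_2 = *p_1'
   we may infer 'p_1 != 0', provided -fdelete-null-pointer-checks is in
   effect and the statement cannot throw.  */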
3795
3796 static bool
3797 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
3798 {
3799 *val_p = NULL_TREE;
3800 *comp_code_p = ERROR_MARK;
3801
3802 /* Do not attempt to infer anything about names that flow through
3803 abnormal edges. */
3804 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
3805 return false;
3806
3807 /* Similarly, don't infer anything from statements that may throw
3808 exceptions. */
3809 if (stmt_could_throw_p (stmt))
3810 return false;
3811
3812 /* If STMT is the last statement of a basic block with no
3813 successors, there is no point inferring anything about any of its
3814 operands. We would not be able to find a proper insertion point
3815 for the assertion, anyway. */
3816 if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
3817 return false;
3818
3819 /* We can only assume that a pointer dereference will yield
3820 non-NULL if -fdelete-null-pointer-checks is enabled. */
3821 if (flag_delete_null_pointer_checks
3822 && POINTER_TYPE_P (TREE_TYPE (op))
3823 && gimple_code (stmt) != GIMPLE_ASM)
3824 {
3825 unsigned num_uses, num_loads, num_stores;
3826
3827 count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
3828 if (num_loads + num_stores > 0)
3829 {
3830 *val_p = build_int_cst (TREE_TYPE (op), 0);
3831 *comp_code_p = NE_EXPR;
3832 return true;
3833 }
3834 }
3835
3836 return false;
3837 }
3838
3839
3840 void dump_asserts_for (FILE *, tree);
3841 void debug_asserts_for (tree);
3842 void dump_all_asserts (FILE *);
3843 void debug_all_asserts (void);
3844
3845 /* Dump all the registered assertions for NAME to FILE. */
3846
3847 void
3848 dump_asserts_for (FILE *file, tree name)
3849 {
3850 assert_locus_t loc;
3851
3852 fprintf (file, "Assertions to be inserted for ");
3853 print_generic_expr (file, name, 0);
3854 fprintf (file, "\n");
3855
3856 loc = asserts_for[SSA_NAME_VERSION (name)];
3857 while (loc)
3858 {
3859 fprintf (file, "\t");
3860 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
3861 fprintf (file, "\n\tBB #%d", loc->bb->index);
3862 if (loc->e)
3863 {
3864 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
3865 loc->e->dest->index);
3866 dump_edge_info (file, loc->e, 0);
3867 }
3868 fprintf (file, "\n\tPREDICATE: ");
3869 print_generic_expr (file, name, 0);
3870 fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]);
3871 print_generic_expr (file, loc->val, 0);
3872 fprintf (file, "\n\n");
3873 loc = loc->next;
3874 }
3875
3876 fprintf (file, "\n");
3877 }
3878
3879
3880 /* Dump all the registered assertions for NAME to stderr. */
3881
3882 void
3883 debug_asserts_for (tree name)
3884 {
3885 dump_asserts_for (stderr, name);
3886 }
3887
3888
3889 /* Dump all the registered assertions for all the names to FILE. */
3890
3891 void
3892 dump_all_asserts (FILE *file)
3893 {
3894 unsigned i;
3895 bitmap_iterator bi;
3896
3897 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
3898 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3899 dump_asserts_for (file, ssa_name (i));
3900 fprintf (file, "\n");
3901 }
3902
3903
3904 /* Dump all the registered assertions for all the names to stderr. */
3905
3906 void
3907 debug_all_asserts (void)
3908 {
3909 dump_all_asserts (stderr);
3910 }
3911
3912
3913 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
3914 'EXPR COMP_CODE VAL' at a location that dominates block BB or
3915 E->DEST, then register this location as a possible insertion point
3916 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
3917
3918 BB, E and SI provide the exact insertion point for the new
3919 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
3920 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
3921 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
3922 must not be NULL. */
3923
3924 static void
3925 register_new_assert_for (tree name, tree expr,
3926 enum tree_code comp_code,
3927 tree val,
3928 basic_block bb,
3929 edge e,
3930 gimple_stmt_iterator si)
3931 {
3932 assert_locus_t n, loc, last_loc;
3933 bool found;
3934 basic_block dest_bb;
3935
3936 #if defined ENABLE_CHECKING
3937 gcc_assert (bb == NULL || e == NULL);
3938
3939 if (e == NULL)
3940 gcc_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
3941 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
3942 #endif
3943
3944 /* Never build an assert comparing against an integer constant with
3945 TREE_OVERFLOW set. This confuses our undefined overflow warning
3946 machinery. */
3947 if (TREE_CODE (val) == INTEGER_CST
3948 && TREE_OVERFLOW (val))
3949 val = build_int_cst_wide (TREE_TYPE (val),
3950 TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val));
3951
3952 /* The new assertion A will be inserted at BB or E. We need to
3953 determine if the new location is dominated by a previously
3954 registered location for A. If we are doing an edge insertion,
3955 assume that A will be inserted at E->DEST. Note that this is not
3956 necessarily true.
3957
3958 If E is a critical edge, it will be split. But even if E is
3959 split, the new block will dominate the same set of blocks that
3960 E->DEST dominates.
3961
3962 The reverse, however, is not true: blocks dominated by E->DEST
3963 will not be dominated by the new block created to split E. So,
3964 if the insertion location is on a critical edge, we will not use
3965 the new location to move another assertion previously registered
3966 at a block dominated by E->DEST. */
3967 dest_bb = (bb) ? bb : e->dest;
3968
3969 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
3970 VAL at a block dominating DEST_BB, then we don't need to insert a new
3971 one. Similarly, if the same assertion already exists at a block
3972 dominated by DEST_BB and the new location is not on a critical
3973 edge, then update the existing location for the assertion (i.e.,
3974 move the assertion up in the dominance tree).
3975
3976 Note, this is implemented as a simple linked list because there
3977 should not be more than a handful of assertions registered per
3978 name. If this becomes a performance problem, a table hashed by
3979 COMP_CODE and VAL could be implemented. */
3980 loc = asserts_for[SSA_NAME_VERSION (name)];
3981 last_loc = loc;
3982 found = false;
3983 while (loc)
3984 {
3985 if (loc->comp_code == comp_code
3986 && (loc->val == val
3987 || operand_equal_p (loc->val, val, 0))
3988 && (loc->expr == expr
3989 || operand_equal_p (loc->expr, expr, 0)))
3990 {
3991 /* If the assertion NAME COMP_CODE VAL has already been
3992 registered at a basic block that dominates DEST_BB, then
3993 we don't need to insert the same assertion again. Note
3994 that we don't check strict dominance here to avoid
3995 replicating the same assertion inside the same basic
3996 block more than once (e.g., when a pointer is
3997 dereferenced several times inside a block).
3998
3999 Edge insertions are an exception to this rule. If the
4000 new assertion is to be inserted on edge E, then it will
4001 dominate all the other insertions that we may want to
4002 insert in DEST_BB. So, if we are doing an edge
4003 insertion, don't do this dominance check. */
4004 if (e == NULL
4005 && dominated_by_p (CDI_DOMINATORS, dest_bb, loc->bb))
4006 return;
4007
4008 /* Otherwise, if E is not a critical edge and DEST_BB
4009 dominates the existing location for the assertion, move
4010 the assertion up in the dominance tree by updating its
4011 location information. */
4012 if ((e == NULL || !EDGE_CRITICAL_P (e))
4013 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
4014 {
4015 loc->bb = dest_bb;
4016 loc->e = e;
4017 loc->si = si;
4018 return;
4019 }
4020 }
4021
4022 /* Update the last node of the list and move to the next one. */
4023 last_loc = loc;
4024 loc = loc->next;
4025 }
4026
4027 /* If we didn't find an assertion already registered for
4028 NAME COMP_CODE VAL, add a new one at the end of the list of
4029 assertions associated with NAME. */
4030 n = XNEW (struct assert_locus_d);
4031 n->bb = dest_bb;
4032 n->e = e;
4033 n->si = si;
4034 n->comp_code = comp_code;
4035 n->val = val;
4036 n->expr = expr;
4037 n->next = NULL;
4038
4039 if (last_loc)
4040 last_loc->next = n;
4041 else
4042 asserts_for[SSA_NAME_VERSION (name)] = n;
4043
4044 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
4045 }
4046
4047 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4048 Extract a suitable test code and value and store them into *CODE_P and
4049 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4050
4051 If no extraction was possible, return FALSE, otherwise return TRUE.
4052
4053 If INVERT is true, then we invert the result stored into *CODE_P. */
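/* For instance (illustrative), for NAME x_3 and the predicate '5 < x_3'
   this stores GT_EXPR and 5 (i.e. x_3 > 5); with INVERT it stores
   LE_EXPR and 5 (i.e. x_3 <= 5).  */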
4054
4055 static bool
4056 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
4057 tree cond_op0, tree cond_op1,
4058 bool invert, enum tree_code *code_p,
4059 tree *val_p)
4060 {
4061 enum tree_code comp_code;
4062 tree val;
4063
4064 /* We have a comparison of the form NAME COMP VAL
4065 or VAL COMP NAME. */
4066 if (name == cond_op1)
4067 {
4068 /* If the predicate is of the form VAL COMP NAME, flip
4069 COMP around because we need to register NAME as the
4070 first operand in the predicate. */
4071 comp_code = swap_tree_comparison (cond_code);
4072 val = cond_op0;
4073 }
4074 else
4075 {
4076 /* The comparison is of the form NAME COMP VAL, so the
4077 comparison code remains unchanged. */
4078 comp_code = cond_code;
4079 val = cond_op1;
4080 }
4081
4082 /* Invert the comparison code as necessary. */
4083 if (invert)
4084 comp_code = invert_tree_comparison (comp_code, 0);
4085
4086 /* VRP does not handle float types. */
4087 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
4088 return false;
4089
4090 /* Do not register always-false predicates.
4091 FIXME: this works around a limitation in fold() when dealing with
4092 enumerations. Given 'enum { N1, N2 } x;', fold will not
4093 fold 'if (x > N2)' to 'if (0)'. */
4094 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
4095 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
4096 {
4097 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
4098 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
4099
4100 if (comp_code == GT_EXPR
4101 && (!max
4102 || compare_values (val, max) == 0))
4103 return false;
4104
4105 if (comp_code == LT_EXPR
4106 && (!min
4107 || compare_values (val, min) == 0))
4108 return false;
4109 }
4110 *code_p = comp_code;
4111 *val_p = val;
4112 return true;
4113 }
4114
4115 /* Try to register an edge assertion for SSA name NAME on edge E for
4116 the condition COND contributing to the conditional jump pointed to by BSI.
4117 Invert the condition COND if INVERT is true.
4118 Return true if an assertion for NAME could be registered. */
4119
4120 static bool
4121 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
4122 enum tree_code cond_code,
4123 tree cond_op0, tree cond_op1, bool invert)
4124 {
4125 tree val;
4126 enum tree_code comp_code;
4127 bool retval = false;
4128
4129 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4130 cond_op0,
4131 cond_op1,
4132 invert, &comp_code, &val))
4133 return false;
4134
4135 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4136 reachable from E. */
4137 if (live_on_edge (e, name)
4138 && !has_single_use (name))
4139 {
4140 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
4141 retval = true;
4142 }
4143
4144 /* In the case of NAME <= CST and NAME being defined as
4145 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
4146 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
4147 This catches range and anti-range tests. */
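/* For instance (illustrative), for 'if ((unsigned) i_1 + 2 <= 5)' the
   true edge implies i_1 >= -2 and i_1 <= 3, i.e. i_1 is in the range
   [-2, 3]; the false edge (NAME > CST) yields the complementary
   anti-range.  */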
4148 if ((comp_code == LE_EXPR
4149 || comp_code == GT_EXPR)
4150 && TREE_CODE (val) == INTEGER_CST
4151 && TYPE_UNSIGNED (TREE_TYPE (val)))
4152 {
4153 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4154 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
4155
4156 /* Extract CST2 from the (optional) addition. */
4157 if (is_gimple_assign (def_stmt)
4158 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
4159 {
4160 name2 = gimple_assign_rhs1 (def_stmt);
4161 cst2 = gimple_assign_rhs2 (def_stmt);
4162 if (TREE_CODE (name2) == SSA_NAME
4163 && TREE_CODE (cst2) == INTEGER_CST)
4164 def_stmt = SSA_NAME_DEF_STMT (name2);
4165 }
4166
4167 /* Extract NAME2 from the (optional) sign-changing cast. */
4168 if (gimple_assign_cast_p (def_stmt))
4169 {
4170 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4171 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
4172 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
4173 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
4174 name3 = gimple_assign_rhs1 (def_stmt);
4175 }
4176
4177 /* If name3 is used later, create an ASSERT_EXPR for it. */
4178 if (name3 != NULL_TREE
4179 && TREE_CODE (name3) == SSA_NAME
4180 && (cst2 == NULL_TREE
4181 || TREE_CODE (cst2) == INTEGER_CST)
4182 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
4183 && live_on_edge (e, name3)
4184 && !has_single_use (name3))
4185 {
4186 tree tmp;
4187
4188 /* Build an expression for the range test. */
4189 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
4190 if (cst2 != NULL_TREE)
4191 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4192
4193 if (dump_file)
4194 {
4195 fprintf (dump_file, "Adding assert for ");
4196 print_generic_expr (dump_file, name3, 0);
4197 fprintf (dump_file, " from ");
4198 print_generic_expr (dump_file, tmp, 0);
4199 fprintf (dump_file, "\n");
4200 }
4201
4202 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
4203
4204 retval = true;
4205 }
4206
4207 /* If name2 is used later, create an ASSERT_EXPR for it. */
4208 if (name2 != NULL_TREE
4209 && TREE_CODE (name2) == SSA_NAME
4210 && TREE_CODE (cst2) == INTEGER_CST
4211 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4212 && live_on_edge (e, name2)
4213 && !has_single_use (name2))
4214 {
4215 tree tmp;
4216
4217 /* Build an expression for the range test. */
4218 tmp = name2;
4219 if (TREE_TYPE (name) != TREE_TYPE (name2))
4220 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
4221 if (cst2 != NULL_TREE)
4222 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4223
4224 if (dump_file)
4225 {
4226 fprintf (dump_file, "Adding assert for ");
4227 print_generic_expr (dump_file, name2, 0);
4228 fprintf (dump_file, " from ");
4229 print_generic_expr (dump_file, tmp, 0);
4230 fprintf (dump_file, "\n");
4231 }
4232
4233 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
4234
4235 retval = true;
4236 }
4237 }
4238
4239 return retval;
4240 }
4241
4242 /* OP is an operand of a truth value expression which is known to have
4243 a particular value. Register any asserts for OP and for any
4244 operands in OP's defining statement.
4245
4246 If CODE is EQ_EXPR, then we want to register that OP is zero (false);
4247 if CODE is NE_EXPR, then we want to register that OP is nonzero (true). */
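/* For instance (illustrative), if OP is known nonzero (CODE == NE_EXPR)
   and OP is defined as 'OP = x_1 < y_2', asserts may also be registered
   for x_1 and y_2 based on the comparison being true on edge E.  */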
4248
4249 static bool
4250 register_edge_assert_for_1 (tree op, enum tree_code code,
4251 edge e, gimple_stmt_iterator bsi)
4252 {
4253 bool retval = false;
4254 gimple op_def;
4255 tree val;
4256 enum tree_code rhs_code;
4257
4258 /* We only care about SSA_NAMEs. */
4259 if (TREE_CODE (op) != SSA_NAME)
4260 return false;
4261
4262 /* We know that OP will have a zero or nonzero value. If OP is used
4263 more than once, go ahead and register an assert for OP.
4264
4265 The FOUND_IN_SUBGRAPH support is not helpful in this situation as
4266 it will always be set for OP (because OP is used in a COND_EXPR in
4267 the subgraph). */
4268 if (!has_single_use (op))
4269 {
4270 val = build_int_cst (TREE_TYPE (op), 0);
4271 register_new_assert_for (op, op, code, val, NULL, e, bsi);
4272 retval = true;
4273 }
4274
4275 /* Now look at how OP is set. If it's set from a comparison,
4276 a truth operation or some bit operations, then we may be able
4277 to register information about the operands of that assignment. */
4278 op_def = SSA_NAME_DEF_STMT (op);
4279 if (gimple_code (op_def) != GIMPLE_ASSIGN)
4280 return retval;
4281
4282 rhs_code = gimple_assign_rhs_code (op_def);
4283
4284 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
4285 {
4286 bool invert = (code == EQ_EXPR ? true : false);
4287 tree op0 = gimple_assign_rhs1 (op_def);
4288 tree op1 = gimple_assign_rhs2 (op_def);
4289
4290 if (TREE_CODE (op0) == SSA_NAME)
4291 retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
4292 invert);
4293 if (TREE_CODE (op1) == SSA_NAME)
4294 retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
4295 invert);
4296 }
4297 else if ((code == NE_EXPR
4298 && (gimple_assign_rhs_code (op_def) == TRUTH_AND_EXPR
4299 || gimple_assign_rhs_code (op_def) == BIT_AND_EXPR))
4300 || (code == EQ_EXPR
4301 && (gimple_assign_rhs_code (op_def) == TRUTH_OR_EXPR
4302 || gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR)))
4303 {
4304 /* Recurse on each operand. */
4305 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4306 code, e, bsi);
4307 retval |= register_edge_assert_for_1 (gimple_assign_rhs2 (op_def),
4308 code, e, bsi);
4309 }
4310 else if (gimple_assign_rhs_code (op_def) == TRUTH_NOT_EXPR)
4311 {
4312 /* Recurse, flipping CODE. */
4313 code = invert_tree_comparison (code, false);
4314 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4315 code, e, bsi);
4316 }
4317 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
4318 {
4319 /* Recurse through the copy. */
4320 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4321 code, e, bsi);
4322 }
4323 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
4324 {
4325 /* Recurse through the type conversion. */
4326 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4327 code, e, bsi);
4328 }
4329
4330 return retval;
4331 }
4332
4333 /* Try to register an edge assertion for SSA name NAME on edge E for
4334 the condition COND contributing to the conditional jump pointed to by SI.
4335 Return true if an assertion for NAME could be registered. */
4336
4337 static bool
4338 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
4339 enum tree_code cond_code, tree cond_op0,
4340 tree cond_op1)
4341 {
4342 tree val;
4343 enum tree_code comp_code;
4344 bool retval = false;
4345 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
4346
4347 /* Do not attempt to infer anything about names that flow through
4348 abnormal edges. */
4349 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4350 return false;
4351
4352 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4353 cond_op0, cond_op1,
4354 is_else_edge,
4355 &comp_code, &val))
4356 return false;
4357
4358 /* Register ASSERT_EXPRs for name. */
4359 retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
4360 cond_op1, is_else_edge);
4361
4362
4363 /* If COND is effectively an equality test of an SSA_NAME against
4364 the value zero or one, then we may be able to assert values
4365 for SSA_NAMEs which flow into COND. */
4366
4367 /* In the case of NAME == 1 or NAME != 0, if the defining statement of
4368 NAME is a TRUTH_AND_EXPR or BIT_AND_EXPR, we can assert that both of
4369 its operands have nonzero value. */
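/* For instance (illustrative), on the true edge of 'if (t_3 != 0)' with
   't_3 = a_1 & b_2', we may register a_1 != 0 and b_2 != 0.  */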
4370 if (((comp_code == EQ_EXPR && integer_onep (val))
4371 || (comp_code == NE_EXPR && integer_zerop (val))))
4372 {
4373 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4374
4375 if (is_gimple_assign (def_stmt)
4376 && (gimple_assign_rhs_code (def_stmt) == TRUTH_AND_EXPR
4377 || gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR))
4378 {
4379 tree op0 = gimple_assign_rhs1 (def_stmt);
4380 tree op1 = gimple_assign_rhs2 (def_stmt);
4381 retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
4382 retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
4383 }
4384 }
4385
4386 /* In the case of NAME == 0 or NAME != 1, if the defining statement of
4387 NAME is a TRUTH_OR_EXPR, we can assert that both of its operands
4388 have zero value. */
4389 if (((comp_code == EQ_EXPR && integer_zerop (val))
4390 || (comp_code == NE_EXPR && integer_onep (val))))
4391 {
4392 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4393
4394 if (is_gimple_assign (def_stmt)
4395 && (gimple_assign_rhs_code (def_stmt) == TRUTH_OR_EXPR
4396 /* For BIT_IOR_EXPR, both operands necessarily have zero
4397 value only if NAME == 0. */
4398 || (comp_code == EQ_EXPR
4399 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR))))
4400 {
4401 tree op0 = gimple_assign_rhs1 (def_stmt);
4402 tree op1 = gimple_assign_rhs2 (def_stmt);
4403 retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
4404 retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
4405 }
4406 }
4407
4408 return retval;
4409 }
4410
4411
4412 /* Determine whether the outgoing edges of BB should receive an
4413 ASSERT_EXPR for each of the operands of BB's LAST statement.
4414 The last statement of BB must be a COND_EXPR.
4415
4416 If any of the sub-graphs rooted at BB have an interesting use of
4417 the predicate operands, an assert location node is added to the
4418 list of assertions for the corresponding operands. */
4419
4420 static bool
4421 find_conditional_asserts (basic_block bb, gimple last)
4422 {
4423 bool need_assert;
4424 gimple_stmt_iterator bsi;
4425 tree op;
4426 edge_iterator ei;
4427 edge e;
4428 ssa_op_iter iter;
4429
4430 need_assert = false;
4431 bsi = gsi_for_stmt (last);
4432
4433 /* Look for uses of the operands in each of the sub-graphs
4434 rooted at BB. We need to check each of the outgoing edges
4435 separately, so that we know what kind of ASSERT_EXPR to
4436 insert. */
4437 FOR_EACH_EDGE (e, ei, bb->succs)
4438 {
4439 if (e->dest == bb)
4440 continue;
4441
4442 /* Register the necessary assertions for each operand in the
4443 conditional predicate. */
4444 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
4445 {
4446 need_assert |= register_edge_assert_for (op, e, bsi,
4447 gimple_cond_code (last),
4448 gimple_cond_lhs (last),
4449 gimple_cond_rhs (last));
4450 }
4451 }
4452
4453 return need_assert;
4454 }
4455
4456 /* Compare two case labels sorting first by the destination label uid
4457 and then by the case value. */
4458
4459 static int
4460 compare_case_labels (const void *p1, const void *p2)
4461 {
4462 const_tree const case1 = *(const_tree const*)p1;
4463 const_tree const case2 = *(const_tree const*)p2;
4464 unsigned int uid1 = DECL_UID (CASE_LABEL (case1));
4465 unsigned int uid2 = DECL_UID (CASE_LABEL (case2));
4466
4467 if (uid1 < uid2)
4468 return -1;
4469 else if (uid1 == uid2)
4470 {
4471 /* Make sure the default label is first in a group. */
4472 if (!CASE_LOW (case1))
4473 return -1;
4474 else if (!CASE_LOW (case2))
4475 return 1;
4476 else
4477 return tree_int_cst_compare (CASE_LOW (case1), CASE_LOW (case2));
4478 }
4479 else
4480 return 1;
4481 }
4482
4483 /* Determine whether the outgoing edges of BB should receive an
4484 ASSERT_EXPR for each of the operands of BB's LAST statement.
4485 The last statement of BB must be a SWITCH_EXPR.
4486
4487 If any of the sub-graphs rooted at BB have an interesting use of
4488 the predicate operands, an assert location node is added to the
4489 list of assertions for the corresponding operands. */
4490
4491 static bool
4492 find_switch_asserts (basic_block bb, gimple last)
4493 {
4494 bool need_assert;
4495 gimple_stmt_iterator bsi;
4496 tree op;
4497 edge e;
4498 tree vec2;
4499 size_t n = gimple_switch_num_labels (last);
4500 #if GCC_VERSION >= 4000
4501 unsigned int idx;
4502 #else
4503 /* Work around GCC 3.4 bug (PR 37086). */
4504 volatile unsigned int idx;
4505 #endif
4506
4507 need_assert = false;
4508 bsi = gsi_for_stmt (last);
4509 op = gimple_switch_index (last);
4510 if (TREE_CODE (op) != SSA_NAME)
4511 return false;
4512
4513 /* Build a vector of case labels sorted by destination label. */
4514 vec2 = make_tree_vec (n);
4515 for (idx = 0; idx < n; ++idx)
4516 TREE_VEC_ELT (vec2, idx) = gimple_switch_label (last, idx);
4517 qsort (&TREE_VEC_ELT (vec2, 0), n, sizeof (tree), compare_case_labels);
4518
4519 for (idx = 0; idx < n; ++idx)
4520 {
4521 tree min, max;
4522 tree cl = TREE_VEC_ELT (vec2, idx);
4523
4524 min = CASE_LOW (cl);
4525 max = CASE_HIGH (cl);
4526
4527 /* If there are multiple case labels with the same destination
4528 we need to combine them to a single value range for the edge. */
4529 if (idx + 1 < n
4530 && CASE_LABEL (cl) == CASE_LABEL (TREE_VEC_ELT (vec2, idx + 1)))
4531 {
4532 /* Skip labels until the last of the group. */
4533 do {
4534 ++idx;
4535 } while (idx < n
4536 && CASE_LABEL (cl) == CASE_LABEL (TREE_VEC_ELT (vec2, idx)));
4537 --idx;
4538
4539 /* Pick up the maximum of the case label range. */
4540 if (CASE_HIGH (TREE_VEC_ELT (vec2, idx)))
4541 max = CASE_HIGH (TREE_VEC_ELT (vec2, idx));
4542 else
4543 max = CASE_LOW (TREE_VEC_ELT (vec2, idx));
4544 }
4545
4546 /* Nothing to do if the range includes the default label until we
4547 can register anti-ranges. */
4548 if (min == NULL_TREE)
4549 continue;
4550
4551 /* Find the edge to register the assert expr on. */
4552 e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
4553
4554 /* Register the necessary assertions for the operand in the
4555 SWITCH_EXPR. */
4556 need_assert |= register_edge_assert_for (op, e, bsi,
4557 max ? GE_EXPR : EQ_EXPR,
4558 op,
4559 fold_convert (TREE_TYPE (op),
4560 min));
4561 if (max)
4562 {
4563 need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
4564 op,
4565 fold_convert (TREE_TYPE (op),
4566 max));
4567 }
4568 }
4569
4570 return need_assert;
4571 }
4572
4573
4574 /* Traverse all the statements in block BB looking for statements that
4575 may generate useful assertions for the SSA names in their operand.
4576 If a statement produces a useful assertion A for name N_i, then the
4577 list of assertions already generated for N_i is scanned to
4578 determine if A is actually needed.
4579
4580 If N_i already had the assertion A at a location dominating the
4581 current location, then nothing needs to be done. Otherwise, the
4582 new location for A is recorded instead.
4583
4584 1- For every statement S in BB, all the variables used by S are
4585 added to bitmap FOUND_IN_SUBGRAPH.
4586
4587 2- If statement S uses an operand N in a way that exposes a known
4588 value range for N, then if N was not already generated by an
4589 ASSERT_EXPR, create a new assert location for N. For instance,
4590 if N is a pointer and the statement dereferences it, we can
4591 assume that N is not NULL.
4592
4593 3- COND_EXPRs are a special case of #2. We can derive range
4594 information from the predicate but need to insert different
4595 ASSERT_EXPRs for each of the sub-graphs rooted at the
4596 conditional block. If the last statement of BB is a conditional
4597 expression of the form 'X op Y', then
4598
4599 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
4600
4601 b) If the conditional is the only entry point to the sub-graph
4602 corresponding to the THEN_CLAUSE, recurse into it. On
4603 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
4604 an ASSERT_EXPR is added for the corresponding variable.
4605
4606 c) Repeat step (b) on the ELSE_CLAUSE.
4607
4608 d) Mark X and Y in FOUND_IN_SUBGRAPH.
4609
4610 For instance,
4611
4612 if (a == 9)
4613 b = a;
4614 else
4615 b = c + 1;
4616
4617 In this case, an assertion on the THEN clause is useful to
4618 determine that 'a' is always 9 on that edge. However, an assertion
4619 on the ELSE clause would be unnecessary.
4620
4621 4- If BB does not end in a conditional expression, then we recurse
4622 into BB's dominator children.
4623
4624 At the end of the recursive traversal, every SSA name will have a
4625 list of locations where ASSERT_EXPRs should be added. When a new
4626 location for name N is found, it is registered by calling
4627 register_new_assert_for. That function keeps track of all the
4628 registered assertions to prevent adding unnecessary assertions.
4629 For instance, if a pointer P_4 is dereferenced more than once in a
4630 dominator tree, only the location dominating all the dereferences of
4631 P_4 will receive an ASSERT_EXPR.
4632
4633 If this function returns true, then it means that there are names
4634 for which we need to generate ASSERT_EXPRs. Those assertions are
4635 inserted by process_assert_insertions. */
4636
4637 static bool
4638 find_assert_locations_1 (basic_block bb, sbitmap live)
4639 {
4640 gimple_stmt_iterator si;
4641 gimple last;
4642 gimple phi;
4643 bool need_assert;
4644
4645 need_assert = false;
4646 last = last_stmt (bb);
4647
4648 /* If BB's last statement is a conditional statement involving integer
4649 operands, determine if we need to add ASSERT_EXPRs. */
4650 if (last
4651 && gimple_code (last) == GIMPLE_COND
4652 && !fp_predicate (last)
4653 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4654 need_assert |= find_conditional_asserts (bb, last);
4655
4656 /* If BB's last statement is a switch statement involving integer
4657 operands, determine if we need to add ASSERT_EXPRs. */
4658 if (last
4659 && gimple_code (last) == GIMPLE_SWITCH
4660 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4661 need_assert |= find_switch_asserts (bb, last);
4662
4663 /* Traverse all the statements in BB marking used names and looking
4664 for statements that may infer assertions for their used operands. */
4665 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4666 {
4667 gimple stmt;
4668 tree op;
4669 ssa_op_iter i;
4670
4671 stmt = gsi_stmt (si);
4672
4673 /* See if we can derive an assertion for any of STMT's operands. */
4674 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
4675 {
4676 tree value;
4677 enum tree_code comp_code;
4678
4679 /* Mark OP in our live bitmap. */
4680 SET_BIT (live, SSA_NAME_VERSION (op));
4681
4682 /* If OP is used in such a way that we can infer a value
4683 range for it, and we don't find a previous assertion for
4684 it, create a new assertion location node for OP. */
4685 if (infer_value_range (stmt, op, &comp_code, &value))
4686 {
4687 /* If we are able to infer a nonzero value range for OP,
4688 then walk backwards through the use-def chain to see if OP
4689 was set via a typecast.
4690
4691 If so, then we can also infer a nonzero value range
4692 for the operand of the NOP_EXPR. */
4693 if (comp_code == NE_EXPR && integer_zerop (value))
4694 {
4695 tree t = op;
4696 gimple def_stmt = SSA_NAME_DEF_STMT (t);
4697
4698 while (is_gimple_assign (def_stmt)
4699 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR
4700 && TREE_CODE
4701 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
4702 && POINTER_TYPE_P
4703 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
4704 {
4705 t = gimple_assign_rhs1 (def_stmt);
4706 def_stmt = SSA_NAME_DEF_STMT (t);
4707
4708 /* Note we want to register the assert for the
4709 operand of the NOP_EXPR after SI, not after the
4710 conversion. */
4711 if (! has_single_use (t))
4712 {
4713 register_new_assert_for (t, t, comp_code, value,
4714 bb, NULL, si);
4715 need_assert = true;
4716 }
4717 }
4718 }
4719
4720 /* If OP is used only once, namely in this STMT, don't
4721 bother creating an ASSERT_EXPR for it. Such an
4722 ASSERT_EXPR would do nothing but increase compile time. */
4723 if (!has_single_use (op))
4724 {
4725 register_new_assert_for (op, op, comp_code, value,
4726 bb, NULL, si);
4727 need_assert = true;
4728 }
4729 }
4730 }
4731 }
4732
4733 /* Traverse all PHI nodes in BB marking used operands. */
4734 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
4735 {
4736 use_operand_p arg_p;
4737 ssa_op_iter i;
4738 phi = gsi_stmt (si);
4739
4740 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
4741 {
4742 tree arg = USE_FROM_PTR (arg_p);
4743 if (TREE_CODE (arg) == SSA_NAME)
4744 SET_BIT (live, SSA_NAME_VERSION (arg));
4745 }
4746 }
4747
4748 return need_assert;
4749 }
4750
4751 /* Do an RPO walk over the function computing SSA name liveness
4752 on-the-fly and deciding on assert expressions to insert.
4753 Returns true if there are assert expressions to be inserted. */
4754
4755 static bool
4756 find_assert_locations (void)
4757 {
4758 int *rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4759 int *bb_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4760 int *last_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4761 int rpo_cnt, i;
4762 bool need_asserts;
4763
4764 live = XCNEWVEC (sbitmap, last_basic_block + NUM_FIXED_BLOCKS);
4765 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
4766 for (i = 0; i < rpo_cnt; ++i)
4767 bb_rpo[rpo[i]] = i;
4768
4769 need_asserts = false;
4770 for (i = rpo_cnt-1; i >= 0; --i)
4771 {
4772 basic_block bb = BASIC_BLOCK (rpo[i]);
4773 edge e;
4774 edge_iterator ei;
4775
4776 if (!live[rpo[i]])
4777 {
4778 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
4779 sbitmap_zero (live[rpo[i]]);
4780 }
4781
4782 /* Process BB and update the live information with uses in
4783 this block. */
4784 need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);
4785
4786 /* Merge liveness into the predecessor blocks and free it. */
4787 if (!sbitmap_empty_p (live[rpo[i]]))
4788 {
4789 int pred_rpo = i;
4790 FOR_EACH_EDGE (e, ei, bb->preds)
4791 {
4792 int pred = e->src->index;
4793 if (e->flags & EDGE_DFS_BACK)
4794 continue;
4795
4796 if (!live[pred])
4797 {
4798 live[pred] = sbitmap_alloc (num_ssa_names);
4799 sbitmap_zero (live[pred]);
4800 }
4801 sbitmap_a_or_b (live[pred], live[pred], live[rpo[i]]);
4802
4803 if (bb_rpo[pred] < pred_rpo)
4804 pred_rpo = bb_rpo[pred];
4805 }
4806
4807 /* Record the RPO number of the last visited block that needs
4808 live information from this block. */
4809 last_rpo[rpo[i]] = pred_rpo;
4810 }
4811 else
4812 {
4813 sbitmap_free (live[rpo[i]]);
4814 live[rpo[i]] = NULL;
4815 }
4816
4817 /* We can free all successors live bitmaps if all their
4818 predecessors have been visited already. */
4819 FOR_EACH_EDGE (e, ei, bb->succs)
4820 if (last_rpo[e->dest->index] == i
4821 && live[e->dest->index])
4822 {
4823 sbitmap_free (live[e->dest->index]);
4824 live[e->dest->index] = NULL;
4825 }
4826 }
4827
4828 XDELETEVEC (rpo);
4829 XDELETEVEC (bb_rpo);
4830 XDELETEVEC (last_rpo);
4831 for (i = 0; i < last_basic_block + NUM_FIXED_BLOCKS; ++i)
4832 if (live[i])
4833 sbitmap_free (live[i]);
4834 XDELETEVEC (live);
4835
4836 return need_asserts;
4837 }
4838
4839 /* Create an ASSERT_EXPR for NAME and insert it in the location
4840 indicated by LOC. Return true if we made any edge insertions. */
4841
4842 static bool
4843 process_assert_insertions_for (tree name, assert_locus_t loc)
4844 {
4845 /* Build the comparison expression NAME_i COMP_CODE VAL. */
4846 gimple stmt;
4847 tree cond;
4848 gimple assert_stmt;
4849 edge_iterator ei;
4850 edge e;
4851
4852 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
4853 assert_stmt = build_assert_expr_for (cond, name);
4854 if (loc->e)
4855 {
4856 /* We have been asked to insert the assertion on an edge. This
4857 is used only by COND_EXPR and SWITCH_EXPR assertions. */
4858 #if defined ENABLE_CHECKING
4859 gcc_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
4860 || gimple_code (gsi_stmt (loc->si)) == GIMPLE_SWITCH);
4861 #endif
4862
4863 gsi_insert_on_edge (loc->e, assert_stmt);
4864 return true;
4865 }
4866
4867 /* Otherwise, we can insert right after LOC->SI as long as the
4868 statement is not the last statement in the block. */
4869 stmt = gsi_stmt (loc->si);
4870 if (!stmt_ends_bb_p (stmt))
4871 {
4872 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
4873 return false;
4874 }
4875
4876 /* If STMT must be the last statement in BB, we can only insert new
4877 assertions on the non-abnormal edge out of BB. Note that since
4878 STMT is not control flow, there may only be one non-abnormal edge
4879 out of BB. */
4880 FOR_EACH_EDGE (e, ei, loc->bb->succs)
4881 if (!(e->flags & EDGE_ABNORMAL))
4882 {
4883 gsi_insert_on_edge (e, assert_stmt);
4884 return true;
4885 }
4886
4887 gcc_unreachable ();
4888 }
4889
4890
4891 /* Process all the insertions registered for every name N_i registered
4892 in NEED_ASSERT_FOR. The list of assertions to be inserted are
4893 found in ASSERTS_FOR[i]. */
4894
4895 static void
4896 process_assert_insertions (void)
4897 {
4898 unsigned i;
4899 bitmap_iterator bi;
4900 bool update_edges_p = false;
4901 int num_asserts = 0;
4902
4903 if (dump_file && (dump_flags & TDF_DETAILS))
4904 dump_all_asserts (dump_file);
4905
4906 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4907 {
4908 assert_locus_t loc = asserts_for[i];
4909 gcc_assert (loc);
4910
4911 while (loc)
4912 {
4913 assert_locus_t next = loc->next;
4914 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4915 free (loc);
4916 loc = next;
4917 num_asserts++;
4918 }
4919 }
4920
4921 if (update_edges_p)
4922 gsi_commit_edge_inserts ();
4923
4924 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
4925 num_asserts);
4926 }
4927
4928
4929 /* Traverse the flowgraph looking for conditional jumps to insert range
4930 expressions. These range expressions are meant to provide information
4931 to optimizations that need to reason in terms of value ranges. They
4932 will not be expanded into RTL. For instance, given:
4933
4934 x = ...
4935 y = ...
4936 if (x < y)
4937 y = x - 2;
4938 else
4939 x = y + 3;
4940
4941 this pass will transform the code into:
4942
4943 x = ...
4944 y = ...
4945 if (x < y)
4946 {
4947 x = ASSERT_EXPR <x, x < y>
4948 y = x - 2
4949 }
4950 else
4951 {
4952 y = ASSERT_EXPR <y, x <= y>
4953 x = y + 3
4954 }
4955
4956 The idea is that once copy and constant propagation have run, other
4957 optimizations will be able to determine what ranges of values can 'x'
4958 take in different paths of the code, simply by checking the reaching
4959 definition of 'x'. */
4960
4961 static void
4962 insert_range_assertions (void)
4963 {
4964 need_assert_for = BITMAP_ALLOC (NULL);
4965 asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);
4966
4967 calculate_dominance_info (CDI_DOMINATORS);
4968
4969 if (find_assert_locations ())
4970 {
4971 process_assert_insertions ();
4972 update_ssa (TODO_update_ssa_no_phi);
4973 }
4974
4975 if (dump_file && (dump_flags & TDF_DETAILS))
4976 {
4977 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
4978 dump_function_to_file (current_function_decl, dump_file, dump_flags);
4979 }
4980
4981 free (asserts_for);
4982 BITMAP_FREE (need_assert_for);
4983 }
4984
4985 /* Check one ARRAY_REF in REF, located at LOCATION. Ignore flexible arrays
4986 and "struct" hacks. If VRP can determine that the
4987 array subscript is a constant, check if it is outside the valid
4988 range. If the array subscript is a RANGE, warn if it does not
4989 overlap the valid range.
4990 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
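/* For instance (illustrative), given 'int a[10]', a known subscript of 10
   or more triggers the "above array bounds" warning, except that the
   address &a[10] (one past the end) is tolerated when IGNORE_OFF_BY_ONE
   is true.  */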
4991
4992 static void
4993 check_array_ref (tree ref, location_t location, bool ignore_off_by_one)
4994 {
4995 value_range_t *vr = NULL;
4996 tree low_sub, up_sub;
4997 tree low_bound, up_bound = array_ref_up_bound (ref);
4998
4999 low_sub = up_sub = TREE_OPERAND (ref, 1);
5000
5001 if (!up_bound || TREE_NO_WARNING (ref)
5002 || TREE_CODE (up_bound) != INTEGER_CST
5003 /* Cannot check flexible arrays. */
5004 || (TYPE_SIZE (TREE_TYPE (ref)) == NULL_TREE
5005 && TYPE_DOMAIN (TREE_TYPE (ref)) != NULL_TREE
5006 && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (ref))) == NULL_TREE)
5007 /* Accesses after the end of arrays of size 0 (gcc
5008 extension) and 1 are likely intentional ("struct
5009 hack"). */
5010 || compare_tree_int (up_bound, 1) <= 0)
5011 return;
5012
5013 low_bound = array_ref_low_bound (ref);
5014
5015 if (TREE_CODE (low_sub) == SSA_NAME)
5016 {
5017 vr = get_value_range (low_sub);
5018 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
5019 {
5020 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
5021 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
5022 }
5023 }
5024
5025 if (vr && vr->type == VR_ANTI_RANGE)
5026 {
5027 if (TREE_CODE (up_sub) == INTEGER_CST
5028 && tree_int_cst_lt (up_bound, up_sub)
5029 && TREE_CODE (low_sub) == INTEGER_CST
5030 && tree_int_cst_lt (low_sub, low_bound))
5031 {
5032 warning_at (location, OPT_Warray_bounds,
5033 "array subscript is outside array bounds");
5034 TREE_NO_WARNING (ref) = 1;
5035 }
5036 }
5037 else if (TREE_CODE (up_sub) == INTEGER_CST
5038 && tree_int_cst_lt (up_bound, up_sub)
5039 && !tree_int_cst_equal (up_bound, up_sub)
5040 && (!ignore_off_by_one
5041 || !tree_int_cst_equal (int_const_binop (PLUS_EXPR,
5042 up_bound,
5043 integer_one_node,
5044 0),
5045 up_sub)))
5046 {
5047 warning_at (location, OPT_Warray_bounds,
5048 "array subscript is above array bounds");
5049 TREE_NO_WARNING (ref) = 1;
5050 }
5051 else if (TREE_CODE (low_sub) == INTEGER_CST
5052 && tree_int_cst_lt (low_sub, low_bound))
5053 {
5054 warning_at (location, OPT_Warray_bounds,
5055 "array subscript is below array bounds");
5056 TREE_NO_WARNING (ref) = 1;
5057 }
5058 }
5059
5060 /* Check whether the expression T, located at LOCATION, computes the
5061 address of an ARRAY_REF, and call check_array_ref on each one found. */
5062
5063 static void
5064 search_for_addr_array (tree t, location_t location)
5065 {
5066 while (TREE_CODE (t) == SSA_NAME)
5067 {
5068 gimple g = SSA_NAME_DEF_STMT (t);
5069
5070 if (gimple_code (g) != GIMPLE_ASSIGN)
5071 return;
5072
5073 if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
5074 != GIMPLE_SINGLE_RHS)
5075 return;
5076
5077 t = gimple_assign_rhs1 (g);
5078 }
5079
5080
5081 /* We are only interested in addresses of ARRAY_REF's. */
5082 if (TREE_CODE (t) != ADDR_EXPR)
5083 return;
5084
5085 /* Check each ARRAY_REFs in the reference chain. */
5086 do
5087 {
5088 if (TREE_CODE (t) == ARRAY_REF)
5089 check_array_ref (t, location, true /*ignore_off_by_one*/);
5090
5091 t = TREE_OPERAND (t, 0);
5092 }
5093 while (handled_component_p (t));
5094 }
5095
5096 /* walk_tree() callback that checks if *TP is
5097 an ARRAY_REF inside an ADDR_EXPR (in which an array
5098 subscript one outside the valid range is allowed). Call
5099 check_array_ref for each ARRAY_REF found. The location is
5100 passed in DATA. */
5101
5102 static tree
5103 check_array_bounds (tree *tp, int *walk_subtree, void *data)
5104 {
5105 tree t = *tp;
5106 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5107 const location_t *location = (const location_t *) wi->info;
5108
5109 *walk_subtree = TRUE;
5110
5111 if (TREE_CODE (t) == ARRAY_REF)
5112 check_array_ref (t, *location, false /*ignore_off_by_one*/);
5113
5114 if (TREE_CODE (t) == INDIRECT_REF
5115 || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
5116 search_for_addr_array (TREE_OPERAND (t, 0), *location);
5117
5118 if (TREE_CODE (t) == ADDR_EXPR)
5119 *walk_subtree = FALSE;
5120
5121 return NULL_TREE;
5122 }
5123
5124 /* Walk over all statements of all reachable BBs and call check_array_bounds
5125 on them. */
5126
5127 static void
5128 check_all_array_refs (void)
5129 {
5130 basic_block bb;
5131 gimple_stmt_iterator si;
5132
5133 FOR_EACH_BB (bb)
5134 {
5135 /* Skip bb's that are clearly unreachable. */
5136 if (single_pred_p (bb))
5137 {
5138 int i;
5139 bool reachable = true;
5140 edge e2;
5141 edge e = EDGE_PRED (bb, 0);
5142 basic_block pred_bb = e->src;
5143 gimple ls = NULL;
5144
5145 for (i = 0; VEC_iterate (edge, to_remove_edges, i, e2); ++i)
5146 if (e == e2)
5147 {
5148 reachable = false;
5149 break;
5150 }
5151
5152 if (!reachable)
5153 continue;
5154
5155 if (!gsi_end_p (gsi_last_bb (pred_bb)))
5156 ls = gsi_stmt (gsi_last_bb (pred_bb));
5157
5158 if (ls && gimple_code (ls) == GIMPLE_COND
5159 && ((gimple_cond_false_p (ls)
5160 && (EDGE_PRED (bb, 0)->flags & EDGE_TRUE_VALUE))
5161 || (gimple_cond_true_p (ls)
5162 && (EDGE_PRED (bb, 0)->flags & EDGE_FALSE_VALUE))))
5163 continue;
5164 }
5165 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5166 {
5167 gimple stmt = gsi_stmt (si);
5168 struct walk_stmt_info wi;
5169 if (!gimple_has_location (stmt))
5170 continue;
5171
5172 if (is_gimple_call (stmt))
5173 {
5174 size_t i;
5175 size_t n = gimple_call_num_args (stmt);
5176 for (i = 0; i < n; i++)
5177 {
5178 tree arg = gimple_call_arg (stmt, i);
5179 search_for_addr_array (arg, gimple_location (stmt));
5180 }
5181 }
5182 else
5183 {
5184 memset (&wi, 0, sizeof (wi));
5185 wi.info = CONST_CAST (void *, (const void *)
5186 gimple_location_ptr (stmt));
5187
5188 walk_gimple_op (gsi_stmt (si),
5189 check_array_bounds,
5190 &wi);
5191 }
5192 }
5193 }
5194 }
5195
5196 /* Convert range assertion expressions into the implied copies and
5197 copy propagate away the copies. Doing the trivial copy propagation
5198 here avoids the need to run the full copy propagation pass after
5199 VRP.
5200
5201 FIXME, this will eventually lead to copy propagation removing the
5202 names that had useful range information attached to them. For
5203 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5204 then N_i will have the range [4, +INF].
5205
5206 However, by converting the assertion into the implied copy
5207 operation N_i = N_j, we will then copy-propagate N_j into the uses
5208 of N_i and lose the range information. We may want to hold on to
5209 ASSERT_EXPRs a little while longer as the ranges could be used in
5210 things like jump threading.
5211
5212 The problem with keeping ASSERT_EXPRs around is that passes after
5213 VRP need to handle them appropriately.
5214
5215 Another approach would be to make the range information a first
5216 class property of the SSA_NAME so that it can be queried from
5217 any pass. This is made somewhat more complex by the need for
5218 multiple ranges to be associated with one SSA_NAME. */
5219
5220 static void
5221 remove_range_assertions (void)
5222 {
5223 basic_block bb;
5224 gimple_stmt_iterator si;
5225
5226 /* Note that the BSI iterator bump happens at the bottom of the
5227 loop and no bump is necessary if we're removing the statement
5228 referenced by the current BSI. */
5229 FOR_EACH_BB (bb)
5230 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
5231 {
5232 gimple stmt = gsi_stmt (si);
5233 gimple use_stmt;
5234
5235 if (is_gimple_assign (stmt)
5236 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
5237 {
5238 tree rhs = gimple_assign_rhs1 (stmt);
5239 tree var;
5240 tree cond = fold (ASSERT_EXPR_COND (rhs));
5241 use_operand_p use_p;
5242 imm_use_iterator iter;
5243
5244 gcc_assert (cond != boolean_false_node);
5245
5246 /* Propagate the RHS into every use of the LHS. */
5247 var = ASSERT_EXPR_VAR (rhs);
5248 FOR_EACH_IMM_USE_STMT (use_stmt, iter,
5249 gimple_assign_lhs (stmt))
5250 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
5251 {
5252 SET_USE (use_p, var);
5253 gcc_assert (TREE_CODE (var) == SSA_NAME);
5254 }
5255
5256 /* And finally, remove the copy, it is not needed. */
5257 gsi_remove (&si, true);
5258 release_defs (stmt);
5259 }
5260 else
5261 gsi_next (&si);
5262 }
5263 }
5264
5265
5266 /* Return true if STMT is interesting for VRP. */
5267
5268 static bool
5269 stmt_interesting_for_vrp (gimple stmt)
5270 {
5271 if (gimple_code (stmt) == GIMPLE_PHI
5272 && is_gimple_reg (gimple_phi_result (stmt))
5273 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))
5274 || POINTER_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))))
5275 return true;
5276 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5277 {
5278 tree lhs = gimple_get_lhs (stmt);
5279
5280 /* In general, assignments with virtual operands are not useful
5281 for deriving ranges, with the obvious exception of calls to
5282 builtin functions. */
5283 if (lhs && TREE_CODE (lhs) == SSA_NAME
5284 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5285 || POINTER_TYPE_P (TREE_TYPE (lhs)))
5286 && ((is_gimple_call (stmt)
5287 && gimple_call_fndecl (stmt) != NULL_TREE
5288 && DECL_IS_BUILTIN (gimple_call_fndecl (stmt)))
5289 || !gimple_vuse (stmt)))
5290 return true;
5291 }
5292 else if (gimple_code (stmt) == GIMPLE_COND
5293 || gimple_code (stmt) == GIMPLE_SWITCH)
5294 return true;
5295
5296 return false;
5297 }
5298
5299
5300 /* Initialize local data structures for VRP. */
5301
5302 static void
5303 vrp_initialize (void)
5304 {
5305 basic_block bb;
5306
5307 vr_value = XCNEWVEC (value_range_t *, num_ssa_names);
5308 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
5309
5310 FOR_EACH_BB (bb)
5311 {
5312 gimple_stmt_iterator si;
5313
5314 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5315 {
5316 gimple phi = gsi_stmt (si);
5317 if (!stmt_interesting_for_vrp (phi))
5318 {
5319 tree lhs = PHI_RESULT (phi);
5320 set_value_range_to_varying (get_value_range (lhs));
5321 prop_set_simulate_again (phi, false);
5322 }
5323 else
5324 prop_set_simulate_again (phi, true);
5325 }
5326
5327 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5328 {
5329 gimple stmt = gsi_stmt (si);
5330
5331 if (!stmt_interesting_for_vrp (stmt))
5332 {
5333 ssa_op_iter i;
5334 tree def;
5335 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
5336 set_value_range_to_varying (get_value_range (def));
5337 prop_set_simulate_again (stmt, false);
5338 }
5339 else
5340 {
5341 prop_set_simulate_again (stmt, true);
5342 }
5343 }
5344 }
5345 }
5346
5347
5348 /* Visit assignment STMT. If it produces an interesting range, record
5349 the SSA name in *OUTPUT_P. */
5350
5351 static enum ssa_prop_result
5352 vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
5353 {
5354 tree def, lhs;
5355 ssa_op_iter iter;
5356 enum gimple_code code = gimple_code (stmt);
5357 lhs = gimple_get_lhs (stmt);
5358
5359 /* We only keep track of ranges in integral and pointer types. */
5360 if (TREE_CODE (lhs) == SSA_NAME
5361 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5362 /* It is valid to have NULL MIN/MAX values on a type. See
5363 build_range_type. */
5364 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
5365 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
5366 || POINTER_TYPE_P (TREE_TYPE (lhs))))
5367 {
5368 struct loop *l;
5369 value_range_t new_vr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
5370
5371 if (code == GIMPLE_CALL)
5372 extract_range_basic (&new_vr, stmt);
5373 else
5374 extract_range_from_assignment (&new_vr, stmt);
5375
5376 /* If STMT is inside a loop, we may be able to know something
5377 else about the range of LHS by examining scalar evolution
5378 information. */
5379 if (current_loops && (l = loop_containing_stmt (stmt)))
5380 adjust_range_with_scev (&new_vr, l, stmt, lhs);
5381
5382 if (update_value_range (lhs, &new_vr))
5383 {
5384 *output_p = lhs;
5385
5386 if (dump_file && (dump_flags & TDF_DETAILS))
5387 {
5388 fprintf (dump_file, "Found new range for ");
5389 print_generic_expr (dump_file, lhs, 0);
5390 fprintf (dump_file, ": ");
5391 dump_value_range (dump_file, &new_vr);
5392 fprintf (dump_file, "\n\n");
5393 }
5394
5395 if (new_vr.type == VR_VARYING)
5396 return SSA_PROP_VARYING;
5397
5398 return SSA_PROP_INTERESTING;
5399 }
5400
5401 return SSA_PROP_NOT_INTERESTING;
5402 }
5403
5404 /* Every other statement produces no useful ranges. */
5405 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
5406 set_value_range_to_varying (get_value_range (def));
5407
5408 return SSA_PROP_VARYING;
5409 }
5410
5411 /* Helper that gets the value range of the SSA_NAME with version I,
5412 or a symbolic range containing just the SSA_NAME if its value range
5413 is varying or undefined. */
5414
5415 static inline value_range_t
5416 get_vr_for_comparison (int i)
5417 {
5418 value_range_t vr = *(vr_value[i]);
5419
5420 /* If name N_i does not have a valid range, use N_i as its own
5421 range. This allows us to compare against names that may
5422 have N_i in their ranges. */
5423 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
5424 {
5425 vr.type = VR_RANGE;
5426 vr.min = ssa_name (i);
5427 vr.max = ssa_name (i);
5428 }
5429
5430 return vr;
5431 }
5432
5433 /* Compare all the value ranges for names equivalent to VAR with VAL
5434 using comparison code COMP. Return the same value returned by
5435 compare_range_with_value, including the setting of
5436 *STRICT_OVERFLOW_P. */
5437
5438 static tree
5439 compare_name_with_value (enum tree_code comp, tree var, tree val,
5440 bool *strict_overflow_p)
5441 {
5442 bitmap_iterator bi;
5443 unsigned i;
5444 bitmap e;
5445 tree retval, t;
5446 int used_strict_overflow;
5447 bool sop;
5448 value_range_t equiv_vr;
5449
5450 /* Get the set of equivalences for VAR. */
5451 e = get_value_range (var)->equiv;
5452
5453 /* Start at -1. Set it to 0 if we do a comparison without relying
5454 on overflow, or 1 if all comparisons rely on overflow. */
5455 used_strict_overflow = -1;
5456
5457 /* Compare VAR's value range with VAL. */
5458 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
5459 sop = false;
5460 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
5461 if (retval)
5462 used_strict_overflow = sop ? 1 : 0;
5463
5464 /* If the equiv set is empty we have done all work we need to do. */
5465 if (e == NULL)
5466 {
5467 if (retval
5468 && used_strict_overflow > 0)
5469 *strict_overflow_p = true;
5470 return retval;
5471 }
5472
5473 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
5474 {
5475 equiv_vr = get_vr_for_comparison (i);
5476 sop = false;
5477 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
5478 if (t)
5479 {
5480 /* If we get different answers from different members
5481 of the equivalence set this check must be in a dead
5482 code region. Folding it to a trap representation
5483 would be correct here. For now just return don't-know. */
5484 if (retval != NULL
5485 && t != retval)
5486 {
5487 retval = NULL_TREE;
5488 break;
5489 }
5490 retval = t;
5491
5492 if (!sop)
5493 used_strict_overflow = 0;
5494 else if (used_strict_overflow < 0)
5495 used_strict_overflow = 1;
5496 }
5497 }
5498
5499 if (retval
5500 && used_strict_overflow > 0)
5501 *strict_overflow_p = true;
5502
5503 return retval;
5504 }
5505
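/* Comparing a value range against a constant, as the code above does for
   every equivalent name, only needs the two bounds: with X in [3, 7],
   "X < 10" is known true and "X < 3" known false, while "X < 5" cannot be
   decided.  A minimal three-valued sketch of that idea, assuming plain
   "long" bounds instead of trees (illustrative only, not part of GCC):  */
#if 0
struct interval { long min, max; };

/* Evaluate "X < VAL" for X in VR: 1 means always true, 0 always false,
   -1 unknown.  */
static int
range_lt_value (struct interval vr, long val)
{
  if (vr.max < val)
    return 1;
  if (vr.min >= val)
    return 0;
  return -1;
}
#endif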
5506
5507 /* Given a comparison code COMP and names N1 and N2, compare all the
5508 ranges equivalent to N1 against all the ranges equivalent to N2
5509 to determine the value of N1 COMP N2. Return the same value
5510 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
5511 whether we relied on an overflow infinity in the comparison. */
5512
5513
5514 static tree
5515 compare_names (enum tree_code comp, tree n1, tree n2,
5516 bool *strict_overflow_p)
5517 {
5518 tree t, retval;
5519 bitmap e1, e2;
5520 bitmap_iterator bi1, bi2;
5521 unsigned i1, i2;
5522 int used_strict_overflow;
5523 static bitmap_obstack *s_obstack = NULL;
5524 static bitmap s_e1 = NULL, s_e2 = NULL;
5525
5526 /* Compare the ranges of every name equivalent to N1 against the
5527 ranges of every name equivalent to N2. */
5528 e1 = get_value_range (n1)->equiv;
5529 e2 = get_value_range (n2)->equiv;
5530
5531 /* Use the fake bitmaps if e1 or e2 are not available. */
5532 if (s_obstack == NULL)
5533 {
5534 s_obstack = XNEW (bitmap_obstack);
5535 bitmap_obstack_initialize (s_obstack);
5536 s_e1 = BITMAP_ALLOC (s_obstack);
5537 s_e2 = BITMAP_ALLOC (s_obstack);
5538 }
5539 if (e1 == NULL)
5540 e1 = s_e1;
5541 if (e2 == NULL)
5542 e2 = s_e2;
5543
5544 /* Add N1 and N2 to their own set of equivalences to avoid
5545 duplicating the body of the loop just to check N1 and N2
5546 ranges. */
5547 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
5548 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
5549
5550 /* If the equivalence sets have a non-empty intersection, then the two
5551 names are known to be equal and can be compared without checking their ranges. */
5552 if (bitmap_intersect_p (e1, e2))
5553 {
5554 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5555 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5556
5557 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
5558 ? boolean_true_node
5559 : boolean_false_node;
5560 }
5561
5562 /* Start at -1. Set it to 0 if we do a comparison without relying
5563 on overflow, or 1 if all comparisons rely on overflow. */
5564 used_strict_overflow = -1;
5565
5566 /* Otherwise, compare all the equivalent ranges. N1 and N2 were
5567 already added to their own equivalence sets above, so the loops
5568 below cover their ranges as well. */
5569 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
5570 {
5571 value_range_t vr1 = get_vr_for_comparison (i1);
5572
5573 t = retval = NULL_TREE;
5574 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
5575 {
5576 bool sop = false;
5577
5578 value_range_t vr2 = get_vr_for_comparison (i2);
5579
5580 t = compare_ranges (comp, &vr1, &vr2, &sop);
5581 if (t)
5582 {
5583 /* If we get different answers from different members
5584 of the equivalence set this check must be in a dead
5585 code region. Folding it to a trap representation
5586 would be correct here. For now just return don't-know. */
5587 if (retval != NULL
5588 && t != retval)
5589 {
5590 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5591 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5592 return NULL_TREE;
5593 }
5594 retval = t;
5595
5596 if (!sop)
5597 used_strict_overflow = 0;
5598 else if (used_strict_overflow < 0)
5599 used_strict_overflow = 1;
5600 }
5601 }
5602
5603 if (retval)
5604 {
5605 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5606 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5607 if (used_strict_overflow > 0)
5608 *strict_overflow_p = true;
5609 return retval;
5610 }
5611 }
5612
5613 /* None of the equivalent ranges are useful in computing this
5614 comparison. */
5615 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5616 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5617 return NULL_TREE;
5618 }
5619
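/* A stand-alone sketch of the shortcut used above: if two SSA names share a
   member of their equivalence sets they must hold the same value, so the
   comparison folds without looking at any range.  The bitmask type and the
   names below are illustrative only, not part of GCC.  */
#if 0
#include <stdbool.h>

enum cmp_code { CMP_EQ, CMP_NE, CMP_LT, CMP_LE, CMP_GT, CMP_GE };

/* Return true and set *RESULT when the two equivalence sets intersect.  */
static bool
fold_by_equivalence (unsigned equiv1, unsigned equiv2,
                     enum cmp_code code, bool *result)
{
  if ((equiv1 & equiv2) == 0)
    return false;                      /* No common member, cannot fold.  */
  *result = (code == CMP_EQ || code == CMP_LE || code == CMP_GE);
  return true;
}
#endif
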
5620 /* Helper function for vrp_evaluate_conditional_warnv_with_ops. */
5621
5622 static tree
5623 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
5624 tree op0, tree op1,
5625 bool * strict_overflow_p)
5626 {
5627 value_range_t *vr0, *vr1;
5628
5629 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
5630 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
5631
5632 if (vr0 && vr1)
5633 return compare_ranges (code, vr0, vr1, strict_overflow_p);
5634 else if (vr0 && vr1 == NULL)
5635 return compare_range_with_value (code, vr0, op1, strict_overflow_p);
5636 else if (vr0 == NULL && vr1)
5637 return (compare_range_with_value
5638 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
5639 return NULL;
5640 }
5641
5642 /* Helper function for vrp_evaluate_conditional_warnv. */
5643
5644 static tree
5645 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
5646 tree op1, bool use_equiv_p,
5647 bool *strict_overflow_p, bool *only_ranges)
5648 {
5649 tree ret;
5650 if (only_ranges)
5651 *only_ranges = true;
5652
5653 /* We only deal with integral and pointer types. */
5654 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
5655 && !POINTER_TYPE_P (TREE_TYPE (op0)))
5656 return NULL_TREE;
5657
5658 if (use_equiv_p)
5659 {
5660 if (only_ranges
5661 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
5662 (code, op0, op1, strict_overflow_p)))
5663 return ret;
5664 *only_ranges = false;
5665 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
5666 return compare_names (code, op0, op1, strict_overflow_p);
5667 else if (TREE_CODE (op0) == SSA_NAME)
5668 return compare_name_with_value (code, op0, op1, strict_overflow_p);
5669 else if (TREE_CODE (op1) == SSA_NAME)
5670 return (compare_name_with_value
5671 (swap_tree_comparison (code), op1, op0, strict_overflow_p));
5672 }
5673 else
5674 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
5675 strict_overflow_p);
5676 return NULL_TREE;
5677 }
5678
5679 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
5680 information. Return NULL if the conditional cannot be evaluated.
5681 The ranges of all the names equivalent with the operands in COND
5682 will be used when trying to compute the value. If the result is
5683 based on undefined signed overflow, issue a warning if
5684 appropriate. */
5685
5686 tree
5687 vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
5688 {
5689 bool sop;
5690 tree ret;
5691 bool only_ranges;
5692
5693 sop = false;
5694 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
5695 &only_ranges);
5696
5697 if (ret && sop)
5698 {
5699 enum warn_strict_overflow_code wc;
5700 const char* warnmsg;
5701
5702 if (is_gimple_min_invariant (ret))
5703 {
5704 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
5705 warnmsg = G_("assuming signed overflow does not occur when "
5706 "simplifying conditional to constant");
5707 }
5708 else
5709 {
5710 wc = WARN_STRICT_OVERFLOW_COMPARISON;
5711 warnmsg = G_("assuming signed overflow does not occur when "
5712 "simplifying conditional");
5713 }
5714
5715 if (issue_strict_overflow_warning (wc))
5716 {
5717 location_t location;
5718
5719 if (!gimple_has_location (stmt))
5720 location = input_location;
5721 else
5722 location = gimple_location (stmt);
5723 warning (OPT_Wstrict_overflow, "%H%s", &location, warnmsg);
5724 }
5725 }
5726
5727 if (warn_type_limits
5728 && ret && only_ranges
5729 && TREE_CODE_CLASS (code) == tcc_comparison
5730 && TREE_CODE (op0) == SSA_NAME)
5731 {
5732 /* If the comparison is being folded and the operand on the LHS
5733 is being compared against a constant value that is outside of
5734 the natural range of OP0's type, then the predicate will
5735 always fold regardless of the value of OP0. If -Wtype-limits
5736 was specified, emit a warning. */
5737 const char *warnmsg = NULL;
5738 tree type = TREE_TYPE (op0);
5739 value_range_t *vr0 = get_value_range (op0);
5740
5741 if (vr0->type != VR_VARYING
5742 && INTEGRAL_TYPE_P (type)
5743 && vrp_val_is_min (vr0->min)
5744 && vrp_val_is_max (vr0->max)
5745 && is_gimple_min_invariant (op1))
5746 {
5747 if (integer_zerop (ret))
5748 warnmsg = G_("comparison always false due to limited range of "
5749 "data type");
5750 else
5751 warnmsg = G_("comparison always true due to limited range of "
5752 "data type");
5753 }
5754
5755 if (warnmsg)
5756 {
5757 location_t location;
5758
5759 if (!gimple_has_location (stmt))
5760 location = input_location;
5761 else
5762 location = gimple_location (stmt);
5763
5764 warning (OPT_Wtype_limits, "%H%s", &location, warnmsg);
5765 }
5766 }
5767
5768 return ret;
5769 }
5770
5771
5772 /* Visit conditional statement STMT. If we can determine which edge
5773 will be taken out of STMT's basic block, record it in
5774 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
5775 SSA_PROP_VARYING. */
5776
5777 static enum ssa_prop_result
5778 vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
5779 {
5780 tree val;
5781 bool sop;
5782
5783 *taken_edge_p = NULL;
5784
5785 if (dump_file && (dump_flags & TDF_DETAILS))
5786 {
5787 tree use;
5788 ssa_op_iter i;
5789
5790 fprintf (dump_file, "\nVisiting conditional with predicate: ");
5791 print_gimple_stmt (dump_file, stmt, 0, 0);
5792 fprintf (dump_file, "\nWith known ranges\n");
5793
5794 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
5795 {
5796 fprintf (dump_file, "\t");
5797 print_generic_expr (dump_file, use, 0);
5798 fprintf (dump_file, ": ");
5799 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
5800 }
5801
5802 fprintf (dump_file, "\n");
5803 }
5804
5805 /* Compute the value of the predicate COND by checking the known
5806 ranges of each of its operands.
5807
5808 Note that we cannot evaluate all the equivalent ranges here
5809 because those ranges may not yet be final and with the current
5810 propagation strategy, we cannot determine when the value ranges
5811 of the names in the equivalence set have changed.
5812
5813 For instance, given the following code fragment
5814
5815 i_5 = PHI <8, i_13>
5816 ...
5817 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
5818 if (i_14 == 1)
5819 ...
5820
5821 Assume that on the first visit to i_14, i_5 has the temporary
5822 range [8, 8] because the second argument to the PHI function is
5823 not yet executable. We derive the range ~[0, 0] for i_14 and the
5824 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
5825 the first time, since i_14 is equivalent to the range [8, 8], we
5826 determine that the predicate is always false.
5827
5828 On the next round of propagation, i_13 is determined to be
5829 VARYING, which causes i_5 to drop down to VARYING. So, another
5830 visit to i_14 is scheduled. In this second visit, we compute the
5831 exact same range and equivalence set for i_14, namely ~[0, 0] and
5832 { i_5 }. But we did not have the previous range for i_5
5833 registered, so vrp_visit_assignment thinks that the range for
5834 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
5835 is not visited again, which stops propagation from visiting
5836 statements in the THEN clause of that if().
5837
5838 To properly fix this we would need to keep the previous range
5839 value for the names in the equivalence set. This way we would've
5840 discovered that from one visit to the other i_5 changed from
5841 range [8, 8] to VR_VARYING.
5842
5843 However, fixing this apparent limitation may not be worth the
5844 additional checking. Testing on several code bases (GCC, DLV,
5845 MICO, TRAMP3D and SPEC2000) showed that doing this results in
5846 4 more predicates folded in SPEC. */
5847 sop = false;
5848
5849 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
5850 gimple_cond_lhs (stmt),
5851 gimple_cond_rhs (stmt),
5852 false, &sop, NULL);
5853 if (val)
5854 {
5855 if (!sop)
5856 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
5857 else
5858 {
5859 if (dump_file && (dump_flags & TDF_DETAILS))
5860 fprintf (dump_file,
5861 "\nIgnoring predicate evaluation because "
5862 "it assumes that signed overflow is undefined");
5863 val = NULL_TREE;
5864 }
5865 }
5866
5867 if (dump_file && (dump_flags & TDF_DETAILS))
5868 {
5869 fprintf (dump_file, "\nPredicate evaluates to: ");
5870 if (val == NULL_TREE)
5871 fprintf (dump_file, "DON'T KNOW\n");
5872 else
5873 print_generic_stmt (dump_file, val, 0);
5874 }
5875
5876 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5877 }
5878
5879 /* Searches the case label vector of switch statement STMT for the index
5880 *IDX of the CASE_LABEL that includes the value VAL. The search is
5881 restricted to the range [START_IDX, n - 1] where n is the number of labels.
5882
5883 If there is a CASE_LABEL for VAL, its index is placed in *IDX and true is
5884 returned.
5885
5886 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
5887 its index is placed in *IDX and false is returned.
5888
5889 If VAL is larger than any CASE_LABEL, n is placed in *IDX and false is
5890 returned. */
5891
5892 static bool
5893 find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
5894 {
5895 size_t n = gimple_switch_num_labels (stmt);
5896 size_t low, high;
5897
5898 /* Find case label for minimum of the value range or the next one.
5899 At each iteration we are searching in [low, high - 1]. */
5900
5901 for (low = start_idx, high = n; high != low; )
5902 {
5903 tree t;
5904 int cmp;
5905 /* Note that i != high, so we never ask for n. */
5906 size_t i = (high + low) / 2;
5907 t = gimple_switch_label (stmt, i);
5908
5909 /* Cache the result of comparing CASE_LOW and val. */
5910 cmp = tree_int_cst_compare (CASE_LOW (t), val);
5911
5912 if (cmp == 0)
5913 {
5914 /* Ranges cannot be empty. */
5915 *idx = i;
5916 return true;
5917 }
5918 else if (cmp > 0)
5919 high = i;
5920 else
5921 {
5922 low = i + 1;
5923 if (CASE_HIGH (t) != NULL
5924 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
5925 {
5926 *idx = i;
5927 return true;
5928 }
5929 }
5930 }
5931
5932 *idx = high;
5933 return false;
5934 }
5935
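/* A stand-alone sketch of the same binary search, assuming the case labels
   form a sorted array of non-overlapping [low, high] ranges of plain
   integers.  The struct and function names are illustrative only.  */
#if 0
#include <stdbool.h>
#include <stddef.h>

struct case_range { long low, high; };

static bool
find_case_index (const struct case_range *labels, size_t n,
                 size_t start_idx, long val, size_t *idx)
{
  size_t low = start_idx, high = n;

  while (high != low)
    {
      /* i != high, so labels[n] is never read.  */
      size_t i = (high + low) / 2;

      if (labels[i].low == val)
        {
          *idx = i;
          return true;
        }
      else if (labels[i].low > val)
        high = i;
      else
        {
          low = i + 1;
          if (labels[i].high >= val)
            {
              /* VAL falls inside this case range.  */
              *idx = i;
              return true;
            }
        }
    }

  *idx = high;              /* First label larger than VAL, or N.  */
  return false;
}
#endif
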
5936 /* Searches the case label vector of switch statement STMT for the range of
5937 CASE_LABELs that covers values between MIN and MAX. The first index is
5938 placed in *MIN_IDX and the last index in *MAX_IDX. If the range of
5939 CASE_LABELs is empty then *MAX_IDX < *MIN_IDX.
5940 Returns true if the default label is not needed. */
5941
5942 static bool
5943 find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
5944 size_t *max_idx)
5945 {
5946 size_t i, j;
5947 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
5948 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
5949
5950 if (i == j
5951 && min_take_default
5952 && max_take_default)
5953 {
5954 /* Only the default case label is reached.
5955 Return an empty range. */
5956 *min_idx = 1;
5957 *max_idx = 0;
5958 return false;
5959 }
5960 else
5961 {
5962 bool take_default = min_take_default || max_take_default;
5963 tree low, high;
5964 size_t k;
5965
5966 if (max_take_default)
5967 j--;
5968
5969 /* If the case label range is contiguous, we do not need
5970 the default case label. Verify that. */
5971 high = CASE_LOW (gimple_switch_label (stmt, i));
5972 if (CASE_HIGH (gimple_switch_label (stmt, i)))
5973 high = CASE_HIGH (gimple_switch_label (stmt, i));
5974 for (k = i + 1; k <= j; ++k)
5975 {
5976 low = CASE_LOW (gimple_switch_label (stmt, k));
5977 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high, 0)))
5978 {
5979 take_default = true;
5980 break;
5981 }
5982 high = low;
5983 if (CASE_HIGH (gimple_switch_label (stmt, k)))
5984 high = CASE_HIGH (gimple_switch_label (stmt, k));
5985 }
5986
5987 *min_idx = i;
5988 *max_idx = j;
5989 return !take_default;
5990 }
5991 }
5992
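/* Whether the default label can be dropped above boils down to checking that
   the selected case labels tile the value range with no gaps, i.e. each label
   starts exactly one past the previous label's high bound.  A stand-alone
   sketch over plain integer ranges (illustrative only, not part of GCC):  */
#if 0
#include <stdbool.h>
#include <stddef.h>

struct case_range { long low, high; };

static bool
ranges_are_contiguous (const struct case_range *labels, size_t i, size_t j)
{
  size_t k;
  long high = labels[i].high;

  for (k = i + 1; k <= j; ++k)
    {
      if (labels[k].low - high != 1)
        return false;          /* Gap found: the default label is needed.  */
      high = labels[k].high;
    }
  return true;
}
#endif
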
5993 /* Visit switch statement STMT. If we can determine which edge
5994 will be taken out of STMT's basic block, record it in
5995 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
5996 SSA_PROP_VARYING. */
5997
5998 static enum ssa_prop_result
5999 vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
6000 {
6001 tree op, val;
6002 value_range_t *vr;
6003 size_t i = 0, j = 0, n;
6004 bool take_default;
6005
6006 *taken_edge_p = NULL;
6007 op = gimple_switch_index (stmt);
6008 if (TREE_CODE (op) != SSA_NAME)
6009 return SSA_PROP_VARYING;
6010
6011 vr = get_value_range (op);
6012 if (dump_file && (dump_flags & TDF_DETAILS))
6013 {
6014 fprintf (dump_file, "\nVisiting switch expression with operand ");
6015 print_generic_expr (dump_file, op, 0);
6016 fprintf (dump_file, " with known range ");
6017 dump_value_range (dump_file, vr);
6018 fprintf (dump_file, "\n");
6019 }
6020
6021 if (vr->type != VR_RANGE
6022 || symbolic_range_p (vr))
6023 return SSA_PROP_VARYING;
6024
6025 /* Find the single edge that is taken from the switch expression. */
6026 n = gimple_switch_num_labels (stmt);
6027
6028 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
6029
6030 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
6031 label. */
6032 if (j < i)
6033 {
6034 gcc_assert (take_default);
6035 val = gimple_switch_default_label (stmt);
6036 }
6037 else
6038 {
6039 /* Check if labels with index i to j and maybe the default label
6040 are all reaching the same label. */
6041
6042 val = gimple_switch_label (stmt, i);
6043 if (take_default
6044 && CASE_LABEL (gimple_switch_default_label (stmt))
6045 != CASE_LABEL (val))
6046 {
6047 if (dump_file && (dump_flags & TDF_DETAILS))
6048 fprintf (dump_file, " not a single destination for this "
6049 "range\n");
6050 return SSA_PROP_VARYING;
6051 }
6052 for (++i; i <= j; ++i)
6053 {
6054 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
6055 {
6056 if (dump_file && (dump_flags & TDF_DETAILS))
6057 fprintf (dump_file, " not a single destination for this "
6058 "range\n");
6059 return SSA_PROP_VARYING;
6060 }
6061 }
6062 }
6063
6064 *taken_edge_p = find_edge (gimple_bb (stmt),
6065 label_to_block (CASE_LABEL (val)));
6066
6067 if (dump_file && (dump_flags & TDF_DETAILS))
6068 {
6069 fprintf (dump_file, " will take edge to ");
6070 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
6071 }
6072
6073 return SSA_PROP_INTERESTING;
6074 }
6075
6076
6077 /* Evaluate statement STMT. If the statement produces a useful range,
6078 return SSA_PROP_INTERESTING and record the SSA name with the
6079 interesting range into *OUTPUT_P.
6080
6081 If STMT is a conditional branch and we can determine its truth
6082 value, the taken edge is recorded in *TAKEN_EDGE_P.
6083
6084 If STMT produces a varying value, return SSA_PROP_VARYING. */
6085
6086 static enum ssa_prop_result
6087 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
6088 {
6089 tree def;
6090 ssa_op_iter iter;
6091
6092 if (dump_file && (dump_flags & TDF_DETAILS))
6093 {
6094 fprintf (dump_file, "\nVisiting statement:\n");
6095 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
6096 fprintf (dump_file, "\n");
6097 }
6098
6099 if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6100 {
6101 /* In general, assignments with virtual operands are not useful
6102 for deriving ranges, with the obvious exception of calls to
6103 builtin functions. */
6104
6105 if ((is_gimple_call (stmt)
6106 && gimple_call_fndecl (stmt) != NULL_TREE
6107 && DECL_IS_BUILTIN (gimple_call_fndecl (stmt)))
6108 || !gimple_vuse (stmt))
6109 return vrp_visit_assignment_or_call (stmt, output_p);
6110 }
6111 else if (gimple_code (stmt) == GIMPLE_COND)
6112 return vrp_visit_cond_stmt (stmt, taken_edge_p);
6113 else if (gimple_code (stmt) == GIMPLE_SWITCH)
6114 return vrp_visit_switch_stmt (stmt, taken_edge_p);
6115
6116 /* All other statements produce nothing of interest for VRP, so mark
6117 their outputs varying and prevent further simulation. */
6118 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
6119 set_value_range_to_varying (get_value_range (def));
6120
6121 return SSA_PROP_VARYING;
6122 }
6123
6124
6125 /* Meet operation for value ranges. Given two value ranges VR0 and
6126 VR1, store in VR0 a range that contains both VR0 and VR1. This
6127 may not be the smallest possible such range. */
6128
6129 static void
6130 vrp_meet (value_range_t *vr0, value_range_t *vr1)
6131 {
6132 if (vr0->type == VR_UNDEFINED)
6133 {
6134 copy_value_range (vr0, vr1);
6135 return;
6136 }
6137
6138 if (vr1->type == VR_UNDEFINED)
6139 {
6140 /* Nothing to do. VR0 already has the resulting range. */
6141 return;
6142 }
6143
6144 if (vr0->type == VR_VARYING)
6145 {
6146 /* Nothing to do. VR0 already has the resulting range. */
6147 return;
6148 }
6149
6150 if (vr1->type == VR_VARYING)
6151 {
6152 set_value_range_to_varying (vr0);
6153 return;
6154 }
6155
6156 if (vr0->type == VR_RANGE && vr1->type == VR_RANGE)
6157 {
6158 int cmp;
6159 tree min, max;
6160
6161 /* Compute the convex hull of the ranges. The lower limit of
6162 the new range is the minimum of the two ranges. If they
6163 cannot be compared, then give up. */
6164 cmp = compare_values (vr0->min, vr1->min);
6165 if (cmp == 0 || cmp == 1)
6166 min = vr1->min;
6167 else if (cmp == -1)
6168 min = vr0->min;
6169 else
6170 goto give_up;
6171
6172 /* Similarly, the upper limit of the new range is the maximum
6173 of the two ranges. If they cannot be compared, then
6174 give up. */
6175 cmp = compare_values (vr0->max, vr1->max);
6176 if (cmp == 0 || cmp == -1)
6177 max = vr1->max;
6178 else if (cmp == 1)
6179 max = vr0->max;
6180 else
6181 goto give_up;
6182
6183 /* Check for useless ranges. */
6184 if (INTEGRAL_TYPE_P (TREE_TYPE (min))
6185 && ((vrp_val_is_min (min) || is_overflow_infinity (min))
6186 && (vrp_val_is_max (max) || is_overflow_infinity (max))))
6187 goto give_up;
6188
6189 /* The resulting set of equivalences is the intersection of
6190 the two sets. */
6191 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6192 bitmap_and_into (vr0->equiv, vr1->equiv);
6193 else if (vr0->equiv && !vr1->equiv)
6194 bitmap_clear (vr0->equiv);
6195
6196 set_value_range (vr0, vr0->type, min, max, vr0->equiv);
6197 }
6198 else if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
6199 {
6200 /* Two anti-ranges meet only if their complements intersect.
6201 Only handle the case of identical single-value anti-ranges. */
6202 if (compare_values (vr0->min, vr1->min) == 0
6203 && compare_values (vr0->max, vr1->max) == 0
6204 && compare_values (vr0->min, vr0->max) == 0)
6205 {
6206 /* The resulting set of equivalences is the intersection of
6207 the two sets. */
6208 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6209 bitmap_and_into (vr0->equiv, vr1->equiv);
6210 else if (vr0->equiv && !vr1->equiv)
6211 bitmap_clear (vr0->equiv);
6212 }
6213 else
6214 goto give_up;
6215 }
6216 else if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
6217 {
6218 /* For a numeric range [VAL1, VAL2] and an anti-range ~[VAL3, VAL4],
6219 only handle the case where the ranges have an empty intersection.
6220 The result of the meet operation is the anti-range. */
6221 if (!symbolic_range_p (vr0)
6222 && !symbolic_range_p (vr1)
6223 && !value_ranges_intersect_p (vr0, vr1))
6224 {
6225 /* Copy most of VR1 into VR0. Don't copy VR1's equivalence
6226 set. We need to compute the intersection of the two
6227 equivalence sets. */
6228 if (vr1->type == VR_ANTI_RANGE)
6229 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr0->equiv);
6230
6231 /* The resulting set of equivalences is the intersection of
6232 the two sets. */
6233 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6234 bitmap_and_into (vr0->equiv, vr1->equiv);
6235 else if (vr0->equiv && !vr1->equiv)
6236 bitmap_clear (vr0->equiv);
6237 }
6238 else
6239 goto give_up;
6240 }
6241 else
6242 gcc_unreachable ();
6243
6244 return;
6245
6246 give_up:
6247 /* Failed to find an efficient meet. Before giving up and setting
6248 the result to VARYING, see if we can at least derive a useful
6249 anti-range. FIXME, all this nonsense about distinguishing
6250 anti-ranges from ranges is necessary because of the odd
6251 semantics of range_includes_zero_p and friends. */
6252 if (!symbolic_range_p (vr0)
6253 && ((vr0->type == VR_RANGE && !range_includes_zero_p (vr0))
6254 || (vr0->type == VR_ANTI_RANGE && range_includes_zero_p (vr0)))
6255 && !symbolic_range_p (vr1)
6256 && ((vr1->type == VR_RANGE && !range_includes_zero_p (vr1))
6257 || (vr1->type == VR_ANTI_RANGE && range_includes_zero_p (vr1))))
6258 {
6259 set_value_range_to_nonnull (vr0, TREE_TYPE (vr0->min));
6260
6261 /* Since this meet operation did not result from the meeting of
6262 two equivalent names, VR0 cannot have any equivalences. */
6263 if (vr0->equiv)
6264 bitmap_clear (vr0->equiv);
6265 }
6266 else
6267 set_value_range_to_varying (vr0);
6268 }
6269
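/* The VR_RANGE/VR_RANGE case above is simply the convex hull of two
   intervals.  A minimal stand-alone version, assuming plain "long" bounds
   that are always comparable (unlike symbolic trees); the struct and
   function below are illustrative only.  */
#if 0
struct interval { long min, max; };

/* Meet of A and B: the smallest interval containing both,
   e.g. [0, 3] meet [10, 12] gives [0, 12].  */
static struct interval
interval_meet (struct interval a, struct interval b)
{
  struct interval r;
  r.min = a.min < b.min ? a.min : b.min;
  r.max = a.max > b.max ? a.max : b.max;
  return r;
}
#endif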
6270
6271 /* Visit all arguments for PHI node PHI that flow through executable
6272 edges. If a valid value range can be derived from all the incoming
6273 value ranges, set a new range for the LHS of PHI. */
6274
6275 static enum ssa_prop_result
6276 vrp_visit_phi_node (gimple phi)
6277 {
6278 size_t i;
6279 tree lhs = PHI_RESULT (phi);
6280 value_range_t *lhs_vr = get_value_range (lhs);
6281 value_range_t vr_result = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
6282 int edges, old_edges;
6283
6284 copy_value_range (&vr_result, lhs_vr);
6285
6286 if (dump_file && (dump_flags & TDF_DETAILS))
6287 {
6288 fprintf (dump_file, "\nVisiting PHI node: ");
6289 print_gimple_stmt (dump_file, phi, 0, dump_flags);
6290 }
6291
6292 edges = 0;
6293 for (i = 0; i < gimple_phi_num_args (phi); i++)
6294 {
6295 edge e = gimple_phi_arg_edge (phi, i);
6296
6297 if (dump_file && (dump_flags & TDF_DETAILS))
6298 {
6299 fprintf (dump_file,
6300 "\n Argument #%d (%d -> %d %sexecutable)\n",
6301 (int) i, e->src->index, e->dest->index,
6302 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
6303 }
6304
6305 if (e->flags & EDGE_EXECUTABLE)
6306 {
6307 tree arg = PHI_ARG_DEF (phi, i);
6308 value_range_t vr_arg;
6309
6310 ++edges;
6311
6312 if (TREE_CODE (arg) == SSA_NAME)
6313 {
6314 vr_arg = *(get_value_range (arg));
6315 }
6316 else
6317 {
6318 if (is_overflow_infinity (arg))
6319 {
6320 arg = copy_node (arg);
6321 TREE_OVERFLOW (arg) = 0;
6322 }
6323
6324 vr_arg.type = VR_RANGE;
6325 vr_arg.min = arg;
6326 vr_arg.max = arg;
6327 vr_arg.equiv = NULL;
6328 }
6329
6330 if (dump_file && (dump_flags & TDF_DETAILS))
6331 {
6332 fprintf (dump_file, "\t");
6333 print_generic_expr (dump_file, arg, dump_flags);
6334 fprintf (dump_file, "\n\tValue: ");
6335 dump_value_range (dump_file, &vr_arg);
6336 fprintf (dump_file, "\n");
6337 }
6338
6339 vrp_meet (&vr_result, &vr_arg);
6340
6341 if (vr_result.type == VR_VARYING)
6342 break;
6343 }
6344 }
6345
6346 if (vr_result.type == VR_VARYING)
6347 goto varying;
6348
6349 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
6350 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
6351
6352 /* To prevent infinite iterations in the algorithm, widen the range
6353 when the new bounds differ from the previous ones: jump straight to
6354 the type extremes instead of growing one step at a time. We don't
6355 do this if we have seen a new executable edge; this helps us avoid
6356 an overflow infinity for conditionals which are not in a loop. */
6357 if (lhs_vr->type == VR_RANGE && vr_result.type == VR_RANGE
6358 && edges <= old_edges)
6359 {
6360 if (!POINTER_TYPE_P (TREE_TYPE (lhs)))
6361 {
6362 int cmp_min = compare_values (lhs_vr->min, vr_result.min);
6363 int cmp_max = compare_values (lhs_vr->max, vr_result.max);
6364
6365 /* If the new minimum is smaller or larger than the previous
6366 one, go all the way to -INF. In the first case, to avoid
6367 iterating millions of times to reach -INF, and in the
6368 other case to avoid infinite bouncing between different
6369 minimums. */
6370 if (cmp_min > 0 || cmp_min < 0)
6371 {
6372 /* If we will end up with a (-INF, +INF) range, set it to
6373 VARYING. Same if the previous max value was invalid for
6374 the type and we'd end up with vr_result.min > vr_result.max. */
6375 if (vrp_val_is_max (vr_result.max)
6376 || compare_values (TYPE_MIN_VALUE (TREE_TYPE (vr_result.min)),
6377 vr_result.max) > 0)
6378 goto varying;
6379
6380 if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
6381 || !vrp_var_may_overflow (lhs, phi))
6382 vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
6383 else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
6384 vr_result.min =
6385 negative_overflow_infinity (TREE_TYPE (vr_result.min));
6386 else
6387 goto varying;
6388 }
6389
6390 /* Similarly, if the new maximum is smaller or larger than
6391 the previous one, go all the way to +INF. */
6392 if (cmp_max < 0 || cmp_max > 0)
6393 {
6394 /* If we will end up with a (-INF, +INF) range, set it to
6395 VARYING. Same if the previous min value was invalid for
6396 the type and we'd end up with vr_result.max < vr_result.min. */
6397 if (vrp_val_is_min (vr_result.min)
6398 || compare_values (TYPE_MAX_VALUE (TREE_TYPE (vr_result.max)),
6399 vr_result.min) < 0)
6400 goto varying;
6401
6402 if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
6403 || !vrp_var_may_overflow (lhs, phi))
6404 vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
6405 else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
6406 vr_result.max =
6407 positive_overflow_infinity (TREE_TYPE (vr_result.max));
6408 else
6409 goto varying;
6410 }
6411 }
6412 }
6413
6414 /* If the new range is different from the previous value, keep
6415 iterating. */
6416 if (update_value_range (lhs, &vr_result))
6417 return SSA_PROP_INTERESTING;
6418
6419 /* Nothing changed, don't add outgoing edges. */
6420 return SSA_PROP_NOT_INTERESTING;
6421
6422 /* No match found. Set the LHS to VARYING. */
6423 varying:
6424 set_value_range_to_varying (lhs_vr);
6425 return SSA_PROP_VARYING;
6426 }
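
/* The "go all the way to -INF/+INF" step above is a classic widening:
   instead of letting a loop-carried range grow one step per iteration, an
   unstable bound is pushed straight to the type limit so propagation
   terminates.  A stand-alone sketch with plain ints (illustrative only,
   not part of GCC):  */
#if 0
#include <limits.h>

struct int_interval { int min, max; };

/* Widen NEW_VR against OLD_VR: any bound that moved is sent to the
   corresponding type limit, e.g. old [0, 3] and new [0, 4] widen to
   [0, INT_MAX] instead of iterating through 5, 6, 7, ...  */
static struct int_interval
widen (struct int_interval old_vr, struct int_interval new_vr)
{
  if (new_vr.min != old_vr.min)
    new_vr.min = INT_MIN;
  if (new_vr.max != old_vr.max)
    new_vr.max = INT_MAX;
  return new_vr;
}
#endif
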
6427
6428 /* Simplify boolean operations if the source is already known
6429 to be a boolean value. */
6430 static bool
6431 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
6432 {
6433 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6434 tree val = NULL;
6435 tree op0, op1;
6436 value_range_t *vr;
6437 bool sop = false;
6438 bool need_conversion;
6439
6440 op0 = gimple_assign_rhs1 (stmt);
6441 if (TYPE_PRECISION (TREE_TYPE (op0)) != 1)
6442 {
6443 if (TREE_CODE (op0) != SSA_NAME)
6444 return false;
6445 vr = get_value_range (op0);
6446
6447 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6448 if (!val || !integer_onep (val))
6449 return false;
6450
6451 val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop);
6452 if (!val || !integer_onep (val))
6453 return false;
6454 }
6455
6456 if (rhs_code == TRUTH_NOT_EXPR)
6457 {
6458 rhs_code = NE_EXPR;
6459 op1 = build_int_cst (TREE_TYPE (op0), 1);
6460 }
6461 else
6462 {
6463 op1 = gimple_assign_rhs2 (stmt);
6464
6465 /* Reduce number of cases to handle. */
6466 if (is_gimple_min_invariant (op1))
6467 {
6468 /* Exclude anything that should have been already folded. */
6469 if (rhs_code != EQ_EXPR
6470 && rhs_code != NE_EXPR
6471 && rhs_code != TRUTH_XOR_EXPR)
6472 return false;
6473
6474 if (!integer_zerop (op1)
6475 && !integer_onep (op1)
6476 && !integer_all_onesp (op1))
6477 return false;
6478
6479 /* Limit the number of cases we have to consider. */
6480 if (rhs_code == EQ_EXPR)
6481 {
6482 rhs_code = NE_EXPR;
6483 op1 = fold_unary (TRUTH_NOT_EXPR, TREE_TYPE (op1), op1);
6484 }
6485 }
6486 else
6487 {
6488 /* Punt on A == B as there is no BIT_XNOR_EXPR. */
6489 if (rhs_code == EQ_EXPR)
6490 return false;
6491
6492 if (TYPE_PRECISION (TREE_TYPE (op1)) != 1)
6493 {
6494 vr = get_value_range (op1);
6495 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6496 if (!val || !integer_onep (val))
6497 return false;
6498
6499 val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop);
6500 if (!val || !integer_onep (val))
6501 return false;
6502 }
6503 }
6504 }
6505
6506 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6507 {
6508 location_t location;
6509
6510 if (!gimple_has_location (stmt))
6511 location = input_location;
6512 else
6513 location = gimple_location (stmt);
6514
6515 if (rhs_code == TRUTH_AND_EXPR || rhs_code == TRUTH_OR_EXPR)
6516 warning_at (location, OPT_Wstrict_overflow,
6517 _("assuming signed overflow does not occur when "
6518 "simplifying && or || to & or |"));
6519 else
6520 warning_at (location, OPT_Wstrict_overflow,
6521 _("assuming signed overflow does not occur when "
6522 "simplifying ==, != or ! to identity or ^"));
6523 }
6524
6525 need_conversion =
6526 !useless_type_conversion_p (TREE_TYPE (gimple_assign_lhs (stmt)),
6527 TREE_TYPE (op0));
6528
6529 /* Make sure to not sign-extend -1 as a boolean value. */
6530 if (need_conversion
6531 && !TYPE_UNSIGNED (TREE_TYPE (op0))
6532 && TYPE_PRECISION (TREE_TYPE (op0)) == 1)
6533 return false;
6534
6535 switch (rhs_code)
6536 {
6537 case TRUTH_AND_EXPR:
6538 rhs_code = BIT_AND_EXPR;
6539 break;
6540 case TRUTH_OR_EXPR:
6541 rhs_code = BIT_IOR_EXPR;
6542 break;
6543 case TRUTH_XOR_EXPR:
6544 case NE_EXPR:
6545 if (integer_zerop (op1))
6546 {
6547 gimple_assign_set_rhs_with_ops (gsi,
6548 need_conversion ? NOP_EXPR : SSA_NAME,
6549 op0, NULL);
6550 update_stmt (gsi_stmt (*gsi));
6551 return true;
6552 }
6553
6554 rhs_code = BIT_XOR_EXPR;
6555 break;
6556 default:
6557 gcc_unreachable ();
6558 }
6559
6560 if (need_conversion)
6561 return false;
6562
6563 gimple_assign_set_rhs_with_ops (gsi, rhs_code, op0, op1);
6564 update_stmt (gsi_stmt (*gsi));
6565 return true;
6566 }
6567
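/* The rewrite above is justified by the fact that, once both operands are
   known to lie in [0, 1], the logical and bitwise forms coincide.  A small
   stand-alone check of those identities (illustrative only):  */
#if 0
#include <assert.h>

static void
truth_to_bitwise_demo (void)
{
  int a, b;

  for (a = 0; a <= 1; a++)
    for (b = 0; b <= 1; b++)
      {
        assert ((a && b) == (a & b));    /* TRUTH_AND_EXPR -> BIT_AND_EXPR  */
        assert ((a || b) == (a | b));    /* TRUTH_OR_EXPR  -> BIT_IOR_EXPR  */
        assert ((a != b) == (a ^ b));    /* NE_EXPR        -> BIT_XOR_EXPR  */
      }
}
#endif
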
6568 /* Simplify a division or modulo operator to a right shift or
6569 bitwise and if the first operand is unsigned or is greater
6570 than zero and the second operand is an exact power of two. */
6571
6572 static bool
6573 simplify_div_or_mod_using_ranges (gimple stmt)
6574 {
6575 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6576 tree val = NULL;
6577 tree op0 = gimple_assign_rhs1 (stmt);
6578 tree op1 = gimple_assign_rhs2 (stmt);
6579 value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));
6580
6581 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
6582 {
6583 val = integer_one_node;
6584 }
6585 else
6586 {
6587 bool sop = false;
6588
6589 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6590
6591 if (val
6592 && sop
6593 && integer_onep (val)
6594 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6595 {
6596 location_t location;
6597
6598 if (!gimple_has_location (stmt))
6599 location = input_location;
6600 else
6601 location = gimple_location (stmt);
6602 warning (OPT_Wstrict_overflow,
6603 ("%Hassuming signed overflow does not occur when "
6604 "simplifying / or %% to >> or &"),
6605 &location);
6606 }
6607 }
6608
6609 if (val && integer_onep (val))
6610 {
6611 tree t;
6612
6613 if (rhs_code == TRUNC_DIV_EXPR)
6614 {
6615 t = build_int_cst (NULL_TREE, tree_log2 (op1));
6616 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
6617 gimple_assign_set_rhs1 (stmt, op0);
6618 gimple_assign_set_rhs2 (stmt, t);
6619 }
6620 else
6621 {
6622 t = build_int_cst (TREE_TYPE (op1), 1);
6623 t = int_const_binop (MINUS_EXPR, op1, t, 0);
6624 t = fold_convert (TREE_TYPE (op0), t);
6625
6626 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
6627 gimple_assign_set_rhs1 (stmt, op0);
6628 gimple_assign_set_rhs2 (stmt, t);
6629 }
6630
6631 update_stmt (stmt);
6632 return true;
6633 }
6634
6635 return false;
6636 }
6637
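/* The transformation above relies on the identities below, which hold
   whenever the dividend is known to be non-negative and the divisor is an
   exact power of two.  A minimal illustration with plain unsigned ints
   (purely expository, not part of the pass):  */
#if 0
#include <assert.h>

static void
div_mod_by_pow2_demo (void)
{
  unsigned int x = 1234;

  assert ((x / 8) == (x >> 3));          /* TRUNC_DIV_EXPR -> RSHIFT_EXPR  */
  assert ((x % 8) == (x & (8 - 1)));     /* TRUNC_MOD_EXPR -> BIT_AND_EXPR */
}
#endif
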
6638 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
6639 ABS_EXPR. If the operand is <= 0, then simplify the
6640 ABS_EXPR into a NEGATE_EXPR. */
6641
6642 static bool
6643 simplify_abs_using_ranges (gimple stmt)
6644 {
6645 tree val = NULL;
6646 tree op = gimple_assign_rhs1 (stmt);
6647 tree type = TREE_TYPE (op);
6648 value_range_t *vr = get_value_range (op);
6649
6650 if (TYPE_UNSIGNED (type))
6651 {
6652 val = integer_zero_node;
6653 }
6654 else if (vr)
6655 {
6656 bool sop = false;
6657
6658 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
6659 if (!val)
6660 {
6661 sop = false;
6662 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
6663 &sop);
6664
6665 if (val)
6666 {
6667 if (integer_zerop (val))
6668 val = integer_one_node;
6669 else if (integer_onep (val))
6670 val = integer_zero_node;
6671 }
6672 }
6673
6674 if (val
6675 && (integer_onep (val) || integer_zerop (val)))
6676 {
6677 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6678 {
6679 location_t location;
6680
6681 if (!gimple_has_location (stmt))
6682 location = input_location;
6683 else
6684 location = gimple_location (stmt);
6685 warning (OPT_Wstrict_overflow,
6686 ("%Hassuming signed overflow does not occur when "
6687 "simplifying abs (X) to X or -X"),
6688 &location);
6689 }
6690
6691 gimple_assign_set_rhs1 (stmt, op);
6692 if (integer_onep (val))
6693 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
6694 else
6695 gimple_assign_set_rhs_code (stmt, SSA_NAME);
6696 update_stmt (stmt);
6697 return true;
6698 }
6699 }
6700
6701 return false;
6702 }
6703
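/* The folding above uses only the sign information in the range: when the
   operand cannot be negative, ABS_EXPR is the identity; when it cannot be
   positive, ABS_EXPR is a plain negation.  A trivial illustration with
   known-sign integers (not part of the pass):  */
#if 0
#include <assert.h>
#include <stdlib.h>

static void
abs_demo (void)
{
  int nonneg = 42;             /* Range [0, +INF): abs (x) == x.   */
  int nonpos = -42;            /* Range (-INF, 0]: abs (x) == -x.  */

  assert (abs (nonneg) == nonneg);
  assert (abs (nonpos) == -nonpos);
}
#endif
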
6704 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
6705 a known value range VR.
6706
6707 If there is one and only one value which will satisfy the
6708 conditional, then return that value. Else return NULL. */
6709
6710 static tree
6711 test_for_singularity (enum tree_code cond_code, tree op0,
6712 tree op1, value_range_t *vr)
6713 {
6714 tree min = NULL;
6715 tree max = NULL;
6716
6717 /* Extract minimum/maximum values which satisfy the
6718 conditional as it was written. */
6719 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
6720 {
6721 /* This should not be negative infinity; there is no overflow
6722 here. */
6723 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
6724
6725 max = op1;
6726 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
6727 {
6728 tree one = build_int_cst (TREE_TYPE (op0), 1);
6729 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
6730 if (EXPR_P (max))
6731 TREE_NO_WARNING (max) = 1;
6732 }
6733 }
6734 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
6735 {
6736 /* This should not be positive infinity; there is no overflow
6737 here. */
6738 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
6739
6740 min = op1;
6741 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
6742 {
6743 tree one = build_int_cst (TREE_TYPE (op0), 1);
6744 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
6745 if (EXPR_P (min))
6746 TREE_NO_WARNING (min) = 1;
6747 }
6748 }
6749
6750 /* Now refine the minimum and maximum values using any
6751 value range information we have for op0. */
6752 if (min && max)
6753 {
6754 if (compare_values (vr->min, min) == -1)
6755 min = min;
6756 else
6757 min = vr->min;
6758 if (compare_values (vr->max, max) == 1)
6759 max = max;
6760 else
6761 max = vr->max;
6762
6763 /* If the new min/max values have converged to a single value,
6764 then there is only one value which can satisfy the condition,
6765 return that value. */
6766 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
6767 return min;
6768 }
6769 return NULL;
6770 }
6771
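/* A stand-alone sketch of the singularity test above, assuming plain "long"
   bounds instead of trees and ignoring overflow at the type limits; the
   function name is illustrative only.  For example, if X is known to be in
   [0, 100] then "X < 1" is satisfied only by 0, so the test can become
   "X == 0".  */
#if 0
#include <stdbool.h>

/* Return true and set *SINGLE if exactly one value in [VR_MIN, VR_MAX]
   satisfies "x < limit".  */
static bool
single_value_below (long vr_min, long vr_max, long limit, long *single)
{
  long max = limit - 1;        /* Largest value satisfying x < limit.  */
  long min = vr_min;           /* Smallest value in the known range.   */

  if (vr_max < max)
    max = vr_max;
  if (min == max)
    {
      *single = min;
      return true;
    }
  return false;
}
#endif
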
6772 /* Simplify a conditional using a relational operator to an equality
6773 test if the range information indicates only one value can satisfy
6774 the original conditional. */
6775
6776 static bool
6777 simplify_cond_using_ranges (gimple stmt)
6778 {
6779 tree op0 = gimple_cond_lhs (stmt);
6780 tree op1 = gimple_cond_rhs (stmt);
6781 enum tree_code cond_code = gimple_cond_code (stmt);
6782
6783 if (cond_code != NE_EXPR
6784 && cond_code != EQ_EXPR
6785 && TREE_CODE (op0) == SSA_NAME
6786 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
6787 && is_gimple_min_invariant (op1))
6788 {
6789 value_range_t *vr = get_value_range (op0);
6790
6791 /* If we have range information for OP0, then we might be
6792 able to simplify this conditional. */
6793 if (vr->type == VR_RANGE)
6794 {
6795 tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
6796
6797 if (new_tree)
6798 {
6799 if (dump_file)
6800 {
6801 fprintf (dump_file, "Simplified relational ");
6802 print_gimple_stmt (dump_file, stmt, 0, 0);
6803 fprintf (dump_file, " into ");
6804 }
6805
6806 gimple_cond_set_code (stmt, EQ_EXPR);
6807 gimple_cond_set_lhs (stmt, op0);
6808 gimple_cond_set_rhs (stmt, new_tree);
6809
6810 update_stmt (stmt);
6811
6812 if (dump_file)
6813 {
6814 print_gimple_stmt (dump_file, stmt, 0, 0);
6815 fprintf (dump_file, "\n");
6816 }
6817
6818 return true;
6819 }
6820
6821 /* Try again after inverting the condition. We only deal
6822 with integral types here, so no need to worry about
6823 issues with inverting FP comparisons. */
6824 cond_code = invert_tree_comparison (cond_code, false);
6825 new_tree = test_for_singularity (cond_code, op0, op1, vr);
6826
6827 if (new_tree)
6828 {
6829 if (dump_file)
6830 {
6831 fprintf (dump_file, "Simplified relational ");
6832 print_gimple_stmt (dump_file, stmt, 0, 0);
6833 fprintf (dump_file, " into ");
6834 }
6835
6836 gimple_cond_set_code (stmt, NE_EXPR);
6837 gimple_cond_set_lhs (stmt, op0);
6838 gimple_cond_set_rhs (stmt, new_tree);
6839
6840 update_stmt (stmt);
6841
6842 if (dump_file)
6843 {
6844 print_gimple_stmt (dump_file, stmt, 0, 0);
6845 fprintf (dump_file, "\n");
6846 }
6847
6848 return true;
6849 }
6850 }
6851 }
6852
6853 return false;
6854 }
6855
6856 /* Simplify a switch statement using the value range of the switch
6857 argument. */
6858
6859 static bool
6860 simplify_switch_using_ranges (gimple stmt)
6861 {
6862 tree op = gimple_switch_index (stmt);
6863 value_range_t *vr;
6864 bool take_default;
6865 edge e;
6866 edge_iterator ei;
6867 size_t i = 0, j = 0, n, n2;
6868 tree vec2;
6869 switch_update su;
6870
6871 if (TREE_CODE (op) == SSA_NAME)
6872 {
6873 vr = get_value_range (op);
6874
6875 /* We can only handle integer ranges. */
6876 if (vr->type != VR_RANGE
6877 || symbolic_range_p (vr))
6878 return false;
6879
6880 /* Find case label for min/max of the value range. */
6881 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
6882 }
6883 else if (TREE_CODE (op) == INTEGER_CST)
6884 {
6885 take_default = !find_case_label_index (stmt, 1, op, &i);
6886 if (take_default)
6887 {
6888 i = 1;
6889 j = 0;
6890 }
6891 else
6892 {
6893 j = i;
6894 }
6895 }
6896 else
6897 return false;
6898
6899 n = gimple_switch_num_labels (stmt);
6900
6901 /* Bail out if every case label and the default are reachable; there is nothing to simplify. */
6902 if (i == 1
6903 && j == n - 1
6904 && take_default)
6905 return false;
6906
6907 /* Build a new vector of taken case labels. */
6908 vec2 = make_tree_vec (j - i + 1 + (int)take_default);
6909 n2 = 0;
6910
6911 /* Add the default edge, if necessary. */
6912 if (take_default)
6913 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
6914
6915 for (; i <= j; ++i, ++n2)
6916 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
6917
6918 /* Mark needed edges. */
6919 for (i = 0; i < n2; ++i)
6920 {
6921 e = find_edge (gimple_bb (stmt),
6922 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
6923 e->aux = (void *)-1;
6924 }
6925
6926 /* Queue unneeded edges for later removal. */
6927 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
6928 {
6929 if (e->aux == (void *)-1)
6930 {
6931 e->aux = NULL;
6932 continue;
6933 }
6934
6935 if (dump_file && (dump_flags & TDF_DETAILS))
6936 {
6937 fprintf (dump_file, "removing unreachable case label\n");
6938 }
6939 VEC_safe_push (edge, heap, to_remove_edges, e);
6940 }
6941
6942 /* And queue an update for the stmt. */
6943 su.stmt = stmt;
6944 su.vec = vec2;
6945 VEC_safe_push (switch_update, heap, to_update_switch_stmts, &su);
6946 return false;
6947 }
6948
6949 /* Simplify STMT using ranges if possible. */
6950
6951 bool
6952 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
6953 {
6954 gimple stmt = gsi_stmt (*gsi);
6955 if (is_gimple_assign (stmt))
6956 {
6957 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6958
6959 switch (rhs_code)
6960 {
6961 case EQ_EXPR:
6962 case NE_EXPR:
6963 case TRUTH_NOT_EXPR:
6964 case TRUTH_AND_EXPR:
6965 case TRUTH_OR_EXPR:
6966 case TRUTH_XOR_EXPR:
6967 /* Transform EQ_EXPR, NE_EXPR, TRUTH_NOT_EXPR into BIT_XOR_EXPR
6968 or identity if the RHS is zero or one, and the LHS is known
6969 to be boolean values. Transform all TRUTH_*_EXPR into
6970 BIT_*_EXPR if both arguments are known to be boolean values. */
6971 if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
6972 return simplify_truth_ops_using_ranges (gsi, stmt);
6973 break;
6974
6975 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
6976 and BIT_AND_EXPR respectively if the first operand is greater
6977 than zero and the second operand is an exact power of two. */
6978 case TRUNC_DIV_EXPR:
6979 case TRUNC_MOD_EXPR:
6980 if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt)))
6981 && integer_pow2p (gimple_assign_rhs2 (stmt)))
6982 return simplify_div_or_mod_using_ranges (stmt);
6983 break;
6984
6985 /* Transform ABS (X) into X or -X as appropriate. */
6986 case ABS_EXPR:
6987 if (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
6988 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
6989 return simplify_abs_using_ranges (stmt);
6990 break;
6991
6992 default:
6993 break;
6994 }
6995 }
6996 else if (gimple_code (stmt) == GIMPLE_COND)
6997 return simplify_cond_using_ranges (stmt);
6998 else if (gimple_code (stmt) == GIMPLE_SWITCH)
6999 return simplify_switch_using_ranges (stmt);
7000
7001 return false;
7002 }
7003
7004 /* Stack of dest,src equivalency pairs that need to be restored after
7005 each attempt to thread a block's incoming edge to an outgoing edge.
7006
7007 A NULL entry is used to mark the end of pairs which need to be
7008 restored. */
7009 static VEC(tree,heap) *stack;
7010
7011 /* A trivial wrapper so that we can present the generic jump threading
7012 code with a simple API for simplifying statements. STMT is the
7013 statement we want to simplify, WITHIN_STMT provides the location
7014 for any overflow warnings. */
7015
7016 static tree
7017 simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
7018 {
7019 /* We only use VRP information to simplify conditionals. This is
7020 overly conservative, but it's unclear if doing more would be
7021 worth the compile time cost. */
7022 if (gimple_code (stmt) != GIMPLE_COND)
7023 return NULL;
7024
7025 return vrp_evaluate_conditional (gimple_cond_code (stmt),
7026 gimple_cond_lhs (stmt),
7027 gimple_cond_rhs (stmt), within_stmt);
7028 }
7029
7030 /* Blocks which have more than one predecessor and more than
7031 one successor present jump threading opportunities, i.e.,
7032 when the block is reached from a specific predecessor, we
7033 may be able to determine which of the outgoing edges will
7034 be traversed. When this optimization applies, we are able
7035 to avoid conditionals at runtime and we may expose secondary
7036 optimization opportunities.
7037
7038 This routine is effectively a driver for the generic jump
7039 threading code. It basically just presents the generic code
7040 with edges that may be suitable for jump threading.
7041
7042 Unlike DOM, we do not iterate VRP if jump threading was successful.
7043 While iterating may expose new opportunities for VRP, it is expected
7044 those opportunities would be very limited and the compile time cost
7045 to expose those opportunities would be significant.
7046
7047 As jump threading opportunities are discovered, they are registered
7048 for later realization. */
7049
7050 static void
7051 identify_jump_threads (void)
7052 {
7053 basic_block bb;
7054 gimple dummy;
7055 int i;
7056 edge e;
7057
7058 /* Ugh. When substituting values earlier in this pass we can
7059 wipe the dominance information. So rebuild the dominator
7060 information as we need it within the jump threading code. */
7061 calculate_dominance_info (CDI_DOMINATORS);
7062
7063 /* We do not allow VRP information to be used for jump threading
7064 across a back edge in the CFG. Otherwise it becomes too
7065 difficult to avoid eliminating loop exit tests. Of course
7066 EDGE_DFS_BACK is not accurate at this time so we have to
7067 recompute it. */
7068 mark_dfs_back_edges ();
7069
7070 /* Do not thread across edges we are about to remove. Just marking
7071 them as EDGE_DFS_BACK will do. */
7072 for (i = 0; VEC_iterate (edge, to_remove_edges, i, e); ++i)
7073 e->flags |= EDGE_DFS_BACK;
7074
7075 /* Allocate our unwinder stack to unwind any temporary equivalences
7076 that might be recorded. */
7077 stack = VEC_alloc (tree, heap, 20);
7078
7079 /* To avoid lots of silly node creation, we create a single
7080 conditional and just modify it in-place when attempting to
7081 thread jumps. */
7082 dummy = gimple_build_cond (EQ_EXPR,
7083 integer_zero_node, integer_zero_node,
7084 NULL, NULL);
7085
7086 /* Walk through all the blocks finding those which present a
7087 potential jump threading opportunity. We could set this up
7088 as a dominator walker and record data during the walk, but
7089 I doubt it's worth the effort for the classes of jump
7090 threading opportunities we are trying to identify at this
7091 point in compilation. */
7092 FOR_EACH_BB (bb)
7093 {
7094 gimple last;
7095
7096 /* If the generic jump threading code does not find this block
7097 interesting, then there is nothing to do. */
7098 if (! potentially_threadable_block (bb))
7099 continue;
7100
7101 /* We only care about blocks ending in a COND_EXPR. While there
7102 may be some value in handling SWITCH_EXPR here, I doubt it's
7103 terribly important. */
7104 last = gsi_stmt (gsi_last_bb (bb));
7105 if (gimple_code (last) != GIMPLE_COND)
7106 continue;
7107
7108 /* We're basically looking for any kind of conditional with
7109 integral type arguments. */
7110 if (TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
7111 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
7112 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
7113 || is_gimple_min_invariant (gimple_cond_rhs (last)))
7114 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_rhs (last))))
7115 {
7116 edge_iterator ei;
7117
7118 /* We've got a block with multiple predecessors and multiple
7119 successors which also ends in a suitable conditional. For
7120 each predecessor, see if we can thread it to a specific
7121 successor. */
7122 FOR_EACH_EDGE (e, ei, bb->preds)
7123 {
7124 /* Do not thread across back edges or abnormal edges
7125 in the CFG. */
7126 if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
7127 continue;
7128
7129 thread_across_edge (dummy, e, true, &stack,
7130 simplify_stmt_for_jump_threading);
7131 }
7132 }
7133 }
7134
7135 /* We do not actually update the CFG or SSA graphs at this point as
7136 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
7137 handle ASSERT_EXPRs gracefully. */
7138 }
7139
7140 /* We identified all the jump threading opportunities earlier, but could
7141 not transform the CFG at that time. This routine transforms the
7142 CFG and arranges for the dominator tree to be rebuilt if necessary.
7143
7144 Note the SSA graph update will occur during the normal TODO
7145 processing by the pass manager. */
7146 static void
7147 finalize_jump_threads (void)
7148 {
7149 thread_through_all_blocks (false);
7150 VEC_free (tree, heap, stack);
7151 }
7152
7153
7154 /* Traverse all the blocks folding conditionals with known ranges. */
7155
7156 static void
7157 vrp_finalize (void)
7158 {
7159 size_t i;
7160 prop_value_t *single_val_range;
7161 bool do_value_subst_p;
7162
7163 if (dump_file)
7164 {
7165 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
7166 dump_all_value_ranges (dump_file);
7167 fprintf (dump_file, "\n");
7168 }
7169
7170 /* We may have ended up with ranges that have exactly one value. Those
7171 values can be substituted as any other copy/const propagated
7172 value using substitute_and_fold. */
7173 single_val_range = XCNEWVEC (prop_value_t, num_ssa_names);
7174
7175 do_value_subst_p = false;
7176 for (i = 0; i < num_ssa_names; i++)
7177 if (vr_value[i]
7178 && vr_value[i]->type == VR_RANGE
7179 && vr_value[i]->min == vr_value[i]->max)
7180 {
7181 single_val_range[i].value = vr_value[i]->min;
7182 do_value_subst_p = true;
7183 }
7184
7185 if (!do_value_subst_p)
7186 {
7187 /* We found no single-valued ranges, don't waste time trying to
7188 do single value substitution in substitute_and_fold. */
7189 free (single_val_range);
7190 single_val_range = NULL;
7191 }
7192
7193 substitute_and_fold (single_val_range, true);
7194
7195 if (warn_array_bounds)
7196 check_all_array_refs ();
7197
7198 /* We must identify jump threading opportunities before we release
7199 the datastructures built by VRP. */
7200 identify_jump_threads ();
7201
7202 /* Free allocated memory. */
7203 for (i = 0; i < num_ssa_names; i++)
7204 if (vr_value[i])
7205 {
7206 BITMAP_FREE (vr_value[i]->equiv);
7207 free (vr_value[i]);
7208 }
7209
7210 free (single_val_range);
7211 free (vr_value);
7212 free (vr_phi_edge_counts);
7213
7214 /* So that we can distinguish between VRP data being available
7215 and not available. */
7216 vr_value = NULL;
7217 vr_phi_edge_counts = NULL;
7218 }
7219
7220
7221 /* Main entry point to VRP (Value Range Propagation). This pass is
7222 loosely based on J. R. C. Patterson, ``Accurate Static Branch
7223 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
7224 Programming Language Design and Implementation, pp. 67-78, 1995.
7225 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
7226
7227 This is essentially an SSA-CCP pass modified to deal with ranges
7228 instead of constants.
7229
7230 While propagating ranges, we may find that two or more SSA names
7231 have equivalent, though distinct ranges. For instance,
7232
7233 1 x_9 = p_3->a;
7234 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
7235 3 if (p_4 == q_2)
7236 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
7237 5 endif
7238 6 if (q_2)
7239
7240 In the code above, pointer p_5 has range [q_2, q_2], but from the
7241 code we can also determine that p_5 cannot be NULL and, if q_2 had
7242 a non-varying range, p_5's range should also be compatible with it.
7243
7244 These equivalences are created by two expressions: ASSERT_EXPR and
7245 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
7246 result of another assertion, then we can use the fact that p_5 and
7247 p_4 are equivalent when evaluating p_5's range.
7248
7249 Together with value ranges, we also propagate these equivalences
7250 between names so that we can take advantage of information from
7251 multiple ranges when doing final replacement. Note that this
7252 equivalency relation is transitive but not symmetric.
7253
7254 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
7255 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
7256 in contexts where that assertion does not hold (e.g., in line 6).
7257
7258 TODO, the main difference between this pass and Patterson's is that
7259 we do not propagate edge probabilities. We only compute whether
7260 edges can be taken or not. That is, instead of having a spectrum
7261 of jump probabilities between 0 and 1, we only deal with 0, 1 and
7262 DON'T KNOW. In the future, it may be worthwhile to propagate
7263 probabilities to aid branch prediction. */
7264
7265 static unsigned int
7266 execute_vrp (void)
7267 {
7268 int i;
7269 edge e;
7270 switch_update *su;
7271
7272 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
7273 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
7274 scev_initialize ();
7275
7276 insert_range_assertions ();
7277
7278 to_remove_edges = VEC_alloc (edge, heap, 10);
7279 to_update_switch_stmts = VEC_alloc (switch_update, heap, 5);
7280 threadedge_initialize_values ();
7281
7282 vrp_initialize ();
7283 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
7284 vrp_finalize ();
7285
7286 /* ASSERT_EXPRs must be removed before finalizing jump threads
7287 as finalizing jump threads calls the CFG cleanup code which
7288 does not properly handle ASSERT_EXPRs. */
7289 remove_range_assertions ();
7290
7291 /* If we exposed any new variables, go ahead and put them into
7292 SSA form now, before we handle jump threading. This simplifies
7293 interactions between rewriting of _DECL nodes into SSA form
7294 and rewriting SSA_NAME nodes into SSA form after block
7295 duplication and CFG manipulation. */
7296 update_ssa (TODO_update_ssa);
7297
7298 finalize_jump_threads ();
7299
7300 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
7301 CFG in a broken state and requires a cfg_cleanup run. */
7302 for (i = 0; VEC_iterate (edge, to_remove_edges, i, e); ++i)
7303 remove_edge (e);
7304 /* Update SWITCH_EXPR case label vector. */
7305 for (i = 0; VEC_iterate (switch_update, to_update_switch_stmts, i, su); ++i)
7306 {
7307 size_t j;
7308 size_t n = TREE_VEC_LENGTH (su->vec);
7309 tree label;
7310 gimple_switch_set_num_labels (su->stmt, n);
7311 for (j = 0; j < n; j++)
7312 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
7313 /* As we may have replaced the default label with a regular one,
7314 make sure to make it a real default label again. This ensures
7315 optimal expansion. */
7316 label = gimple_switch_default_label (su->stmt);
7317 CASE_LOW (label) = NULL_TREE;
7318 CASE_HIGH (label) = NULL_TREE;
7319 }
7320
7321 if (VEC_length (edge, to_remove_edges) > 0)
7322 free_dominance_info (CDI_DOMINATORS);
7323
7324 VEC_free (edge, heap, to_remove_edges);
7325 VEC_free (switch_update, heap, to_update_switch_stmts);
7326 threadedge_finalize_values ();
7327
7328 scev_finalize ();
7329 loop_optimizer_finalize ();
7330 return 0;
7331 }
7332
7333 static bool
7334 gate_vrp (void)
7335 {
7336 return flag_tree_vrp != 0;
7337 }
7338
7339 struct gimple_opt_pass pass_vrp =
7340 {
7341 {
7342 GIMPLE_PASS,
7343 "vrp", /* name */
7344 gate_vrp, /* gate */
7345 execute_vrp, /* execute */
7346 NULL, /* sub */
7347 NULL, /* next */
7348 0, /* static_pass_number */
7349 TV_TREE_VRP, /* tv_id */
7350 PROP_ssa, /* properties_required */
7351 0, /* properties_provided */
7352 0, /* properties_destroyed */
7353 0, /* todo_flags_start */
7354 TODO_cleanup_cfg
7355 | TODO_ggc_collect
7356 | TODO_verify_ssa
7357 | TODO_dump_func
7358 | TODO_update_ssa /* todo_flags_finish */
7359 }
7360 };